code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
from tenable.errors import *
from ..checker import check, single
import uuid, io, pytest
@pytest.mark.vcr()
def test_configure_id_typeerror(api):
with pytest.raises(TypeError):
api.policies.configure('nope', dict())
@pytest.mark.vcr()
def test_configure_policy_typeerror(api):
with pytest.raises(TypeError):
api.policies.configure(1, 'nope')
@pytest.mark.vcr()
def test_configure_policy_notfounderror(api):
with pytest.raises(NotFoundError):
api.policies.configure(1, dict())
@pytest.mark.vcr()
def test_configure_policy(api, policy):
details = api.policies.details(policy['policy_id'])
details['settings']['name'] = 'MODIFIED'
api.policies.configure(policy['policy_id'], details)
updated = api.policies.details(policy['policy_id'])
assert updated['settings']['name'] == 'MODIFIED'
@pytest.mark.vcr()
def test_copy_policy_id_typeerror(api):
with pytest.raises(TypeError):
api.policies.copy('nope')
@pytest.mark.vcr()
def test_copy_policy_notfounderror(api):
with pytest.raises(NotFoundError):
api.policies.copy(1)
@pytest.mark.vcr()
def test_copy_policy(api, policy):
new = api.policies.copy(policy['policy_id'])
assert isinstance(new, dict)
check(new, 'id', int)
check(new, 'name', str)
assert 'Copy of' in new['name']
api.policies.delete(new['id'])
@pytest.mark.vcr()
def test_create_policy(api, policy):
assert isinstance(policy, dict)
check(policy, 'policy_id', int)
check(policy, 'policy_name', str)
@pytest.mark.vcr()
def test_delete_policy_id_typeerror(api):
with pytest.raises(TypeError):
api.policies.delete('nope')
@pytest.mark.vcr()
def test_delete_policy_notfounderror(api):
with pytest.raises(NotFoundError):
api.policies.delete(1)
@pytest.mark.vcr()
def test_delete_policy(api, policy):
api.policies.delete(policy['policy_id'])
@pytest.mark.vcr()
def test_policy_details_id_typeerror(api):
with pytest.raises(TypeError):
api.policies.details('nope')
@pytest.mark.vcr()
def test_policy_details_notfounderror(api):
with pytest.raises(NotFoundError):
api.policies.details(1)
@pytest.mark.vcr()
def test_policy_details(api, policy):
policy = api.policies.details(policy['policy_id'])
assert isinstance(policy, dict)
check(policy, 'uuid', 'scanner-uuid')
check(policy, 'settings', dict)
@pytest.mark.vcr()
def test_policy_export_id_typeerror(api):
with pytest.raises(TypeError):
api.policies.policy_export('nope')
@pytest.mark.vcr()
def test_policy_export_notfounderror(api):
with pytest.raises(NotFoundError):
api.policies.policy_export(1)
@pytest.mark.vcr()
def test_policy_export(api, policy):
pobj = api.policies.policy_export(policy['policy_id'])
assert isinstance(pobj, io.BytesIO)
@pytest.mark.vcr()
def test_policy_import(api, policy):
pobj = api.policies.policy_export(policy['policy_id'])
resp = api.policies.policy_import(pobj)
assert isinstance(resp, dict)
check(resp, 'creation_date', int)
check(resp, 'description', str, allow_none=True)
check(resp, 'id', int)
check(resp, 'last_modification_date', int)
check(resp, 'name', str)
check(resp, 'no_target', str)
check(resp, 'owner', str)
check(resp, 'owner_id', int)
check(resp, 'shared', int)
check(resp, 'template_uuid', 'scanner-uuid')
check(resp, 'user_permissions', int)
@pytest.mark.vcr()
def test_policy_list(api, policy):
policies = api.policies.list()
assert isinstance(policies, list)
for p in policies:
check(p, 'creation_date', int)
check(p, 'description', str, allow_none=True)
check(p, 'id', int)
check(p, 'last_modification_date', int)
check(p, 'name', str)
check(p, 'no_target', str)
check(p, 'owner', str)
check(p, 'owner_id', int)
check(p, 'shared', int)
check(p, 'template_uuid', 'scanner-uuid')
check(p, 'user_permissions', int)
check(p, 'visibility', str)
@pytest.mark.vcr()
def test_policy_template_details_success(api):
template_detail = api.policies.template_details('agent_advanced')
assert isinstance(template_detail, dict)
check(template_detail, 'compliance', dict)
check(template_detail, 'plugins', dict)
check(template_detail, 'settings', dict)
check(template_detail, 'uuid', 'scanner-uuid')
@pytest.mark.vcr()
def test_policy_template_details_keyerror(api):
with pytest.raises(KeyError):
api.policies.template_details('one')
@pytest.mark.vcr()
def test_policy_template_details_typeerror(api):
with pytest.raises(TypeError):
api.policies.template_details(1) | tests/io/test_policies.py | from tenable.errors import *
from ..checker import check, single
import uuid, io, pytest
@pytest.mark.vcr()
def test_configure_id_typeerror(api):
with pytest.raises(TypeError):
api.policies.configure('nope', dict())
@pytest.mark.vcr()
def test_configure_policy_typeerror(api):
with pytest.raises(TypeError):
api.policies.configure(1, 'nope')
@pytest.mark.vcr()
def test_configure_policy_notfounderror(api):
with pytest.raises(NotFoundError):
api.policies.configure(1, dict())
@pytest.mark.vcr()
def test_configure_policy(api, policy):
details = api.policies.details(policy['policy_id'])
details['settings']['name'] = 'MODIFIED'
api.policies.configure(policy['policy_id'], details)
updated = api.policies.details(policy['policy_id'])
assert updated['settings']['name'] == 'MODIFIED'
@pytest.mark.vcr()
def test_copy_policy_id_typeerror(api):
with pytest.raises(TypeError):
api.policies.copy('nope')
@pytest.mark.vcr()
def test_copy_policy_notfounderror(api):
with pytest.raises(NotFoundError):
api.policies.copy(1)
@pytest.mark.vcr()
def test_copy_policy(api, policy):
new = api.policies.copy(policy['policy_id'])
assert isinstance(new, dict)
check(new, 'id', int)
check(new, 'name', str)
assert 'Copy of' in new['name']
api.policies.delete(new['id'])
@pytest.mark.vcr()
def test_create_policy(api, policy):
assert isinstance(policy, dict)
check(policy, 'policy_id', int)
check(policy, 'policy_name', str)
@pytest.mark.vcr()
def test_delete_policy_id_typeerror(api):
with pytest.raises(TypeError):
api.policies.delete('nope')
@pytest.mark.vcr()
def test_delete_policy_notfounderror(api):
with pytest.raises(NotFoundError):
api.policies.delete(1)
@pytest.mark.vcr()
def test_delete_policy(api, policy):
api.policies.delete(policy['policy_id'])
@pytest.mark.vcr()
def test_policy_details_id_typeerror(api):
with pytest.raises(TypeError):
api.policies.details('nope')
@pytest.mark.vcr()
def test_policy_details_notfounderror(api):
with pytest.raises(NotFoundError):
api.policies.details(1)
@pytest.mark.vcr()
def test_policy_details(api, policy):
policy = api.policies.details(policy['policy_id'])
assert isinstance(policy, dict)
check(policy, 'uuid', 'scanner-uuid')
check(policy, 'settings', dict)
@pytest.mark.vcr()
def test_policy_export_id_typeerror(api):
with pytest.raises(TypeError):
api.policies.policy_export('nope')
@pytest.mark.vcr()
def test_policy_export_notfounderror(api):
with pytest.raises(NotFoundError):
api.policies.policy_export(1)
@pytest.mark.vcr()
def test_policy_export(api, policy):
pobj = api.policies.policy_export(policy['policy_id'])
assert isinstance(pobj, io.BytesIO)
@pytest.mark.vcr()
def test_policy_import(api, policy):
pobj = api.policies.policy_export(policy['policy_id'])
resp = api.policies.policy_import(pobj)
assert isinstance(resp, dict)
check(resp, 'creation_date', int)
check(resp, 'description', str, allow_none=True)
check(resp, 'id', int)
check(resp, 'last_modification_date', int)
check(resp, 'name', str)
check(resp, 'no_target', str)
check(resp, 'owner', str)
check(resp, 'owner_id', int)
check(resp, 'shared', int)
check(resp, 'template_uuid', 'scanner-uuid')
check(resp, 'user_permissions', int)
@pytest.mark.vcr()
def test_policy_list(api, policy):
policies = api.policies.list()
assert isinstance(policies, list)
for p in policies:
check(p, 'creation_date', int)
check(p, 'description', str, allow_none=True)
check(p, 'id', int)
check(p, 'last_modification_date', int)
check(p, 'name', str)
check(p, 'no_target', str)
check(p, 'owner', str)
check(p, 'owner_id', int)
check(p, 'shared', int)
check(p, 'template_uuid', 'scanner-uuid')
check(p, 'user_permissions', int)
check(p, 'visibility', str)
@pytest.mark.vcr()
def test_policy_template_details_success(api):
template_detail = api.policies.template_details('agent_advanced')
assert isinstance(template_detail, dict)
check(template_detail, 'compliance', dict)
check(template_detail, 'plugins', dict)
check(template_detail, 'settings', dict)
check(template_detail, 'uuid', 'scanner-uuid')
@pytest.mark.vcr()
def test_policy_template_details_keyerror(api):
with pytest.raises(KeyError):
api.policies.template_details('one')
@pytest.mark.vcr()
def test_policy_template_details_typeerror(api):
with pytest.raises(TypeError):
api.policies.template_details(1) | 0.435061 | 0.44903 |
"""Provides the web interface for adding and editing sheriff rotations."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import json
from dashboard import edit_config_handler
from dashboard.models import sheriff
from dashboard import sheriff_pb2
from google.protobuf import text_format
class EditSheriffsHandler(edit_config_handler.EditConfigHandler):
"""Handles editing of Sheriff entities.
The post method is inherited from EditConfigHandler. It takes the request
parameters documented there, as well as the following parameters, which
are properties of Sheriff:
url: A URL at which there is a list of email addresses to send mail to.
email: An email address to send mail to, possibly a mailing list.
internal-only: Whether the data should be considered internal-only.
summarize: Whether to send emails in a summary form.
"""
def __init__(self, request, response):
super(EditSheriffsHandler, self).__init__(
request, response, sheriff.Sheriff)
def get(self):
"""Renders the UI with the form."""
def SheriffData(sheriff_entity):
subscription = sheriff_pb2.Subscription()
subscription.name = sheriff_entity.key.string_id()
subscription.rotation_url = sheriff_entity.url or ''
subscription.notification_email = sheriff_entity.email or ''
if not sheriff_entity.internal_only:
subscription.visibility = sheriff_pb2.Subscription.PUBLIC
# Find the labels, and find the ones that say 'Component-' and turn those
# into components, formatting appropriately.
for label in sorted(sheriff_entity.labels):
if label.startswith('Component-'):
subscription.bug_components.append('>'.join(label.split('-')[1:]))
else:
subscription.bug_labels.append(label)
# Treat all patterns as globs for now.
for pattern in sorted(sheriff_entity.patterns):
p = subscription.patterns.add()
p.glob = pattern
return {
'url': sheriff_entity.url or '',
'email': sheriff_entity.email or '',
'patterns': '\n'.join(sorted(sheriff_entity.patterns)),
'labels': ','.join(sorted(sheriff_entity.labels)),
'internal_only': sheriff_entity.internal_only,
'summarize': sheriff_entity.summarize,
'subscription': text_format.MessageToString(subscription)
}
sheriff_dicts = {entity.key.string_id(): SheriffData(entity)
for entity in sheriff.Sheriff.query()}
self.RenderHtml('edit_sheriffs.html', {
'sheriffs_json': json.dumps(sheriff_dicts),
'sheriff_names': sorted(sheriff_dicts),
})
def post(self):
self.ReportError('Sheriff configs are no longer editable via this page. '
'See go/chromeperf-sheriff-redux', status=403)
def _UpdateFromRequestParameters(self, sheriff_entity):
"""Updates the given Sheriff based on query parameters.
Args:
sheriff_entity: A Sheriff entity.
"""
# This overrides the method in the superclass.
sheriff_entity.url = self.request.get('url') or None
sheriff_entity.email = self.request.get('email') or None
sheriff_entity.internal_only = self.request.get('internal-only') == 'true'
labels = self.request.get('labels')
if labels:
sheriff_entity.labels = labels.split(',')
else:
sheriff_entity.labels = []
sheriff_entity.summarize = self.request.get('summarize') == 'true' | dashboard/dashboard/edit_sheriffs.py |
"""Provides the web interface for adding and editing sheriff rotations."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import json
from dashboard import edit_config_handler
from dashboard.models import sheriff
from dashboard import sheriff_pb2
from google.protobuf import text_format
class EditSheriffsHandler(edit_config_handler.EditConfigHandler):
"""Handles editing of Sheriff entities.
The post method is inherited from EditConfigHandler. It takes the request
parameters documented there, as well as the following parameters, which
are properties of Sheriff:
url: A URL at which there is a list of email addresses to send mail to.
email: An email address to send mail to, possibly a mailing list.
internal-only: Whether the data should be considered internal-only.
summarize: Whether to send emails in a summary form.
"""
def __init__(self, request, response):
super(EditSheriffsHandler, self).__init__(
request, response, sheriff.Sheriff)
def get(self):
"""Renders the UI with the form."""
def SheriffData(sheriff_entity):
subscription = sheriff_pb2.Subscription()
subscription.name = sheriff_entity.key.string_id()
subscription.rotation_url = sheriff_entity.url or ''
subscription.notification_email = sheriff_entity.email or ''
if not sheriff_entity.internal_only:
subscription.visibility = sheriff_pb2.Subscription.PUBLIC
# Find the labels, and find the ones that say 'Component-' and turn those
# into components, formatting appropriately.
for label in sorted(sheriff_entity.labels):
if label.startswith('Component-'):
subscription.bug_components.append('>'.join(label.split('-')[1:]))
else:
subscription.bug_labels.append(label)
# Treat all patterns as globs for now.
for pattern in sorted(sheriff_entity.patterns):
p = subscription.patterns.add()
p.glob = pattern
return {
'url': sheriff_entity.url or '',
'email': sheriff_entity.email or '',
'patterns': '\n'.join(sorted(sheriff_entity.patterns)),
'labels': ','.join(sorted(sheriff_entity.labels)),
'internal_only': sheriff_entity.internal_only,
'summarize': sheriff_entity.summarize,
'subscription': text_format.MessageToString(subscription)
}
sheriff_dicts = {entity.key.string_id(): SheriffData(entity)
for entity in sheriff.Sheriff.query()}
self.RenderHtml('edit_sheriffs.html', {
'sheriffs_json': json.dumps(sheriff_dicts),
'sheriff_names': sorted(sheriff_dicts),
})
def post(self):
self.ReportError('Sheriff configs are no longer editable via this page. '
'See go/chromeperf-sheriff-redux', status=403)
def _UpdateFromRequestParameters(self, sheriff_entity):
"""Updates the given Sheriff based on query parameters.
Args:
sheriff_entity: A Sheriff entity.
"""
# This overrides the method in the superclass.
sheriff_entity.url = self.request.get('url') or None
sheriff_entity.email = self.request.get('email') or None
sheriff_entity.internal_only = self.request.get('internal-only') == 'true'
labels = self.request.get('labels')
if labels:
sheriff_entity.labels = labels.split(',')
else:
sheriff_entity.labels = []
sheriff_entity.summarize = self.request.get('summarize') == 'true' | 0.840062 | 0.111628 |
def test_import():
from heart_rate import ECG
first_set = ECG(filename='test_data1.csv')
assert first_set.time[0] == 0
assert first_set.time[-1] == 27.775
assert first_set.voltage[0] == -0.145
assert first_set.voltage[-1] == 0.72
second_set = ECG(filename='test_data27.csv')
assert second_set.time[0] == 0
assert second_set.time[-1] == 39.996
assert second_set.voltage[0] == -0.175
assert second_set.voltage[-1] == -1.7725
def test_attributes():
from heart_rate import ECG
first_set = ECG(filename='test_data8.csv')
assert first_set.voltage_extremes == (-3.105, 1.975)
assert first_set.duration == 27.775
second_set = ECG(filename='test_data18.csv')
assert second_set.voltage_extremes == (-0.19375, 0.7875)
assert second_set.duration == 13.887
def test_beat_finding():
import glob
import os
from heart_rate import ECG
csv_loc = os.path.join(os.path.dirname(__file__), '../test_data/*.csv')
num_beats_actual = [35, 19, 19, 32, 19, 19, 37, 74, 79, 29, 36, 44, 63, 35,
10, 34, 78, 19, 19, 33, 35, 37, 33, 32, 33, 28, 9, 4,
7, 7, 19, 19]
num_beats_found = []
for csv_file in glob.glob(csv_loc):
test = ECG(filename=os.path.basename(csv_file), export=True)
num_beats_found.append(len(test.beats))
tot_beats_actual = sum(num_beats_actual)
tot_beats_found = sum(num_beats_found)
assert abs((tot_beats_actual - tot_beats_found)/(tot_beats_actual)) < 0.01
# net one percent error in the finding of the heart beats was considered
# acceptable for the purposes of the assignment
def test_heart_rate():
from heart_rate import ECG
first_set = ECG(filename='test_data20.csv')
assert abs(int(first_set.mean_hr_bpm) - 81)/81 < 0.05
def test_export():
from heart_rate import ECG
import os
import json
import numpy as np
test = ECG()
json_loc = os.path.join(os.path.dirname(__file__),
'../JSON/test_data1.json')
with open(json_loc, 'r') as fp:
json_import = json.load(fp)
assert np.allclose(test.voltage, json_import['voltage']) | code/test_heart_rate.py | def test_import():
from heart_rate import ECG
first_set = ECG(filename='test_data1.csv')
assert first_set.time[0] == 0
assert first_set.time[-1] == 27.775
assert first_set.voltage[0] == -0.145
assert first_set.voltage[-1] == 0.72
second_set = ECG(filename='test_data27.csv')
assert second_set.time[0] == 0
assert second_set.time[-1] == 39.996
assert second_set.voltage[0] == -0.175
assert second_set.voltage[-1] == -1.7725
def test_attributes():
from heart_rate import ECG
first_set = ECG(filename='test_data8.csv')
assert first_set.voltage_extremes == (-3.105, 1.975)
assert first_set.duration == 27.775
second_set = ECG(filename='test_data18.csv')
assert second_set.voltage_extremes == (-0.19375, 0.7875)
assert second_set.duration == 13.887
def test_beat_finding():
import glob
import os
from heart_rate import ECG
csv_loc = os.path.join(os.path.dirname(__file__), '../test_data/*.csv')
num_beats_actual = [35, 19, 19, 32, 19, 19, 37, 74, 79, 29, 36, 44, 63, 35,
10, 34, 78, 19, 19, 33, 35, 37, 33, 32, 33, 28, 9, 4,
7, 7, 19, 19]
num_beats_found = []
for csv_file in glob.glob(csv_loc):
test = ECG(filename=os.path.basename(csv_file), export=True)
num_beats_found.append(len(test.beats))
tot_beats_actual = sum(num_beats_actual)
tot_beats_found = sum(num_beats_found)
assert abs((tot_beats_actual - tot_beats_found)/(tot_beats_actual)) < 0.01
# net one percent error in the finding of the heart beats was considered
# acceptable for the purposes of the assignment
def test_heart_rate():
from heart_rate import ECG
first_set = ECG(filename='test_data20.csv')
assert abs(int(first_set.mean_hr_bpm) - 81)/81 < 0.05
def test_export():
from heart_rate import ECG
import os
import json
import numpy as np
test = ECG()
json_loc = os.path.join(os.path.dirname(__file__),
'../JSON/test_data1.json')
with open(json_loc, 'r') as fp:
json_import = json.load(fp)
assert np.allclose(test.voltage, json_import['voltage']) | 0.441191 | 0.595022 |
import io
import sys
from textwrap import dedent
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
def import_htpasswd_file(filename, verbosity=1, overwrite=False):
with io.open(filename) as file:
for line in file:
if not ':' in line:
raise ValueError('Found a line without colon separator in the htpassword file %s:'
' "%s"' % (file.name, line))
username, password = line.strip().split(':', 1)
try:
user = User.objects.get(username=username)
if overwrite == True or not user.password:
if password.startswith('{SHA}'):
user.password = "<PASSWORD>" % password[len('{SHA}'):]
elif password.startswith('<PASSWORD>$'):
user.password = "<PASSWORD>" % password[len('$apr1$'):]
else: # Assume crypt
user.password = "<PASSWORD>" % password
user.save()
if verbosity > 0:
sys.stderr.write('.')
if verbosity > 1:
sys.stderr.write(' %s\n' % username)
except User.DoesNotExist:
if verbosity > 1:
sys.stderr.write('\nNo such user: %s\n' % username)
class Command(BaseCommand):
"""
Import passwords from one or more htpasswd files to Django's auth_user table.
This command only imports passwords; it does not import usernames, as that
would leave usernames without associated Person records in the database,
something which is undesirable.
By default the command won't overwrite existing password entries, but
given the --force switch, it will overwrite existing entries too. Without
the --force switch, the command is safe to run repeatedly.
"""
help = dedent(__doc__).strip()
def add_arguments(self, parser):
parser.add_argument('--force',
action='store_true', dest='overwrite', default=False,
help='Overwrite existing passwords in the auth_user table.')
args = '[path [path [...]]]'
def handle(self, *filenames, **options):
overwrite = options.get('overwrite', False)
verbosity = int(options.get('verbosity'))
for fn in filenames:
import_htpasswd_file(fn, verbosity=verbosity, overwrite=overwrite) | ietf/utils/management/commands/import_htpasswd.py | import io
import sys
from textwrap import dedent
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
def import_htpasswd_file(filename, verbosity=1, overwrite=False):
with io.open(filename) as file:
for line in file:
if not ':' in line:
raise ValueError('Found a line without colon separator in the htpassword file %s:'
' "%s"' % (file.name, line))
username, password = line.strip().split(':', 1)
try:
user = User.objects.get(username=username)
if overwrite == True or not user.password:
if password.startswith('{SHA}'):
user.password = "<PASSWORD>" % password[len('{SHA}'):]
elif password.startswith('<PASSWORD>$'):
user.password = "<PASSWORD>" % password[len('$apr1$'):]
else: # Assume crypt
user.password = "<PASSWORD>" % password
user.save()
if verbosity > 0:
sys.stderr.write('.')
if verbosity > 1:
sys.stderr.write(' %s\n' % username)
except User.DoesNotExist:
if verbosity > 1:
sys.stderr.write('\nNo such user: %s\n' % username)
class Command(BaseCommand):
"""
Import passwords from one or more htpasswd files to Django's auth_user table.
This command only imports passwords; it does not import usernames, as that
would leave usernames without associated Person records in the database,
something which is undesirable.
By default the command won't overwrite existing password entries, but
given the --force switch, it will overwrite existing entries too. Without
the --force switch, the command is safe to run repeatedly.
"""
help = dedent(__doc__).strip()
def add_arguments(self, parser):
parser.add_argument('--force',
action='store_true', dest='overwrite', default=False,
help='Overwrite existing passwords in the auth_user table.')
args = '[path [path [...]]]'
def handle(self, *filenames, **options):
overwrite = options.get('overwrite', False)
verbosity = int(options.get('verbosity'))
for fn in filenames:
import_htpasswd_file(fn, verbosity=verbosity, overwrite=overwrite) | 0.208018 | 0.060391 |
import FWCore.ParameterSet.Config as cms
trackingMaterialAnalyser = cms.EDAnalyzer("TrackingMaterialAnalyser",
MaterialAccounting = cms.InputTag("trackingMaterialProducer"),
SplitMode = cms.string("NearestLayer"),
SkipBeforeFirstDetector = cms.bool(False),
SkipAfterLastDetector = cms.bool(True),
SaveSummaryPlot = cms.bool(True),
SaveDetailedPlots = cms.bool(False),
SaveParameters = cms.bool(True),
SaveXML = cms.bool(True),
isHGCal = cms.bool(False),
Groups = cms.vstring(
"TrackerRecMaterialPixelBarrelLayer0_External",
"TrackerRecMaterialPixelBarrelLayer1_External",
"TrackerRecMaterialPixelBarrelLayer2_External",
"TrackerRecMaterialPixelBarrelLayer3_External",
"TrackerRecMaterialPixelBarrelLayer0",
"TrackerRecMaterialPixelBarrelLayer1",
"TrackerRecMaterialPixelBarrelLayer2",
"TrackerRecMaterialPixelBarrelLayer3",
"TrackerRecMaterialTIBLayer0_Z0",
"TrackerRecMaterialTIBLayer0_Z20",
"TrackerRecMaterialTIBLayer1_Z0",
"TrackerRecMaterialTIBLayer1_Z30",
"TrackerRecMaterialTIBLayer2_Z0",
"TrackerRecMaterialTIBLayer2_Z40",
"TrackerRecMaterialTIBLayer3_Z0",
"TrackerRecMaterialTIBLayer3_Z50",
"TrackerRecMaterialTOBLayer0_Z0",
"TrackerRecMaterialTOBLayer0_Z20",
"TrackerRecMaterialTOBLayer0_Z70",
"TrackerRecMaterialTOBLayer1_Z0",
"TrackerRecMaterialTOBLayer1_Z20",
"TrackerRecMaterialTOBLayer1_Z80",
"TrackerRecMaterialTOBLayer2_Z0",
"TrackerRecMaterialTOBLayer2_Z25",
"TrackerRecMaterialTOBLayer2_Z80",
"TrackerRecMaterialTOBLayer3_Z0",
"TrackerRecMaterialTOBLayer3_Z25",
"TrackerRecMaterialTOBLayer3_Z80",
"TrackerRecMaterialTOBLayer4_Z0",
"TrackerRecMaterialTOBLayer4_Z25",
"TrackerRecMaterialTOBLayer4_Z80",
"TrackerRecMaterialTOBLayer5_Z0",
"TrackerRecMaterialTOBLayer5_Z25",
"TrackerRecMaterialTOBLayer5_Z80",
"TrackerRecMaterialPixelEndcapDisk1Fw_Inner",
"TrackerRecMaterialPixelEndcapDisk1Fw_Outer",
"TrackerRecMaterialPixelEndcapDisk2Fw_Inner",
"TrackerRecMaterialPixelEndcapDisk2Fw_Outer",
"TrackerRecMaterialPixelEndcapDisk3Fw_Inner",
"TrackerRecMaterialPixelEndcapDisk3Fw_Outer",
"TrackerRecMaterialPixelEndcapDisk1Bw_Inner",
"TrackerRecMaterialPixelEndcapDisk1Bw_Outer",
"TrackerRecMaterialPixelEndcapDisk2Bw_Inner",
"TrackerRecMaterialPixelEndcapDisk2Bw_Outer",
"TrackerRecMaterialPixelEndcapDisk3Bw_Inner",
"TrackerRecMaterialPixelEndcapDisk3Bw_Outer",
"TrackerRecMaterialTIDDisk1_R0",
"TrackerRecMaterialTIDDisk1_R30",
"TrackerRecMaterialTIDDisk2_R25",
"TrackerRecMaterialTIDDisk2_R30",
"TrackerRecMaterialTIDDisk2_R40",
"TrackerRecMaterialTIDDisk3_R24",
"TrackerRecMaterialTECDisk0_R20",
"TrackerRecMaterialTECDisk0_R40",
"TrackerRecMaterialTECDisk0_R50",
"TrackerRecMaterialTECDisk0_R60",
"TrackerRecMaterialTECDisk0_R90",
"TrackerRecMaterialTECDisk1_R20",
"TrackerRecMaterialTECDisk2_R20",
"TrackerRecMaterialTECDisk3",
"TrackerRecMaterialTECDisk4_R33",
"TrackerRecMaterialTECDisk5_R33",
"TrackerRecMaterialTECDisk6",
"TrackerRecMaterialTECDisk7_R40",
"TrackerRecMaterialTECDisk8",
)
) | SimTracker/TrackerMaterialAnalysis/python/trackingMaterialAnalyser_ForPhaseI_cfi.py | import FWCore.ParameterSet.Config as cms
trackingMaterialAnalyser = cms.EDAnalyzer("TrackingMaterialAnalyser",
MaterialAccounting = cms.InputTag("trackingMaterialProducer"),
SplitMode = cms.string("NearestLayer"),
SkipBeforeFirstDetector = cms.bool(False),
SkipAfterLastDetector = cms.bool(True),
SaveSummaryPlot = cms.bool(True),
SaveDetailedPlots = cms.bool(False),
SaveParameters = cms.bool(True),
SaveXML = cms.bool(True),
isHGCal = cms.bool(False),
Groups = cms.vstring(
"TrackerRecMaterialPixelBarrelLayer0_External",
"TrackerRecMaterialPixelBarrelLayer1_External",
"TrackerRecMaterialPixelBarrelLayer2_External",
"TrackerRecMaterialPixelBarrelLayer3_External",
"TrackerRecMaterialPixelBarrelLayer0",
"TrackerRecMaterialPixelBarrelLayer1",
"TrackerRecMaterialPixelBarrelLayer2",
"TrackerRecMaterialPixelBarrelLayer3",
"TrackerRecMaterialTIBLayer0_Z0",
"TrackerRecMaterialTIBLayer0_Z20",
"TrackerRecMaterialTIBLayer1_Z0",
"TrackerRecMaterialTIBLayer1_Z30",
"TrackerRecMaterialTIBLayer2_Z0",
"TrackerRecMaterialTIBLayer2_Z40",
"TrackerRecMaterialTIBLayer3_Z0",
"TrackerRecMaterialTIBLayer3_Z50",
"TrackerRecMaterialTOBLayer0_Z0",
"TrackerRecMaterialTOBLayer0_Z20",
"TrackerRecMaterialTOBLayer0_Z70",
"TrackerRecMaterialTOBLayer1_Z0",
"TrackerRecMaterialTOBLayer1_Z20",
"TrackerRecMaterialTOBLayer1_Z80",
"TrackerRecMaterialTOBLayer2_Z0",
"TrackerRecMaterialTOBLayer2_Z25",
"TrackerRecMaterialTOBLayer2_Z80",
"TrackerRecMaterialTOBLayer3_Z0",
"TrackerRecMaterialTOBLayer3_Z25",
"TrackerRecMaterialTOBLayer3_Z80",
"TrackerRecMaterialTOBLayer4_Z0",
"TrackerRecMaterialTOBLayer4_Z25",
"TrackerRecMaterialTOBLayer4_Z80",
"TrackerRecMaterialTOBLayer5_Z0",
"TrackerRecMaterialTOBLayer5_Z25",
"TrackerRecMaterialTOBLayer5_Z80",
"TrackerRecMaterialPixelEndcapDisk1Fw_Inner",
"TrackerRecMaterialPixelEndcapDisk1Fw_Outer",
"TrackerRecMaterialPixelEndcapDisk2Fw_Inner",
"TrackerRecMaterialPixelEndcapDisk2Fw_Outer",
"TrackerRecMaterialPixelEndcapDisk3Fw_Inner",
"TrackerRecMaterialPixelEndcapDisk3Fw_Outer",
"TrackerRecMaterialPixelEndcapDisk1Bw_Inner",
"TrackerRecMaterialPixelEndcapDisk1Bw_Outer",
"TrackerRecMaterialPixelEndcapDisk2Bw_Inner",
"TrackerRecMaterialPixelEndcapDisk2Bw_Outer",
"TrackerRecMaterialPixelEndcapDisk3Bw_Inner",
"TrackerRecMaterialPixelEndcapDisk3Bw_Outer",
"TrackerRecMaterialTIDDisk1_R0",
"TrackerRecMaterialTIDDisk1_R30",
"TrackerRecMaterialTIDDisk2_R25",
"TrackerRecMaterialTIDDisk2_R30",
"TrackerRecMaterialTIDDisk2_R40",
"TrackerRecMaterialTIDDisk3_R24",
"TrackerRecMaterialTECDisk0_R20",
"TrackerRecMaterialTECDisk0_R40",
"TrackerRecMaterialTECDisk0_R50",
"TrackerRecMaterialTECDisk0_R60",
"TrackerRecMaterialTECDisk0_R90",
"TrackerRecMaterialTECDisk1_R20",
"TrackerRecMaterialTECDisk2_R20",
"TrackerRecMaterialTECDisk3",
"TrackerRecMaterialTECDisk4_R33",
"TrackerRecMaterialTECDisk5_R33",
"TrackerRecMaterialTECDisk6",
"TrackerRecMaterialTECDisk7_R40",
"TrackerRecMaterialTECDisk8",
)
) | 0.556279 | 0.174621 |
"""OCR."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import struct
from tensor2tensor.data_generators import image_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.utils import registry
import tensorflow.compat.v1 as tf
@registry.register_problem
class OcrTest(image_utils.Image2TextProblem):
"""OCR test problem."""
@property
def is_small(self):
return True
@property
def is_character_level(self):
return True
@property
def target_space_id(self):
return problem.SpaceID.EN_CHR
@property
def train_shards(self):
return 1
@property
def dev_shards(self):
return 1
def preprocess_example(self, example, mode, _):
# Resize from usual size ~1350x60 to 90x4 in this test.
img = example["inputs"]
img = tf.to_int64(
tf.image.resize_images(img, [90, 4], tf.image.ResizeMethod.AREA))
img = tf.image.per_image_standardization(img)
example["inputs"] = img
return example
def generator(self, data_dir, tmp_dir, is_training):
# In this test problem, we assume that the data is in tmp_dir/ocr/ in
# files names 0.png, 0.txt, 1.png, 1.txt and so on until num_examples.
num_examples = 2
ocr_dir = os.path.join(tmp_dir, "ocr/")
tf.logging.info("Looking for OCR data in %s." % ocr_dir)
for i in range(num_examples):
image_filepath = os.path.join(ocr_dir, "%d.png" % i)
text_filepath = os.path.join(ocr_dir, "%d.txt" % i)
with tf.gfile.Open(text_filepath, "rb") as f:
label = f.read()
with tf.gfile.Open(image_filepath, "rb") as f:
encoded_image_data = f.read()
# In PNG files width and height are stored in these bytes.
width, height = struct.unpack(">ii", encoded_image_data[16:24])
yield {
"image/encoded": [encoded_image_data],
"image/format": ["png"],
"image/class/label": label.strip(),
"image/height": [height],
"image/width": [width]
} | tensor2tensor/data_generators/ocr.py |
"""OCR."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import struct
from tensor2tensor.data_generators import image_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.utils import registry
import tensorflow.compat.v1 as tf
@registry.register_problem
class OcrTest(image_utils.Image2TextProblem):
"""OCR test problem."""
@property
def is_small(self):
return True
@property
def is_character_level(self):
return True
@property
def target_space_id(self):
return problem.SpaceID.EN_CHR
@property
def train_shards(self):
return 1
@property
def dev_shards(self):
return 1
def preprocess_example(self, example, mode, _):
# Resize from usual size ~1350x60 to 90x4 in this test.
img = example["inputs"]
img = tf.to_int64(
tf.image.resize_images(img, [90, 4], tf.image.ResizeMethod.AREA))
img = tf.image.per_image_standardization(img)
example["inputs"] = img
return example
def generator(self, data_dir, tmp_dir, is_training):
# In this test problem, we assume that the data is in tmp_dir/ocr/ in
# files names 0.png, 0.txt, 1.png, 1.txt and so on until num_examples.
num_examples = 2
ocr_dir = os.path.join(tmp_dir, "ocr/")
tf.logging.info("Looking for OCR data in %s." % ocr_dir)
for i in range(num_examples):
image_filepath = os.path.join(ocr_dir, "%d.png" % i)
text_filepath = os.path.join(ocr_dir, "%d.txt" % i)
with tf.gfile.Open(text_filepath, "rb") as f:
label = f.read()
with tf.gfile.Open(image_filepath, "rb") as f:
encoded_image_data = f.read()
# In PNG files width and height are stored in these bytes.
width, height = struct.unpack(">ii", encoded_image_data[16:24])
yield {
"image/encoded": [encoded_image_data],
"image/format": ["png"],
"image/class/label": label.strip(),
"image/height": [height],
"image/width": [width]
} | 0.801354 | 0.234593 |
from Gridworld import Gridworld
from MonteCarlo import MonteCarlo
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import csv
env = Gridworld(shape=[5,5], initialState=25)
print("------------------------------epsilon=0.01-------------------------------------")
MC_1 = MonteCarlo(grid_world = env, epsilon=0.01, alpha=0.1, gamma=0.99, num_trials = 20)
print("------------------------------epsilon=0.1-------------------------------------")
MC_2 = MonteCarlo(grid_world = env, epsilon=0.1, alpha=0.1, gamma=0.99, num_trials = 20)
print("------------------------------epsilon=0.25-------------------------------------")
MC_3 = MonteCarlo(grid_world = env, epsilon=0.25, alpha=0.1, gamma=0.99, num_trials = 20)
print("------------------------------RESULTS-------------------------------------")
print("------------------------------EPSILON = 0.01 --------------------------------")
policy_1 = MC_1.getPolicy()
q1 = MC_1.getQValues()
frames = []
for x in range(3):
q = {}
for state in range(25):
q[state+1] = q1[x][(state+1, policy_1[x][0][state])]
df = pd.DataFrame(q, index = [x])
frames.append(df)
result_1 = pd.concat(frames)
print("Most common 3 policies: ", policy_1)
print("q values: \n", result_1)
print("------------------------------EPSILON = 0.1 --------------------------------")
policy_2 = MC_2.getPolicy()
q2 = MC_2.getQValues()
frames = []
for x in range(3):
q = {}
for state in range(25):
q[state+1] = q2[x][(state+1, policy_2[x][0][state])]
df = pd.DataFrame(q, index = [x])
frames.append(df)
result_2 = pd.concat(frames)
print("Most common 3 policies: ", policy_2)
print("q values: \n", result_2)
print("------------------------------EPSILON = 0.25 --------------------------------")
policy_3 = MC_3.getPolicy()
q3 = MC_3.getQValues()
frames = []
for x in range(3):
q = {}
for state in range(25):
q[state+1] = q3[x][(state+1, policy_3[x][0][state])]
df = pd.DataFrame(q, index = [x])
frames.append(df)
result_3 = pd.concat(frames)
print("Most common 3 policies: ", policy_3)
print("q values: \n", result_3)
print("---------------------------END OF RESULTS----------------------------------")
# convert q values dictionaries into dataframes and export to excel.
writer = pd.ExcelWriter('MC.xlsx')
result_1.to_excel(writer, '0.01')
result_2.to_excel(writer, '0.1')
result_3.to_excel(writer, '0.25')
writer.save() | MonteCarlo_and_SARSA/run_MonteCarlo.py | from Gridworld import Gridworld
from MonteCarlo import MonteCarlo
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import csv
env = Gridworld(shape=[5,5], initialState=25)
print("------------------------------epsilon=0.01-------------------------------------")
MC_1 = MonteCarlo(grid_world = env, epsilon=0.01, alpha=0.1, gamma=0.99, num_trials = 20)
print("------------------------------epsilon=0.1-------------------------------------")
MC_2 = MonteCarlo(grid_world = env, epsilon=0.1, alpha=0.1, gamma=0.99, num_trials = 20)
print("------------------------------epsilon=0.25-------------------------------------")
MC_3 = MonteCarlo(grid_world = env, epsilon=0.25, alpha=0.1, gamma=0.99, num_trials = 20)
print("------------------------------RESULTS-------------------------------------")
print("------------------------------EPSILON = 0.01 --------------------------------")
policy_1 = MC_1.getPolicy()
q1 = MC_1.getQValues()
frames = []
for x in range(3):
q = {}
for state in range(25):
q[state+1] = q1[x][(state+1, policy_1[x][0][state])]
df = pd.DataFrame(q, index = [x])
frames.append(df)
result_1 = pd.concat(frames)
print("Most common 3 policies: ", policy_1)
print("q values: \n", result_1)
print("------------------------------EPSILON = 0.1 --------------------------------")
policy_2 = MC_2.getPolicy()
q2 = MC_2.getQValues()
frames = []
for x in range(3):
q = {}
for state in range(25):
q[state+1] = q2[x][(state+1, policy_2[x][0][state])]
df = pd.DataFrame(q, index = [x])
frames.append(df)
result_2 = pd.concat(frames)
print("Most common 3 policies: ", policy_2)
print("q values: \n", result_2)
print("------------------------------EPSILON = 0.25 --------------------------------")
policy_3 = MC_3.getPolicy()
q3 = MC_3.getQValues()
frames = []
for x in range(3):
q = {}
for state in range(25):
q[state+1] = q3[x][(state+1, policy_3[x][0][state])]
df = pd.DataFrame(q, index = [x])
frames.append(df)
result_3 = pd.concat(frames)
print("Most common 3 policies: ", policy_3)
print("q values: \n", result_3)
print("---------------------------END OF RESULTS----------------------------------")
# convert q values dictionaries into dataframes and export to excel.
writer = pd.ExcelWriter('MC.xlsx')
result_1.to_excel(writer, '0.01')
result_2.to_excel(writer, '0.1')
result_3.to_excel(writer, '0.25')
writer.save() | 0.390825 | 0.518424 |
from typing import AsyncGenerator, Tuple
import anyio
from p2pclient.libp2p_stubs.crypto.pb import crypto_pb2 as crypto_pb
from p2pclient.libp2p_stubs.peer.id import ID
from .control import DaemonConnector
from .datastructures import PeerInfo
from .exceptions import ControlFailure
from .pb import p2pd_pb2 as p2pd_pb
from .utils import raise_if_failed, read_pbmsg_safe, write_pbmsg
class DHTClient:
daemon_connector: DaemonConnector
def __init__(self, daemon_connector: DaemonConnector) -> None:
self.daemon_connector = daemon_connector
@staticmethod
async def _read_dht_stream(
stream: anyio.abc.SocketStream,
) -> AsyncGenerator[p2pd_pb.DHTResponse, None]:
while True:
dht_resp = p2pd_pb.DHTResponse() # type: ignore
await read_pbmsg_safe(stream, dht_resp)
if dht_resp.type == dht_resp.END:
break
yield dht_resp
async def _do_dht(
self, dht_req: p2pd_pb.DHTRequest
) -> Tuple[p2pd_pb.DHTResponse, ...]:
stream = await self.daemon_connector.open_connection()
req = p2pd_pb.Request(type=p2pd_pb.Request.DHT, dht=dht_req)
await write_pbmsg(stream, req)
resp = p2pd_pb.Response() # type: ignore
await read_pbmsg_safe(stream, resp)
raise_if_failed(resp)
try:
dht_resp = resp.dht
except AttributeError as e:
raise ControlFailure(f"resp should contains dht: resp={resp}, e={e}")
if dht_resp.type == dht_resp.VALUE:
return (dht_resp,)
if dht_resp.type != dht_resp.BEGIN:
raise ControlFailure(f"Type should be BEGIN instead of {dht_resp.type}")
# BEGIN/END stream
resps = tuple([i async for i in self._read_dht_stream(stream)])
await stream.close()
return resps
async def find_peer(self, peer_id: ID) -> PeerInfo:
"""FIND_PEER
"""
dht_req = p2pd_pb.DHTRequest(
type=p2pd_pb.DHTRequest.FIND_PEER, peer=peer_id.to_bytes()
)
resps = await self._do_dht(dht_req)
if len(resps) != 1:
raise ControlFailure(
f"should only get one response from `find_peer`, resps={resps}"
)
dht_resp = resps[0]
try:
pinfo = dht_resp.peer
except AttributeError as e:
raise ControlFailure(
f"dht_resp should contains peer info: dht_resp={dht_resp}, e={e}"
)
return PeerInfo.from_pb(pinfo)
async def find_peers_connected_to_peer(self, peer_id: ID) -> Tuple[PeerInfo, ...]:
"""FIND_PEERS_CONNECTED_TO_PEER
"""
dht_req = p2pd_pb.DHTRequest(
type=p2pd_pb.DHTRequest.FIND_PEERS_CONNECTED_TO_PEER,
peer=peer_id.to_bytes(),
)
resps = await self._do_dht(dht_req)
try:
pinfos = tuple(PeerInfo.from_pb(dht_resp.peer) for dht_resp in resps)
except AttributeError as e:
raise ControlFailure(
f"dht_resp should contains peer info: resps={resps}, e={e}"
)
return pinfos
async def find_providers(
self, content_id_bytes: bytes, count: int
) -> Tuple[PeerInfo, ...]:
"""FIND_PROVIDERS
"""
# TODO: should have another class ContendID
dht_req = p2pd_pb.DHTRequest(
type=p2pd_pb.DHTRequest.FIND_PROVIDERS, cid=content_id_bytes, count=count
)
resps = await self._do_dht(dht_req)
try:
pinfos = tuple(PeerInfo.from_pb(dht_resp.peer) for dht_resp in resps)
except AttributeError as e:
raise ControlFailure(
f"dht_resp should contains peer info: resps={resps}, e={e}"
)
return pinfos
async def get_closest_peers(self, key: bytes) -> Tuple[ID, ...]:
"""GET_CLOSEST_PEERS
"""
dht_req = p2pd_pb.DHTRequest(type=p2pd_pb.DHTRequest.GET_CLOSEST_PEERS, key=key)
resps = await self._do_dht(dht_req)
try:
peer_ids = tuple(ID(dht_resp.value) for dht_resp in resps)
except AttributeError as e:
raise ControlFailure(
f"dht_resp should contains `value`: resps={resps}, e={e}"
)
return peer_ids
async def get_public_key(self, peer_id: ID) -> crypto_pb.PublicKey:
"""GET_PUBLIC_KEY
"""
dht_req = p2pd_pb.DHTRequest(
type=p2pd_pb.DHTRequest.GET_PUBLIC_KEY, peer=peer_id.to_bytes()
)
resps = await self._do_dht(dht_req)
if len(resps) != 1:
raise ControlFailure(f"should only get one response, resps={resps}")
try:
public_key_pb_bytes = resps[0].value
except AttributeError as e:
raise ControlFailure(
f"dht_resp should contains `value`: resps={resps}, e={e}"
)
public_key_pb = crypto_pb.PublicKey()
public_key_pb.ParseFromString(public_key_pb_bytes)
return public_key_pb
async def get_value(self, key: bytes) -> bytes:
"""GET_VALUE
"""
dht_req = p2pd_pb.DHTRequest(type=p2pd_pb.DHTRequest.GET_VALUE, key=key)
resps = await self._do_dht(dht_req)
if len(resps) != 1:
raise ControlFailure(f"should only get one response, resps={resps}")
try:
value = resps[0].value
except AttributeError as e:
raise ControlFailure(
f"dht_resp should contains `value`: resps={resps}, e={e}"
)
return value
async def search_value(self, key: bytes) -> Tuple[bytes, ...]:
"""SEARCH_VALUE
"""
dht_req = p2pd_pb.DHTRequest(type=p2pd_pb.DHTRequest.SEARCH_VALUE, key=key)
resps = await self._do_dht(dht_req)
try:
values = tuple(resp.value for resp in resps)
except AttributeError as e:
raise ControlFailure(
f"dht_resp should contains `value`: resps={resps}, e={e}"
)
return values
async def put_value(self, key: bytes, value: bytes) -> None:
"""PUT_VALUE
"""
dht_req = p2pd_pb.DHTRequest(
type=p2pd_pb.DHTRequest.PUT_VALUE, key=key, value=value
)
req = p2pd_pb.Request(type=p2pd_pb.Request.DHT, dht=dht_req)
stream = await self.daemon_connector.open_connection()
await write_pbmsg(stream, req)
resp = p2pd_pb.Response() # type: ignore
await read_pbmsg_safe(stream, resp)
await stream.close()
raise_if_failed(resp)
async def provide(self, cid: bytes) -> None:
"""PROVIDE
"""
dht_req = p2pd_pb.DHTRequest(type=p2pd_pb.DHTRequest.PROVIDE, cid=cid)
req = p2pd_pb.Request(type=p2pd_pb.Request.DHT, dht=dht_req)
stream = await self.daemon_connector.open_connection()
await write_pbmsg(stream, req)
resp = p2pd_pb.Response() # type: ignore
await read_pbmsg_safe(stream, resp)
await stream.close()
raise_if_failed(resp) | p2pclient/dht.py | from typing import AsyncGenerator, Tuple
import anyio
from p2pclient.libp2p_stubs.crypto.pb import crypto_pb2 as crypto_pb
from p2pclient.libp2p_stubs.peer.id import ID
from .control import DaemonConnector
from .datastructures import PeerInfo
from .exceptions import ControlFailure
from .pb import p2pd_pb2 as p2pd_pb
from .utils import raise_if_failed, read_pbmsg_safe, write_pbmsg
class DHTClient:
daemon_connector: DaemonConnector
def __init__(self, daemon_connector: DaemonConnector) -> None:
self.daemon_connector = daemon_connector
@staticmethod
async def _read_dht_stream(
stream: anyio.abc.SocketStream,
) -> AsyncGenerator[p2pd_pb.DHTResponse, None]:
while True:
dht_resp = p2pd_pb.DHTResponse() # type: ignore
await read_pbmsg_safe(stream, dht_resp)
if dht_resp.type == dht_resp.END:
break
yield dht_resp
async def _do_dht(
self, dht_req: p2pd_pb.DHTRequest
) -> Tuple[p2pd_pb.DHTResponse, ...]:
stream = await self.daemon_connector.open_connection()
req = p2pd_pb.Request(type=p2pd_pb.Request.DHT, dht=dht_req)
await write_pbmsg(stream, req)
resp = p2pd_pb.Response() # type: ignore
await read_pbmsg_safe(stream, resp)
raise_if_failed(resp)
try:
dht_resp = resp.dht
except AttributeError as e:
raise ControlFailure(f"resp should contains dht: resp={resp}, e={e}")
if dht_resp.type == dht_resp.VALUE:
return (dht_resp,)
if dht_resp.type != dht_resp.BEGIN:
raise ControlFailure(f"Type should be BEGIN instead of {dht_resp.type}")
# BEGIN/END stream
resps = tuple([i async for i in self._read_dht_stream(stream)])
await stream.close()
return resps
async def find_peer(self, peer_id: ID) -> PeerInfo:
"""FIND_PEER
"""
dht_req = p2pd_pb.DHTRequest(
type=p2pd_pb.DHTRequest.FIND_PEER, peer=peer_id.to_bytes()
)
resps = await self._do_dht(dht_req)
if len(resps) != 1:
raise ControlFailure(
f"should only get one response from `find_peer`, resps={resps}"
)
dht_resp = resps[0]
try:
pinfo = dht_resp.peer
except AttributeError as e:
raise ControlFailure(
f"dht_resp should contains peer info: dht_resp={dht_resp}, e={e}"
)
return PeerInfo.from_pb(pinfo)
async def find_peers_connected_to_peer(self, peer_id: ID) -> Tuple[PeerInfo, ...]:
"""FIND_PEERS_CONNECTED_TO_PEER
"""
dht_req = p2pd_pb.DHTRequest(
type=p2pd_pb.DHTRequest.FIND_PEERS_CONNECTED_TO_PEER,
peer=peer_id.to_bytes(),
)
resps = await self._do_dht(dht_req)
try:
pinfos = tuple(PeerInfo.from_pb(dht_resp.peer) for dht_resp in resps)
except AttributeError as e:
raise ControlFailure(
f"dht_resp should contains peer info: resps={resps}, e={e}"
)
return pinfos
async def find_providers(
self, content_id_bytes: bytes, count: int
) -> Tuple[PeerInfo, ...]:
"""FIND_PROVIDERS
"""
# TODO: should have another class ContendID
dht_req = p2pd_pb.DHTRequest(
type=p2pd_pb.DHTRequest.FIND_PROVIDERS, cid=content_id_bytes, count=count
)
resps = await self._do_dht(dht_req)
try:
pinfos = tuple(PeerInfo.from_pb(dht_resp.peer) for dht_resp in resps)
except AttributeError as e:
raise ControlFailure(
f"dht_resp should contains peer info: resps={resps}, e={e}"
)
return pinfos
async def get_closest_peers(self, key: bytes) -> Tuple[ID, ...]:
"""GET_CLOSEST_PEERS
"""
dht_req = p2pd_pb.DHTRequest(type=p2pd_pb.DHTRequest.GET_CLOSEST_PEERS, key=key)
resps = await self._do_dht(dht_req)
try:
peer_ids = tuple(ID(dht_resp.value) for dht_resp in resps)
except AttributeError as e:
raise ControlFailure(
f"dht_resp should contains `value`: resps={resps}, e={e}"
)
return peer_ids
async def get_public_key(self, peer_id: ID) -> crypto_pb.PublicKey:
"""GET_PUBLIC_KEY
"""
dht_req = p2pd_pb.DHTRequest(
type=p2pd_pb.DHTRequest.GET_PUBLIC_KEY, peer=peer_id.to_bytes()
)
resps = await self._do_dht(dht_req)
if len(resps) != 1:
raise ControlFailure(f"should only get one response, resps={resps}")
try:
public_key_pb_bytes = resps[0].value
except AttributeError as e:
raise ControlFailure(
f"dht_resp should contains `value`: resps={resps}, e={e}"
)
public_key_pb = crypto_pb.PublicKey()
public_key_pb.ParseFromString(public_key_pb_bytes)
return public_key_pb
async def get_value(self, key: bytes) -> bytes:
"""GET_VALUE
"""
dht_req = p2pd_pb.DHTRequest(type=p2pd_pb.DHTRequest.GET_VALUE, key=key)
resps = await self._do_dht(dht_req)
if len(resps) != 1:
raise ControlFailure(f"should only get one response, resps={resps}")
try:
value = resps[0].value
except AttributeError as e:
raise ControlFailure(
f"dht_resp should contains `value`: resps={resps}, e={e}"
)
return value
async def search_value(self, key: bytes) -> Tuple[bytes, ...]:
"""SEARCH_VALUE
"""
dht_req = p2pd_pb.DHTRequest(type=p2pd_pb.DHTRequest.SEARCH_VALUE, key=key)
resps = await self._do_dht(dht_req)
try:
values = tuple(resp.value for resp in resps)
except AttributeError as e:
raise ControlFailure(
f"dht_resp should contains `value`: resps={resps}, e={e}"
)
return values
async def put_value(self, key: bytes, value: bytes) -> None:
"""PUT_VALUE
"""
dht_req = p2pd_pb.DHTRequest(
type=p2pd_pb.DHTRequest.PUT_VALUE, key=key, value=value
)
req = p2pd_pb.Request(type=p2pd_pb.Request.DHT, dht=dht_req)
stream = await self.daemon_connector.open_connection()
await write_pbmsg(stream, req)
resp = p2pd_pb.Response() # type: ignore
await read_pbmsg_safe(stream, resp)
await stream.close()
raise_if_failed(resp)
async def provide(self, cid: bytes) -> None:
"""PROVIDE
"""
dht_req = p2pd_pb.DHTRequest(type=p2pd_pb.DHTRequest.PROVIDE, cid=cid)
req = p2pd_pb.Request(type=p2pd_pb.Request.DHT, dht=dht_req)
stream = await self.daemon_connector.open_connection()
await write_pbmsg(stream, req)
resp = p2pd_pb.Response() # type: ignore
await read_pbmsg_safe(stream, resp)
await stream.close()
raise_if_failed(resp) | 0.52902 | 0.122655 |
SIMPLE = '''{% for item in seq %}{{ item }}{% endfor %}'''
ELSE = '''{% for item in seq %}XXX{% else %}...{% endfor %}'''
EMPTYBLOCKS = '''<{% for item in seq %}{% else %}{% endfor %}>'''
CONTEXTVARS = '''{% for item in seq %}\
{{ loop.index }}|{{ loop.index0 }}|{{ loop.revindex }}|{{
loop.revindex0 }}|{{ loop.first }}|{{ loop.last }}|{{
loop.even }}|{{ loop.odd }}|{{ loop.length }}###{% endfor %}'''
CYCLING = '''{% for item in seq %}{% cycle '<1>', '<2>' %}{% endfor %}\
{% for item in seq %}{% cycle through %}{% endfor %}'''
SCOPE = '''{% for item in seq %}{% endfor %}{{ item }}'''
VARLEN = '''{% for item in iter %}{{ item }}{% endfor %}'''
NONITER = '''{% for item in none %}...{% endfor %}'''
def test_simple(env):
tmpl = env.from_string(SIMPLE)
assert tmpl.render(seq=range(10)) == '0123456789'
def test_else(env):
tmpl = env.from_string(ELSE)
assert tmpl.render() == '...'
def test_empty_blocks(env):
tmpl = env.from_string(EMPTYBLOCKS)
assert tmpl.render() == '<>'
def test_context_vars(env):
tmpl = env.from_string(CONTEXTVARS)
one, two, _ = tmpl.render(seq=[0, 1]).split('###')
(one_index, one_index0, one_revindex, one_revindex0, one_first,
one_last, one_even, one_odd, one_length) = one.split('|')
(two_index, two_index0, two_revindex, two_revindex0, two_first,
two_last, two_even, two_odd, two_length) = two.split('|')
assert int(one_index) == 1 and int(two_index) == 2
assert int(one_index0) == 0 and int(two_index0) == 1
assert int(one_revindex) == 2 and int(two_revindex) == 1
assert int(one_revindex0) == 1 and int(two_revindex0) == 0
assert one_first == 'True' and two_first == 'False'
assert one_last == 'False' and two_last == 'True'
assert one_even == 'False' and two_even == 'True'
assert one_odd == 'True' and two_odd == 'False'
assert one_length == two_length == '2'
def test_cycling(env):
tmpl = env.from_string(CYCLING)
output = tmpl.render(seq=range(4), through=('<1>', '<2>'))
assert output == '<1><2>' * 4
def test_scope(env):
tmpl = env.from_string(SCOPE)
output = tmpl.render(seq=range(10))
assert not output
def test_varlen(env):
def inner():
for item in range(5):
yield item
tmpl = env.from_string(VARLEN)
output = tmpl.render(iter=inner())
assert output == '01234'
def test_noniter(env):
tmpl = env.from_string(NONITER)
assert not tmpl.render() | tests/test_forloop.py | SIMPLE = '''{% for item in seq %}{{ item }}{% endfor %}'''
ELSE = '''{% for item in seq %}XXX{% else %}...{% endfor %}'''
EMPTYBLOCKS = '''<{% for item in seq %}{% else %}{% endfor %}>'''
CONTEXTVARS = '''{% for item in seq %}\
{{ loop.index }}|{{ loop.index0 }}|{{ loop.revindex }}|{{
loop.revindex0 }}|{{ loop.first }}|{{ loop.last }}|{{
loop.even }}|{{ loop.odd }}|{{ loop.length }}###{% endfor %}'''
CYCLING = '''{% for item in seq %}{% cycle '<1>', '<2>' %}{% endfor %}\
{% for item in seq %}{% cycle through %}{% endfor %}'''
SCOPE = '''{% for item in seq %}{% endfor %}{{ item }}'''
VARLEN = '''{% for item in iter %}{{ item }}{% endfor %}'''
NONITER = '''{% for item in none %}...{% endfor %}'''
def test_simple(env):
tmpl = env.from_string(SIMPLE)
assert tmpl.render(seq=range(10)) == '0123456789'
def test_else(env):
tmpl = env.from_string(ELSE)
assert tmpl.render() == '...'
def test_empty_blocks(env):
tmpl = env.from_string(EMPTYBLOCKS)
assert tmpl.render() == '<>'
def test_context_vars(env):
tmpl = env.from_string(CONTEXTVARS)
one, two, _ = tmpl.render(seq=[0, 1]).split('###')
(one_index, one_index0, one_revindex, one_revindex0, one_first,
one_last, one_even, one_odd, one_length) = one.split('|')
(two_index, two_index0, two_revindex, two_revindex0, two_first,
two_last, two_even, two_odd, two_length) = two.split('|')
assert int(one_index) == 1 and int(two_index) == 2
assert int(one_index0) == 0 and int(two_index0) == 1
assert int(one_revindex) == 2 and int(two_revindex) == 1
assert int(one_revindex0) == 1 and int(two_revindex0) == 0
assert one_first == 'True' and two_first == 'False'
assert one_last == 'False' and two_last == 'True'
assert one_even == 'False' and two_even == 'True'
assert one_odd == 'True' and two_odd == 'False'
assert one_length == two_length == '2'
def test_cycling(env):
tmpl = env.from_string(CYCLING)
output = tmpl.render(seq=range(4), through=('<1>', '<2>'))
assert output == '<1><2>' * 4
def test_scope(env):
tmpl = env.from_string(SCOPE)
output = tmpl.render(seq=range(10))
assert not output
def test_varlen(env):
def inner():
for item in range(5):
yield item
tmpl = env.from_string(VARLEN)
output = tmpl.render(iter=inner())
assert output == '01234'
def test_noniter(env):
tmpl = env.from_string(NONITER)
assert not tmpl.render() | 0.386648 | 0.528229 |
import pandas as pd
import numpy as np
import tensorflow as tf
import os
import matplotlib.pyplot as plt
import seaborn as sns
import PIL
from typing import List
# EfficientNet
from tensorflow.keras.applications import EfficientNetB7, ResNet50
from tensorflow.keras.applications.efficientnet import preprocess_input
# Data Augmentation
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Model Layers
from tensorflow.keras import Model, Input
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D , Flatten, Dropout, BatchNormalization
# Compiling and Callbacks
from tensorflow.keras.optimizers import SGD,Adam
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping
#-----------------------------------------------------------------------------------------------------
# Competition Directory
comp_dir="/kaggle/input/ranzcr-clip-catheter-line-classification/"
# Get Training Data Labels
df_train=pd.read_csv(comp_dir+"train.csv").sample(frac=1).reset_index(drop=True)
# Get Training/Testing Data Paths
test_files = os.listdir(comp_dir+"test")
df_test = pd.DataFrame({"StudyInstanceUID": test_files})
image_size = 512
batch_size = 16
num_epochs = 12
learn_rate = 1e-03
df_train.StudyInstanceUID += ".jpg"
#-----------------------------------------------------------------------------------------------------
label_cols=df_train.columns.tolist()
label_cols.remove("StudyInstanceUID")
label_cols.remove("PatientID")
datagen=ImageDataGenerator(rescale=1./255.)
test_datagen=ImageDataGenerator(rescale=1./255.)
train_generator=datagen.flow_from_dataframe(
dataframe=df_train[:21000],
directory=comp_dir+"train",
x_col="StudyInstanceUID",
y_col=label_cols,
batch_size=batch_size,
seed=42,
shuffle=True,
color_mode="rgb",
class_mode="raw",
target_size=(image_size,image_size),
interpolation="bilinear")
valid_generator=test_datagen.flow_from_dataframe(
dataframe=df_train[21000:],
directory=comp_dir+"train",
x_col="StudyInstanceUID",
y_col=label_cols,
batch_size=batch_size,
seed=42,
shuffle=True,
color_mode="rgb",
class_mode="raw",
target_size=(image_size,image_size),
interpolation="bilinear")
test_generator=test_datagen.flow_from_dataframe(
dataframe=df_test,
directory=comp_dir+"test",
x_col="StudyInstanceUID",
batch_size=1,
seed=42,
shuffle=False,
color_mode="rgb",
class_mode=None,
target_size=(image_size,image_size),
interpolation="bilinear")
#-----------------------------------------------------------------------------------------------------
base_model = ResNet50(include_top=False,
weights=None,
input_shape=(image_size, image_size, 3))
base_model.load_weights("../input/keras-pretrained-models/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5", by_name=True)
base_model.trainable = False
#-----------------------------------------------------------------------------------------------------
inp = Input(shape = (image_size,image_size,3))
x = base_model(inp)
x = Flatten()(x)
output1 = Dense(1, activation = 'sigmoid')(x)
output2 = Dense(1, activation = 'sigmoid')(x)
output3 = Dense(1, activation = 'sigmoid')(x)
output4 = Dense(1, activation = 'sigmoid')(x)
output5 = Dense(1, activation = 'sigmoid')(x)
output6 = Dense(1, activation = 'sigmoid')(x)
output7 = Dense(1, activation = 'sigmoid')(x)
output8 = Dense(1, activation = 'sigmoid')(x)
output9 = Dense(1, activation = 'sigmoid')(x)
output10 = Dense(1, activation = 'sigmoid')(x)
output11 = Dense(1, activation = 'sigmoid')(x)
model = Model(inp,[output1,output2,output3,output4,output5,output6,output7,output8,output9,output10,output11])
sgd = SGD(lr=learn_rate, momentum=.9, nesterov=False)
model.compile(optimizer=sgd,
loss = ["binary_crossentropy" for i in range(11)],
metrics = ["accuracy"])
def generator_wrapper(generator):
for batch_x,batch_y in generator:
yield (batch_x,[batch_y[:,i] for i in range(11)])
STEP_SIZE_TRAIN = train_generator.n//train_generator.batch_size
STEP_SIZE_VALID = valid_generator.n//valid_generator.batch_size
STEP_SIZE_TEST = test_generator.n//test_generator.batch_size
history = model.fit_generator(generator=generator_wrapper(train_generator),
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=generator_wrapper(valid_generator),
validation_steps=STEP_SIZE_VALID,
epochs=num_epochs,verbose=2)
test_generator.reset()
pred = model.predict_generator(test_generator,
steps=STEP_SIZE_TEST,
verbose=1)
# Create Submission df
df_submission = pd.DataFrame(np.squeeze(pred)).transpose()
df_submission.rename(columns=dict(zip([str(i) for i in range(12)], label_cols)))
df_submission["StudyInstanceUID"] = test_files
df_submission.to_csv("submission.csv", index=False)
epochs = range(1,num_epochs)
plt.plot(history.history['loss'], label='Training Set')
plt.plot(history.history['val_loss'], label='Validation Data)')
plt.title('Training and Validation loss')
plt.ylabel('MAE')
plt.xlabel('Num Epochs')
plt.legend(loc="upper left")
plt.show()
plt.savefig("loss.png") | big_model.py | import pandas as pd
import numpy as np
import tensorflow as tf
import os
import matplotlib.pyplot as plt
import seaborn as sns
import PIL
from typing import List
# EfficientNet
from tensorflow.keras.applications import EfficientNetB7, ResNet50
from tensorflow.keras.applications.efficientnet import preprocess_input
# Data Augmentation
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Model Layers
from tensorflow.keras import Model, Input
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D , Flatten, Dropout, BatchNormalization
# Compiling and Callbacks
from tensorflow.keras.optimizers import SGD,Adam
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping
#-----------------------------------------------------------------------------------------------------
# Competition Directory
comp_dir="/kaggle/input/ranzcr-clip-catheter-line-classification/"
# Get Training Data Labels
df_train=pd.read_csv(comp_dir+"train.csv").sample(frac=1).reset_index(drop=True)
# Get Training/Testing Data Paths
test_files = os.listdir(comp_dir+"test")
df_test = pd.DataFrame({"StudyInstanceUID": test_files})
image_size = 512
batch_size = 16
num_epochs = 12
learn_rate = 1e-03
df_train.StudyInstanceUID += ".jpg"
#-----------------------------------------------------------------------------------------------------
label_cols=df_train.columns.tolist()
label_cols.remove("StudyInstanceUID")
label_cols.remove("PatientID")
datagen=ImageDataGenerator(rescale=1./255.)
test_datagen=ImageDataGenerator(rescale=1./255.)
train_generator=datagen.flow_from_dataframe(
dataframe=df_train[:21000],
directory=comp_dir+"train",
x_col="StudyInstanceUID",
y_col=label_cols,
batch_size=batch_size,
seed=42,
shuffle=True,
color_mode="rgb",
class_mode="raw",
target_size=(image_size,image_size),
interpolation="bilinear")
valid_generator=test_datagen.flow_from_dataframe(
dataframe=df_train[21000:],
directory=comp_dir+"train",
x_col="StudyInstanceUID",
y_col=label_cols,
batch_size=batch_size,
seed=42,
shuffle=True,
color_mode="rgb",
class_mode="raw",
target_size=(image_size,image_size),
interpolation="bilinear")
test_generator=test_datagen.flow_from_dataframe(
dataframe=df_test,
directory=comp_dir+"test",
x_col="StudyInstanceUID",
batch_size=1,
seed=42,
shuffle=False,
color_mode="rgb",
class_mode=None,
target_size=(image_size,image_size),
interpolation="bilinear")
#-----------------------------------------------------------------------------------------------------
base_model = ResNet50(include_top=False,
weights=None,
input_shape=(image_size, image_size, 3))
base_model.load_weights("../input/keras-pretrained-models/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5", by_name=True)
base_model.trainable = False
#-----------------------------------------------------------------------------------------------------
inp = Input(shape = (image_size,image_size,3))
x = base_model(inp)
x = Flatten()(x)
output1 = Dense(1, activation = 'sigmoid')(x)
output2 = Dense(1, activation = 'sigmoid')(x)
output3 = Dense(1, activation = 'sigmoid')(x)
output4 = Dense(1, activation = 'sigmoid')(x)
output5 = Dense(1, activation = 'sigmoid')(x)
output6 = Dense(1, activation = 'sigmoid')(x)
output7 = Dense(1, activation = 'sigmoid')(x)
output8 = Dense(1, activation = 'sigmoid')(x)
output9 = Dense(1, activation = 'sigmoid')(x)
output10 = Dense(1, activation = 'sigmoid')(x)
output11 = Dense(1, activation = 'sigmoid')(x)
model = Model(inp,[output1,output2,output3,output4,output5,output6,output7,output8,output9,output10,output11])
sgd = SGD(lr=learn_rate, momentum=.9, nesterov=False)
model.compile(optimizer=sgd,
loss = ["binary_crossentropy" for i in range(11)],
metrics = ["accuracy"])
def generator_wrapper(generator):
for batch_x,batch_y in generator:
yield (batch_x,[batch_y[:,i] for i in range(11)])
STEP_SIZE_TRAIN = train_generator.n//train_generator.batch_size
STEP_SIZE_VALID = valid_generator.n//valid_generator.batch_size
STEP_SIZE_TEST = test_generator.n//test_generator.batch_size
history = model.fit_generator(generator=generator_wrapper(train_generator),
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=generator_wrapper(valid_generator),
validation_steps=STEP_SIZE_VALID,
epochs=num_epochs,verbose=2)
test_generator.reset()
pred = model.predict_generator(test_generator,
steps=STEP_SIZE_TEST,
verbose=1)
# Create Submission df
df_submission = pd.DataFrame(np.squeeze(pred)).transpose()
df_submission.rename(columns=dict(zip([str(i) for i in range(12)], label_cols)))
df_submission["StudyInstanceUID"] = test_files
df_submission.to_csv("submission.csv", index=False)
epochs = range(1,num_epochs)
plt.plot(history.history['loss'], label='Training Set')
plt.plot(history.history['val_loss'], label='Validation Data)')
plt.title('Training and Validation loss')
plt.ylabel('MAE')
plt.xlabel('Num Epochs')
plt.legend(loc="upper left")
plt.show()
plt.savefig("loss.png") | 0.672117 | 0.378402 |
import sys
import os
import json
import math
def init_static(jsonfile, output_dir=""):
    """Convert a 2-D layout plan (JSON list of items) into 3-D scene fixtures.

    Reads `jsonfile`, computes position/rotation/scale for every item, and
    writes `<output_dir>/static_fixtures.json` with the shape
    ``{"tracks": [], "static": [...]}``.

    Args:
        jsonfile (str): path to the plan JSON — a list of dicts with keys
            'bbox', 'type', 'points', 'model', 'name', 'direction'.
        output_dir (str): directory that receives static_fixtures.json.
    """
    with open(jsonfile, "r") as f:
        plan = json.loads(f.read())
    # Euler rotations for the four supported facings (constant; hoisted out
    # of the per-item loop, which rebuilt it every iteration).
    direction = {
        "upwards": [math.pi/2, math.pi, 0],
        "downwards": [math.pi/2, 0, 0],
        "leftwards": [math.pi/2, -math.pi/2, 0],
        "rightwards": [math.pi/2, math.pi/2, 0]
    }
    tracks = []
    static = []
    for item in plan:
        bbox = item['bbox']
        # Offsets recenter the item on its bounding box (x/y displacement).
        x_offset = -(bbox[2]-bbox[0])/2 - bbox[0]
        y_offset = -(bbox[3]-bbox[1])/2 - bbox[1]
        points = item['points']
        x0, y0 = points[0][0], points[0][1]
        x1, y1 = points[1][0], points[1][1]
        model_detailes = {
            'model': item['model'],
            'position': [(x0 + x1)/2 + x_offset, (y0 + y1)/2 + y_offset, 350],
            'name': item['name'],
            'scale': [1000, 1000, 1000],
            # Copy so items never share (and cannot mutate) the same list.
            'direction': list(direction[item['direction']]),
        }
        # Type ids: 17 forklift, 18 agv, 19 manup_truck, 20 operating_platform,
        # 21 warehouse_operator, 22 convey_line. Mobile types (17-19) keep
        # the default scale.
        type_id = item['type']
        if type_id == 20:  # operating_platform: stretch to the drawn rectangle
            if item['direction'] in ('leftwards', 'rightwards'):
                model_detailes['scale'] = [(x1 - x0)/1.1, 1000, (y1 - y0)/0.7]
            else:
                model_detailes['scale'] = [(x1 - x0)/0.7, 1000, (y1 - y0)/1.1]
        elif type_id == 21:  # warehouse_operator: fixed human-sized scale
            model_detailes['scale'] = [24.5, 24.5, 24.5]
        elif type_id == 22:  # convey_line: long axis follows the facing
            # The previous unused `ratio` computation could divide by zero on
            # degenerate (zero-height/width) rectangles; removed.
            if item['direction'] in ('leftwards', 'rightwards'):
                model_detailes['scale'] = [(y1 - y0), 1000, (x1 - x0)/3]
            else:
                model_detailes['scale'] = [(x1 - x0), 1000, (y1 - y0)/3]
            print(item['direction'], model_detailes['scale'])
        static.append(model_detailes)
    output = {'tracks': tracks, 'static': static}
    json_path = os.path.join(output_dir, "static_fixtures.json")
    with open(json_path, 'w') as f:
        f.write(json.dumps(output))
if __name__ == '__main__':
    # Usage: python static_fixtures.py <plan.json> <output_dir>
    # (Removed dead commented-out path code and stray non-Python residue that
    # had been fused onto the final line.)
    jsonfile = sys.argv[1]
    output_dir = sys.argv[2]
    init_static(jsonfile, output_dir)
import os
import json
import math
def init_static(jsonfile, output_dir=""):
    """Convert a 2-D layout plan (JSON list of items) into 3-D scene fixtures.

    Reads `jsonfile`, computes position/rotation/scale for every item, and
    writes `<output_dir>/static_fixtures.json` with the shape
    ``{"tracks": [], "static": [...]}``.

    Args:
        jsonfile (str): path to the plan JSON — a list of dicts with keys
            'bbox', 'type', 'points', 'model', 'name', 'direction'.
        output_dir (str): directory that receives static_fixtures.json.
    """
    with open(jsonfile, "r") as f:
        plan = json.loads(f.read())
    # Euler rotations for the four supported facings (constant; hoisted out
    # of the per-item loop, which rebuilt it every iteration).
    direction = {
        "upwards": [math.pi/2, math.pi, 0],
        "downwards": [math.pi/2, 0, 0],
        "leftwards": [math.pi/2, -math.pi/2, 0],
        "rightwards": [math.pi/2, math.pi/2, 0]
    }
    tracks = []
    static = []
    for item in plan:
        bbox = item['bbox']
        # Offsets recenter the item on its bounding box (x/y displacement).
        x_offset = -(bbox[2]-bbox[0])/2 - bbox[0]
        y_offset = -(bbox[3]-bbox[1])/2 - bbox[1]
        points = item['points']
        x0, y0 = points[0][0], points[0][1]
        x1, y1 = points[1][0], points[1][1]
        model_detailes = {
            'model': item['model'],
            'position': [(x0 + x1)/2 + x_offset, (y0 + y1)/2 + y_offset, 350],
            'name': item['name'],
            'scale': [1000, 1000, 1000],
            # Copy so items never share (and cannot mutate) the same list.
            'direction': list(direction[item['direction']]),
        }
        # Type ids: 17 forklift, 18 agv, 19 manup_truck, 20 operating_platform,
        # 21 warehouse_operator, 22 convey_line. Mobile types (17-19) keep
        # the default scale.
        type_id = item['type']
        if type_id == 20:  # operating_platform: stretch to the drawn rectangle
            if item['direction'] in ('leftwards', 'rightwards'):
                model_detailes['scale'] = [(x1 - x0)/1.1, 1000, (y1 - y0)/0.7]
            else:
                model_detailes['scale'] = [(x1 - x0)/0.7, 1000, (y1 - y0)/1.1]
        elif type_id == 21:  # warehouse_operator: fixed human-sized scale
            model_detailes['scale'] = [24.5, 24.5, 24.5]
        elif type_id == 22:  # convey_line: long axis follows the facing
            # The previous unused `ratio` computation could divide by zero on
            # degenerate (zero-height/width) rectangles; removed.
            if item['direction'] in ('leftwards', 'rightwards'):
                model_detailes['scale'] = [(y1 - y0), 1000, (x1 - x0)/3]
            else:
                model_detailes['scale'] = [(x1 - x0), 1000, (y1 - y0)/3]
            print(item['direction'], model_detailes['scale'])
        static.append(model_detailes)
    output = {'tracks': tracks, 'static': static}
    json_path = os.path.join(output_dir, "static_fixtures.json")
    with open(json_path, 'w') as f:
        f.write(json.dumps(output))
if __name__ == '__main__':
    # Usage: python static_fixtures.py <plan.json> <output_dir>
    #path = os.path.abspath(os.path.dirname(__file__))
    #default_session_path = os.path.abspath(os.path.join(path, "session"))
    jsonfile = sys.argv[1]
    output_dir = sys.argv[2]
    init_static(jsonfile, output_dir)
from __future__ import unicode_literals
from django import forms
from django.forms.models import model_to_dict
from django.shortcuts import render
from django.http import HttpResponse, HttpRequest, JsonResponse
from django.views.generic import TemplateView
from django.core import serializers
from django.utils import timezone
from .models import *
from .WeatherReportSimulator import Simulator
from .WeatherReportSimulator.Multithreading import SimulationThreadManager
from .db_interface import *
class SimulationForm(forms.Form):
    """Parameters controlling one weather-report simulation run.

    All four values are forwarded to SimulationThreadManager when a run is
    started from SimulationView.post.
    """
    # NOTE(review): presumably these are simulated-time durations — confirm
    # the units against SimulationThreadManager.
    flight_time = forms.FloatField(label='flight_time', initial=10)
    report_time = forms.FloatField(label='report_time', initial=20)
    update_time = forms.FloatField(label='update_time', initial=1)
    time_per_update = forms.FloatField(label='time_per_update', initial=100)
class SimulationView(TemplateView):
    """Web UI for starting, stopping and pausing the weather simulation.

    All state lives in class-level attributes, so one simulation instance is
    shared by every request served by this process.
    """
    # NOTE(review): class-level mutable state is unsynchronized and not
    # shared across worker processes — confirm single-worker deployment.
    sim_state = 'stopped'  # one of 'stopped' | 'running' | 'paused'
    cur_flight_time = 10
    cur_report_time = 20
    cur_update_time = 1
    cur_time_per_update = 100
    model_aircrafts = {}
    simulation_thread = None  # SimulationThreadManager while running/paused

    def get(self, request: HttpRequest) -> HttpResponse:
        """Render the control page with the current state and parameters."""
        form = SimulationForm()  # NOTE(review): created but never passed to the template
        return render(request, 'simulation.html',
        {
        'sim_state': SimulationView.sim_state,
        'cur_flight_time': SimulationView.cur_flight_time,
        'cur_report_time': SimulationView.cur_report_time,
        'cur_update_time': SimulationView.cur_update_time,
        'cur_time_per_update': SimulationView.cur_time_per_update
        })

    def post(self, request: HttpRequest) -> HttpResponse:
        """Handle the start/stop toggle and the pause/unpause toggle."""
        form = SimulationForm(request.POST)
        if 'start_stop' in request.POST:
            # Toggle: a running/paused simulation is stopped; a stopped one
            # is (re)started with the submitted parameters.
            if SimulationView.sim_state in ['running', 'paused']:
                # Unpause first so the thread can observe the stop request.
                # NOTE(review): raises AttributeError if sim_state says
                # running but simulation_thread is None — the None check
                # below comes too late to protect this call.
                SimulationView.simulation_thread.unpause()
                if SimulationView.simulation_thread is not None:
                    SimulationView.simulation_thread.stop()
                    SimulationView.simulation_thread = None
                SimulationView.sim_state = 'stopped'
            elif SimulationView.sim_state == 'stopped':
                if form.is_valid():
                    # Remember the submitted values so the page re-renders
                    # with the active configuration.
                    flight_time = form.cleaned_data['flight_time']
                    SimulationView.cur_flight_time = flight_time
                    report_time = form.cleaned_data['report_time']
                    SimulationView.cur_report_time = report_time
                    update_time = form.cleaned_data['update_time']
                    SimulationView.cur_update_time = update_time
                    time_per_update = form.cleaned_data['time_per_update']
                    SimulationView.cur_time_per_update = time_per_update
                    t = SimulationThreadManager(
                    flight_time, report_time, update_time, time_per_update, 1)
                    t.start()
                    SimulationView.simulation_thread = t
                    SimulationView.sim_state = 'running'
        elif 'pause' in request.POST:
            # Toggle between paused and running; no-op when stopped.
            if SimulationView.sim_state == 'running':
                SimulationView.sim_state = 'paused'
                SimulationView.simulation_thread.pause()
            elif SimulationView.sim_state == 'paused':
                SimulationView.simulation_thread.unpause()
                SimulationView.sim_state = 'running'
        return render(request, 'simulation.html',
        {
        'sim_state': SimulationView.sim_state,
        'cur_flight_time': SimulationView.cur_flight_time,
        'cur_report_time': SimulationView.cur_report_time,
        'cur_update_time': SimulationView.cur_update_time,
        'cur_time_per_update': SimulationView.cur_time_per_update
        })
def display(request: HttpRequest) -> HttpResponse:
    """Render a paginated HTML table of one simulation database table.

    Query params: table (name), start (offset, default 0),
    max (page size; negative = no limit).
    """
    max_entries = safe_cast(request.GET.get('max', -1), int, -1)
    start_index = safe_cast(request.GET.get('start', 0), int, 0)
    table_name = request.GET.get('table', '')
    # Map each exposed table name to its model class and displayed columns.
    tables = {
        'aircraft': (Aircraft, ['aircraft_type', 'weight']),
        'airports': (Airport, ['airport_code', 'latitude', 'longitude', 'altitude']),
        'flights': (Flight, ['identifier', 'active', 'start_time',
                             'latitude', 'longitude', 'bearing', 'altitude']),
        'reports': (WeatherReport, ['time', 'latitude', 'longitude', 'altitude',
                                    'wind_x', 'wind_y', 'tke']),
    }
    if table_name not in tables:
        return HttpResponse('Invalid table name {}'.format(table_name))
    model_cls, db_attrs = tables[table_name]
    # A None stop bound means "to the end", matching the unlimited case.
    stop = None if max_entries < 0 else start_index + max_entries
    entries = [[getattr(e, attr) for attr in db_attrs]
               for e in model_cls.objects.all()[start_index:stop]]
    return render(request, 'display_db.html',
                  {'entries': entries,
                   'db_attrs': db_attrs,
                   'table': table_name,
                   'max_entries': max_entries,
                   'start_index': start_index,
                   'end_index': start_index + len(entries) - 1})
def query(request: HttpRequest) -> HttpResponse:
    """Return rows of one simulation table as JSON.

    Query params: table (name), id (filter on primary key when >= 0),
    start (offset), max (page size; negative = no limit).
    Unknown tables yield an empty entry list rather than an error.
    """
    max_entries = safe_cast(request.GET.get('max', -1), int, -1)
    start_index = safe_cast(request.GET.get('start', 0), int, 0)
    row_id = safe_cast(request.GET.get('id', -1), int, -1)
    table_name = request.GET.get('table', '')
    managers = {
        'airplanes': Aircraft.objects,
        'airports': Airport.objects,
        'reports': WeatherReport.objects,
    }
    if table_name in managers:
        entries = managers[table_name]
    elif table_name == 'flights':
        # Flights are special-cased: only active ones are exposed.
        entries = Flight.objects.filter(active=True)
    else:
        return JsonResponse({"entries": []})
    if row_id >= 0:
        entries = entries.filter(id=row_id)
    stop = None if max_entries < 0 else start_index + max_entries
    rows = [model_to_dict(r) for r in entries.all()[start_index:stop]]
    return JsonResponse({"entries": rows})
def index(request: HttpRequest) -> HttpResponse:
    """Render the landing page (no template context)."""
    return render(request, 'index.html', {})
def safe_cast(val, typ, default):
    """Cast ``val`` with ``typ``, returning ``default`` if conversion fails.

    Args:
        val: value to convert (e.g. a query-string value).
        typ: conversion callable such as int or float.
        default: fallback returned when the conversion raises.
    """
    try:
        return typ(val)
    # TypeError added: typ(None) raises TypeError, not ValueError, and this
    # helper is fed raw request values.
    except (ValueError, TypeError):
        return default
from django import forms
from django.forms.models import model_to_dict
from django.shortcuts import render
from django.http import HttpResponse, HttpRequest, JsonResponse
from django.views.generic import TemplateView
from django.core import serializers
from django.utils import timezone
from .models import *
from .WeatherReportSimulator import Simulator
from .WeatherReportSimulator.Multithreading import SimulationThreadManager
from .db_interface import *
class SimulationForm(forms.Form):
    """Parameters controlling one weather-report simulation run.

    All four values are forwarded to SimulationThreadManager when a run is
    started from SimulationView.post.
    """
    # NOTE(review): presumably these are simulated-time durations — confirm
    # the units against SimulationThreadManager.
    flight_time = forms.FloatField(label='flight_time', initial=10)
    report_time = forms.FloatField(label='report_time', initial=20)
    update_time = forms.FloatField(label='update_time', initial=1)
    time_per_update = forms.FloatField(label='time_per_update', initial=100)
class SimulationView(TemplateView):
    """Web UI for starting, stopping and pausing the weather simulation.

    All state lives in class-level attributes, so one simulation instance is
    shared by every request served by this process.
    """
    # NOTE(review): class-level mutable state is unsynchronized and not
    # shared across worker processes — confirm single-worker deployment.
    sim_state = 'stopped'  # one of 'stopped' | 'running' | 'paused'
    cur_flight_time = 10
    cur_report_time = 20
    cur_update_time = 1
    cur_time_per_update = 100
    model_aircrafts = {}
    simulation_thread = None  # SimulationThreadManager while running/paused

    def get(self, request: HttpRequest) -> HttpResponse:
        """Render the control page with the current state and parameters."""
        form = SimulationForm()  # NOTE(review): created but never passed to the template
        return render(request, 'simulation.html',
        {
        'sim_state': SimulationView.sim_state,
        'cur_flight_time': SimulationView.cur_flight_time,
        'cur_report_time': SimulationView.cur_report_time,
        'cur_update_time': SimulationView.cur_update_time,
        'cur_time_per_update': SimulationView.cur_time_per_update
        })

    def post(self, request: HttpRequest) -> HttpResponse:
        """Handle the start/stop toggle and the pause/unpause toggle."""
        form = SimulationForm(request.POST)
        if 'start_stop' in request.POST:
            # Toggle: a running/paused simulation is stopped; a stopped one
            # is (re)started with the submitted parameters.
            if SimulationView.sim_state in ['running', 'paused']:
                # Unpause first so the thread can observe the stop request.
                # NOTE(review): raises AttributeError if sim_state says
                # running but simulation_thread is None — the None check
                # below comes too late to protect this call.
                SimulationView.simulation_thread.unpause()
                if SimulationView.simulation_thread is not None:
                    SimulationView.simulation_thread.stop()
                    SimulationView.simulation_thread = None
                SimulationView.sim_state = 'stopped'
            elif SimulationView.sim_state == 'stopped':
                if form.is_valid():
                    # Remember the submitted values so the page re-renders
                    # with the active configuration.
                    flight_time = form.cleaned_data['flight_time']
                    SimulationView.cur_flight_time = flight_time
                    report_time = form.cleaned_data['report_time']
                    SimulationView.cur_report_time = report_time
                    update_time = form.cleaned_data['update_time']
                    SimulationView.cur_update_time = update_time
                    time_per_update = form.cleaned_data['time_per_update']
                    SimulationView.cur_time_per_update = time_per_update
                    t = SimulationThreadManager(
                    flight_time, report_time, update_time, time_per_update, 1)
                    t.start()
                    SimulationView.simulation_thread = t
                    SimulationView.sim_state = 'running'
        elif 'pause' in request.POST:
            # Toggle between paused and running; no-op when stopped.
            if SimulationView.sim_state == 'running':
                SimulationView.sim_state = 'paused'
                SimulationView.simulation_thread.pause()
            elif SimulationView.sim_state == 'paused':
                SimulationView.simulation_thread.unpause()
                SimulationView.sim_state = 'running'
        return render(request, 'simulation.html',
        {
        'sim_state': SimulationView.sim_state,
        'cur_flight_time': SimulationView.cur_flight_time,
        'cur_report_time': SimulationView.cur_report_time,
        'cur_update_time': SimulationView.cur_update_time,
        'cur_time_per_update': SimulationView.cur_time_per_update
        })
def display(request: HttpRequest) -> HttpResponse:
    """Render a paginated HTML table of one simulation database table.

    Query params: table (name), start (offset, default 0),
    max (page size; negative = no limit).
    """
    max_entries = safe_cast(request.GET.get('max', -1), int, -1)
    start_index = safe_cast(request.GET.get('start', 0), int, 0)
    table_name = request.GET.get('table', '')
    # Map each exposed table name to its model class and displayed columns.
    tables = {
        'aircraft': (Aircraft, ['aircraft_type', 'weight']),
        'airports': (Airport, ['airport_code', 'latitude', 'longitude', 'altitude']),
        'flights': (Flight, ['identifier', 'active', 'start_time',
                             'latitude', 'longitude', 'bearing', 'altitude']),
        'reports': (WeatherReport, ['time', 'latitude', 'longitude', 'altitude',
                                    'wind_x', 'wind_y', 'tke']),
    }
    if table_name not in tables:
        return HttpResponse('Invalid table name {}'.format(table_name))
    model_cls, db_attrs = tables[table_name]
    # A None stop bound means "to the end", matching the unlimited case.
    stop = None if max_entries < 0 else start_index + max_entries
    entries = [[getattr(e, attr) for attr in db_attrs]
               for e in model_cls.objects.all()[start_index:stop]]
    return render(request, 'display_db.html',
                  {'entries': entries,
                   'db_attrs': db_attrs,
                   'table': table_name,
                   'max_entries': max_entries,
                   'start_index': start_index,
                   'end_index': start_index + len(entries) - 1})
def query(request: HttpRequest) -> HttpResponse:
    """Return rows of one simulation table as JSON.

    Query params: table (name), id (filter on primary key when >= 0),
    start (offset), max (page size; negative = no limit).
    Unknown tables yield an empty entry list rather than an error.
    """
    max_entries = safe_cast(request.GET.get('max', -1), int, -1)
    start_index = safe_cast(request.GET.get('start', 0), int, 0)
    row_id = safe_cast(request.GET.get('id', -1), int, -1)
    table_name = request.GET.get('table', '')
    managers = {
        'airplanes': Aircraft.objects,
        'airports': Airport.objects,
        'reports': WeatherReport.objects,
    }
    if table_name in managers:
        entries = managers[table_name]
    elif table_name == 'flights':
        # Flights are special-cased: only active ones are exposed.
        entries = Flight.objects.filter(active=True)
    else:
        return JsonResponse({"entries": []})
    if row_id >= 0:
        entries = entries.filter(id=row_id)
    stop = None if max_entries < 0 else start_index + max_entries
    rows = [model_to_dict(r) for r in entries.all()[start_index:stop]]
    return JsonResponse({"entries": rows})
def index(request: HttpRequest) -> HttpResponse:
    """Render the landing page (no template context)."""
    return render(request, 'index.html', {})
def safe_cast(val, typ, default):
    """Cast ``val`` with ``typ``, returning ``default`` if conversion fails.

    Args:
        val: value to convert (e.g. a query-string value).
        typ: conversion callable such as int or float.
        default: fallback returned when the conversion raises.
    """
    try:
        return typ(val)
    # TypeError added: typ(None) raises TypeError, not ValueError, and this
    # helper is fed raw request values.
    except (ValueError, TypeError):
        return default
import copy
import datetime
import importlib
import verboselogs, logging
logger = verboselogs.VerboseLogger(__name__)
import random
import sys
from contextlib import contextmanager
import numpy as np
import pandas as pd
from sklearn.model_selection import ParameterGrid
from sqlalchemy.orm import sessionmaker
from triage.util.random import generate_python_random_seed
from triage.component.results_schema import Model, FeatureImportance
from triage.component.catwalk.exceptions import BaselineFeatureNotInMatrix
from triage.tracking import built_model, skipped_model, errored_model
from .model_grouping import ModelGrouper
from .feature_importances import get_feature_importances
from .utils import (
filename_friendly_hash,
retrieve_model_id_from_hash,
db_retry,
save_db_objects,
)
# Sentinel "feature name" stored when a model class exposes no standard
# feature-importance attribute (see ModelTrainer._save_feature_importances).
NO_FEATURE_IMPORTANCE = (
    "Algorithm does not support a standard way" + " to calculate feature importance."
)
def flatten_grid_config(grid_config):
    """Expand a {classpath: parameter-grid} config into trainable pairs.

    Yields: (tuple) classpath and one concrete hyperparameter dict
    """
    for class_path, parameter_config in grid_config.items():
        yield from (
            (class_path, parameters)
            for parameters in ParameterGrid(parameter_config)
        )
class ModelTrainer:
    """Trains a series of classifiers using the same training set

    Args:
        experiment_hash (string) foreign key to the triage_metadata.experiments table
        model_storage_engine (catwalk.storage.ModelStorageEngine) cache for
            trained model pickles
        db_engine (sqlalchemy.engine)
        model_grouper (ModelGrouper, optional) assigns model group ids;
            a default-configured ModelGrouper is used when omitted
        replace (bool) whether or not to replace existing versions of models
        run_id (int, optional) experiment-run id used by tracking callbacks
    """

    def __init__(
        self,
        experiment_hash,
        model_storage_engine,
        db_engine,
        model_grouper=None,
        replace=True,
        run_id=None,
    ):
        self.experiment_hash = experiment_hash
        self.model_storage_engine = model_storage_engine
        self.model_grouper = model_grouper or ModelGrouper()
        self.db_engine = db_engine
        self.replace = replace
        self.run_id = run_id
    @property
    def sessionmaker(self):
        """Session factory bound to this trainer's engine.

        NOTE(review): a fresh sessionmaker is constructed on every access;
        cheap, but could be cached if profiling ever shows it matters.
        """
        return sessionmaker(bind=self.db_engine)
def unique_parameters(self, parameters):
return {key: parameters[key] for key in parameters.keys() if key != "n_jobs"}
def _model_hash(self, matrix_metadata, class_path, parameters, random_seed):
"""Generates a unique identifier for a trained model
based on attributes of the model that together define
equivalence; in other words, if we train a second model with these
same attributes there would be no reason to keep the old one)
Args:
class_path (string): a full class path for the classifier
parameters (dict): all hyperparameters to be passed to the classifier
random_seed (int) an integer suitable for seeding the random generator before training
Returns: (string) a unique identifier
"""
unique = {
"className": class_path,
"parameters": self.unique_parameters(parameters),
"project_path": self.model_storage_engine.project_storage.project_path,
"training_metadata": matrix_metadata,
"random_seed": random_seed,
}
logger.spam(f"Creating model hash from unique data {unique}")
return filename_friendly_hash(unique)
def _train(self, matrix_store, class_path, parameters):
"""Fit a model to a training set. Works on any modeling class that
is available in this package's environment and implements .fit
Args:
class_path (string) A full classpath to the model class
parameters (dict) hyperparameters to give to the model constructor
Returns:
tuple of (fitted model, list of column names without label)
"""
module_name, class_name = class_path.rsplit(".", 1)
module = importlib.import_module(module_name)
cls = getattr(module, class_name)
instance = cls(**parameters)
return instance.fit(matrix_store.design_matrix, matrix_store.labels)
    @db_retry
    def _save_feature_importances(self, model_id, feature_importances, feature_names):
        """Saves feature importances to the database.

        Deletes any existing feature importances for the given model_id
        (replace semantics).

        Args:
            model_id (int) The database id for the model
            feature_importances (np.ndarray, maybe). Calculated feature importances
                for the model; any non-ndarray value means none were found
            feature_names (list) Feature names for the corresponding entries in feature_importances
        """
        # Wipe importances from any previous save of this model.
        self.db_engine.execute(
            "delete from train_results.feature_importances where model_id = %s",
            model_id,
        )
        db_objects = []
        if isinstance(feature_importances, np.ndarray):
            temp_df = pd.DataFrame({"feature_importance": feature_importances})
            features_index = temp_df.index.tolist()
            # Dense ranks with the highest importance ranked 1; pct=True
            # yields the percentile form of the same ranking.
            rankings_abs = temp_df["feature_importance"].rank(
                method="dense", ascending=False
            )
            rankings_pct = temp_df["feature_importance"].rank(
                method="dense", ascending=False, pct=True
            )
            for feature_index, importance, rank_abs, rank_pct in zip(
                features_index, feature_importances, rankings_abs, rankings_pct
            ):
                db_objects.append(
                    FeatureImportance(
                        model_id=model_id,
                        # round to keep stored values stable across backends
                        feature_importance=round(float(importance), 10),
                        feature=feature_names[feature_index],
                        rank_abs=int(rank_abs),
                        rank_pct=round(float(rank_pct), 10),
                    )
                )
        # get_feature_importances was not able to find
        # feature importances
        else:
            # Store a single sentinel row so downstream queries still find
            # an entry for this model.
            db_objects.append(
                FeatureImportance(
                    model_id=model_id,
                    feature_importance=0,
                    feature=NO_FEATURE_IMPORTANCE,
                    rank_abs=0,
                    rank_pct=0,
                )
            )
        save_db_objects(self.db_engine, db_objects)
    @db_retry
    def _write_model_to_db(
        self,
        class_path,
        parameters,
        feature_names,
        model_hash,
        trained_model,
        model_group_id,
        model_size,
        misc_db_parameters,
    ):
        """Writes model and feature importance data to a database

        Will overwrite the data of any previous versions
        (any existing model that shares a hash)

        If the replace flag on the object is set, the existing version of the model
        will have its non-unique attributes (e.g. timestamps) updated,
        and feature importances fully replaced.

        If the replace flag on the object is not set, the existing model metadata
        and feature importances will be used.

        Args:
            class_path (string) A full classpath to the model class
            parameters (dict) hyperparameters to give to the model constructor
            feature_names (list) feature names in order given to model
            model_hash (string) a unique id for the model
            trained_model (object) a trained model object
            model_group_id (int) the unique id for the model group
            model_size (float) the size of the stored model in kB
            misc_db_parameters (dict) params to pass through to the database

        Returns: (int) the model's database id
        """
        model_id = retrieve_model_id_from_hash(self.db_engine, model_hash)
        if model_id and not self.replace:
            # Reuse path: metadata and importances from the prior run stand.
            logger.notice(
                f"Metadata for model {model_id} found in database. Reusing model metadata."
            )
            return model_id
        else:
            model = Model(
                model_hash=model_hash,
                model_type=class_path,
                hyperparameters=parameters,
                model_group_id=model_group_id,
                built_by_experiment=self.experiment_hash,
                built_in_experiment_run=self.run_id,
                model_size=model_size,
                **misc_db_parameters,
            )
            session = self.sessionmaker()
            if model_id:
                # Same hash seen before: keep the id, refresh everything else.
                logger.notice(
                    f"Found model {model_id}, updating non-unique attributes"
                )
                model.model_id = model_id
                session.merge(model)
                session.commit()
            else:
                session.add(model)
                session.commit()
                # id is only assigned by the database after the commit
                model_id = model.model_id
                logger.notice(f"Model {model_id}, not found from previous runs. Adding the new model")
            session.close()
            logger.spam(f"Saving feature importances for model_id {model_id}")
            self._save_feature_importances(
                model_id, get_feature_importances(trained_model), feature_names
            )
            logger.debug(f"Saved feature importances for model_id {model_id}")
            return model_id
    def _train_and_store_model(
        self, matrix_store, class_path, parameters, model_hash, misc_db_parameters, random_seed
    ):
        """Train a model, cache it, and write metadata to a database

        Args:
            matrix_store(catwalk.storage.MatrixStore) a matrix and metadata
            class_path (string) A full classpath to the model class
            parameters (dict) hyperparameters to give to the model constructor
            model_hash (string) a unique id for the model
            misc_db_parameters (dict) params to pass through to the database
            random_seed (int) seed applied to the global RNG before training

        Returns: (int) a database id for the model
        """
        # Seed the global RNG so training is reproducible for this hash.
        random.seed(random_seed)
        misc_db_parameters["random_seed"] = random_seed
        misc_db_parameters["run_time"] = datetime.datetime.now().isoformat()
        logger.debug(f"Training and storing model for matrix uuid {matrix_store.uuid}")
        trained_model = self._train(matrix_store, class_path, parameters)
        unique_parameters = self.unique_parameters(parameters)
        model_group_id = self.model_grouper.get_model_group_id(
            class_path, unique_parameters, matrix_store.metadata, self.db_engine
        )
        logger.debug(
            f"Trained model: hash {model_hash}, model group {model_group_id} "
        )
        # Writing the model to storage, then getting its size in kilobytes.
        # NOTE(review): sys.getsizeof is shallow — this underestimates the
        # true size of most estimators; confirm whether that is acceptable.
        self.model_storage_engine.write(trained_model, model_hash)
        model_size = sys.getsizeof(trained_model) / (1024.0)
        logger.spam(f"Cached model: {model_hash}")
        model_id = self._write_model_to_db(
            class_path,
            unique_parameters,
            matrix_store.columns(include_label=False),
            model_hash,
            trained_model,
            model_group_id,
            model_size,
            misc_db_parameters,
        )
        logger.debug(f"Wrote model {model_id} [{model_hash}] to db")
        return model_id
    @contextmanager
    def cache_models(self):
        """Caches each trained model in memory as it is written to storage.

        Must be used as a context manager.
        The cache is cleared when the context manager goes out of scope
        (delegates entirely to the storage engine's own context manager).
        """
        with self.model_storage_engine.cache_models():
            yield
    def generate_trained_models(self, grid_config, misc_db_parameters, matrix_store):
        """Train and store configured models, yielding the ids one by one

        Args:
            grid_config (dict) of format {classpath: hyperparameter dicts}
                example: { 'sklearn.ensemble.RandomForestClassifier': {
                    'n_estimators': [1,10,100,1000,10000],
                    'max_depth': [1,5,10,20,50,100],
                    'max_features': ['sqrt','log2'],
                    'min_samples_split': [2,5,10]
                } }
            misc_db_parameters (dict) params to pass through to the database
            matrix_store (catwalk.storage.MatrixStore) a matrix and metadata

        Yields: (int) model ids (may include None for skipped/errored tasks,
            per process_train_task)
        """
        for train_task in self.generate_train_tasks(
            grid_config, misc_db_parameters, matrix_store
        ):
            yield self.process_train_task(**train_task)
def train_models(self, grid_config, misc_db_parameters, matrix_store):
"""Train and store configured models
Args:
grid_config (dict) of format {classpath: hyperparameter dicts}
example: { 'sklearn.ensemble.RandomForestClassifier': {
'n_estimators': [1,10,100,1000,10000],
'max_depth': [1,5,10,20,50,100],
'max_features': ['sqrt','log2'],
'min_samples_split': [2,5,10]
} }
misc_db_parameters (dict) params to pass through to the database
matrix_store(catwalk.storage.MatrixStore) a matrix and metadata
Returns:
(list) of model ids
"""
return [
model_id
for model_id in self.generate_trained_models(
grid_config, misc_db_parameters, matrix_store
)
]
def process_train_task(
self, matrix_store, class_path, parameters, model_hash, misc_db_parameters, random_seed=None
):
"""Trains and stores a model, or skips it and returns the existing id
Args:
matrix_store (catwalk.storage.MatrixStore) a matrix and metadata
class_path (string): a full class path for the classifier
parameters (dict): all hyperparameters to be passed to the classifier
model_hash (string) a unique id for the model
misc_db_parameters (dict) params to pass through to the database
random_seed (int, optional) a number to use to seed the random number generator before training. if none given, will generate one to store
Returns: (int) model id
"""
try:
saved_model_id = retrieve_model_id_from_hash(self.db_engine, model_hash)
if (
not self.replace
and self.model_storage_engine.exists(model_hash)
and saved_model_id
):
logger.debug(f"Skipping model {saved_model_id} {class_path} {parameters}")
if self.run_id:
skipped_model(self.run_id, self.db_engine)
return saved_model_id
if self.replace:
reason = "replace flag has been set"
elif not self.model_storage_engine.exists(model_hash):
reason = "model pickle not found in store"
elif not saved_model_id:
reason = "model metadata not found"
logger.debug(
f"Training {class_path} with parameters {parameters}"
f"(reason to train: {reason})"
)
try:
model_id = self._train_and_store_model(
matrix_store, class_path, parameters, model_hash, misc_db_parameters, random_seed
)
except BaselineFeatureNotInMatrix:
logger.warning(
"Tried to train baseline model without required feature in matrix. Skipping."
)
if self.run_id:
skipped_model(self.run_id, self.db_engine)
model_id = None
if self.run_id:
built_model(self.run_id, self.db_engine)
return model_id
except Exception as exc:
logger.exception(f"Model training for matrix {matrix_store.uuid}, estimator {class_path}/{parameters}, model hash {model_hash} failed.")
errored_model(self.run_id, self.db_engine)
    @staticmethod
    def flattened_grid_config(grid_config):
        """Expose module-level flatten_grid_config so subclasses can override
        how a grid is expanded into (classpath, parameters) pairs."""
        return flatten_grid_config(grid_config)
def generate_train_tasks(self, grid_config, misc_db_parameters, matrix_store=None):
"""Train and store configured models, yielding the ids one by one
Args:
grid_config (dict) of format {classpath: hyperparameter dicts}
example: { 'sklearn.ensemble.RandomForestClassifier': {
'n_estimators': [1,10,100,1000,10000],
'max_depth': [1,5,10,20,50,100],
'max_features': ['sqrt','log2'],
'min_samples_split': [2,5,10]
} }
misc_db_parameters (dict) params to pass through to the database
Returns: (list) training task definitions, suitable for process_train_task kwargs
"""
matrix_store = matrix_store or self.matrix_store
misc_db_parameters = copy.deepcopy(misc_db_parameters)
misc_db_parameters["batch_run_time"] = datetime.datetime.now().isoformat()
misc_db_parameters["train_end_time"] = matrix_store.metadata["end_time"]
misc_db_parameters["training_label_timespan"] = matrix_store.metadata[
"label_timespan"
]
misc_db_parameters["train_matrix_uuid"] = matrix_store.uuid
tasks = []
for class_path, parameters in self.flattened_grid_config(grid_config):
random_seed = generate_python_random_seed()
model_hash = self._model_hash(
matrix_store.metadata,
class_path,
parameters,
random_seed
)
logger.spam(
f"Computed model hash for {class_path} "
f"with parameters {parameters}: {model_hash}"
)
if any(task["model_hash"] == model_hash for task in tasks):
logger.info(
f"Skipping "
f"Classpath: {class_path}({parameters}) "
f"[{model_hash}] because another "
f"equivalent one found in this batch."
)
continue
tasks.append(
{
"matrix_store": matrix_store,
"class_path": class_path,
"parameters": parameters,
"model_hash": model_hash,
"misc_db_parameters": misc_db_parameters,
"random_seed": random_seed
}
)
logger.debug(f"Task added for model {class_path}({parameters}) [{model_hash}]")
logger.debug(f"Found {len(tasks)} unique model training tasks")
return tasks | src/triage/component/catwalk/model_trainers.py | import copy
import datetime
import importlib
import verboselogs, logging
logger = verboselogs.VerboseLogger(__name__)
import random
import sys
from contextlib import contextmanager
import numpy as np
import pandas as pd
from sklearn.model_selection import ParameterGrid
from sqlalchemy.orm import sessionmaker
from triage.util.random import generate_python_random_seed
from triage.component.results_schema import Model, FeatureImportance
from triage.component.catwalk.exceptions import BaselineFeatureNotInMatrix
from triage.tracking import built_model, skipped_model, errored_model
from .model_grouping import ModelGrouper
from .feature_importances import get_feature_importances
from .utils import (
filename_friendly_hash,
retrieve_model_id_from_hash,
db_retry,
save_db_objects,
)
# Sentinel "feature name" stored when a model class exposes no standard
# feature-importance attribute (see ModelTrainer._save_feature_importances).
NO_FEATURE_IMPORTANCE = (
    "Algorithm does not support a standard way" + " to calculate feature importance."
)
def flatten_grid_config(grid_config):
    """Expand a {classpath: parameter-grid} config into trainable pairs.

    Yields: (tuple) classpath and one concrete hyperparameter dict
    """
    for class_path, parameter_config in grid_config.items():
        yield from (
            (class_path, parameters)
            for parameters in ParameterGrid(parameter_config)
        )
class ModelTrainer:
    """Trains a series of classifiers using the same training set

    Args:
        experiment_hash (string) foreign key to the triage_metadata.experiments table
        model_storage_engine (catwalk.storage.ModelStorageEngine) cache for
            trained model pickles
        db_engine (sqlalchemy.engine)
        model_grouper (ModelGrouper, optional) assigns model group ids;
            a default-configured ModelGrouper is used when omitted
        replace (bool) whether or not to replace existing versions of models
        run_id (int, optional) experiment-run id used by tracking callbacks
    """

    def __init__(
        self,
        experiment_hash,
        model_storage_engine,
        db_engine,
        model_grouper=None,
        replace=True,
        run_id=None,
    ):
        self.experiment_hash = experiment_hash
        self.model_storage_engine = model_storage_engine
        self.model_grouper = model_grouper or ModelGrouper()
        self.db_engine = db_engine
        self.replace = replace
        self.run_id = run_id
    @property
    def sessionmaker(self):
        """Session factory bound to this trainer's engine.

        NOTE(review): a fresh sessionmaker is constructed on every access;
        cheap, but could be cached if profiling ever shows it matters.
        """
        return sessionmaker(bind=self.db_engine)
def unique_parameters(self, parameters):
return {key: parameters[key] for key in parameters.keys() if key != "n_jobs"}
def _model_hash(self, matrix_metadata, class_path, parameters, random_seed):
"""Generates a unique identifier for a trained model
based on attributes of the model that together define
equivalence; in other words, if we train a second model with these
same attributes there would be no reason to keep the old one)
Args:
class_path (string): a full class path for the classifier
parameters (dict): all hyperparameters to be passed to the classifier
random_seed (int) an integer suitable for seeding the random generator before training
Returns: (string) a unique identifier
"""
unique = {
"className": class_path,
"parameters": self.unique_parameters(parameters),
"project_path": self.model_storage_engine.project_storage.project_path,
"training_metadata": matrix_metadata,
"random_seed": random_seed,
}
logger.spam(f"Creating model hash from unique data {unique}")
return filename_friendly_hash(unique)
def _train(self, matrix_store, class_path, parameters):
"""Fit a model to a training set. Works on any modeling class that
is available in this package's environment and implements .fit
Args:
class_path (string) A full classpath to the model class
parameters (dict) hyperparameters to give to the model constructor
Returns:
tuple of (fitted model, list of column names without label)
"""
module_name, class_name = class_path.rsplit(".", 1)
module = importlib.import_module(module_name)
cls = getattr(module, class_name)
instance = cls(**parameters)
return instance.fit(matrix_store.design_matrix, matrix_store.labels)
    @db_retry
    def _save_feature_importances(self, model_id, feature_importances, feature_names):
        """Saves feature importances to the database.

        Deletes any existing feature importances for the given model_id
        before inserting, so re-saving never leaves stale rows behind.

        Args:
            model_id (int) The database id for the model
            feature_importances (np.ndarray, maybe). Calculated feature importances
                for the model; any non-ndarray value means none were found
            feature_names (list) Feature names for the corresponding entries in feature_importances
        """
        # Remove prior rows for this model before re-inserting.
        self.db_engine.execute(
            "delete from train_results.feature_importances where model_id = %s",
            model_id,
        )
        db_objects = []
        if isinstance(feature_importances, np.ndarray):
            temp_df = pd.DataFrame({"feature_importance": feature_importances})
            features_index = temp_df.index.tolist()
            # Dense ranking: tied importances share a rank and rank numbers
            # are not skipped; descending so the most important is rank 1.
            rankings_abs = temp_df["feature_importance"].rank(
                method="dense", ascending=False
            )
            rankings_pct = temp_df["feature_importance"].rank(
                method="dense", ascending=False, pct=True
            )
            for feature_index, importance, rank_abs, rank_pct in zip(
                features_index, feature_importances, rankings_abs, rankings_pct
            ):
                db_objects.append(
                    FeatureImportance(
                        model_id=model_id,
                        feature_importance=round(float(importance), 10),
                        feature=feature_names[feature_index],
                        rank_abs=int(rank_abs),
                        rank_pct=round(float(rank_pct), 10),
                    )
                )
        # get_feature_importances was not able to find feature importances;
        # record a single sentinel row so the model still has an entry.
        else:
            db_objects.append(
                FeatureImportance(
                    model_id=model_id,
                    feature_importance=0,
                    feature=NO_FEATURE_IMPORTANCE,
                    rank_abs=0,
                    rank_pct=0,
                )
            )
        save_db_objects(self.db_engine, db_objects)
    @db_retry
    def _write_model_to_db(
        self,
        class_path,
        parameters,
        feature_names,
        model_hash,
        trained_model,
        model_group_id,
        model_size,
        misc_db_parameters,
    ):
        """Writes model and feature importance data to a database.

        Will overwrite the data of any previous versions
        (any existing model that shares a hash).
        If the replace flag on the object is set, the existing version of the model
        will have its non-unique attributes (e.g. timestamps) updated,
        and feature importances fully replaced.
        If the replace flag on the object is not set, the existing model metadata
        and feature importances will be used.

        Args:
            class_path (string) A full classpath to the model class
            parameters (dict) hyperparameters to give to the model constructor
            feature_names (list) feature names in order given to model
            model_hash (string) a unique id for the model
            trained_model (object) a trained model object
            model_group_id (int) the unique id for the model group
            model_size (float) the size of the stored model in kB
            misc_db_parameters (dict) params to pass through to the database

        Returns: (int) the model id
        """
        model_id = retrieve_model_id_from_hash(self.db_engine, model_hash)
        if model_id and not self.replace:
            # Reuse path: metadata (and therefore feature importances) are
            # left untouched.
            logger.notice(
                f"Metadata for model {model_id} found in database. Reusing model metadata."
            )
            return model_id
        else:
            model = Model(
                model_hash=model_hash,
                model_type=class_path,
                hyperparameters=parameters,
                model_group_id=model_group_id,
                built_by_experiment=self.experiment_hash,
                built_in_experiment_run=self.run_id,
                model_size=model_size,
                **misc_db_parameters,
            )
            session = self.sessionmaker()
            if model_id:
                # Same hash already stored: merge so timestamps and other
                # non-unique attributes are refreshed in place.
                logger.notice(
                    f"Found model {model_id}, updating non-unique attributes"
                )
                model.model_id = model_id
                session.merge(model)
                session.commit()
            else:
                # Brand-new model: insert and read back the generated id.
                session.add(model)
                session.commit()
                model_id = model.model_id
                logger.notice(f"Model {model_id}, not found from previous runs. Adding the new model")
            # NOTE(review): the session is not closed if commit raises —
            # a try/finally would be safer; left unchanged here.
            session.close()
            logger.spam(f"Saving feature importances for model_id {model_id}")
            self._save_feature_importances(
                model_id, get_feature_importances(trained_model), feature_names
            )
            logger.debug(f"Saved feature importances for model_id {model_id}")
            return model_id
    def _train_and_store_model(
        self, matrix_store, class_path, parameters, model_hash, misc_db_parameters, random_seed
    ):
        """Train a model, cache it, and write metadata to a database.

        Args:
            matrix_store(catwalk.storage.MatrixStore) a matrix and metadata
            class_path (string) A full classpath to the model class
            parameters (dict) hyperparameters to give to the model constructor
            model_hash (string) a unique id for the model
            misc_db_parameters (dict) params to pass through to the database
            random_seed (int) seed applied to the global ``random`` module
                before training and recorded alongside the model

        Returns: (int) a database id for the model
        """
        # Seed the global RNG so training is reproducible given the stored seed.
        random.seed(random_seed)
        misc_db_parameters["random_seed"] = random_seed
        misc_db_parameters["run_time"] = datetime.datetime.now().isoformat()
        logger.debug(f"Training and storing model for matrix uuid {matrix_store.uuid}")
        trained_model = self._train(matrix_store, class_path, parameters)
        # n_jobs is excluded so parallelism settings don't affect grouping.
        unique_parameters = self.unique_parameters(parameters)
        model_group_id = self.model_grouper.get_model_group_id(
            class_path, unique_parameters, matrix_store.metadata, self.db_engine
        )
        logger.debug(
            f"Trained model: hash {model_hash}, model group {model_group_id} "
        )
        # Writing the model to storage, then getting its size in kilobytes.
        # NOTE(review): sys.getsizeof is shallow, so this likely understates
        # the true serialized size of most estimators — confirm intent.
        self.model_storage_engine.write(trained_model, model_hash)
        model_size = sys.getsizeof(trained_model) / (1024.0)
        logger.spam(f"Cached model: {model_hash}")
        model_id = self._write_model_to_db(
            class_path,
            unique_parameters,
            matrix_store.columns(include_label=False),
            model_hash,
            trained_model,
            model_group_id,
            model_size,
            misc_db_parameters,
        )
        logger.debug(f"Wrote model {model_id} [{model_hash}] to db")
        return model_id
    @contextmanager
    def cache_models(self):
        """Caches each trained model in memory as it is written to storage.

        Must be used as a context manager.
        The cache is cleared when the context manager goes out of scope.
        """
        # Delegate the caching lifecycle to the storage engine's own context
        # manager; models written inside this block stay cached until exit.
        with self.model_storage_engine.cache_models():
            yield
def generate_trained_models(self, grid_config, misc_db_parameters, matrix_store):
"""Train and store configured models, yielding the ids one by one
Args:
grid_config (dict) of format {classpath: hyperparameter dicts}
example: { 'sklearn.ensemble.RandomForestClassifier': {
'n_estimators': [1,10,100,1000,10000],
'max_depth': [1,5,10,20,50,100],
'max_features': ['sqrt','log2'],
'min_samples_split': [2,5,10]
} }
misc_db_parameters (dict) params to pass through to the database
matrix_store (catwalk.storage.MatrixStore) a matrix and metadata
Yields: (int) model ids
"""
for train_task in self.generate_train_tasks(
grid_config, misc_db_parameters, matrix_store
):
yield self.process_train_task(**train_task)
def train_models(self, grid_config, misc_db_parameters, matrix_store):
"""Train and store configured models
Args:
grid_config (dict) of format {classpath: hyperparameter dicts}
example: { 'sklearn.ensemble.RandomForestClassifier': {
'n_estimators': [1,10,100,1000,10000],
'max_depth': [1,5,10,20,50,100],
'max_features': ['sqrt','log2'],
'min_samples_split': [2,5,10]
} }
misc_db_parameters (dict) params to pass through to the database
matrix_store(catwalk.storage.MatrixStore) a matrix and metadata
Returns:
(list) of model ids
"""
return [
model_id
for model_id in self.generate_trained_models(
grid_config, misc_db_parameters, matrix_store
)
]
def process_train_task(
self, matrix_store, class_path, parameters, model_hash, misc_db_parameters, random_seed=None
):
"""Trains and stores a model, or skips it and returns the existing id
Args:
matrix_store (catwalk.storage.MatrixStore) a matrix and metadata
class_path (string): a full class path for the classifier
parameters (dict): all hyperparameters to be passed to the classifier
model_hash (string) a unique id for the model
misc_db_parameters (dict) params to pass through to the database
random_seed (int, optional) a number to use to seed the random number generator before training. if none given, will generate one to store
Returns: (int) model id
"""
try:
saved_model_id = retrieve_model_id_from_hash(self.db_engine, model_hash)
if (
not self.replace
and self.model_storage_engine.exists(model_hash)
and saved_model_id
):
logger.debug(f"Skipping model {saved_model_id} {class_path} {parameters}")
if self.run_id:
skipped_model(self.run_id, self.db_engine)
return saved_model_id
if self.replace:
reason = "replace flag has been set"
elif not self.model_storage_engine.exists(model_hash):
reason = "model pickle not found in store"
elif not saved_model_id:
reason = "model metadata not found"
logger.debug(
f"Training {class_path} with parameters {parameters}"
f"(reason to train: {reason})"
)
try:
model_id = self._train_and_store_model(
matrix_store, class_path, parameters, model_hash, misc_db_parameters, random_seed
)
except BaselineFeatureNotInMatrix:
logger.warning(
"Tried to train baseline model without required feature in matrix. Skipping."
)
if self.run_id:
skipped_model(self.run_id, self.db_engine)
model_id = None
if self.run_id:
built_model(self.run_id, self.db_engine)
return model_id
except Exception as exc:
logger.exception(f"Model training for matrix {matrix_store.uuid}, estimator {class_path}/{parameters}, model hash {model_hash} failed.")
errored_model(self.run_id, self.db_engine)
    @staticmethod
    def flattened_grid_config(grid_config):
        """Flatten a grid config into (class_path, parameters) pairs.

        Thin wrapper around the module-level ``flatten_grid_config`` so the
        expansion is also reachable through the class.
        """
        return flatten_grid_config(grid_config)
def generate_train_tasks(self, grid_config, misc_db_parameters, matrix_store=None):
"""Train and store configured models, yielding the ids one by one
Args:
grid_config (dict) of format {classpath: hyperparameter dicts}
example: { 'sklearn.ensemble.RandomForestClassifier': {
'n_estimators': [1,10,100,1000,10000],
'max_depth': [1,5,10,20,50,100],
'max_features': ['sqrt','log2'],
'min_samples_split': [2,5,10]
} }
misc_db_parameters (dict) params to pass through to the database
Returns: (list) training task definitions, suitable for process_train_task kwargs
"""
matrix_store = matrix_store or self.matrix_store
misc_db_parameters = copy.deepcopy(misc_db_parameters)
misc_db_parameters["batch_run_time"] = datetime.datetime.now().isoformat()
misc_db_parameters["train_end_time"] = matrix_store.metadata["end_time"]
misc_db_parameters["training_label_timespan"] = matrix_store.metadata[
"label_timespan"
]
misc_db_parameters["train_matrix_uuid"] = matrix_store.uuid
tasks = []
for class_path, parameters in self.flattened_grid_config(grid_config):
random_seed = generate_python_random_seed()
model_hash = self._model_hash(
matrix_store.metadata,
class_path,
parameters,
random_seed
)
logger.spam(
f"Computed model hash for {class_path} "
f"with parameters {parameters}: {model_hash}"
)
if any(task["model_hash"] == model_hash for task in tasks):
logger.info(
f"Skipping "
f"Classpath: {class_path}({parameters}) "
f"[{model_hash}] because another "
f"equivalent one found in this batch."
)
continue
tasks.append(
{
"matrix_store": matrix_store,
"class_path": class_path,
"parameters": parameters,
"model_hash": model_hash,
"misc_db_parameters": misc_db_parameters,
"random_seed": random_seed
}
)
logger.debug(f"Task added for model {class_path}({parameters}) [{model_hash}]")
logger.debug(f"Found {len(tasks)} unique model training tasks")
return tasks | 0.638835 | 0.230065 |
# Lint as python3
"""Common configurable image manipulation methods for use in preprocessors."""
from typing import Callable, List, Optional, Sequence
import gin
from six.moves import zip
import tensorflow.compat.v1 as tf
def RandomCropImages(images, input_shape,
                     target_shape):
  """Crop a part of given shape from a random location in a list of images.

  The same randomly drawn offset is reused for every image in the list, so
  all crops are spatially aligned with each other.

  Args:
    images: List of tensors of shape [batch_size, h, w, c].
    input_shape: Shape [h, w, c] of the input images.
    target_shape: Shape [h, w] of the cropped output.

  Raises:
    ValueError: In case the either the input_shape or the target_shape have a
      wrong length.

  Returns:
    crops: List of cropped tensors of shape [batch_size] + target_shape.
  """
  if len(input_shape) != 3:
    raise ValueError(
        'The input shape has to be of the form (height, width, channels) '
        'but has len {}'.format(len(input_shape)))
  if len(target_shape) != 2:
    raise ValueError('The target shape has to be of the form (height, width) '
                     'but has len {}'.format(len(target_shape)))
  crop_height = int(target_shape[0])
  crop_width = int(target_shape[1])
  max_y = int(input_shape[0]) - crop_height
  max_x = int(input_shape[1]) - crop_width
  # The runtime assertions guarantee the crop fits inside the input image.
  with tf.control_dependencies(
      [tf.assert_greater_equal(max_x, 0),
       tf.assert_greater_equal(max_y, 0)]):
    offset_y = tf.random_uniform((), maxval=max_y + 1, dtype=tf.int32)
    offset_x = tf.random_uniform((), maxval=max_x + 1, dtype=tf.int32)
  crops = []
  for image in images:
    crops.append(
        tf.image.crop_to_bounding_box(image, offset_y, offset_x, crop_height,
                                      crop_width))
  return crops
def CenterCropImages(images, input_shape,
                     target_shape):
  """Take a central crop of given size from a list of images.

  Args:
    images: List of tensors of shape [batch_size, h, w, c].
    input_shape: Shape [h, w, c] of the input images.
    target_shape: Shape [h, w] of the cropped output.

  Raises:
    ValueError: If input_shape or target_shape have the wrong length.

  Returns:
    crops: List of cropped tensors of shape [batch_size] + target_shape.
  """
  if len(input_shape) != 3:
    raise ValueError(
        'The input shape has to be of the form (height, width, channels) '
        'but has len {}'.format(len(input_shape)))
  if len(target_shape) != 2:
    raise ValueError('The target shape has to be of the form (height, width) '
                     'but has len {}'.format(len(target_shape)))
  if input_shape[0] == target_shape[0] and input_shape[1] == target_shape[1]:
    # Nothing to crop; return a shallow copy of the input list.
    return [image for image in images]
  # Assert all images have the expected static height and width.
  assert_ops = []
  for image in images:
    assert_ops.append(
        tf.assert_equal(
            input_shape[:2],
            tf.shape(image)[1:3],
            # BUGFIX: the two literals previously concatenated to
            # "...heightfor CenterCropImages." (missing space).
            message=('All images must have same width and height '
                     'for CenterCropImages.')))
  offset_y = int(input_shape[0] - target_shape[0]) // 2
  offset_x = int(input_shape[1] - target_shape[1]) // 2
  with tf.control_dependencies(assert_ops):
    crops = [
        tf.image.crop_to_bounding_box(image, offset_y, offset_x,
                                      target_shape[0], target_shape[1])
        for image in images
    ]
  return crops
def CustomCropImages(images, input_shape,
                     target_shape,
                     target_locations):
  """Crop a list of images with a custom crop location and size.

  Args:
    images: List of tensors of shape [batch_size, h, w, c].
    input_shape: Shape [h, w, c] of the input images.
    target_shape: Shape [h, w] of the cropped output.
    target_locations: List of crop center coordinates tensors of shape [b, 2].

  Raises:
    ValueError: If the shapes have the wrong length, the number of images and
      locations differ, or the crop is larger than the input.

  Returns:
    crops: List of cropped tensors of shape [batch_size] + target_shape + [3].
  """
  if len(input_shape) != 3:
    raise ValueError(
        'The input shape has to be of the form (height, width, channels) '
        'but has len {}'.format(len(input_shape)))
  if len(target_shape) != 2:
    raise ValueError('The target shape has to be of the form (height, width) '
                     'but has len {}'.format(len(target_shape)))
  if len(images) != len(target_locations):
    raise ValueError('There should be one target location per image. Found {} '
                     'images for {} locations'.format(len(images),
                                                      len(target_locations)))
  if input_shape[0] == target_shape[0] and input_shape[1] == target_shape[1]:
    # Nothing to crop; return a shallow copy of the input list.
    return [image for image in images]
  if input_shape[0] < target_shape[0] or input_shape[1] < target_shape[1]:
    raise ValueError('The target shape {} is larger than the input image size '
                     '{}'.format(target_shape, input_shape[:2]))
  assert_ops = []
  for image, target_location in zip(images, target_locations):
    # Assert all images have the same shape.
    assert_ops.append(
        tf.assert_equal(
            input_shape[:2],
            tf.shape(image)[1:3],
            # BUGFIX: message named CenterCropImages (copy-paste) and was
            # missing a space between the two literals.
            message=('All images must have same width and height '
                     'for CustomCropImages.')))
  with tf.control_dependencies(assert_ops):
    crops = []
    for image, target_location in zip(images, target_locations):
      # If bounding box is outside of image boundaries, move it.
      # Incoming locations are read as column 0 = y, column 1 = x.
      x_coordinates = tf.slice(
          target_location,
          [0, 1], [tf.shape(target_location)[0], 1])
      y_coordinates = tf.slice(
          target_location,
          [0, 0], [tf.shape(target_location)[0], 1])
      # Clamp centers so the full crop window stays inside the image.
      x_coordinates = tf.math.maximum(
          tf.cast(x_coordinates, tf.float32),
          tf.cast(target_shape[1] // 2, tf.float32))
      y_coordinates = tf.math.maximum(
          tf.cast(y_coordinates, tf.float32),
          tf.cast(target_shape[0] // 2, tf.float32))
      x_coordinates = tf.math.minimum(
          tf.cast(x_coordinates, tf.float32),
          tf.cast(tf.shape(image)[2] - target_shape[1] // 2, tf.float32))
      y_coordinates = tf.math.minimum(
          tf.cast(y_coordinates, tf.float32),
          tf.cast(tf.shape(image)[1] - target_shape[0] // 2, tf.float32)
      )
      # NOTE(review): offsets are assembled as (x, y) here, while
      # tf.image.extract_glimpse documents offsets as (y, x) — confirm this
      # ordering against the TF version in use before relying on it.
      target_location = tf.concat([x_coordinates, y_coordinates], 1)
      crops.append(
          tf.image.extract_glimpse(image, target_shape, tf.cast(
              target_location, tf.float32), centered=False, normalized=False))
  return crops
@gin.configurable
def ApplyPhotometricImageDistortions(
    images,
    random_brightness = False,
    max_delta_brightness = 0.125,
    random_saturation = False,
    lower_saturation = 0.5,
    upper_saturation = 1.5,
    random_hue = False,
    max_delta_hue = 0.2,
    random_contrast = False,
    lower_contrast = 0.5,
    upper_contrast = 1.5,
    random_noise_level = 0.0,
    random_noise_apply_probability = 0.5):
  """Apply photometric distortions to the input images.

  Note: despite the historical docstring, `images` is treated as a *list* of
  image tensors; the list is modified in place and also returned. A single
  random value per distortion is shared by all images in the list, so the
  whole list is distorted consistently.

  Args:
    images: List of tensors of shape [batch_size, h, w, 3] to apply the
      random photometric distortions to. Mutated in place.
    random_brightness: If True; randomly adjust the brightness.
    max_delta_brightness: Float; maximum delta for the random value by which to
      adjust the brightness.
    random_saturation: If True; randomly adjust the saturation.
    lower_saturation: Float; lower bound of the range from which to chose a
      random value for the saturation.
    upper_saturation: Float; upper bound of the range from which to chose a
      random value for the saturation.
    random_hue: If True; randomly adjust the hue.
    max_delta_hue: Float; maximum delta for the random value by which to adjust
      the hue.
    random_contrast: If True; randomly adjust the contrast.
    lower_contrast: Float; lower bound of the range from which to chose a random
      value for the contrast.
    upper_contrast: Float; upper bound of the range from which to chose a random
      value for the contrast.
    random_noise_level: Standard deviation of the gaussian from which to sample
      random noise to be added to the images. If 0.0, no noise is added.
    random_noise_apply_probability: Probability of applying additive random
      noise to the images.

  Returns:
    images: List of tensors of shape [batch_size, h, w, 3] resulting from
      applying random photometric distortions to the inputs.
  """
  with tf.variable_scope('photometric_distortions'):
    # Adjust brightness to a random level.
    if random_brightness:
      # One delta for the whole list keeps the images mutually consistent.
      delta = tf.random_uniform([], -max_delta_brightness, max_delta_brightness)
      for i, image in enumerate(images):
        images[i] = tf.image.adjust_brightness(image, delta)
    # Adjust saturation to a random level.
    if random_saturation:
      lower = lower_saturation
      upper = upper_saturation
      saturation_factor = tf.random_uniform([], lower, upper)
      for i, image in enumerate(images):
        images[i] = tf.image.adjust_saturation(image, saturation_factor)
    # Randomly shift the hue.
    if random_hue:
      delta = tf.random_uniform([], -max_delta_hue, max_delta_hue)
      for i, image in enumerate(images):
        images[i] = tf.image.adjust_hue(image, delta)
    # Adjust contrast to a random level.
    if random_contrast:
      lower = lower_contrast
      upper = upper_contrast
      contrast_factor = tf.random_uniform([], lower, upper)
      for i, image in enumerate(images):
        images[i] = tf.image.adjust_contrast(image, contrast_factor)
    # Add random Gaussian noise.
    if random_noise_level:
      for i, image in enumerate(images):
        rnd_noise = tf.random_normal(tf.shape(image), stddev=random_noise_level)
        img_shape = tf.shape(image)
        # Closure pins the current tensor for tf.cond's lazy branches.
        def ImageClosure(value):
          return lambda: value
        # Noise is added with probability random_noise_apply_probability
        # (the clean branch is taken when the draw exceeds it).
        image = tf.cond(
            tf.greater(tf.random.uniform(()), random_noise_apply_probability),
            ImageClosure(image), ImageClosure(image + rnd_noise))
        images[i] = tf.reshape(image, img_shape)
    # Clip to valid range.
    for i, image in enumerate(images):
      images[i] = tf.clip_by_value(image, 0.0, 1.0)
  return images
@gin.configurable
def ApplyPhotometricImageDistortionsParallel(
    images,
    random_brightness = False,
    max_delta_brightness = 0.125,
    random_saturation = False,
    lower_saturation = 0.5,
    upper_saturation = 1.5,
    random_hue = False,
    max_delta_hue = 0.2,
    random_contrast = False,
    lower_contrast = 0.5,
    upper_contrast = 1.5,
    random_noise_level = 0.0,
    random_noise_apply_probability = 0.5,
    custom_distortion_fn = None):
  """Apply photometric distortions to the input images in parallel.

  Unlike ApplyPhotometricImageDistortions, each image in the batch gets its
  own independent random draws, since the distortion runs per-element under
  tf.map_fn.

  Args:
    images: Tensor of shape [batch_size, h, w, 3] containing a batch of images
      to apply the random photometric distortions to.
    random_brightness: If True; randomly adjust the brightness.
    max_delta_brightness: Float; maximum delta for the random value by which to
      adjust the brightness.
    random_saturation: If True; randomly adjust the saturation.
    lower_saturation: Float; lower bound of the range from which to chose a
      random value for the saturation.
    upper_saturation: Float; upper bound of the range from which to chose a
      random value for the saturation.
    random_hue: If True; randomly adjust the hue.
    max_delta_hue: Float; maximum delta for the random value by which to adjust
      the hue.
    random_contrast: If True; randomly adjust the contrast.
    lower_contrast: Float; lower bound of the range from which to chose a random
      value for the contrast.
    upper_contrast: Float; upper bound of the range from which to chose a random
      value for the contrast.
    random_noise_level: Standard deviation of the gaussian from which to sample
      random noise to be added to the images. If 0.0, no noise is added.
    random_noise_apply_probability: Probability of applying additive random
      noise to the images.
    custom_distortion_fn: A custom distortion fn that takes a tensor of shape
      [h, w, 3] and returns a tensor of the same size.

  Returns:
    images: Tensor of shape [batch_size, h, w, 3] containing a batch of images
      resulting from applying random photometric distortions to the inputs.
  """
  with tf.variable_scope('photometric_distortions'):
    # Applied to one [h, w, 3] image at a time via tf.map_fn below.
    def SingleImageDistortion(image):
      # Adjust brightness to a random level.
      if random_brightness:
        delta = tf.random_uniform([], -max_delta_brightness,
                                  max_delta_brightness)
        image = tf.image.adjust_brightness(image, delta)
      # Adjust saturation to a random level.
      if random_saturation:
        lower = lower_saturation
        upper = upper_saturation
        saturation_factor = tf.random_uniform([], lower, upper)
        image = tf.image.adjust_saturation(image, saturation_factor)
      # Randomly shift the hue.
      if random_hue:
        delta = tf.random_uniform([], -max_delta_hue, max_delta_hue)
        image = tf.image.adjust_hue(image, delta)
      # Adjust contrast to a random level.
      if random_contrast:
        lower = lower_contrast
        upper = upper_contrast
        contrast_factor = tf.random_uniform([], lower, upper)
        image = tf.image.adjust_contrast(image, contrast_factor)
      # Add random Gaussian noise.
      if random_noise_level:
        rnd_noise = tf.random_normal(tf.shape(image), stddev=random_noise_level)
        img_shape = tf.shape(image)
        # Closure pins the current tensor for tf.cond's lazy branches.
        def ImageClosure(value):
          return lambda: value
        # Noise is added with probability random_noise_apply_probability.
        image = tf.cond(
            tf.greater(tf.random.uniform(()), random_noise_apply_probability),
            ImageClosure(image), ImageClosure(image + rnd_noise))
        image = tf.reshape(image, img_shape)
      if custom_distortion_fn:
        image = custom_distortion_fn(image)
      # Clip to valid range.
      image = tf.clip_by_value(image, 0.0, 1.0)
      return image

    images = tf.map_fn(SingleImageDistortion, images)
    return images
@gin.configurable
def ApplyPhotometricImageDistortionsCheap(
    images):
  """Apply a cheap per-channel random gamma distortion to a batch of images.

  Args:
    images: Tensor of shape [batch_size, h, w, 3] containing a batch of images
      to apply the random photometric distortions to. Assumed to be normalized
      to range (0, 1), float32 encoding.

  Returns:
    images: Tensor of shape [batch_size, h, w, 3] containing a batch of images
      resulting from applying random photometric distortions to the inputs.
  """
  with tf.name_scope('photometric_distortion'):
    # Per-channel random gamma correction.
    # Lower gamma = brighter image, decreased contrast.
    # Higher gamma = dark image, increased contrast.
    distorted_channels = []
    for channel in tf.unstack(images, axis=-1):
      gamma = tf.random_uniform([], 0.5, 1.5)
      distorted_channels.append(channel**gamma)
    images = tf.stack(distorted_channels, axis=-1)
  return images
def ApplyRandomFlips(images):
  """Randomly flips images across x-axis and y-axis.

  A single coin flip per axis is drawn for the whole batch: this is
  consistent for the entire batch, which guarantees it'll be consistent for
  the episode, but will correlate flips across the batch. Seems fine for now.
  """
  with tf.name_scope('random_flips'):
    def _MaybeFlip(flip_fn, tensor):
      # Apply flip_fn with probability 0.5, otherwise pass through.
      coin = tf.random_uniform([]) > 0.5
      return tf.cond(coin, lambda: flip_fn(tensor), lambda: tensor)

    images = _MaybeFlip(tf.image.flip_left_right, images)
    images = _MaybeFlip(tf.image.flip_up_down, images)
  return images
@gin.configurable
def ApplyDepthImageDistortions(depth_images,
                               random_noise_level = 0.05,
                               random_noise_apply_probability = 0.5,
                               scaling_noise = True,
                               gamma_shape = 1000.0,
                               gamma_scale_inverse = 1000.0,
                               min_depth_allowed = 0.25,
                               max_depth_allowed = 2.5):
  """Apply photometric distortions to the input depth images.

  Note: `depth_images` is treated as a list of [batch_size, h, w, 1] tensors;
  the list is modified in place and also returned.

  Args:
    depth_images: List of tensors of shape [batch_size, h, w, 1] containing
      depth images to apply the random photometric distortions to.
    random_noise_level: The standard deviation of the Gaussian distribution for
      the noise that is applied to the depth image. When 0.0, then no noise is
      applied.
    random_noise_apply_probability: Probability of applying additive random
      noise to the images.
    scaling_noise: If True; sample a random variable from a Gamma distribution
      to scale the depth image.
    gamma_shape: Float; shape parameter of a Gamma distribution.
    gamma_scale_inverse: Float; inverse of scale parameter of a Gamma
      distribution.
    min_depth_allowed: Float; minimum clip value for depth.
    max_depth_allowed: Float; max clip value for depth.

  Returns:
    depth_images: List of tensors of shape [batch_size, h, w, 1] resulting
      from applying random photometric distortions to the inputs.
  """
  assert depth_images[0].get_shape().as_list()[-1] == 1
  with tf.variable_scope('distortions_depth_images'):
    # Add random Gaussian noise.
    if random_noise_level:
      for i, image in enumerate(depth_images):
        img_shape = tf.shape(image)
        rnd_noise = tf.random_normal(img_shape, stddev=random_noise_level)

        # Closure pins the current tensor for tf.cond's lazy branches.
        def ReturnImageTensor(value):
          return lambda: value

        if scaling_noise:
          alpha = tf.random_gamma([], gamma_shape, gamma_scale_inverse)
        else:
          # BUGFIX: `alpha` was unbound when scaling_noise=False, raising
          # NameError below; without scaling noise the image is unscaled.
          alpha = 1.0
        # Noise is applied with probability random_noise_apply_probability.
        image = tf.cond(
            tf.reduce_all(
                tf.greater(
                    tf.random.uniform([1]), random_noise_apply_probability)),
            ReturnImageTensor(image),
            ReturnImageTensor(alpha * image + rnd_noise))
        depth_images[i] = tf.reshape(image, img_shape)
    # Clip to valid range.
    for i, image in enumerate(depth_images):
      depth_images[i] = tf.clip_by_value(image, min_depth_allowed,
                                         max_depth_allowed)
  # BUGFIX: stray dataset-dump columns ("| preprocessors/... |") were fused
  # onto this return statement, a syntax error; return the list only.
  return depth_images
# Lint as python3
"""Common configurable image manipulation methods for use in preprocessors."""
from typing import Callable, List, Optional, Sequence
import gin
from six.moves import zip
import tensorflow.compat.v1 as tf
def RandomCropImages(images, input_shape,
                     target_shape):
  """Crop a part of given shape from a random location in a list of images.

  The same randomly drawn offset is reused for every image in the list, so
  all crops are spatially aligned with each other.

  Args:
    images: List of tensors of shape [batch_size, h, w, c].
    input_shape: Shape [h, w, c] of the input images.
    target_shape: Shape [h, w] of the cropped output.

  Raises:
    ValueError: In case the either the input_shape or the target_shape have a
      wrong length.

  Returns:
    crops: List of cropped tensors of shape [batch_size] + target_shape.
  """
  if len(input_shape) != 3:
    raise ValueError(
        'The input shape has to be of the form (height, width, channels) '
        'but has len {}'.format(len(input_shape)))
  if len(target_shape) != 2:
    raise ValueError('The target shape has to be of the form (height, width) '
                     'but has len {}'.format(len(target_shape)))
  crop_height = int(target_shape[0])
  crop_width = int(target_shape[1])
  max_y = int(input_shape[0]) - crop_height
  max_x = int(input_shape[1]) - crop_width
  # The runtime assertions guarantee the crop fits inside the input image.
  with tf.control_dependencies(
      [tf.assert_greater_equal(max_x, 0),
       tf.assert_greater_equal(max_y, 0)]):
    offset_y = tf.random_uniform((), maxval=max_y + 1, dtype=tf.int32)
    offset_x = tf.random_uniform((), maxval=max_x + 1, dtype=tf.int32)
  crops = []
  for image in images:
    crops.append(
        tf.image.crop_to_bounding_box(image, offset_y, offset_x, crop_height,
                                      crop_width))
  return crops
def CenterCropImages(images, input_shape,
                     target_shape):
  """Take a central crop of given size from a list of images.

  Args:
    images: List of tensors of shape [batch_size, h, w, c].
    input_shape: Shape [h, w, c] of the input images.
    target_shape: Shape [h, w] of the cropped output.

  Raises:
    ValueError: If input_shape or target_shape have the wrong length.

  Returns:
    crops: List of cropped tensors of shape [batch_size] + target_shape.
  """
  if len(input_shape) != 3:
    raise ValueError(
        'The input shape has to be of the form (height, width, channels) '
        'but has len {}'.format(len(input_shape)))
  if len(target_shape) != 2:
    raise ValueError('The target shape has to be of the form (height, width) '
                     'but has len {}'.format(len(target_shape)))
  if input_shape[0] == target_shape[0] and input_shape[1] == target_shape[1]:
    # Nothing to crop; return a shallow copy of the input list.
    return [image for image in images]
  # Assert all images have the expected static height and width.
  assert_ops = []
  for image in images:
    assert_ops.append(
        tf.assert_equal(
            input_shape[:2],
            tf.shape(image)[1:3],
            # BUGFIX: the two literals previously concatenated to
            # "...heightfor CenterCropImages." (missing space).
            message=('All images must have same width and height '
                     'for CenterCropImages.')))
  offset_y = int(input_shape[0] - target_shape[0]) // 2
  offset_x = int(input_shape[1] - target_shape[1]) // 2
  with tf.control_dependencies(assert_ops):
    crops = [
        tf.image.crop_to_bounding_box(image, offset_y, offset_x,
                                      target_shape[0], target_shape[1])
        for image in images
    ]
  return crops
def CustomCropImages(images, input_shape,
                     target_shape,
                     target_locations):
  """Crop a list of images with a custom crop location and size.

  Args:
    images: List of tensors of shape [batch_size, h, w, c].
    input_shape: Shape [h, w, c] of the input images.
    target_shape: Shape [h, w] of the cropped output.
    target_locations: List of crop center coordinates tensors of shape [b, 2].

  Raises:
    ValueError: If the shapes have the wrong length, the number of images and
      locations differ, or the crop is larger than the input.

  Returns:
    crops: List of cropped tensors of shape [batch_size] + target_shape + [3].
  """
  if len(input_shape) != 3:
    raise ValueError(
        'The input shape has to be of the form (height, width, channels) '
        'but has len {}'.format(len(input_shape)))
  if len(target_shape) != 2:
    raise ValueError('The target shape has to be of the form (height, width) '
                     'but has len {}'.format(len(target_shape)))
  if len(images) != len(target_locations):
    raise ValueError('There should be one target location per image. Found {} '
                     'images for {} locations'.format(len(images),
                                                      len(target_locations)))
  if input_shape[0] == target_shape[0] and input_shape[1] == target_shape[1]:
    # Nothing to crop; return a shallow copy of the input list.
    return [image for image in images]
  if input_shape[0] < target_shape[0] or input_shape[1] < target_shape[1]:
    raise ValueError('The target shape {} is larger than the input image size '
                     '{}'.format(target_shape, input_shape[:2]))
  assert_ops = []
  for image, target_location in zip(images, target_locations):
    # Assert all images have the same shape.
    assert_ops.append(
        tf.assert_equal(
            input_shape[:2],
            tf.shape(image)[1:3],
            # BUGFIX: message named CenterCropImages (copy-paste) and was
            # missing a space between the two literals.
            message=('All images must have same width and height '
                     'for CustomCropImages.')))
  with tf.control_dependencies(assert_ops):
    crops = []
    for image, target_location in zip(images, target_locations):
      # If bounding box is outside of image boundaries, move it.
      # Incoming locations are read as column 0 = y, column 1 = x.
      x_coordinates = tf.slice(
          target_location,
          [0, 1], [tf.shape(target_location)[0], 1])
      y_coordinates = tf.slice(
          target_location,
          [0, 0], [tf.shape(target_location)[0], 1])
      # Clamp centers so the full crop window stays inside the image.
      x_coordinates = tf.math.maximum(
          tf.cast(x_coordinates, tf.float32),
          tf.cast(target_shape[1] // 2, tf.float32))
      y_coordinates = tf.math.maximum(
          tf.cast(y_coordinates, tf.float32),
          tf.cast(target_shape[0] // 2, tf.float32))
      x_coordinates = tf.math.minimum(
          tf.cast(x_coordinates, tf.float32),
          tf.cast(tf.shape(image)[2] - target_shape[1] // 2, tf.float32))
      y_coordinates = tf.math.minimum(
          tf.cast(y_coordinates, tf.float32),
          tf.cast(tf.shape(image)[1] - target_shape[0] // 2, tf.float32)
      )
      # NOTE(review): offsets are assembled as (x, y) here, while
      # tf.image.extract_glimpse documents offsets as (y, x) — confirm this
      # ordering against the TF version in use before relying on it.
      target_location = tf.concat([x_coordinates, y_coordinates], 1)
      crops.append(
          tf.image.extract_glimpse(image, target_shape, tf.cast(
              target_location, tf.float32), centered=False, normalized=False))
  return crops
@gin.configurable
def ApplyPhotometricImageDistortions(
    images,
    random_brightness = False,
    max_delta_brightness = 0.125,
    random_saturation = False,
    lower_saturation = 0.5,
    upper_saturation = 1.5,
    random_hue = False,
    max_delta_hue = 0.2,
    random_contrast = False,
    lower_contrast = 0.5,
    upper_contrast = 1.5,
    random_noise_level = 0.0,
    random_noise_apply_probability = 0.5):
  """Apply photometric distortions to the input images.

  NOTE(review): although the Args section below describes `images` as a single
  [batch_size, h, w, 3] tensor, the body enumerates and index-assigns it, so
  it is actually treated as a mutable list of image tensors and is MODIFIED IN
  PLACE -- confirm against callers.

  Each enabled distortion draws ONE random parameter and applies it to every
  image in the list, i.e. the whole batch shares the same distortion strength.

  Args:
    images: Tensor of shape [batch_size, h, w, 3] containing a batch of images
      to apply the random photometric distortions to.
    random_brightness: If True; randomly adjust the brightness.
    max_delta_brightness: Float; maximum delta for the random value by which to
      adjust the brightness.
    random_saturation: If True; randomly adjust the saturation.
    lower_saturation: Float; lower bound of the range from which to chose a
      random value for the saturation.
    upper_saturation: Float; upper bound of the range from which to chose a
      random value for the saturation.
    random_hue: If True; randomly adjust the hue.
    max_delta_hue: Float; maximum delta for the random value by which to adjust
      the hue.
    random_contrast: If True; randomly adjust the contrast.
    lower_contrast: Float; lower bound of the range from which to chose a random
      value for the contrast.
    upper_contrast: Float; upper bound of the range from which to chose a random
      value for the contrast.
    random_noise_level: Standard deviation of the gaussian from which to sample
      random noise to be added to the images. If 0.0, no noise is added.
    random_noise_apply_probability: Probability of applying additive random
      noise to the images.

  Returns:
    images: Tensor of shape [batch_size, h, w, 3] containing a batch of images
      resulting from applying random photometric distortions to the inputs.
  """
  with tf.variable_scope('photometric_distortions'):
    # Adjust brightness to a random level.
    if random_brightness:
      delta = tf.random_uniform([], -max_delta_brightness, max_delta_brightness)
      for i, image in enumerate(images):
        images[i] = tf.image.adjust_brightness(image, delta)
    # Adjust saturation to a random level.
    if random_saturation:
      lower = lower_saturation
      upper = upper_saturation
      saturation_factor = tf.random_uniform([], lower, upper)
      for i, image in enumerate(images):
        images[i] = tf.image.adjust_saturation(image, saturation_factor)
    # Randomly shift the hue.
    if random_hue:
      delta = tf.random_uniform([], -max_delta_hue, max_delta_hue)
      for i, image in enumerate(images):
        images[i] = tf.image.adjust_hue(image, delta)
    # Adjust contrast to a random level.
    if random_contrast:
      lower = lower_contrast
      upper = upper_contrast
      contrast_factor = tf.random_uniform([], lower, upper)
      for i, image in enumerate(images):
        images[i] = tf.image.adjust_contrast(image, contrast_factor)
    # Add random Gaussian noise.
    if random_noise_level:
      for i, image in enumerate(images):
        rnd_noise = tf.random_normal(tf.shape(image), stddev=random_noise_level)
        img_shape = tf.shape(image)
        # Zero-argument closures for tf.cond; binding through a def (instead
        # of a bare lambda in the cond call) pins `image`/`image + rnd_noise`
        # at definition time.
        def ImageClosure(value):
          return lambda: value
        # Predicate is True (keep the clean image) with probability
        # 1 - random_noise_apply_probability, so noise is added with
        # probability random_noise_apply_probability.
        image = tf.cond(
            tf.greater(tf.random.uniform(()), random_noise_apply_probability),
            ImageClosure(image), ImageClosure(image + rnd_noise))
        # Re-assert the dynamic shape after tf.cond.
        images[i] = tf.reshape(image, img_shape)
    # Clip to valid range.
    for i, image in enumerate(images):
      images[i] = tf.clip_by_value(image, 0.0, 1.0)
  return images
@gin.configurable
def ApplyPhotometricImageDistortionsParallel(
    images,
    random_brightness = False,
    max_delta_brightness = 0.125,
    random_saturation = False,
    lower_saturation = 0.5,
    upper_saturation = 1.5,
    random_hue = False,
    max_delta_hue = 0.2,
    random_contrast = False,
    lower_contrast = 0.5,
    upper_contrast = 1.5,
    random_noise_level = 0.0,
    random_noise_apply_probability = 0.5,
    custom_distortion_fn = None):
  """Apply photometric distortions to the input images in parallel.

  Unlike ApplyPhotometricImageDistortions, the distortion subgraph is mapped
  over the batch with tf.map_fn, so the random ops inside it are re-executed
  per element and each image receives its own random distortion parameters.

  Args:
    images: Tensor of shape [batch_size, h, w, 3] containing a batch of images
      to apply the random photometric distortions to.
    random_brightness: If True; randomly adjust the brightness.
    max_delta_brightness: Float; maximum delta for the random value by which to
      adjust the brightness.
    random_saturation: If True; randomly adjust the saturation.
    lower_saturation: Float; lower bound of the range from which to chose a
      random value for the saturation.
    upper_saturation: Float; upper bound of the range from which to chose a
      random value for the saturation.
    random_hue: If True; randomly adjust the hue.
    max_delta_hue: Float; maximum delta for the random value by which to adjust
      the hue.
    random_contrast: If True; randomly adjust the contrast.
    lower_contrast: Float; lower bound of the range from which to chose a random
      value for the contrast.
    upper_contrast: Float; upper bound of the range from which to chose a random
      value for the contrast.
    random_noise_level: Standard deviation of the gaussian from which to sample
      random noise to be added to the images. If 0.0, no noise is added.
    random_noise_apply_probability: Probability of applying additive random
      noise to the images.
    custom_distortion_fn: A custom distortion fn that takes a tensor of shape
      [h, w, 3] and returns a tensor of the same size.

  Returns:
    images: Tensor of shape [batch_size, h, w, 3] containing a batch of images
      resulting from applying random photometric distortions to the inputs.
  """
  with tf.variable_scope('photometric_distortions'):
    def SingleImageDistortion(image):
      # Applies the enabled distortions, in a fixed order, to one [h, w, 3]
      # image.
      # Adjust brightness to a random level.
      if random_brightness:
        delta = tf.random_uniform([], -max_delta_brightness,
                                  max_delta_brightness)
        image = tf.image.adjust_brightness(image, delta)
      # Adjust saturation to a random level.
      if random_saturation:
        lower = lower_saturation
        upper = upper_saturation
        saturation_factor = tf.random_uniform([], lower, upper)
        image = tf.image.adjust_saturation(image, saturation_factor)
      # Randomly shift the hue.
      if random_hue:
        delta = tf.random_uniform([], -max_delta_hue, max_delta_hue)
        image = tf.image.adjust_hue(image, delta)
      # Adjust contrast to a random level.
      if random_contrast:
        lower = lower_contrast
        upper = upper_contrast
        contrast_factor = tf.random_uniform([], lower, upper)
        image = tf.image.adjust_contrast(image, contrast_factor)
      # Add random Gaussian noise with probability
      # random_noise_apply_probability (the cond predicate keeps the clean
      # image with probability 1 - p).
      if random_noise_level:
        rnd_noise = tf.random_normal(tf.shape(image), stddev=random_noise_level)
        img_shape = tf.shape(image)
        # Zero-argument closures for tf.cond, binding the value at
        # definition time.
        def ImageClosure(value):
          return lambda: value
        image = tf.cond(
            tf.greater(tf.random.uniform(()), random_noise_apply_probability),
            ImageClosure(image), ImageClosure(image + rnd_noise))
        # Re-assert the dynamic shape after tf.cond.
        image = tf.reshape(image, img_shape)
      if custom_distortion_fn:
        image = custom_distortion_fn(image)
      # Clip to valid range.
      image = tf.clip_by_value(image, 0.0, 1.0)
      return image

    images = tf.map_fn(SingleImageDistortion, images)
  return images
@gin.configurable
def ApplyPhotometricImageDistortionsCheap(
    images):
  """Cheap photometric augmentation via per-channel random gamma correction.

  A gamma below 1 brightens the image and decreases contrast; a gamma above 1
  darkens it and increases contrast. Each color channel draws an independent
  gamma in [0.5, 1.5].

  Args:
    images: Tensor of shape [batch_size, h, w, 3], float32, normalized to the
      range (0, 1).

  Returns:
    Tensor of shape [batch_size, h, w, 3] with the distortions applied.
  """
  with tf.name_scope('photometric_distortion'):
    distorted_channels = []
    for channel in tf.unstack(images, axis=-1):
      gamma = tf.random_uniform([], 0.5, 1.5)
      distorted_channels.append(channel**gamma)
    images = tf.stack(distorted_channels, axis=-1)
  return images
def ApplyRandomFlips(images):
  """Randomly mirror a batch of images horizontally and/or vertically.

  One coin flip per axis is drawn for the entire batch, which guarantees the
  flip is consistent for an episode but correlates flips across the batch.
  That trade-off is accepted for now.

  Args:
    images: Image tensor of shape [batch_size, h, w, c].

  Returns:
    Tensor of the same shape, possibly flipped along width and/or height.
  """
  with tf.name_scope('random_flips'):
    flip_horizontally = tf.random_uniform([]) > 0.5
    flip_vertically = tf.random_uniform([]) > 0.5
    images = tf.cond(flip_horizontally,
                     lambda: tf.image.flip_left_right(images),
                     lambda: images)
    images = tf.cond(flip_vertically,
                     lambda: tf.image.flip_up_down(images),
                     lambda: images)
  return images
@gin.configurable
def ApplyDepthImageDistortions(depth_images,
                               random_noise_level = 0.05,
                               random_noise_apply_probability = 0.5,
                               scaling_noise = True,
                               gamma_shape = 1000.0,
                               gamma_scale_inverse = 1000.0,
                               min_depth_allowed = 0.25,
                               max_depth_allowed = 2.5):
  """Apply photometric distortions to the input depth images.

  NOTE(review): `depth_images` is treated as a mutable list of [h, w, 1]
  depth tensors and is modified in place -- confirm against callers.

  Args:
    depth_images: Tensor of shape [batch_size, h, w, 1] containing a batch of
      depth images to apply the random photometric distortions to.
    random_noise_level: The standard deviation of the Gaussian distribution for
      the noise that is applied to the depth image. When 0.0, then no noise is
      applied.
    random_noise_apply_probability: Probability of applying additive random
      noise to the images.
    scaling_noise: If True; sample a random variable from a Gamma distribution
      to scale the depth image.
    gamma_shape: Float; shape parameter of a Gamma distribution.
    gamma_scale_inverse: Float; inverse of scale parameter of a Gamma
      distribution.
    min_depth_allowed: Float; minimum clip value for depth.
    max_depth_allowed: Float; max clip value for depth.

  Returns:
    depth_images: Tensor of shape [batch_size, h, w, 1] containing a
      batch of images resulting from applying random photometric distortions to
      the inputs.
  """
  assert depth_images[0].get_shape().as_list()[-1] == 1
  with tf.variable_scope('distortions_depth_images'):
    # Add random Gaussian noise (optionally with a random global scaling).
    if random_noise_level:
      for i, image in enumerate(depth_images):
        img_shape = tf.shape(image)
        rnd_noise = tf.random_normal(img_shape, stddev=random_noise_level)

        # Zero-argument closures for tf.cond, binding the value at
        # definition time.
        def ReturnImageTensor(value):
          return lambda: value

        if scaling_noise:
          # Gamma(1000, 1/1000) concentrates tightly around 1.0, i.e. a mild
          # multiplicative jitter of the whole depth map.
          alpha = tf.random_gamma([], gamma_shape, gamma_scale_inverse)
        else:
          # BUGFIX: previously, when scaling_noise was False, the entire
          # noise application was skipped, so random_noise_level silently had
          # no effect. Use a neutral scale of 1.0 and still add the noise.
          alpha = 1.0
        image = tf.cond(
            tf.reduce_all(
                tf.greater(
                    tf.random.uniform([1]), random_noise_apply_probability)),
            ReturnImageTensor(image),
            ReturnImageTensor(alpha * image + rnd_noise))
        depth_images[i] = tf.reshape(image, img_shape)
    # Clip to valid range.
    for i, image in enumerate(depth_images):
      depth_images[i] = tf.clip_by_value(image, min_depth_allowed,
                                         max_depth_allowed)
  return depth_images
"""Experimental Resolver for getting the latest artifact."""
from typing import Dict, List, Optional, Text
from tfx import types
from tfx.dsl.components.common import resolver
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.types import artifact_utils
from tfx.utils import doc_controls
import ml_metadata as mlmd
# TODO(b/185938426): consider renaming this to XxxResolverStrategy.
class LatestArtifactsResolver(resolver.ResolverStrategy):
  """Resolver that returns the latest n artifacts in a given channel.

  "Latest" is defined by descending MLMD artifact id.

  Note that this Resolver is experimental and is subject to change in terms of
  both interface and implementation.

  Don't construct LatestArtifactsResolver directly, example usage:
  ```
  model_resolver = Resolver(
      instance_name='latest_model_resolver',
      strategy_class=LatestArtifactsResolver,
      model=Channel(type=Model))
  model_resolver.outputs['model']
  ```
  """

  def __init__(self, desired_num_of_artifacts: Optional[int] = 1):
    self._desired_num_of_artifact = desired_num_of_artifacts

  def _resolve(self, input_dict: Dict[Text, List[types.Artifact]]):
    """Keeps, per key, the desired number of artifacts with the highest ids."""
    result = {}
    for k, artifact_list in input_dict.items():
      sorted_artifact_list = sorted(
          artifact_list, key=lambda a: a.id, reverse=True)
      # A slice clamps at the list length (and tolerates None), so the
      # previous min(len(...), n) -- which raised on n=None -- is unneeded.
      result[k] = sorted_artifact_list[:self._desired_num_of_artifact]
    return result

  @doc_controls.do_not_generate_docs
  def resolve(
      self,
      pipeline_info: data_types.PipelineInfo,
      metadata_handler: metadata.Metadata,
      source_channels: Dict[Text, types.Channel],
  ) -> resolver.ResolveResult:
    """Resolves the latest artifacts per channel within a pipeline context."""
    pipeline_context = metadata_handler.get_pipeline_context(pipeline_info)
    if pipeline_context is None:
      # BUGFIX: report the pipeline we failed on; the old message formatted
      # the (necessarily None) context instead of pipeline_info.
      raise RuntimeError('Pipeline context absent for %s' % pipeline_info)
    candidate_dict = {}
    for k, c in source_channels.items():
      candidate_artifacts = metadata_handler.get_qualified_artifacts(
          contexts=[pipeline_context],
          type_name=c.type_name,
          producer_component_id=c.producer_component_id,
          output_key=c.output_key)
      candidate_dict[k] = [
          artifact_utils.deserialize_artifact(a.type, a.artifact)
          for a in candidate_artifacts
      ]
    resolved_dict = self._resolve(candidate_dict)
    # A key is "complete" only when enough artifacts were found for it.
    resolve_state_dict = {
        k: len(artifact_list) >= self._desired_num_of_artifact
        for k, artifact_list in resolved_dict.items()
    }
    return resolver.ResolveResult(
        per_key_resolve_result=resolved_dict,
        per_key_resolve_state=resolve_state_dict)

  @doc_controls.do_not_generate_docs
  def resolve_artifacts(
      self, store: mlmd.MetadataStore,
      input_dict: Dict[Text, List[types.Artifact]]
  ) -> Optional[Dict[Text, List[types.Artifact]]]:
    """Resolves artifacts from channels by querying MLMD.

    Args:
      store: An MLMD MetadataStore object.
      input_dict: The input_dict to resolve from.

    Returns:
      If `min_count` for every input is met, returns a
      Dict[Text, List[Artifact]]. Otherwise, return None.
    """
    resolved_dict = self._resolve(input_dict)
    all_min_count_met = all(
        len(artifact_list) >= self._desired_num_of_artifact
        for artifact_list in resolved_dict.values())
    return resolved_dict if all_min_count_met else None
from typing import Dict, List, Optional, Text
from tfx import types
from tfx.dsl.components.common import resolver
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.types import artifact_utils
from tfx.utils import doc_controls
import ml_metadata as mlmd
# TODO(b/185938426): consider renaming this to XxxResolverStrategy.
class LatestArtifactsResolver(resolver.ResolverStrategy):
  """Resolver that returns the latest n artifacts in a given channel.

  "Latest" is defined by descending MLMD artifact id.

  Note that this Resolver is experimental and is subject to change in terms of
  both interface and implementation.

  Don't construct LatestArtifactsResolver directly, example usage:
  ```
  model_resolver = Resolver(
      instance_name='latest_model_resolver',
      strategy_class=LatestArtifactsResolver,
      model=Channel(type=Model))
  model_resolver.outputs['model']
  ```
  """

  def __init__(self, desired_num_of_artifacts: Optional[int] = 1):
    self._desired_num_of_artifact = desired_num_of_artifacts

  def _resolve(self, input_dict: Dict[Text, List[types.Artifact]]):
    """Keeps, per key, the desired number of artifacts with the highest ids."""
    result = {}
    for k, artifact_list in input_dict.items():
      sorted_artifact_list = sorted(
          artifact_list, key=lambda a: a.id, reverse=True)
      # A slice clamps at the list length (and tolerates None), so the
      # previous min(len(...), n) -- which raised on n=None -- is unneeded.
      result[k] = sorted_artifact_list[:self._desired_num_of_artifact]
    return result

  @doc_controls.do_not_generate_docs
  def resolve(
      self,
      pipeline_info: data_types.PipelineInfo,
      metadata_handler: metadata.Metadata,
      source_channels: Dict[Text, types.Channel],
  ) -> resolver.ResolveResult:
    """Resolves the latest artifacts per channel within a pipeline context."""
    pipeline_context = metadata_handler.get_pipeline_context(pipeline_info)
    if pipeline_context is None:
      # BUGFIX: report the pipeline we failed on; the old message formatted
      # the (necessarily None) context instead of pipeline_info.
      raise RuntimeError('Pipeline context absent for %s' % pipeline_info)
    candidate_dict = {}
    for k, c in source_channels.items():
      candidate_artifacts = metadata_handler.get_qualified_artifacts(
          contexts=[pipeline_context],
          type_name=c.type_name,
          producer_component_id=c.producer_component_id,
          output_key=c.output_key)
      candidate_dict[k] = [
          artifact_utils.deserialize_artifact(a.type, a.artifact)
          for a in candidate_artifacts
      ]
    resolved_dict = self._resolve(candidate_dict)
    # A key is "complete" only when enough artifacts were found for it.
    resolve_state_dict = {
        k: len(artifact_list) >= self._desired_num_of_artifact
        for k, artifact_list in resolved_dict.items()
    }
    return resolver.ResolveResult(
        per_key_resolve_result=resolved_dict,
        per_key_resolve_state=resolve_state_dict)

  @doc_controls.do_not_generate_docs
  def resolve_artifacts(
      self, store: mlmd.MetadataStore,
      input_dict: Dict[Text, List[types.Artifact]]
  ) -> Optional[Dict[Text, List[types.Artifact]]]:
    """Resolves artifacts from channels by querying MLMD.

    Args:
      store: An MLMD MetadataStore object.
      input_dict: The input_dict to resolve from.

    Returns:
      If `min_count` for every input is met, returns a
      Dict[Text, List[Artifact]]. Otherwise, return None.
    """
    resolved_dict = self._resolve(input_dict)
    all_min_count_met = all(
        len(artifact_list) >= self._desired_num_of_artifact
        for artifact_list in resolved_dict.values())
    return resolved_dict if all_min_count_met else None
import logging
from django.db.models import Q
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from tastypie.resources import ModelResource
from tastypie import fields
from tastypie.contrib.contenttypes.fields import GenericForeignKeyField
from tastypie.authentication import ApiKeyAuthentication
from tastypie.authorization import DjangoAuthorization
from tastypie.authorization import Authorization
from tastypie.exceptions import BadRequest
from journalmanager.models import (
Journal,
UseLicense,
Sponsor,
Collection,
Issue,
Section,
RegularPressRelease,
AheadPressRelease,
PressReleaseTranslation,
PressReleaseArticle,
SubjectCategory,
)
from scielomanager.utils import usercontext
logger = logging.getLogger(__name__)
def current_user_active_collection():
    """Return the collection currently marked active for the request user."""
    finder = usercontext.get_finder()
    return finder.get_current_user_active_collection()
def current_user_collections():
    """Return every collection the request user belongs to."""
    finder = usercontext.get_finder()
    return finder.get_current_user_collections()
class ApiKeyAuthMeta:
    """Base Meta mixin: API-key authentication + Django model-permission authorization."""
    authentication = ApiKeyAuthentication()
    authorization = DjangoAuthorization()
class SectionResource(ModelResource):
    """Read-only API resource exposing journal sections."""
    journal = fields.ForeignKey('api.resources_v1.JournalResource',
                                'journal')
    issues = fields.OneToManyField('api.resources_v1.IssueResource',
                                   'issue_set')
    # Computed field; see dehydrate_titles below.
    titles = fields.CharField(readonly=True)

    class Meta(ApiKeyAuthMeta):
        queryset = Section.objects.all()
        resource_name = 'sections'
        allowed_methods = ['get']
        excludes = ['legacy_code']
        filtering = {
            # BUGFIX: the value must be an iterable of filter types. The old
            # bare ('exact') was just the string 'exact' and only passed
            # tastypie's membership check by accidental substring matching.
            "journal": ('exact',),
        }

    def dehydrate_titles(self, bundle):
        """Serialize section titles as (language iso code, title) pairs."""
        return [(title.language.iso_code, title.title)
                for title in bundle.obj.titles.all()]
class UseLicenseResource(ModelResource):
    """Read-only endpoint listing use licenses."""

    class Meta(ApiKeyAuthMeta):
        queryset = UseLicense.objects.all()
        resource_name = 'uselicenses'
        allowed_methods = ['get', ]
class IssueResource(ModelResource):
    """Read-only endpoint for journal issues.

    IMPORTANT: is_press_release was removed on V2.
    """
    journal = fields.ForeignKey('api.resources_v1.JournalResource',
                                'journal')
    sections = fields.ManyToManyField(SectionResource, 'section')
    # Computed fields; see the dehydrate_* methods below.
    thematic_titles = fields.CharField(readonly=True)
    is_press_release = fields.BooleanField(readonly=True)
    suppl_volume = fields.CharField(attribute='volume', readonly=True)
    suppl_number = fields.CharField(attribute='number', readonly=True)
    use_license = fields.ForeignKey(UseLicenseResource, 'use_license', full=True, null=True)

    class Meta(ApiKeyAuthMeta):
        queryset = Issue.objects.all()
        resource_name = 'issues'
        allowed_methods = ['get', ]
        # BUGFIX: each value must be an iterable of filter types. The old
        # bare ('exact') entries were plain strings and only passed
        # tastypie's membership check by accidental substring matching.
        filtering = {
            "journal": ('exact',),
            "is_marked_up": ('exact',),
            "volume": ('exact',),
            "number": ('exact',),
            "publication_year": ('exact',),
            "suppl_number": ('exact',),
            "suppl_volume": ('exact',),
        }

    def build_filters(self, filters=None):
        """Extend default filtering with collection, ISSN and supplement lookups."""
        if filters is None:
            filters = {}
        orm_filters = super(IssueResource, self).build_filters(filters)
        param_filters = {}
        if 'collection' in filters:
            param_filters['journal__collections__name_slug'] = filters['collection']
        if 'eletronic_issn' in filters:
            param_filters['journal__eletronic_issn'] = filters['eletronic_issn']
        if 'print_issn' in filters:
            param_filters['journal__print_issn'] = filters['print_issn']
        if 'suppl_number' in filters:
            param_filters['type'] = 'supplement'
            param_filters['number'] = filters['suppl_number']
        if 'suppl_volume' in filters:
            # A volume supplement is identified by an empty 'number'.
            param_filters['type'] = 'supplement'
            param_filters['number'] = ''
            param_filters['volume'] = filters['suppl_volume']
        issues = Issue.objects.filter(**param_filters)
        orm_filters['pk__in'] = issues
        return orm_filters

    def dehydrate_thematic_titles(self, bundle):
        """Map each language iso code to the issue's thematic title."""
        return {title.language.iso_code: title.title
                for title in bundle.obj.issuetitle_set.all()}

    def dehydrate_is_press_release(self, bundle):
        # Kept for v1 backward compatibility only; always False.
        return False

    def dehydrate_suppl_volume(self, bundle):
        """Return the supplement text when this is a volume supplement."""
        if bundle.obj.type == 'supplement':
            return bundle.obj.suppl_text if bundle.obj.volume else ''
        return ''

    def dehydrate_suppl_number(self, bundle):
        """Return the supplement text when this is a number supplement."""
        if bundle.obj.type == 'supplement':
            return bundle.obj.suppl_text if bundle.obj.number else ''
        return ''
class CollectionResource(ModelResource):
    """Read-only endpoint listing collections."""

    class Meta(ApiKeyAuthMeta):
        queryset = Collection.objects.all()
        resource_name = 'collections'
        allowed_methods = ['get', ]
class SubjectCategoryResource(ModelResource):
    """Read-only endpoint listing subject categories."""

    class Meta(ApiKeyAuthMeta):
        queryset = SubjectCategory.objects.all()
        resource_name = 'subjectcategory'
        allowed_methods = ['get', ]
class SponsorResource(ModelResource):
    """Read-only endpoint listing sponsors."""

    class Meta(ApiKeyAuthMeta):
        queryset = Sponsor.objects.all()
        resource_name = 'sponsors'
        allowed_methods = ['get', ]
class UserResource(ModelResource):
    """Read-only user listing; sensitive account fields are excluded."""

    class Meta(ApiKeyAuthMeta):
        queryset = User.objects.all()
        resource_name = 'users'
        allowed_methods = ['get', ]
        # Never expose credentials or permission flags through the API.
        excludes = [
            'email',
            'password',
            'is_active',
            'is_staff',
            'is_superuser',
        ]
class JournalResource(ModelResource):
    """Read-only endpoint exposing journals and derived/denormalized fields."""
    missions = fields.CharField(readonly=True)
    other_titles = fields.CharField(readonly=True)
    creator = fields.ForeignKey(UserResource, 'creator')
    abstract_keyword_languages = fields.CharField(readonly=True)
    languages = fields.CharField(readonly=True)
    use_license = fields.ForeignKey(UseLicenseResource, 'use_license', full=True)
    sponsors = fields.ManyToManyField(SponsorResource, 'sponsor')
    collections = fields.ManyToManyField(CollectionResource, 'collections')
    issues = fields.OneToManyField(IssueResource, 'issue_set')
    sections = fields.OneToManyField(SectionResource, 'section_set')
    subject_categories = fields.ManyToManyField(SubjectCategoryResource, 'subject_categories', readonly=True)
    pub_status_history = fields.ListField(readonly=True)
    contact = fields.DictField(readonly=True)
    study_areas = fields.ListField(readonly=True)
    pub_status = fields.CharField(readonly=True)
    pub_status_reason = fields.CharField(readonly=True)
    national_code = fields.CharField(attribute='ccn_code', readonly=True)
    # Recursive relations to the journal's title lineage.
    previous_title = fields.ForeignKey('self', 'previous_title', null=True)
    succeeding_title = fields.ForeignKey('self', 'succeeding_title', null=True)

    class Meta(ApiKeyAuthMeta):
        # The trailing no-op .filter() was redundant; .all() is equivalent.
        queryset = Journal.objects.all()
        resource_name = 'journals'
        allowed_methods = ['get', ]
        filtering = {
            'is_trashed': ('exact',),
            'eletronic_issn': ('exact',),
            'print_issn': ('exact',),
        }

    def build_filters(self, filters=None):
        """Extend default filters with collection name_slug and pubstatus."""
        if filters is None:
            filters = {}
        orm_filters = super(JournalResource, self).build_filters(filters)
        if 'collection' in filters:
            journals = Journal.objects.filter(
                collections__name_slug=filters['collection'])
            orm_filters['pk__in'] = journals
        if 'pubstatus' in filters:
            # Chain on top of any previous 'collection' filtering.
            try:
                j = orm_filters['pk__in']
            except KeyError:
                j = Journal.objects
            statuses = filters.getlist('pubstatus')
            journals = j.filter(
                membership__status__in=statuses)
            orm_filters['pk__in'] = journals
        return orm_filters

    def _get_collection(self, bundle):
        """Resolve the collection context for a journal (v1 is single-collection).

        When the journal belongs to several collections, the ``collection``
        query-string parameter is mandatory to disambiguate.

        Raises:
            BadRequest: if disambiguation is needed but no ``collection``
                query parameter was given.
        """
        try:
            return bundle.obj.collections.get()
        except MultipleObjectsReturned:
            query_collection = bundle.request.GET.get('collection')
            if query_collection:
                return bundle.obj.collections.get(name_slug=query_collection)
            raise BadRequest("missing collection param")

    def dehydrate_missions(self, bundle):
        """(iso_code, description) pairs. IMPORTANT: changed to a dict on V2."""
        return [(mission.language.iso_code, mission.description)
                for mission in bundle.obj.missions.all()]

    def dehydrate_other_titles(self, bundle):
        """(category, title) pairs. IMPORTANT: changed to a dict on V2."""
        return [(title.category, title.title)
                for title in bundle.obj.other_titles.all()]

    def dehydrate_languages(self, bundle):
        """ISO codes of the journal's languages."""
        return [language.iso_code
                for language in bundle.obj.languages.all()]

    def dehydrate_subject_categories(self, bundle):
        """Flatten subject categories to their terms."""
        return [subject_category.term
                for subject_category in bundle.obj.subject_categories.all()]

    def dehydrate_pub_status_history(self, bundle):
        """Status events, most recent first."""
        return [{'date': event.since,
                 'status': event.status}
                for event in bundle.obj.statuses.order_by('-since').all()]

    def dehydrate_study_areas(self, bundle):
        return [area.study_area
                for area in bundle.obj.study_areas.all()]

    def dehydrate_collections(self, bundle):
        """Only works with v1, without multiple collections per journal.

        IMPORTANT: this prepare function was removed from V2.
        """
        try:
            return bundle.data['collections'][0]
        except IndexError:
            return ''

    def dehydrate_pub_status(self, bundle):
        """Publication status within the (mandatory) collection context."""
        return bundle.obj.membership_info(self._get_collection(bundle), 'status')

    def dehydrate_pub_status_reason(self, bundle):
        """Reason for the publication status within the collection context."""
        return bundle.obj.membership_info(self._get_collection(bundle), 'reason')

    def dehydrate(self, bundle):
        # Backwards compatibility: the value is exposed only as
        # 'national_code', never as 'ccn_code'.
        bundle.data.pop('ccn_code', False)
        return bundle
class PressReleaseTranslationResource(ModelResource):
    """Read-only endpoint for press-release translations."""
    # Computed field; see dehydrate_language below.
    language = fields.CharField(readonly=True)

    class Meta(ApiKeyAuthMeta):
        resource_name = 'prtranslations'
        queryset = PressReleaseTranslation.objects.all()
        allowed_methods = ['get', ]

    def dehydrate_language(self, bundle):
        """Expose the language as its ISO code instead of a related object."""
        return bundle.obj.language.iso_code
class PressReleaseResource(ModelResource):
    """Read-only endpoint for regular (issue-bound) press releases."""
    issue_uri = fields.ForeignKey(IssueResource, 'issue')
    translations = fields.OneToManyField(PressReleaseTranslationResource,
                                         'translations',
                                         full=True)
    articles = fields.CharField(readonly=True)
    issue_meta = fields.CharField(readonly=True)

    class Meta(ApiKeyAuthMeta):
        resource_name = 'pressreleases'
        queryset = RegularPressRelease.objects.all()
        allowed_methods = ['get', ]
        ordering = ['id']

    def build_filters(self, filters=None):
        """Allow lookups by article, journal or issue PID (first match wins)."""
        if filters is None:
            filters = {}
        orm_filters = super(PressReleaseResource, self).build_filters(filters)
        matched = None
        if 'article_pid' in filters:
            matched = RegularPressRelease.objects.filter(
                articles__article_pid=filters['article_pid'])
        elif 'journal_pid' in filters:
            matched = RegularPressRelease.objects.by_journal_pid(
                filters['journal_pid'])
        elif 'issue_pid' in filters:
            matched = RegularPressRelease.objects.by_issue_pid(
                filters['issue_pid'])
        if matched is not None:
            orm_filters['pk__in'] = matched
        return orm_filters

    def dehydrate_articles(self, bundle):
        """List the PIDs of the articles this press release refers to."""
        return [article.article_pid for article in bundle.obj.articles.all()]

    def dehydrate_issue_meta(self, bundle):
        """Summarize bibliographic metadata of the related issue."""
        issue = bundle.obj.issue
        journal = issue.journal
        is_supplement = issue.type == 'supplement'
        return {
            'scielo_pid': issue.scielo_pid,
            'short_title': journal.short_title,
            'volume': issue.volume,
            'number': issue.number,
            'suppl_volume': issue.suppl_text if is_supplement and issue.volume else '',
            'suppl_number': issue.suppl_text if is_supplement and issue.number else '',
            'publication_start_month': issue.publication_start_month,
            'publication_end_month': issue.publication_end_month,
            'publication_city': journal.publication_city,
            'publication_year': issue.publication_year,
        }
class AheadPressReleaseResource(ModelResource):
    """Read-only endpoint for press releases of ahead-of-print articles."""
    journal_uri = fields.ForeignKey(JournalResource, 'journal')
    translations = fields.OneToManyField(PressReleaseTranslationResource,
                                         'translations',
                                         full=True)
    articles = fields.CharField(readonly=True)

    class Meta(ApiKeyAuthMeta):
        resource_name = 'apressreleases'
        queryset = AheadPressRelease.objects.all()
        allowed_methods = ['get', ]

    def dehydrate_articles(self, bundle):
        """List the PIDs of the articles this press release refers to."""
        return [art.article_pid for art in bundle.obj.articles.all()]

    def build_filters(self, filters=None):
        """Custom filter that retrieves data by the article or journal PID."""
        if filters is None:
            filters = {}
        orm_filters = super(AheadPressReleaseResource, self).build_filters(filters)
        if 'article_pid' in filters:
            preleases = AheadPressRelease.objects.filter(
                articles__article_pid=filters['article_pid'])
            orm_filters['pk__in'] = preleases
        elif 'journal_pid' in filters:
            preleases = AheadPressRelease.objects.by_journal_pid(
                filters['journal_pid'])
            orm_filters['pk__in'] = preleases
        return orm_filters
from django.db.models import Q
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from tastypie.resources import ModelResource
from tastypie import fields
from tastypie.contrib.contenttypes.fields import GenericForeignKeyField
from tastypie.authentication import ApiKeyAuthentication
from tastypie.authorization import DjangoAuthorization
from tastypie.authorization import Authorization
from tastypie.exceptions import BadRequest
from journalmanager.models import (
Journal,
UseLicense,
Sponsor,
Collection,
Issue,
Section,
RegularPressRelease,
AheadPressRelease,
PressReleaseTranslation,
PressReleaseArticle,
SubjectCategory,
)
from scielomanager.utils import usercontext
logger = logging.getLogger(__name__)
def current_user_active_collection():
    """Return the collection currently marked active for the request user."""
    return usercontext.get_finder().get_current_user_active_collection()
def current_user_collections():
    """Return every collection the request user belongs to."""
    return usercontext.get_finder().get_current_user_collections()
class ApiKeyAuthMeta:
    """Base Meta mixin: API-key authentication + Django model-permission authorization."""
    authentication = ApiKeyAuthentication()
    authorization = DjangoAuthorization()
class SectionResource(ModelResource):
    """Read-only API resource exposing journal sections."""
    journal = fields.ForeignKey('api.resources_v1.JournalResource',
                                'journal')
    issues = fields.OneToManyField('api.resources_v1.IssueResource',
                                   'issue_set')
    # Computed field; see dehydrate_titles below.
    titles = fields.CharField(readonly=True)

    class Meta(ApiKeyAuthMeta):
        queryset = Section.objects.all()
        resource_name = 'sections'
        allowed_methods = ['get']
        excludes = ['legacy_code']
        filtering = {
            # NOTE(review): ('exact') is just the string 'exact', not a
            # tuple; it only passes tastypie's membership check by substring
            # matching. Should probably be ('exact',) -- confirm.
            "journal": ('exact'),
        }

    def dehydrate_titles(self, bundle):
        """Serialize section titles as (language iso code, title) pairs."""
        return [(title.language.iso_code, title.title)
                for title in bundle.obj.titles.all()]
class UseLicenseResource(ModelResource):
    """Read-only endpoint listing use licenses."""

    class Meta(ApiKeyAuthMeta):
        queryset = UseLicense.objects.all()
        resource_name = 'uselicenses'
        allowed_methods = ['get', ]
class IssueResource(ModelResource):
    """
    IMPORTANT: is_press_release was removed on V2
    """
    journal = fields.ForeignKey('api.resources_v1.JournalResource',
                                'journal')
    sections = fields.ManyToManyField(SectionResource, 'section')
    # Computed fields; see the dehydrate_* methods below.
    thematic_titles = fields.CharField(readonly=True)
    is_press_release = fields.BooleanField(readonly=True)
    suppl_volume = fields.CharField(attribute='volume', readonly=True)
    suppl_number = fields.CharField(attribute='number', readonly=True)
    use_license = fields.ForeignKey(UseLicenseResource, 'use_license', full=True, null=True)

    class Meta(ApiKeyAuthMeta):
        queryset = Issue.objects.all()
        resource_name = 'issues'
        allowed_methods = ['get', ]
        # NOTE(review): each value below is a bare parenthesized string, not
        # a tuple; it only passes tastypie's membership check by substring
        # matching. Should probably be ('exact',) -- confirm.
        filtering = {
            "journal": ('exact'),
            "is_marked_up": ('exact'),
            "volume": ('exact'),
            "number": ('exact'),
            "publication_year": ('exact'),
            "suppl_number": ('exact'),
            "suppl_volume": ('exact')
        }

    def build_filters(self, filters=None):
        """
        Custom filter that retrieves data by the collection's name_slug.
        """
        if filters is None:
            filters = {}
        orm_filters = super(IssueResource, self).build_filters(filters)
        param_filters = {}
        if 'collection' in filters:
            param_filters['journal__collections__name_slug'] = filters['collection']
        if 'eletronic_issn' in filters:
            param_filters['journal__eletronic_issn'] = filters['eletronic_issn']
        if 'print_issn' in filters:
            param_filters['journal__print_issn'] = filters['print_issn']
        if 'suppl_number' in filters:
            param_filters['type'] = 'supplement'
            param_filters['number'] = filters['suppl_number']
        if 'suppl_volume' in filters:
            # A volume supplement is identified by an empty 'number'.
            param_filters['type'] = 'supplement'
            param_filters['number'] = ''
            param_filters['volume'] = filters['suppl_volume']
        issues = Issue.objects.filter(**param_filters)
        orm_filters['pk__in'] = issues
        return orm_filters

    def dehydrate_thematic_titles(self, bundle):
        # Maps language iso code -> thematic title.
        return dict([title.language.iso_code, title.title]
                    for title in bundle.obj.issuetitle_set.all())

    def dehydrate_is_press_release(self, bundle):
        # Kept for v1 backward compatibility only; always False.
        return False

    def dehydrate_suppl_volume(self, bundle):
        # Supplement text only applies when this is a volume supplement.
        if bundle.obj.type == 'supplement':
            return bundle.obj.suppl_text if bundle.obj.volume else ''
        else:
            return ''

    def dehydrate_suppl_number(self, bundle):
        # Supplement text only applies when this is a number supplement.
        if bundle.obj.type == 'supplement':
            return bundle.obj.suppl_text if bundle.obj.number else ''
        else:
            return ''
class CollectionResource(ModelResource):
    """Read-only API endpoint exposing Collection records."""

    class Meta(ApiKeyAuthMeta):
        queryset = Collection.objects.all()
        resource_name = 'collections'
        allowed_methods = ['get', ]
class SubjectCategoryResource(ModelResource):
    """Read-only API endpoint exposing SubjectCategory records."""

    class Meta(ApiKeyAuthMeta):
        queryset = SubjectCategory.objects.all()
        resource_name = 'subjectcategory'
        allowed_methods = ['get', ]
class SponsorResource(ModelResource):
    """Read-only API endpoint exposing Sponsor records."""

    class Meta(ApiKeyAuthMeta):
        queryset = Sponsor.objects.all()
        resource_name = 'sponsors'
        allowed_methods = ['get', ]
class UserResource(ModelResource):
    """Read-only API endpoint exposing User records.

    Sensitive and authorization-related columns are excluded from the
    serialized output.
    """

    class Meta(ApiKeyAuthMeta):
        queryset = User.objects.all()
        resource_name = 'users'
        allowed_methods = ['get', ]
        excludes = [
            'email',
            'password',
            'is_active',
            'is_staff',
            'is_superuser',
        ]
class JournalResource(ModelResource):
    """Read-only API v1 endpoint for Journal records.

    The ``dehydrate_*`` hooks flatten related objects into the plain
    lists/strings v1 clients expect. v1 predates multi-collection
    support, so membership lookups need a single collection context
    (see ``_membership_collection``).
    """
    missions = fields.CharField(readonly=True)
    other_titles = fields.CharField(readonly=True)
    creator = fields.ForeignKey(UserResource, 'creator')
    abstract_keyword_languages = fields.CharField(readonly=True)
    languages = fields.CharField(readonly=True)
    use_license = fields.ForeignKey(UseLicenseResource, 'use_license', full=True)
    sponsors = fields.ManyToManyField(SponsorResource, 'sponsor')
    collections = fields.ManyToManyField(CollectionResource, 'collections')
    issues = fields.OneToManyField(IssueResource, 'issue_set')
    sections = fields.OneToManyField(SectionResource, 'section_set')
    subject_categories = fields.ManyToManyField(SubjectCategoryResource, 'subject_categories', readonly=True)
    pub_status_history = fields.ListField(readonly=True)
    contact = fields.DictField(readonly=True)
    study_areas = fields.ListField(readonly=True)
    pub_status = fields.CharField(readonly=True)
    pub_status_reason = fields.CharField(readonly=True)
    national_code = fields.CharField(attribute='ccn_code', readonly=True)
    # recursive fields
    previous_title = fields.ForeignKey('self', 'previous_title', null=True)
    succeeding_title = fields.ForeignKey('self', 'succeeding_title', null=True)

    class Meta(ApiKeyAuthMeta):
        # was ``.all().filter()``: the trailing no-op filter() was removed.
        queryset = Journal.objects.all()
        resource_name = 'journals'
        allowed_methods = ['get', ]
        filtering = {
            'is_trashed': ('exact',),
            'eletronic_issn': ('exact',),
            'print_issn': ('exact',),
        }

    def build_filters(self, filters=None):
        """
        Custom filter that retrieves data by the collection's name_slug.

        ``pubstatus`` narrows the collection queryset (if one was built)
        rather than replacing it.
        """
        if filters is None:
            filters = {}
        orm_filters = super(JournalResource, self).build_filters(filters)
        if 'collection' in filters:
            journals = Journal.objects.filter(
                collections__name_slug=filters['collection'])
            orm_filters['pk__in'] = journals
        if 'pubstatus' in filters:
            # keep the previous filtering
            try:
                j = orm_filters['pk__in']
            except KeyError:
                j = Journal.objects
            statuses = filters.getlist('pubstatus')
            journals = j.filter(
                membership__status__in=statuses)
            orm_filters['pk__in'] = journals
        return orm_filters

    def _membership_collection(self, bundle):
        """Resolve the single collection context for membership lookups.

        When the journal belongs to several collections the mandatory
        ``collection`` query-string param disambiguates.

        Raises:
            BadRequest: multiple collections and no ``collection`` param.
        """
        try:
            return bundle.obj.collections.get()
        except MultipleObjectsReturned:
            query_collection = bundle.request.GET.get('collection')
            if query_collection:
                return bundle.obj.collections.get(name_slug=query_collection)
            raise BadRequest("missing collection param")

    def dehydrate_missions(self, bundle):
        """
        (ISO language code, description) pairs.

        IMPORTANT: Changed to dict on V2
        """
        return [(mission.language.iso_code, mission.description)
                for mission in bundle.obj.missions.all()]

    def dehydrate_other_titles(self, bundle):
        """
        (category, title) pairs.

        IMPORTANT: Changed to dict on V2
        """
        return [(title.category, title.title)
                for title in bundle.obj.other_titles.all()]

    def dehydrate_languages(self, bundle):
        return [language.iso_code
                for language in bundle.obj.languages.all()]

    def dehydrate_subject_categories(self, bundle):
        return [subject_category.term
                for subject_category in bundle.obj.subject_categories.all()]

    def dehydrate_pub_status_history(self, bundle):
        # Newest event first.
        return [{'date': event.since,
                 'status': event.status}
                for event in bundle.obj.statuses.order_by('-since').all()]

    def dehydrate_study_areas(self, bundle):
        return [area.study_area
                for area in bundle.obj.study_areas.all()]

    def dehydrate_collections(self, bundle):
        """
        Only works with v1, without multiple collections per journal.
        IMPORTANT: This prepare function was removed from V2
        """
        try:
            return bundle.data['collections'][0]
        except IndexError:
            return ''

    def dehydrate_pub_status(self, bundle):
        """Journal status within the (mandatory) collection context."""
        return bundle.obj.membership_info(self._membership_collection(bundle),
                                          'status')

    def dehydrate_pub_status_reason(self, bundle):
        """Reason for the status within the collection context."""
        return bundle.obj.membership_info(self._membership_collection(bundle),
                                          'reason')

    def dehydrate(self, bundle):
        # Backwards compatibility: ccn_code is only exposed as national_code.
        bundle.data.pop('ccn_code', False)
        return bundle
class PressReleaseTranslationResource(ModelResource):
    """Read-only API endpoint for press-release translations."""

    # Derived field: ISO code of the translation language.
    language = fields.CharField(readonly=True)

    class Meta(ApiKeyAuthMeta):
        resource_name = 'prtranslations'
        queryset = PressReleaseTranslation.objects.all()
        allowed_methods = ['get', ]

    def dehydrate_language(self, bundle):
        return bundle.obj.language.iso_code
class PressReleaseResource(ModelResource):
    """Read-only API endpoint for regular (issue-bound) press releases."""

    issue_uri = fields.ForeignKey(IssueResource, 'issue')
    translations = fields.OneToManyField(PressReleaseTranslationResource,
                                         'translations',
                                         full=True)
    # Derived fields; see the dehydrate_* hooks below.
    articles = fields.CharField(readonly=True)
    issue_meta = fields.CharField(readonly=True)

    class Meta(ApiKeyAuthMeta):
        resource_name = 'pressreleases'
        queryset = RegularPressRelease.objects.all()
        allowed_methods = ['get', ]
        ordering = ['id']

    def build_filters(self, filters=None):
        """
        Custom filter that retrieves data by the article PID.

        Mutually exclusive, in precedence order: article_pid,
        journal_pid, issue_pid.
        """
        if filters is None:
            filters = {}
        orm_filters = super(PressReleaseResource, self).build_filters(filters)
        if 'article_pid' in filters:
            preleases = RegularPressRelease.objects.filter(
                articles__article_pid=filters['article_pid'])
            orm_filters['pk__in'] = preleases
        elif 'journal_pid' in filters:
            preleases = RegularPressRelease.objects.by_journal_pid(
                filters['journal_pid'])
            orm_filters['pk__in'] = preleases
        elif 'issue_pid' in filters:
            preleases = RegularPressRelease.objects.by_issue_pid(
                filters['issue_pid'])
            orm_filters['pk__in'] = preleases
        return orm_filters

    def dehydrate_articles(self, bundle):
        # PIDs of the articles covered by this press release.
        return [art.article_pid for art in bundle.obj.articles.all()]

    def dehydrate_issue_meta(self, bundle):
        """Summary metadata of the related issue, flattened for v1 clients."""
        issue = bundle.obj.issue
        meta_data = {
            'scielo_pid': issue.scielo_pid,
            'short_title': issue.journal.short_title,
            'volume': issue.volume,
            'number': issue.number,
            # suppl_text applies only to supplements of the matching kind.
            'suppl_volume': issue.suppl_text if issue.type == 'supplement' and issue.volume else '',
            'suppl_number': issue.suppl_text if issue.type == 'supplement' and issue.number else '',
            'publication_start_month': issue.publication_start_month,
            'publication_end_month': issue.publication_end_month,
            'publication_city': issue.journal.publication_city,
            'publication_year': issue.publication_year,
        }
        return meta_data
class AheadPressReleaseResource(ModelResource):
    """Read-only API endpoint for ahead-of-print press releases."""

    journal_uri = fields.ForeignKey(JournalResource, 'journal')
    translations = fields.OneToManyField(PressReleaseTranslationResource,
                                         'translations',
                                         full=True)
    articles = fields.CharField(readonly=True)

    class Meta(ApiKeyAuthMeta):
        resource_name = 'apressreleases'
        queryset = AheadPressRelease.objects.all()
        allowed_methods = ['get', ]

    def dehydrate_articles(self, bundle):
        # PIDs of the articles covered by this press release.
        return [art.article_pid for art in bundle.obj.articles.all()]

    def build_filters(self, filters=None):
        """
        Custom filter that retrieves data by the article PID.
        """
        if filters is None:
            filters = {}
        orm_filters = super(AheadPressReleaseResource, self).build_filters(filters)
        if 'article_pid' in filters:
            preleases = AheadPressRelease.objects.filter(
                articles__article_pid=filters['article_pid'])
            orm_filters['pk__in'] = preleases
        elif 'journal_pid' in filters:
            preleases = AheadPressRelease.objects.by_journal_pid(
                filters['journal_pid'])
            orm_filters['pk__in'] = preleases
        # NOTE(review): the trailing "| 0.593727 | 0.107907 |" below is a
        # pasted-in dataset artifact, not code — confirm against the
        # original file.
        return orm_filters | 0.593727 | 0.107907 |
import torch
import torch.nn as nn
from torchvision.datasets import MNIST
from torchvision.transforms import ToTensor, Compose, Resize, InterpolationMode
from mighty.loss import TripletLossSampler
from mighty.trainer import TrainerEmbedding, TrainerGrad
from mighty.utils.common import set_seed
from mighty.utils.data import DataLoader
from mighty.utils.domain import MonitorLevel
from mighty.utils.var_online import MeanOnline, MeanOnlineLabels
from nn.kwta import *
from nn.utils import l0_sparsity, sample_bernoulli
from nn.trainer import TrainerIWTA
set_seed(0)  # reproducibility
# N_x = 28 ** 2
# N_h = N_y = 1024
size = 28
# Input, hidden and output populations all match the (size x size) image.
N_x = N_h = N_y = size ** 2
# Initial sparsity shared by every weight matrix.
s_w_xh = s_w_xy = s_w_hy = s_w_yy = s_w_hh = s_w_yh = 0.05
K_FIXED = int(0.05 * N_y)  # fixed k for the (commented-out) kWTA variant
class TrainerIWTAMnist(TrainerIWTA):
    """iWTA trainer for MNIST.

    Tracks L0 sparsity and per-class cluster centroids for both the
    hidden ('h') and output ('y') populations and pushes diagnostics to
    the monitor.

    NOTE(review): block nesting below is reconstructed from a
    whitespace-mangled source — confirm against the original file.
    """

    # Number of synapses chosen per plasticity update — TODO confirm
    N_CHOOSE = 10
    LEARNING_RATE = 0.01

    def _init_online_measures(self):
        # Add hidden-layer measures on top of the base ('y') ones.
        online = super()._init_online_measures()
        online['sparsity-h'] = MeanOnline()
        online['clusters-h'] = MeanOnlineLabels()
        return online

    def _epoch_finished(self, loss):
        # Epoch-level diagnostics.
        self.monitor.param_records.plot_sign_flips(self.monitor.viz)
        self.monitor.update_contribution(self.model.weight_contribution())
        self.monitor.update_kwta_thresholds(self.model.kwta_thresholds())
        self.monitor.update_weight_sparsity(self.model.weight_sparsity())
        self.monitor.update_s_w(self.model.s_w())
        self.monitor.update_sparsity(self.online['sparsity'].get_mean().item(), mode='y')
        self.monitor.update_sparsity(self.online['sparsity-h'].get_mean().item(), mode='h')
        self.monitor.clusters_heatmap(self.online['clusters'].get_mean(), title="Embeddings 'y'")
        self.monitor.clusters_heatmap(self.online['clusters-h'].get_mean(), title="Embeddings 'h'")
        # Delegates to TrainerGrad, bypassing the direct parent — presumably
        # to skip TrainerIWTA's own epoch hook; verify intent.
        TrainerGrad._epoch_finished(self, loss)

    def train_batch(self, batch):
        def centroids(tensor):
            # Per-class mean embedding within the current batch.
            return torch.stack([tensor[labels == l].mean(dim=0)
                                for l in labels.unique()])

        x, labels = batch
        h, y = self.model(x)
        self.update_contribution(h, y)
        loss = self._get_loss(batch, (h, y))
        # Local plasticity update (not gradient descent).
        self.model.update_weights(x, h, y, n_choose=self.N_CHOOSE,
                                  lr=self.LEARNING_RATE)
        if self.timer.epoch == 0:
            # Show a few raw input samples during the first epoch.
            self.monitor.viz.images(x[:10], nrow=5, win="samples", opts=dict(
                width=500,
            ))
        self.online['clusters'].update(y, labels)
        self.online['clusters-h'].update(h, labels)
        self.online['sparsity'].update(torch.Tensor([l0_sparsity(y)]))
        self.online['sparsity-h'].update(torch.Tensor([l0_sparsity(h)]))
        if self.timer.batch_id > 0:
            # Per-batch diagnostics.
            self.monitor.param_records.plot_sign_flips(self.monitor.viz)
            self.monitor.update_weight_histogram()
            self.monitor.update_contribution(self.model.weight_contribution())
            self.monitor.update_kwta_thresholds(self.model.kwta_thresholds())
            self.monitor.update_weight_sparsity(self.model.weight_sparsity())
            self.monitor.update_s_w(self.model.s_w())
            self.monitor.update_sparsity(self.online['sparsity'].get_mean().item(), mode='y')
            self.monitor.update_sparsity(self.online['sparsity-h'].get_mean().item(), mode='h')
            self.monitor.clusters_heatmap(centroids(y), title="Embeddings 'y'")
            self.monitor.clusters_heatmap(centroids(h), title="Embeddings 'h'")
        return loss

    def training_started(self):
        # Record the initial weight statistics before any update.
        self.monitor.update_weight_sparsity(self.model.weight_sparsity())
        self.monitor.update_s_w(self.model.s_w())

    def _on_forward_pass_batch(self, batch, output, train):
        h, y = output
        input, labels = batch
        if train:
            self.online['sparsity'].update(torch.Tensor([l0_sparsity(y)]))
            self.online['sparsity-h'].update(torch.Tensor([l0_sparsity(h)]))
            self.online['clusters'].update(y, labels)
            self.online['clusters-h'].update(h, labels)
        # Delegate with only the 'y' population as the embedding output.
        TrainerGrad._on_forward_pass_batch(self, batch, y, train)
# Plastic synapse matrices, initialised as sparse Bernoulli masks.
Permanence = PermanenceVaryingSparsity
w_xy = Permanence(sample_bernoulli((N_x, N_y), p=s_w_xy), excitatory=True, learn=True)
w_xh = Permanence(sample_bernoulli((N_x, N_h), p=s_w_xh), excitatory=True, learn=True)
w_hy = Permanence(sample_bernoulli((N_h, N_y), p=s_w_hy), excitatory=False, learn=True)
# BUGFIX: w_hh previously sampled with p=s_w_hy; use its own sparsity
# constant (numerically identical today since all s_w_* share 0.05).
w_hh = Permanence(sample_bernoulli((N_h, N_h), p=s_w_hh), excitatory=False, learn=True)
w_yh = Permanence(sample_bernoulli((N_y, N_h), p=s_w_yh), excitatory=True, learn=True)
# No direct y->y recurrence in this configuration.
w_yy = None
class BinarizeMnist(nn.Module):
    """Threshold an image at zero and randomly flip ~5% of its pixels.

    Returns a float tensor of 0.0/1.0 values with the input's shape.
    """

    def forward(self, tensor: torch.Tensor) -> torch.Tensor:
        # Draw the flip mask first so seeded runs consume the RNG in the
        # same order as before.
        flip_mask = (torch.distributions.bernoulli.Bernoulli(0.05)
                     .sample(tensor.shape)
                     .bool())
        binarized = tensor.gt(0)
        return binarized.bitwise_xor(flip_mask).float()
# Binarize inputs; downscale first when a non-default size is requested.
transform = [ToTensor(), BinarizeMnist()]
if size != 28:
    transform.insert(0, Resize(size, interpolation=InterpolationMode.NEAREST))
data_loader = DataLoader(MNIST, transform=Compose(transform), batch_size=256)
criterion = TripletLossSampler(nn.TripletMarginLoss())
iwta = IterativeWTA(w_xy=w_xy, w_xh=w_xh, w_hy=w_hy, w_hh=w_hh, w_yy=w_yy, w_yh=w_yh)
# iwta = KWTANet(w_xy=w_xy, w_xh=w_xh, w_hy=w_hy, kh=K_FIXED, ky=K_FIXED)
print(iwta)
trainer = TrainerIWTAMnist(model=iwta, criterion=criterion,
                           data_loader=data_loader, verbosity=2)
trainer.monitor.advanced_monitoring(
    level=MonitorLevel.SIGN_FLIPS | MonitorLevel.WEIGHT_HISTOGRAM)
# NOTE(review): the trailing "| nn/mnist.py | import torch" below is a
# pasted-in dataset artifact, not code — confirm against the original file.
trainer.train(n_epochs=10) | nn/mnist.py | import torch
import torch.nn as nn
from torchvision.datasets import MNIST
from torchvision.transforms import ToTensor, Compose, Resize, InterpolationMode
from mighty.loss import TripletLossSampler
from mighty.trainer import TrainerEmbedding, TrainerGrad
from mighty.utils.common import set_seed
from mighty.utils.data import DataLoader
from mighty.utils.domain import MonitorLevel
from mighty.utils.var_online import MeanOnline, MeanOnlineLabels
from nn.kwta import *
from nn.utils import l0_sparsity, sample_bernoulli
from nn.trainer import TrainerIWTA
set_seed(0)  # reproducibility
# N_x = 28 ** 2
# N_h = N_y = 1024
size = 28
# All three populations match the flattened (size x size) image.
N_x = N_h = N_y = size ** 2
# Initial sparsity shared by every weight matrix.
s_w_xh = s_w_xy = s_w_hy = s_w_yy = s_w_hh = s_w_yh = 0.05
K_FIXED = int(0.05 * N_y)  # fixed k for the (commented-out) kWTA variant
class TrainerIWTAMnist(TrainerIWTA):
    """iWTA trainer for MNIST; tracks sparsity and cluster centroids for
    the hidden ('h') and output ('y') populations.

    NOTE(review): nesting reconstructed from a whitespace-mangled source;
    this span also duplicates an earlier copy in the same dump — confirm.
    """

    N_CHOOSE = 10  # synapses chosen per plasticity update — TODO confirm
    LEARNING_RATE = 0.01

    def _init_online_measures(self):
        # Add hidden-layer measures on top of the base ('y') ones.
        online = super()._init_online_measures()
        online['sparsity-h'] = MeanOnline()
        online['clusters-h'] = MeanOnlineLabels()
        return online

    def _epoch_finished(self, loss):
        # Epoch-level diagnostics.
        self.monitor.param_records.plot_sign_flips(self.monitor.viz)
        self.monitor.update_contribution(self.model.weight_contribution())
        self.monitor.update_kwta_thresholds(self.model.kwta_thresholds())
        self.monitor.update_weight_sparsity(self.model.weight_sparsity())
        self.monitor.update_s_w(self.model.s_w())
        self.monitor.update_sparsity(self.online['sparsity'].get_mean().item(), mode='y')
        self.monitor.update_sparsity(self.online['sparsity-h'].get_mean().item(), mode='h')
        self.monitor.clusters_heatmap(self.online['clusters'].get_mean(), title="Embeddings 'y'")
        self.monitor.clusters_heatmap(self.online['clusters-h'].get_mean(), title="Embeddings 'h'")
        # Bypasses TrainerIWTA's own hook — presumably intentional; verify.
        TrainerGrad._epoch_finished(self, loss)

    def train_batch(self, batch):
        def centroids(tensor):
            # Per-class mean embedding within the current batch.
            return torch.stack([tensor[labels == l].mean(dim=0)
                                for l in labels.unique()])

        x, labels = batch
        h, y = self.model(x)
        self.update_contribution(h, y)
        loss = self._get_loss(batch, (h, y))
        # Local plasticity update (not gradient descent).
        self.model.update_weights(x, h, y, n_choose=self.N_CHOOSE,
                                  lr=self.LEARNING_RATE)
        if self.timer.epoch == 0:
            self.monitor.viz.images(x[:10], nrow=5, win="samples", opts=dict(
                width=500,
            ))
        self.online['clusters'].update(y, labels)
        self.online['clusters-h'].update(h, labels)
        self.online['sparsity'].update(torch.Tensor([l0_sparsity(y)]))
        self.online['sparsity-h'].update(torch.Tensor([l0_sparsity(h)]))
        if self.timer.batch_id > 0:
            # Per-batch diagnostics.
            self.monitor.param_records.plot_sign_flips(self.monitor.viz)
            self.monitor.update_weight_histogram()
            self.monitor.update_contribution(self.model.weight_contribution())
            self.monitor.update_kwta_thresholds(self.model.kwta_thresholds())
            self.monitor.update_weight_sparsity(self.model.weight_sparsity())
            self.monitor.update_s_w(self.model.s_w())
            self.monitor.update_sparsity(self.online['sparsity'].get_mean().item(), mode='y')
            self.monitor.update_sparsity(self.online['sparsity-h'].get_mean().item(), mode='h')
            self.monitor.clusters_heatmap(centroids(y), title="Embeddings 'y'")
            self.monitor.clusters_heatmap(centroids(h), title="Embeddings 'h'")
        return loss

    def training_started(self):
        # Record initial weight statistics before any update.
        self.monitor.update_weight_sparsity(self.model.weight_sparsity())
        self.monitor.update_s_w(self.model.s_w())

    def _on_forward_pass_batch(self, batch, output, train):
        h, y = output
        input, labels = batch
        if train:
            self.online['sparsity'].update(torch.Tensor([l0_sparsity(y)]))
            self.online['sparsity-h'].update(torch.Tensor([l0_sparsity(h)]))
            self.online['clusters'].update(y, labels)
            self.online['clusters-h'].update(h, labels)
        # Delegate with only the 'y' population as the embedding output.
        TrainerGrad._on_forward_pass_batch(self, batch, y, train)
# Plastic synapse matrices, initialised as sparse Bernoulli masks.
Permanence = PermanenceVaryingSparsity
w_xy = Permanence(sample_bernoulli((N_x, N_y), p=s_w_xy), excitatory=True, learn=True)
w_xh = Permanence(sample_bernoulli((N_x, N_h), p=s_w_xh), excitatory=True, learn=True)
w_hy = Permanence(sample_bernoulli((N_h, N_y), p=s_w_hy), excitatory=False, learn=True)
# BUGFIX: w_hh previously sampled with p=s_w_hy; use its own sparsity
# constant (numerically identical today since all s_w_* share 0.05).
w_hh = Permanence(sample_bernoulli((N_h, N_h), p=s_w_hh), excitatory=False, learn=True)
w_yh = Permanence(sample_bernoulli((N_y, N_h), p=s_w_yh), excitatory=True, learn=True)
# No direct y->y recurrence in this configuration.
w_yy = None
class BinarizeMnist(nn.Module):
    """Zero-threshold an image, then flip each pixel with probability 0.05.

    Output is a float tensor of 0.0/1.0 with the same shape as the input.
    """

    def forward(self, tensor: torch.Tensor) -> torch.Tensor:
        # Sample the noise mask before thresholding so seeded runs draw
        # from the RNG in the original order.
        bern = torch.distributions.bernoulli.Bernoulli(0.05)
        flips = bern.sample(tensor.shape).bool()
        above_zero = tensor.gt(0)
        return torch.bitwise_xor(above_zero, flips).float()
# Binarize inputs; downscale first when a non-default size is requested.
transform = [ToTensor(), BinarizeMnist()]
if size != 28:
    transform.insert(0, Resize(size, interpolation=InterpolationMode.NEAREST))
data_loader = DataLoader(MNIST, transform=Compose(transform), batch_size=256)
criterion = TripletLossSampler(nn.TripletMarginLoss())
iwta = IterativeWTA(w_xy=w_xy, w_xh=w_xh, w_hy=w_hy, w_hh=w_hh, w_yy=w_yy, w_yh=w_yh)
# iwta = KWTANet(w_xy=w_xy, w_xh=w_xh, w_hy=w_hy, kh=K_FIXED, ky=K_FIXED)
print(iwta)
trainer = TrainerIWTAMnist(model=iwta, criterion=criterion,
                           data_loader=data_loader, verbosity=2)
trainer.monitor.advanced_monitoring(
    level=MonitorLevel.SIGN_FLIPS | MonitorLevel.WEIGHT_HISTOGRAM)
# NOTE(review): the trailing "| 0.857515 | 0.435361 |" below is a pasted-in
# dataset artifact, not code — confirm against the original file.
trainer.train(n_epochs=10) | 0.857515 | 0.435361 |
import numpy as np
import cirq
def test_inconclusive():
    """Objects with no unitary-related magic methods are not unitary."""
    class No:
        pass

    assert not cirq.has_unitary(object())
    assert not cirq.has_unitary('boo')
    assert not cirq.has_unitary(No())


def test_via_unitary():
    """has_unitary honours the _unitary_ protocol; None and
    NotImplemented both mean 'no unitary'."""
    class No1:
        def _unitary_(self):
            return NotImplemented

    class No2:
        def _unitary_(self):
            return None

    class Yes:
        def _unitary_(self):
            return np.array([[1]])

    assert not cirq.has_unitary(No1())
    assert not cirq.has_unitary(No2())
    assert cirq.has_unitary(Yes())
def test_via_apply_unitary():
    """has_unitary can be decided via the _apply_unitary_ protocol, but
    only for gates/operations it knows how to call."""
    class No1(EmptyOp):
        def _apply_unitary_(self, args):
            return None

    class No2(EmptyOp):
        def _apply_unitary_(self, args):
            return NotImplemented

    class No3(cirq.SingleQubitGate):
        def _apply_unitary_(self, args):
            return NotImplemented

    class No4:  # A non-operation non-gate.
        def _apply_unitary_(self, args):
            assert False  # Because has_unitary doesn't understand how to call.

    class Yes1(EmptyOp):
        def _apply_unitary_(self, args):
            return args.target_tensor

    class Yes2(cirq.SingleQubitGate):
        def _apply_unitary_(self, args):
            return args.target_tensor

    assert cirq.has_unitary(Yes1())
    assert cirq.has_unitary(Yes2())
    assert not cirq.has_unitary(No1())
    assert not cirq.has_unitary(No2())
    assert not cirq.has_unitary(No3())
    assert not cirq.has_unitary(No4())
def test_via_decompose():
    """has_unitary is True when the decomposition consists solely of
    unitary operations (an empty decomposition is trivially unitary)."""
    class Yes1:
        def _decompose_(self):
            return []

    class Yes2:
        def _decompose_(self):
            return [cirq.X(cirq.LineQubit(0))]

    class No1:
        def _decompose_(self):
            # A channel (depolarize) is not unitary.
            return [cirq.depolarize(0.5).on(cirq.LineQubit(0))]

    class No2:
        def _decompose_(self):
            return None

    class No3:
        def _decompose_(self):
            return NotImplemented

    assert cirq.has_unitary(Yes1())
    assert cirq.has_unitary(Yes2())
    assert not cirq.has_unitary(No1())
    assert not cirq.has_unitary(No2())
    assert not cirq.has_unitary(No3())
def test_via_has_unitary():
    """The explicit _has_unitary_ protocol takes effect when it returns
    a bool; NotImplemented defers to the other strategies."""
    class No1:
        def _has_unitary_(self):
            return NotImplemented

    class No2:
        def _has_unitary_(self):
            return False

    class Yes:
        def _has_unitary_(self):
            return True

    assert not cirq.has_unitary(No1())
    assert not cirq.has_unitary(No2())
    assert cirq.has_unitary(Yes())
def test_order():
    """Strategies are consulted in order _has_unitary_, _decompose_,
    _apply_unitary_, _unitary_; `assert False` marks methods that must
    never be reached once an earlier strategy succeeds."""
    class Yes1(EmptyOp):
        def _has_unitary_(self):
            return True

        def _decompose_(self):
            assert False

        def _apply_unitary_(self, args):
            assert False

        def _unitary_(self):
            assert False

    class Yes2(EmptyOp):
        def _has_unitary_(self):
            return NotImplemented

        def _decompose_(self):
            return []

        def _apply_unitary_(self, args):
            assert False

        def _unitary_(self):
            assert False

    class Yes3(EmptyOp):
        def _has_unitary_(self):
            return NotImplemented

        def _decompose_(self):
            return NotImplemented

        def _apply_unitary_(self, args):
            return args.target_tensor

        def _unitary_(self):
            assert False

    class Yes4(EmptyOp):
        def _has_unitary_(self):
            return NotImplemented

        def _decompose_(self):
            return NotImplemented

        def _apply_unitary_(self, args):
            return NotImplemented

        def _unitary_(self):
            return np.array([[1]])

    assert cirq.has_unitary(Yes1())
    assert cirq.has_unitary(Yes2())
    assert cirq.has_unitary(Yes3())
    assert cirq.has_unitary(Yes4())
class EmptyOp(cirq.Operation):
    """A trivial operation that will be recognized as `_apply_unitary_`-able."""

    @property
    def qubits(self):
        # coverage: ignore
        return ()

    def with_qubits(self, *new_qubits):
        # coverage: ignore
        # NOTE(review): the trailing "| cirq/..." text below is a pasted-in
        # dataset artifact, not code — confirm against the original file.
        return self | cirq/protocols/has_unitary_protocol_test.py |
import numpy as np
import cirq
def test_inconclusive():
    """Objects with no unitary-related magic methods are not unitary."""
    class No:
        pass

    assert not cirq.has_unitary(object())
    assert not cirq.has_unitary('boo')
    assert not cirq.has_unitary(No())


def test_via_unitary():
    """_unitary_ returning None/NotImplemented means 'no unitary'."""
    class No1:
        def _unitary_(self):
            return NotImplemented

    class No2:
        def _unitary_(self):
            return None

    class Yes:
        def _unitary_(self):
            return np.array([[1]])

    assert not cirq.has_unitary(No1())
    assert not cirq.has_unitary(No2())
    assert cirq.has_unitary(Yes())


def test_via_apply_unitary():
    """_apply_unitary_ decides only for gates/operations cirq can call."""
    class No1(EmptyOp):
        def _apply_unitary_(self, args):
            return None

    class No2(EmptyOp):
        def _apply_unitary_(self, args):
            return NotImplemented

    class No3(cirq.SingleQubitGate):
        def _apply_unitary_(self, args):
            return NotImplemented

    class No4:  # A non-operation non-gate.
        def _apply_unitary_(self, args):
            assert False  # Because has_unitary doesn't understand how to call.

    class Yes1(EmptyOp):
        def _apply_unitary_(self, args):
            return args.target_tensor

    class Yes2(cirq.SingleQubitGate):
        def _apply_unitary_(self, args):
            return args.target_tensor

    assert cirq.has_unitary(Yes1())
    assert cirq.has_unitary(Yes2())
    assert not cirq.has_unitary(No1())
    assert not cirq.has_unitary(No2())
    assert not cirq.has_unitary(No3())
    assert not cirq.has_unitary(No4())


def test_via_decompose():
    """Unitary iff the decomposition is made solely of unitary ops."""
    class Yes1:
        def _decompose_(self):
            return []

    class Yes2:
        def _decompose_(self):
            return [cirq.X(cirq.LineQubit(0))]

    class No1:
        def _decompose_(self):
            return [cirq.depolarize(0.5).on(cirq.LineQubit(0))]

    class No2:
        def _decompose_(self):
            return None

    class No3:
        def _decompose_(self):
            return NotImplemented

    assert cirq.has_unitary(Yes1())
    assert cirq.has_unitary(Yes2())
    assert not cirq.has_unitary(No1())
    assert not cirq.has_unitary(No2())
    assert not cirq.has_unitary(No3())
def test_via_has_unitary():
    """_has_unitary_ decides when it returns a bool; NotImplemented
    defers to the other strategies."""
    class No1:
        def _has_unitary_(self):
            return NotImplemented

    class No2:
        def _has_unitary_(self):
            return False

    class Yes:
        def _has_unitary_(self):
            return True

    assert not cirq.has_unitary(No1())
    assert not cirq.has_unitary(No2())
    assert cirq.has_unitary(Yes())


def test_order():
    """Strategy order: _has_unitary_, _decompose_, _apply_unitary_,
    _unitary_; `assert False` marks paths that must not be reached."""
    class Yes1(EmptyOp):
        def _has_unitary_(self):
            return True

        def _decompose_(self):
            assert False

        def _apply_unitary_(self, args):
            assert False

        def _unitary_(self):
            assert False

    class Yes2(EmptyOp):
        def _has_unitary_(self):
            return NotImplemented

        def _decompose_(self):
            return []

        def _apply_unitary_(self, args):
            assert False

        def _unitary_(self):
            assert False

    class Yes3(EmptyOp):
        def _has_unitary_(self):
            return NotImplemented

        def _decompose_(self):
            return NotImplemented

        def _apply_unitary_(self, args):
            return args.target_tensor

        def _unitary_(self):
            assert False

    class Yes4(EmptyOp):
        def _has_unitary_(self):
            return NotImplemented

        def _decompose_(self):
            return NotImplemented

        def _apply_unitary_(self, args):
            return NotImplemented

        def _unitary_(self):
            return np.array([[1]])

    assert cirq.has_unitary(Yes1())
    assert cirq.has_unitary(Yes2())
    assert cirq.has_unitary(Yes3())
    assert cirq.has_unitary(Yes4())
class EmptyOp(cirq.Operation):
    """A trivial operation that will be recognized as `_apply_unitary_`-able."""

    @property
    def qubits(self):
        # coverage: ignore
        return ()

    def with_qubits(self, *new_qubits):
        # coverage: ignore
        # NOTE(review): the trailing "| 0.796609 | 0.709768 |" below is a
        # pasted-in dataset artifact, not code — confirm against the file.
        return self | 0.796609 | 0.709768 |
import warnings
import time
import sys
import os
import requests
from dateutil import parser
import datetime, time
from ....core.BaseAgent3 import BaseAgent
import json
class snowAgent(BaseAgent):
    """ServiceNow agent.

    Pulls Change Request and Incident tickets created between the last
    tracked timestamp and "now", together with their approval records,
    and stores the window end for the next run.
    """

    warnings.filterwarnings('ignore')

    @BaseAgent.timed
    def process(self):
        """Entry point: read config/credentials, compute the time window,
        then fetch the tickets."""
        self.baseLogger.info('Inside process')
        try:
            self.BaseUrl = self.config.get("baseUrl", '')
            self.CR_sysid_url = self.config.get("CR_sysid_url", '')
            self.CR_Url = self.config.get("CR_Url", '')
            self.CR_Approval_Url = self.config.get("CR_Approval_Url", '')
            self.IN_sysid_url = self.config.get("IN_sysid_url", '')
            self.IN_Url = self.config.get("IN_Url", '')
            self.IN_Approval_Url = self.config.get("IN_Approval_Url", '')
            self.username = self.getCredential("userid")
            self.password = self.getCredential("<PASSWORD>")
            self.tracking_time = {}
            self.CR_sys_id = []
            self.IN_sys_id = []
            self.response = []
            # Window start: last tracked time, falling back to the
            # configured start on the first run.
            self.start_time = self.tracking.get("start_time", '')
            if self.start_time == '':
                self.start_time = self.config.get("startFrom", '')
            # Window end = now (ms precision); persisted as the next start.
            self.end_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
            self.tracking_time["start_time"] = self.end_time
            self.printdata()
        except Exception as e:
            self.baseLogger.error(e)

    def printdata(self):
        """Fetch sys_ids in the window, then collect ticket + approval data."""
        try:
            json_headers = {"Content-Type": "application/json", "Accept": "application/json"}
            self.CR_sys_id = self._fetch_sys_ids(self.CR_sysid_url, json_headers)
            self.IN_sys_id = self._fetch_sys_ids(self.IN_sysid_url, json_headers)
            # BUGFIX: the original guard read
            #   if self.CR_sys_id and self.IN_sys_id != []:
            # which parses as `CR and (IN != [])`, so nothing was processed
            # (and tracking never advanced) unless BOTH lists were
            # non-empty. Each list is now handled independently; tracking
            # advances whenever anything was found.
            if self.CR_sys_id or self.IN_sys_id:
                self._collect_tickets(self.CR_sys_id, self.CR_Url,
                                      self.CR_Approval_Url, "Change Ticket",
                                      json_headers)
                self._collect_tickets(self.IN_sys_id, self.IN_Url,
                                      self.IN_Approval_Url, "Incident Ticket",
                                      json_headers)
                self.updateTrackingJson(self.tracking_time)
        except Exception as e:
            self.baseLogger.error(e)

    def _fetch_sys_ids(self, sysid_fragment, json_headers):
        """Return sys_ids of records created within [start_time, end_time)."""
        url = (self.BaseUrl + sysid_fragment + '%27' + self.start_time
               + '%27)%40javascript%3Ags.dateGenerate(%27' + self.end_time + '%27)')
        data = requests.get(url, auth=(self.username, self.password),
                            headers=json_headers).json()
        return [record['sys_id'] for record in data['result']]

    def _collect_tickets(self, sys_ids, detail_fragment, approval_fragment,
                         ticket_type, json_headers):
        """Append one {'data': ...} payload per sys_id to self.response."""
        for sys_id in sys_ids:
            detail = requests.get(self.BaseUrl + detail_fragment + sys_id,
                                  auth=(self.username, self.password),
                                  headers=json_headers).json()
            approvals = requests.get(self.BaseUrl + approval_fragment + sys_id,
                                     auth=(self.username, self.password),
                                     headers=json_headers).json()
            payload = detail['result'][0]
            payload["approval_details"] = approvals['result']
            payload["Ticket_type"] = ticket_type
            self.response.append({"data": payload})
# Script entry point: instantiating the agent starts it (presumably
# BaseAgent drives execution from __init__ — TODO confirm).
# NOTE(review): trailing "| PlatformAgents/... | import warnings" below is
# a pasted-in dataset artifact, not code — confirm against the file.
if __name__ == "__main__":
    snowAgent() | PlatformAgents/com/cognizant/devops/platformagents/agents/itsm/snow/snowAgent3.py | import warnings
import time
import sys
import os
import requests
from dateutil import parser
import datetime, time
from ....core.BaseAgent3 import BaseAgent
import json
class snowAgent(BaseAgent):
    """ServiceNow agent: pulls Change and Incident tickets (plus approval
    records) created between the last tracked time and now.

    NOTE(review): nesting reconstructed from a whitespace-mangled source;
    this span duplicates an earlier copy in the same dump.
    """

    warnings.filterwarnings('ignore')

    @BaseAgent.timed
    def process(self):
        """Read config/credentials, compute the time window, fetch tickets."""
        self.baseLogger.info('Inside process')
        try:
            self.BaseUrl = self.config.get("baseUrl", '')
            self.CR_sysid_url = self.config.get("CR_sysid_url", '')
            self.CR_Url = self.config.get("CR_Url", '')
            self.CR_Approval_Url = self.config.get("CR_Approval_Url", '')
            self.IN_sysid_url = self.config.get("IN_sysid_url", '')
            self.IN_Url = self.config.get("IN_Url", '')
            self.IN_Approval_Url = self.config.get("IN_Approval_Url", '')
            self.username = self.getCredential("userid")
            self.password = self.getCredential("<PASSWORD>")
            self.tracking_time={}
            self.CR_sys_id=[]
            self.IN_sys_id=[]
            self.response=[]
            # Window start: last tracked time, else configured start.
            self.start_time = self.tracking.get("start_time", '')
            if self.start_time == '':
                self.start_time = self.config.get("startFrom", '')
            # Window end = now (ms precision); persisted as next start.
            self.end_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
            self.tracking_time["start_time"]=self.end_time
            self.printdata()
        except Exception as e:
            self.baseLogger.error(e)

    def printdata(self):
        """Fetch sys_ids for the window, then ticket + approval details."""
        try:
            CR_sys_url = self.BaseUrl + self.CR_sysid_url + '%27' + self.start_time + '%27)%40javascript%3Ags.dateGenerate(%27' + self.end_time + '%27)'
            IN_sys_url = self.BaseUrl + self.IN_sysid_url + '%27' + self.start_time + '%27)%40javascript%3Ags.dateGenerate(%27' + self.end_time + '%27)'
            json_headers = {"Content-Type":"application/json","Accept":"application/json"}
            CR_sys_response = requests.get(CR_sys_url, auth=(self.username, self.password), headers=json_headers )
            IN_sys_response = requests.get(IN_sys_url, auth=(self.username, self.password), headers=json_headers )
            CR_sys_data = CR_sys_response.json()
            IN_sys_data = IN_sys_response.json()
            for k in CR_sys_data['result']:
                CR_sysid=k['sys_id']
                self.CR_sys_id.append(CR_sysid)
            for k in IN_sys_data['result']:
                IN_sysid=k['sys_id']
                self.IN_sys_id.append(IN_sysid)
            # NOTE(review): this parses as `CR and (IN != [])` — likely an
            # operator-precedence bug; tickets are only processed when BOTH
            # lists are non-empty. Verify the intended behaviour.
            if self.CR_sys_id and self.IN_sys_id != []:
                for i in self.CR_sys_id:
                    CR_response_data={}
                    CR_approval_data={}
                    CR_url = self.BaseUrl + self.CR_Url + i
                    CR_response = requests.get(CR_url, auth=(self.username, self.password), headers=json_headers )
                    CR_data = CR_response.json()
                    CR_response_data["data"]=CR_data['result'][0]
                    CR_approval_url = self.BaseUrl + self.CR_Approval_Url + i
                    CR_approval_response = requests.get(CR_approval_url, auth=(self.username, self.password), headers=json_headers )
                    CR_app_data = CR_approval_response.json()
                    CR_approval_data["approval_details"]=CR_app_data['result']
                    CR_response_data["data"].update(CR_approval_data)
                    CR_response_data["data"]["Ticket_type"]="Change Ticket"
                    self.response.append(CR_response_data)
                for i in self.IN_sys_id:
                    IN_response_data={}
                    IN_approval_data={}
                    IN_url = self.BaseUrl + self.IN_Url + i
                    IN_response = requests.get(IN_url, auth=(self.username, self.password), headers=json_headers )
                    IN_data = IN_response.json()
                    IN_response_data["data"]=IN_data['result'][0]
                    IN_approval_url = self.BaseUrl + self.IN_Approval_Url + i
                    IN_approval_response = requests.get(IN_approval_url, auth=(self.username, self.password), headers=json_headers )
                    IN_app_data = IN_approval_response.json()
                    IN_approval_data["approval_details"]=IN_app_data['result']
                    IN_response_data["data"].update(IN_approval_data)
                    IN_response_data["data"]["Ticket_type"]="Incident Ticket"
                    self.response.append(IN_response_data)
                self.updateTrackingJson(self.tracking_time)
        except Exception as e:
            self.baseLogger.error(e)
# Script entry point: instantiating the agent starts it — TODO confirm.
# NOTE(review): trailing "| 0.082913 | 0.046249 |" below is a pasted-in
# dataset artifact, not code — confirm against the original file.
if __name__ == "__main__":
    snowAgent() | 0.082913 | 0.046249 |
import torch
from rlkit.torch.vpg.vpg import VPGTrainer
from rlkit.torch.optimizers import OptimizerWrapper
class PPOTrainer(VPGTrainer):
"""Proximal Policy Optimization (PPO).
Args:
policy (garage.torch.policies.Policy): Policy.
value_function (garage.torch.value_functions.ValueFunction): The value
function.
policy_optimizer (garage.torch.optimizer.OptimizerWrapper): Optimizer
for policy.
vf_optimizer (garage.torch.optimizer.OptimizerWrapper): Optimizer for
value function.
max_episode_length (int): Maximum length of a single rollout.
lr_clip_range (float): The limit on the likelihood ratio between
policies.
num_train_per_epoch (int): Number of train_once calls per epoch.
discount (float): Discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
positive_adv (bool): Whether to shift the advantages
so that they are always positive. When used in
conjunction with center_adv the advantages will be
standardized before shifting.
policy_ent_coeff (float): The coefficient of the policy entropy.
Setting it to zero would mean no entropy regularization.
use_softplus_entropy (bool): Whether to estimate the softmax
distribution of the entropy to prevent the entropy from being
negative.
stop_entropy_gradient (bool): Whether to stop the entropy gradient.
entropy_method (str): A string from: 'max', 'regularized',
'no_entropy'. The type of entropy method to use. 'max' adds the
dense entropy to the reward for each time step. 'regularized' adds
the mean entropy to the surrogate objective. See
https://arxiv.org/abs/1805.00909 for more details.
"""
def __init__(self,
             policy,
             value_function,
             policy_lr=2.5e-4,
             vf_lr=2.5e-4,
             policy_optimizer=None,
             vf_optimizer=None,
             lr_clip_range=2e-1,
             discount=0.99,
             gae_lambda=0.97,
             center_adv=True,
             positive_adv=False,
             policy_ent_coeff=0.0,
             use_softplus_entropy=False,
             stop_entropy_gradient=False,
             entropy_method='no_entropy',
             **kwargs):
    """Initialize the PPO trainer.

    If no optimizer is supplied for the policy or the value function, an
    Adam-based ``OptimizerWrapper`` is built with the corresponding
    learning rate (10 optimization epochs, minibatch size 64).  All other
    arguments are forwarded unchanged to ``VPGTrainer``.
    """
    def _default_optimizer(module, lr):
        # Single factory for both fallback optimizers so their
        # hyper-parameters cannot drift apart.
        return OptimizerWrapper(torch.optim.Adam,
                                dict(lr=lr),
                                module,
                                max_optimization_epochs=10,
                                minibatch_size=64)

    if policy_optimizer is None:
        policy_optimizer = _default_optimizer(policy, policy_lr)
    if vf_optimizer is None:
        vf_optimizer = _default_optimizer(value_function, vf_lr)

    super().__init__(policy=policy,
                     value_function=value_function,
                     policy_optimizer=policy_optimizer,
                     vf_optimizer=vf_optimizer,
                     discount=discount,
                     gae_lambda=gae_lambda,
                     center_adv=center_adv,
                     positive_adv=positive_adv,
                     policy_ent_coeff=policy_ent_coeff,
                     use_softplus_entropy=use_softplus_entropy,
                     stop_entropy_gradient=stop_entropy_gradient,
                     entropy_method=entropy_method,
                     **kwargs)
    # Likelihood-ratio clip range used by the PPO surrogate objective.
    self._lr_clip_range = lr_clip_range
def _compute_objective(self, advantages, obs, actions, rewards):
r"""Compute objective value.
Args:
advantages (torch.Tensor): Advantage value at each step
with shape :math:`(N \dot [T], )`.
obs (torch.Tensor): Observation from the environment
with shape :math:`(N \dot [T], O*)`.
actions (torch.Tensor): Actions fed to the environment
with shape :math:`(N \dot [T], A*)`.
rewards (torch.Tensor): Acquired rewards
with shape :math:`(N \dot [T], )`.
Returns:
torch.Tensor: Calculated objective values
with shape :math:`(N \dot [T], )`.
"""
# Compute constraint
with torch.no_grad():
old_ll = self._old_policy.log_prob(obs, actions)
new_ll = self.policy.log_prob(obs, actions)
likelihood_ratio = (new_ll - old_ll).exp()
# Calculate surrogate
surrogate = likelihood_ratio * advantages
# Clipping the constraint
likelihood_ratio_clip = torch.clamp(likelihood_ratio,
min=1 - self._lr_clip_range,
max=1 + self._lr_clip_range)
# Calculate surrotate clip
surrogate_clip = likelihood_ratio_clip * advantages
return torch.min(surrogate, surrogate_clip) | rlkit/torch/vpg/ppo.py | import torch
from rlkit.torch.vpg.vpg import VPGTrainer
from rlkit.torch.optimizers import OptimizerWrapper
class PPOTrainer(VPGTrainer):
"""Proximal Policy Optimization (PPO).
Args:
policy (garage.torch.policies.Policy): Policy.
value_function (garage.torch.value_functions.ValueFunction): The value
function.
policy_optimizer (garage.torch.optimizer.OptimizerWrapper): Optimizer
for policy.
vf_optimizer (garage.torch.optimizer.OptimizerWrapper): Optimizer for
value function.
max_episode_length (int): Maximum length of a single rollout.
lr_clip_range (float): The limit on the likelihood ratio between
policies.
num_train_per_epoch (int): Number of train_once calls per epoch.
discount (float): Discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
positive_adv (bool): Whether to shift the advantages
so that they are always positive. When used in
conjunction with center_adv the advantages will be
standardized before shifting.
policy_ent_coeff (float): The coefficient of the policy entropy.
Setting it to zero would mean no entropy regularization.
use_softplus_entropy (bool): Whether to estimate the softmax
distribution of the entropy to prevent the entropy from being
negative.
stop_entropy_gradient (bool): Whether to stop the entropy gradient.
entropy_method (str): A string from: 'max', 'regularized',
'no_entropy'. The type of entropy method to use. 'max' adds the
dense entropy to the reward for each time step. 'regularized' adds
the mean entropy to the surrogate objective. See
https://arxiv.org/abs/1805.00909 for more details.
"""
def __init__(self,
policy,
value_function,
policy_lr=2.5e-4,
vf_lr=2.5e-4,
policy_optimizer=None,
vf_optimizer=None,
lr_clip_range=2e-1,
discount=0.99,
gae_lambda=0.97,
center_adv=True,
positive_adv=False,
policy_ent_coeff=0.0,
use_softplus_entropy=False,
stop_entropy_gradient=False,
entropy_method='no_entropy',
**kwargs):
if policy_optimizer is None:
policy_optimizer = OptimizerWrapper(
torch.optim.Adam,
dict(lr=policy_lr),
policy,
max_optimization_epochs=10,
minibatch_size=64)
if vf_optimizer is None:
vf_optimizer = OptimizerWrapper(
torch.optim.Adam,
dict(lr=vf_lr),
value_function,
max_optimization_epochs=10,
minibatch_size=64)
super().__init__(policy=policy,
value_function=value_function,
policy_optimizer=policy_optimizer,
vf_optimizer=vf_optimizer,
discount=discount,
gae_lambda=gae_lambda,
center_adv=center_adv,
positive_adv=positive_adv,
policy_ent_coeff=policy_ent_coeff,
use_softplus_entropy=use_softplus_entropy,
stop_entropy_gradient=stop_entropy_gradient,
entropy_method=entropy_method,
**kwargs)
self._lr_clip_range = lr_clip_range
def _compute_objective(self, advantages, obs, actions, rewards):
r"""Compute objective value.
Args:
advantages (torch.Tensor): Advantage value at each step
with shape :math:`(N \dot [T], )`.
obs (torch.Tensor): Observation from the environment
with shape :math:`(N \dot [T], O*)`.
actions (torch.Tensor): Actions fed to the environment
with shape :math:`(N \dot [T], A*)`.
rewards (torch.Tensor): Acquired rewards
with shape :math:`(N \dot [T], )`.
Returns:
torch.Tensor: Calculated objective values
with shape :math:`(N \dot [T], )`.
"""
# Compute constraint
with torch.no_grad():
old_ll = self._old_policy.log_prob(obs, actions)
new_ll = self.policy.log_prob(obs, actions)
likelihood_ratio = (new_ll - old_ll).exp()
# Calculate surrogate
surrogate = likelihood_ratio * advantages
# Clipping the constraint
likelihood_ratio_clip = torch.clamp(likelihood_ratio,
min=1 - self._lr_clip_range,
max=1 + self._lr_clip_range)
# Calculate surrotate clip
surrogate_clip = likelihood_ratio_clip * advantages
return torch.min(surrogate, surrogate_clip) | 0.915606 | 0.446977 |
import re
from datetime import datetime
from typing import Optional, Tuple
from email_validator import EmailNotValidError, validate_email
from authx.backend.base import Base
class UsersCRUDMixin(Base):
"""User CRUD MIXIN"""
async def get(self, id: int) -> Optional[dict]:
    """Return the user record with primary key *id*, or ``None`` if absent."""
    return await self._database.get(id)

async def get_by_email(self, email: str) -> Optional[dict]:
    """Return the user record matching *email*, or ``None`` if absent."""
    return await self._database.get_by_email(email)

async def get_by_username(self, username: str) -> Optional[dict]:
    """Return the user record matching *username*, or ``None`` if absent."""
    return await self._database.get_by_username(username)

async def get_by_social(self, provider: str, sid: str) -> Optional[dict]:
    """Return the user linked to social id *sid* on *provider*, or ``None``."""
    return await self._database.get_by_social(provider, sid)  # pragma: no cover
async def get_by_login(self, login: str) -> Optional[dict]:
    """Resolve a user by email address or, failing that, by username.

    The login string is first passed through ``validate_email``; if it
    parses, the lookup uses the *normalized* address it returns.  If it
    raises ``EmailNotValidError``, the string is treated as a username
    (EAFP fallback rather than guessing at the format up front).
    """
    try:
        # .email is the normalized form of the address (e.g. lowercased
        # domain), so lookups are canonical.
        valid_email = validate_email(login).email
        return await self.get_by_email(valid_email)
    except EmailNotValidError:
        # Not an email address -- fall back to a username lookup.
        return await self.get_by_username(login)
async def create(self, obj: dict) -> int:
    """Insert a new user record; returns an integer key for the new row
    (presumably its id -- per the declared return type)."""
    return await self._database.create(obj)  # pragma: no cover
async def update(self, id: int, obj: dict) -> None:
    """Update the user with primary key *id* using the fields in *obj*.

    The database call's return value is deliberately discarded so this
    method always returns ``None`` regardless of the driver.
    """
    await self._database.update(id, obj)
    return None
async def delete(self, id: int) -> None:
    """Delete the user with primary key *id*."""
    await self._database.delete(id)  # pragma: no cover
    return None  # pragma: no cover

async def update_last_login(self, id: int) -> None:
    """Stamp the user's ``last_login`` field with the current UTC time."""
    await self.update(id, {"last_login": datetime.utcnow()})  # pragma: no cover
async def search(
self, id: int, username: str, p: int, size: int
) -> Tuple[dict, int]:
"""
Search for users.
"""
if id is not None: # pragma: no cover
f = {"id": id} # pragma: no cover
elif username is not None and username.strip() != "": # pragma: no cover
f = {"username": re.compile(username, re.IGNORECASE)} # pragma: no cover
else: # pragma: no cover
f = {} # pragma: no cover
return await self._database.search(f, p, size) # pragma: no cover | authx/backend/api/crud.py | import re
from datetime import datetime
from typing import Optional, Tuple
from email_validator import EmailNotValidError, validate_email
from authx.backend.base import Base
class UsersCRUDMixin(Base):
"""User CRUD MIXIN"""
async def get(self, id: int) -> Optional[dict]:
"""
Get a user by id.
"""
return await self._database.get(id)
async def get_by_email(self, email: str) -> Optional[dict]:
"""
Get a user by email.
"""
return await self._database.get_by_email(email)
async def get_by_username(self, username: str) -> Optional[dict]:
"""
Get a user by username.
"""
return await self._database.get_by_username(username)
async def get_by_social(self, provider: str, sid: str) -> Optional[dict]:
"""
Get a user by social id.
"""
return await self._database.get_by_social(provider, sid) # pragma: no cover
async def get_by_login(self, login: str) -> Optional[dict]:
"""
Get a user by login.
"""
try:
valid_email = validate_email(login).email
return await self.get_by_email(valid_email)
except EmailNotValidError:
return await self.get_by_username(login)
async def create(self, obj: dict) -> int:
"""
Create a user.
"""
return await self._database.create(obj) # pragma: no cover
async def update(self, id: int, obj: dict) -> None:
"""
Update a user.
"""
await self._database.update(id, obj)
return None
async def delete(self, id: int) -> None:
"""
Delete a user.
"""
await self._database.delete(id) # pragma: no cover
return None # pragma: no cover
async def update_last_login(self, id: int) -> None:
"""
Update the last login of a user.
"""
await self.update(id, {"last_login": datetime.utcnow()}) # pragma: no cover
async def search(
self, id: int, username: str, p: int, size: int
) -> Tuple[dict, int]:
"""
Search for users.
"""
if id is not None: # pragma: no cover
f = {"id": id} # pragma: no cover
elif username is not None and username.strip() != "": # pragma: no cover
f = {"username": re.compile(username, re.IGNORECASE)} # pragma: no cover
else: # pragma: no cover
f = {} # pragma: no cover
return await self._database.search(f, p, size) # pragma: no cover | 0.669745 | 0.124532 |
from __future__ import (absolute_import, print_function, unicode_literals,
with_statement)
import random
import zlib
from wolframclient.serializers.wxfencoder.streaming import (
ExactSizeReader, ZipCompressedReader, ZipCompressedWriter)
from wolframclient.utils import six
from wolframclient.utils.tests import TestCase as BaseTestCase
if six.PY2:
    def _bytes(value):
        """Return *value* (an int in 0..255) as a single-byte string."""
        # On Python 2, chr() already yields a one-byte str.
        return chr(value)
else:
    def _bytes(value):
        """Return *value* (an int in 0..255) as a single-byte ``bytes``."""
        # On Python 3, bytes() over an iterable of ints builds raw bytes.
        return bytes([value])
class TestCase(BaseTestCase):
def test_compress(self):
    """Compressing b'abc' through ZipCompressedWriter yields the golden
    zlib stream (value matches zlib.compress(b'abc') at default level)."""
    stream = six.BytesIO()
    with ZipCompressedWriter(stream) as z_writer:
        z_writer.write(b'abc')
    # Golden value: the canonical zlib-compressed form of b'abc'.
    zipped = b"x\x9cKLJ\x06\x00\x02M\x01'"
    self.assertSequenceEqual(stream.getvalue(), zipped)
def test_multi_write_compress(self):
    """Writing the payload one byte at a time must produce the same
    stream as zlib.compress on the whole payload, i.e. the writer keeps
    compressor state across write() calls."""
    byte_list = [random.randint(0, 255) for i in range(10000)]
    data = six.binary_type(bytearray(byte_list))
    stream = six.BytesIO()
    with ZipCompressedWriter(stream) as z_writer:
        for i in byte_list:
            z_writer.write(_bytes(i))
    # Reference: compressing the full buffer in one shot.
    zipped = zlib.compress(data)
    self.assertSequenceEqual(stream.getvalue(), zipped)
def test_uncompress(self):
    """Decompress a zlib stream through ZipCompressedReader in fixed-size
    chunks; each read(n) must yield exactly n decompressed bytes, and a
    drained reader must report EOF as b''."""
    byte_list = [random.randint(0, 255) for i in range(10000)]
    data = six.binary_type(bytearray(byte_list))
    zipped = zlib.compress(data)
    total = len(zipped)
    num_of_chunk = 20
    chunk_size = total // num_of_chunk
    in_buffer = six.BytesIO(zipped)
    reader = ZipCompressedReader(in_buffer)
    buff = six.BytesIO()
    for i in range(num_of_chunk):
        # The accumulated output must always be an exact prefix of the
        # original data, i * chunk_size bytes long after i reads.
        buff.write(reader.read(chunk_size))
        self.assertEqual(buff.getvalue(), data[:(i + 1) * chunk_size])
    # Drain whatever remains past the 20 whole chunks.
    buff.write(reader.read())
    self.assertEqual(buff.getvalue(), data)
    # EOF on a fully consumed reader is an empty byte string.
    self.assertEqual(reader.read(), b'')
def test_uncompress_exact_len(self):
    """Same chunked round-trip as test_uncompress, but through
    ExactSizeReader wrapping the zip reader: every read(n) must deliver
    exactly n bytes until the stream is exhausted."""
    byte_list = [random.randint(0, 255) for i in range(10000)]
    data = six.binary_type(bytearray(byte_list))
    zipped = zlib.compress(data)
    total = len(zipped)
    num_of_chunk = 20
    chunk_size = total // num_of_chunk
    in_buffer = six.BytesIO(zipped)
    reader = ExactSizeReader(ZipCompressedReader(in_buffer))
    buff = six.BytesIO()
    for i in range(num_of_chunk):
        buff.write(reader.read(chunk_size))
        # Output so far must be an exact prefix of the source data.
        self.assertEqual(buff.getvalue(), data[:(i + 1) * chunk_size])
    buff.write(reader.read())
    self.assertEqual(buff.getvalue(), data)
def test_uncompress_exact_len_err(self):
data = six.binary_type(bytearray(range(100)))
zipped = zlib.compress(data)
total = len(zipped)
reader = ExactSizeReader(ZipCompressedReader(six.BytesIO(zipped)))
with self.assertRaises(EOFError):
reader.read(size=total + 1) | wolframclient/tests/serializers/wxf_compress.py |
from __future__ import (absolute_import, print_function, unicode_literals,
with_statement)
import random
import zlib
from wolframclient.serializers.wxfencoder.streaming import (
ExactSizeReader, ZipCompressedReader, ZipCompressedWriter)
from wolframclient.utils import six
from wolframclient.utils.tests import TestCase as BaseTestCase
if six.PY2:
def _bytes(value):
return chr(value)
else:
def _bytes(value):
return bytes((value, ))
class TestCase(BaseTestCase):
def test_compress(self):
stream = six.BytesIO()
with ZipCompressedWriter(stream) as z_writer:
z_writer.write(b'abc')
zipped = b"x\x9cKLJ\x06\x00\x02M\x01'"
self.assertSequenceEqual(stream.getvalue(), zipped)
def test_multi_write_compress(self):
byte_list = [random.randint(0, 255) for i in range(10000)]
data = six.binary_type(bytearray(byte_list))
stream = six.BytesIO()
with ZipCompressedWriter(stream) as z_writer:
for i in byte_list:
z_writer.write(_bytes(i))
zipped = zlib.compress(data)
self.assertSequenceEqual(stream.getvalue(), zipped)
def test_uncompress(self):
byte_list = [random.randint(0, 255) for i in range(10000)]
data = six.binary_type(bytearray(byte_list))
zipped = zlib.compress(data)
total = len(zipped)
num_of_chunk = 20
chunk_size = total // num_of_chunk
in_buffer = six.BytesIO(zipped)
reader = ZipCompressedReader(in_buffer)
buff = six.BytesIO()
for i in range(num_of_chunk):
buff.write(reader.read(chunk_size))
self.assertEqual(buff.getvalue(), data[:(i + 1) * chunk_size])
buff.write(reader.read())
self.assertEqual(buff.getvalue(), data)
self.assertEqual(reader.read(), b'')
def test_uncompress_exact_len(self):
byte_list = [random.randint(0, 255) for i in range(10000)]
data = six.binary_type(bytearray(byte_list))
zipped = zlib.compress(data)
total = len(zipped)
num_of_chunk = 20
chunk_size = total // num_of_chunk
in_buffer = six.BytesIO(zipped)
reader = ExactSizeReader(ZipCompressedReader(in_buffer))
buff = six.BytesIO()
for i in range(num_of_chunk):
buff.write(reader.read(chunk_size))
self.assertEqual(buff.getvalue(), data[:(i + 1) * chunk_size])
buff.write(reader.read())
self.assertEqual(buff.getvalue(), data)
def test_uncompress_exact_len_err(self):
data = six.binary_type(bytearray(range(100)))
zipped = zlib.compress(data)
total = len(zipped)
reader = ExactSizeReader(ZipCompressedReader(six.BytesIO(zipped)))
with self.assertRaises(EOFError):
reader.read(size=total + 1) | 0.579162 | 0.32826 |
from pix2pix import pix2pix
import argparse
from utils import *
"""parsing and configuration"""
def str2bool(value):
    """Parse a textual command-line boolean.

    argparse's ``type=bool`` is a well-known trap: any non-empty string,
    including "False", is truthy, so the flag could never be disabled
    from the CLI.  Accept the usual spellings and reject anything else.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('boolean value expected, got %r' % value)

def parse_args():
    """Build the CLI parser, parse sys.argv, and return validated args.

    Returns:
        argparse.Namespace: parsed arguments, post-processed by
        ``check_args`` (which also creates the output directories).
    """
    desc = "Tensorflow implementation of jh_GAN"
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('--phase', type=str, default='train', help='train or test ?')
    parser.add_argument('--dataset', type=str, default='meet', help='dataset_name')
    parser.add_argument('--epoch', type=int, default=200, help='The number of epochs to run')
    parser.add_argument('--batch_size', type=int, default=1, help='The size of batch per gpu')
    parser.add_argument('--print_freq', type=int, default=100, help='The number of image_print_freq')
    parser.add_argument('--lr', type=float, default=0.0002, help='The learning rate')
    parser.add_argument('--L1_weight', type=float, default=10.0, help='The L1 lambda')
    parser.add_argument('--ch', type=int, default=64, help='base channel number per layer')
    parser.add_argument('--repeat', type=int, default=9, help='img size : 256 -> 9, 128 -> 6')
    parser.add_argument('--img_size', type=int, default=256, help='The size of image')
    # BUGFIX: was type=bool, which made "--gray_to_RGB False" evaluate True.
    parser.add_argument('--gray_to_RGB', type=str2bool, default=False, help='Gray -> RGB')
    parser.add_argument('--checkpoint_dir', type=str, default='checkpoint',
                        help='Directory name to save the checkpoints')
    parser.add_argument('--result_dir', type=str, default='results',
                        help='Directory name to save the generated images')
    parser.add_argument('--log_dir', type=str, default='logs',
                        help='Directory name to save training logs')
    parser.add_argument('--sample_dir', type=str, default='samples',
                        help='Directory name to save the samples on training')
    return check_args(parser.parse_args())
"""checking arguments"""
def check_args(args):
    """Validate parsed arguments and ensure all output directories exist.

    Invalid numeric values are reported on stdout but do not abort,
    matching the original best-effort behaviour; the namespace is
    returned either way.
    """
    # Create every output directory up front so later saves cannot fail.
    check_folder(args.checkpoint_dir)
    check_folder(args.result_dir)
    check_folder(args.log_dir)
    check_folder(args.sample_dir)
    # Explicit comparisons instead of try/assert/except: ``assert`` is
    # stripped under ``python -O`` and the bare ``except`` silently
    # swallowed every exception type, not just AssertionError.
    if args.epoch < 1:
        print('number of epochs must be larger than or equal to one')
    if args.batch_size < 1:
        print('batch size must be larger than or equal to one')
    return args
"""main"""
def main():
    """Entry point: parse CLI arguments, build the model, then train or test."""
    args = parse_args()
    if args is None:
        exit()
    # allow_soft_placement lets TF place ops on CPU when no GPU kernel exists.
    session_config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Session(config=session_config) as sess:
        gan = pix2pix(sess, args)
        # Build the compute graph, then report the network architecture.
        gan.build_model()
        show_all_variables()
        if args.phase == 'train':
            gan.train()
            print(" [*] Training finished!")
        if args.phase == 'test':
            gan.test()
            print(" [*] Test finished!")
if __name__ == '__main__':
main() | main.py | from pix2pix import pix2pix
import argparse
from utils import *
"""parsing and configuration"""
def parse_args():
desc = "Tensorflow implementation of jh_GAN"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('--phase', type=str, default='train', help='train or test ?')
parser.add_argument('--dataset', type=str, default='meet', help='dataset_name')
parser.add_argument('--epoch', type=int, default=200, help='The number of epochs to run')
parser.add_argument('--batch_size', type=int, default=1, help='The size of batch per gpu')
parser.add_argument('--print_freq', type=int, default=100, help='The number of image_print_freq')
parser.add_argument('--lr', type=float, default=0.0002, help='The learning rate')
parser.add_argument('--L1_weight', type=float, default=10.0, help='The L1 lambda')
parser.add_argument('--ch', type=int, default=64, help='base channel number per layer')
parser.add_argument('--repeat', type=int, default=9, help='img size : 256 -> 9, 128 -> 6')
parser.add_argument('--img_size', type=int, default=256, help='The size of image')
parser.add_argument('--gray_to_RGB', type=bool, default=False, help='Gray -> RGB')
parser.add_argument('--checkpoint_dir', type=str, default='checkpoint',
help='Directory name to save the checkpoints')
parser.add_argument('--result_dir', type=str, default='results',
help='Directory name to save the generated images')
parser.add_argument('--log_dir', type=str, default='logs',
help='Directory name to save training logs')
parser.add_argument('--sample_dir', type=str, default='samples',
help='Directory name to save the samples on training')
return check_args(parser.parse_args())
"""checking arguments"""
def check_args(args):
# --checkpoint_dir
check_folder(args.checkpoint_dir)
# --result_dir
check_folder(args.result_dir)
# --result_dir
check_folder(args.log_dir)
# --sample_dir
check_folder(args.sample_dir)
# --epoch
try:
assert args.epoch >= 1
except:
print('number of epochs must be larger than or equal to one')
# --batch_size
try:
assert args.batch_size >= 1
except:
print('batch size must be larger than or equal to one')
return args
"""main"""
def main():
# parse arguments
args = parse_args()
if args is None:
exit()
# open session
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
gan = pix2pix(sess, args)
# build graph
gan.build_model()
# show network architecture
show_all_variables()
if args.phase == 'train' :
# launch the graph in a session
gan.train()
print(" [*] Training finished!")
if args.phase == 'test' :
gan.test()
print(" [*] Test finished!")
if __name__ == '__main__':
main() | 0.553988 | 0.167185 |
import unittest
from binascii import unhexlify, hexlify
from pyelliptic import Cipher, OpenSSL
class TestCipher(unittest.TestCase):
@unittest.skipIf('aes-256-ctr' not in OpenSSL.cipher_algo,
                 'aes-256-ctr is not supported by the SSL library')
def test_aes256ctr(self):
    """Round-trip a fixed key/IV/plaintext through aes-256-ctr.

    NOTE(review): the key/plaintext look like the NIST SP 800-38A
    AES-256 vectors -- TODO confirm.  Only encrypt->decrypt symmetry is
    asserted; the ciphertext is never compared to a known-answer value.
    """
    ciphername = "aes-256-ctr"
    iv_hex = b"f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff"
    iv = unhexlify(iv_hex)
    key_hex = b"603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4"
    key = unhexlify(key_hex)
    plain_hex = b"6bc1bee22e409f96e93d7e117393172a"
    plaintext = unhexlify(plain_hex)
    # Third Cipher argument: 1 when encrypting, 0 when decrypting
    # (inferred from the round-trip below -- confirm against pyelliptic docs).
    ctx = Cipher(key, iv, 1, ciphername=ciphername)
    enc = ctx.ciphering(plaintext)
    print(hexlify(enc))  # NOTE(review): leftover debug output in a test
    ctx = Cipher(key, iv, 0, ciphername=ciphername)
    self.assertEqual(plaintext, ctx.ciphering(enc))
def test_aes256cfb(self):
    """Round-trip the same fixed key/IV/plaintext through aes-256-cfb.

    NOTE(review): only encrypt->decrypt symmetry is asserted; no
    known-answer ciphertext comparison is performed.
    """
    ciphername = "aes-256-cfb"
    key_hex = b"603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4"
    key = unhexlify(key_hex)
    iv_hex = b"000102030405060708090A0B0C0D0E0F"
    iv = unhexlify(iv_hex)
    plain_hex = b"6bc1bee22e409f96e93d7e117393172a"
    plaintext = unhexlify(plain_hex)
    # 1 = encrypt, 0 = decrypt (inferred from the round-trip below).
    ctx = Cipher(key, iv, 1, ciphername=ciphername)
    enc = ctx.ciphering(plaintext)
    print(hexlify(enc))  # NOTE(review): leftover debug output in a test
    ctx = Cipher(key, iv, 0, ciphername=ciphername)
    self.assertEqual(plaintext, ctx.ciphering(enc))
def test_aes256cbc(self):
ciphername = "aes-256-cbc"
iv_hex = b"000102030405060708090A0B0C0D0E0F"
iv = unhexlify(iv_hex)
key_hex = b"603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4"
key = unhexlify(key_hex)
plain_hex = b"6bc1bee22e409f96e93d7e117393172a"
plaintext = unhexlify(plain_hex)
ctx = Cipher(key, iv, 1, ciphername=ciphername)
enc = ctx.ciphering(plaintext)
print(hexlify(enc))
ctx = Cipher(key, iv, 0, ciphername=ciphername)
self.assertEqual(plaintext, ctx.ciphering(enc)) | tests/test_cipher.py | import unittest
from binascii import unhexlify, hexlify
from pyelliptic import Cipher, OpenSSL
class TestCipher(unittest.TestCase):
@unittest.skipIf('aes-256-ctr' not in OpenSSL.cipher_algo,
'aes-256-ctr is not supported by the SSL library')
def test_aes256ctr(self):
ciphername = "aes-256-ctr"
iv_hex = b"f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff"
iv = unhexlify(iv_hex)
key_hex = b"603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4"
key = unhexlify(key_hex)
plain_hex = b"6bc1bee22e409f96e93d7e117393172a"
plaintext = unhexlify(plain_hex)
ctx = Cipher(key, iv, 1, ciphername=ciphername)
enc = ctx.ciphering(plaintext)
print(hexlify(enc))
ctx = Cipher(key, iv, 0, ciphername=ciphername)
self.assertEqual(plaintext, ctx.ciphering(enc))
def test_aes256cfb(self):
ciphername = "aes-256-cfb"
key_hex = b"603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4"
key = unhexlify(key_hex)
iv_hex = b"000102030405060708090A0B0C0D0E0F"
iv = unhexlify(iv_hex)
plain_hex = b"6bc1bee22e409f96e93d7e117393172a"
plaintext = unhexlify(plain_hex)
ctx = Cipher(key, iv, 1, ciphername=ciphername)
enc = ctx.ciphering(plaintext)
print(hexlify(enc))
ctx = Cipher(key, iv, 0, ciphername=ciphername)
self.assertEqual(plaintext, ctx.ciphering(enc))
def test_aes256cbc(self):
ciphername = "aes-256-cbc"
iv_hex = b"000102030405060708090A0B0C0D0E0F"
iv = unhexlify(iv_hex)
key_hex = b"603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4"
key = unhexlify(key_hex)
plain_hex = b"6bc1bee22e409f96e93d7e117393172a"
plaintext = unhexlify(plain_hex)
ctx = Cipher(key, iv, 1, ciphername=ciphername)
enc = ctx.ciphering(plaintext)
print(hexlify(enc))
ctx = Cipher(key, iv, 0, ciphername=ciphername)
self.assertEqual(plaintext, ctx.ciphering(enc)) | 0.499023 | 0.442396 |
import os
import sys
import fnmatch
import subprocess
## prepare to run PyTest as a command
from distutils.core import Command
from setuptools import setup, find_packages
from version import get_git_version
VERSION, SOURCE_LABEL = get_git_version()
PROJECT = 'streamcorpus_filter'
AUTHOR = 'Diffeo, Inc.'
AUTHOR_EMAIL = '<EMAIL>'
DESC = 'example illustration of interfaces to use in making a faster filter in C++'
LICENSE = 'MIT/X11 license http://opensource.org/licenses/MIT'
URL = 'http://github.com/trec-kba/streamcorpus-filter'
def read_file(file_name):
    """Return the text of *file_name* resolved relative to this file.

    Absolute paths are honoured as-is, because ``os.path.join`` discards
    earlier components when a later one is absolute.
    """
    file_path = os.path.join(
        os.path.dirname(__file__),
        file_name
    )
    # BUGFIX: the handle from open() was never closed; use a context
    # manager so it is released promptly instead of leaking until GC.
    with open(file_path) as f:
        return f.read()
def recursive_glob(treeroot, pattern):
    """Return every path under *treeroot* whose basename matches *pattern*.

    Matching uses ``fnmatch`` shell-style wildcards; the tree is walked
    top-down and each match is returned joined to its directory.
    """
    return [
        os.path.join(base, name)
        for base, _dirs, files in os.walk(treeroot)
        for name in fnmatch.filter(files, pattern)
    ]
def recursive_glob_with_tree(treeroot, pattern):
    """Like ``recursive_glob``, but grouped by directory.

    Returns a list of ``(directory, [matching paths])`` pairs, one per
    directory visited by ``os.walk`` -- including directories with no
    matches, which contribute an empty list.
    """
    return [
        (base,
         [os.path.join(base, name) for name in fnmatch.filter(files, pattern)])
        for base, _dirs, files in os.walk(treeroot)
    ]
def _myinstall(pkgspec):
    """Install *pkgspec* by invoking setuptools' easy_install in-process.

    NOTE(review): easy_install is long deprecated; kept only for
    compatibility with this file's existing build tooling.
    """
    setup(
        script_args = ['-q', 'easy_install', '-v', pkgspec],
        script_name = 'easy_install'
    )
class InstallTestDependencies(Command):
    """setup.py command installing install_requires and tests_require."""

    description = 'installs all dependencies required to run all tests'
    user_options = []

    def initialize_options(self):
        # This command takes no options.
        pass

    def finalize_options(self):
        pass

    def easy_install(self, packages):
        """Shell out to easy_install for *packages*; abort on failure."""
        cmd = ['easy_install'] + list(packages or [])
        errno = subprocess.call(cmd)
        if errno:
            raise SystemExit(errno)

    def run(self):
        # Install runtime dependencies first, then the test-only ones.
        dist = self.distribution
        for group in (dist.install_requires, dist.tests_require):
            if group:
                self.easy_install(group)
class PyTest(Command):
    """setup.py command that runs the test suite with py.test."""

    description = 'runs py.test to execute all tests'
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # Re-scan site-packages so libraries installed earlier in this
        # process (e.g. via install_test) become importable.
        import site
        site.main()
        # Import lazily: pytest may only have been installed just above.
        import pytest
        rc = pytest.main(['-n', '3', '-vvs', 'src/tests'])
        if rc:
            sys.exit(1)
setup(
name=PROJECT,
version=VERSION,
source_label=SOURCE_LABEL,
description=DESC,
license=LICENSE,
long_description=read_file('README.md'),
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
packages=find_packages('src', exclude=('tests', 'tests.*')),
package_dir={'': 'src'},
include_package_data=True,
entry_points={
'streamcorpus_pipeline.stages': 'textfilter_batch = streamcorpus_filter.pipeline_stage:FastFilterBatch',
},
cmdclass={'test': PyTest,
'install_test': InstallTestDependencies},
# We can select proper classifiers later
classifiers=[
'Development Status :: 3 - Alpha',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License', ## MIT/X11 license http://opensource.org/licenses/MIT
],
tests_require=[
'pytest',
'ipdb',
'pytest-cov',
'pytest-xdist',
'pytest-timeout',
'pytest-incremental',
'pytest-capturelog',
'epydoc',
],
install_requires=[
'thrift',
'streamcorpus >= 0.3.0',
],
) | py/setup.py |
import os
import sys
import fnmatch
import subprocess
## prepare to run PyTest as a command
from distutils.core import Command
from setuptools import setup, find_packages
from version import get_git_version
VERSION, SOURCE_LABEL = get_git_version()
PROJECT = 'streamcorpus_filter'
AUTHOR = 'Diffeo, Inc.'
AUTHOR_EMAIL = '<EMAIL>'
DESC = 'example illustration of interfaces to use in making a faster filter in C++'
LICENSE = 'MIT/X11 license http://opensource.org/licenses/MIT'
URL = 'http://github.com/trec-kba/streamcorpus-filter'
def read_file(file_name):
file_path = os.path.join(
os.path.dirname(__file__),
file_name
)
return open(file_path).read()
def recursive_glob(treeroot, pattern):
results = []
for base, dirs, files in os.walk(treeroot):
goodfiles = fnmatch.filter(files, pattern)
results.extend(os.path.join(base, f) for f in goodfiles)
return results
def recursive_glob_with_tree(treeroot, pattern):
results = []
for base, dirs, files in os.walk(treeroot):
goodfiles = fnmatch.filter(files, pattern)
one_dir_results = []
for f in goodfiles:
one_dir_results.append(os.path.join(base, f))
results.append((base, one_dir_results))
return results
def _myinstall(pkgspec):
setup(
script_args = ['-q', 'easy_install', '-v', pkgspec],
script_name = 'easy_install'
)
class InstallTestDependencies(Command):
'''install test dependencies'''
description = 'installs all dependencies required to run all tests'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def easy_install(self, packages):
cmd = ['easy_install']
if packages:
cmd.extend(packages)
errno = subprocess.call(cmd)
if errno:
raise SystemExit(errno)
def run(self):
if self.distribution.install_requires:
self.easy_install(self.distribution.install_requires)
if self.distribution.tests_require:
self.easy_install(self.distribution.tests_require)
class PyTest(Command):
'''run py.test'''
description = 'runs py.test to execute all tests'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
# reload sys.path for any new libraries installed
import site
site.main()
# use pytest to run tests
pytest = __import__('pytest')
if pytest.main(['-n', '3', '-vvs', 'src/tests']):
sys.exit(1)
setup(
name=PROJECT,
version=VERSION,
source_label=SOURCE_LABEL,
description=DESC,
license=LICENSE,
long_description=read_file('README.md'),
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
packages=find_packages('src', exclude=('tests', 'tests.*')),
package_dir={'': 'src'},
include_package_data=True,
entry_points={
'streamcorpus_pipeline.stages': 'textfilter_batch = streamcorpus_filter.pipeline_stage:FastFilterBatch',
},
cmdclass={'test': PyTest,
'install_test': InstallTestDependencies},
# We can select proper classifiers later
classifiers=[
'Development Status :: 3 - Alpha',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License', ## MIT/X11 license http://opensource.org/licenses/MIT
],
tests_require=[
'pytest',
'ipdb',
'pytest-cov',
'pytest-xdist',
'pytest-timeout',
'pytest-incremental',
'pytest-capturelog',
'epydoc',
],
install_requires=[
'thrift',
'streamcorpus >= 0.3.0',
],
) | 0.296858 | 0.124186 |
from spack import *
class Jsoncpp(CMakePackage):
    """JsonCpp is a C++ library that allows manipulating JSON values,
    including serialization and deserialization to and from strings.
    It can also preserve existing comment in unserialization/serialization
    steps, making it a convenient format to store user input files."""

    homepage = "https://github.com/open-source-parsers/jsoncpp"
    url = "https://github.com/open-source-parsers/jsoncpp/archive/1.7.3.tar.gz"

    version('1.9.4', sha256='e34a628a8142643b976c7233ef381457efad79468c67cb1ae0b83a33d7493999')
    version('1.9.3', sha256='8593c1d69e703563d94d8c12244e2e18893eeb9a8a9f8aa3d09a327aa45c8f7d')
    version('1.9.2', sha256='77a402fb577b2e0e5d0bdc1cf9c65278915cdb25171e3452c68b6da8a561f8f0')
    version('1.9.1', sha256='c7b40f5605dd972108f503f031b20186f5e5bca2b65cd4b8bd6c3e4ba8126697')
    version('1.9.0', sha256='bdd3ba9ed1f110b3eb57474d9094e90ab239b93b4803b4f9b1722c281e85a4ac')
    version('1.8.4', sha256='c49deac9e0933bcb7044f08516861a2d560988540b23de2ac1ad443b219afdb6')
    version('1.8.3', sha256='3671ba6051e0f30849942cc66d1798fdf0362d089343a83f704c09ee7156604f')
    version('1.8.2', sha256='811f5aee20df2ef0868a73a976ec6f9aab61f4ca71c66eddf38094b2b3078eef')
    version('1.8.1', sha256='858db2faf348f89fdf1062bd3e79256772e897e7f17df73e0624edf004f2f9ac')
    version('1.8.0', sha256='5deb2462cbf0c0121c9d6c9823ec72fe71417e34242e3509bc7c003d526465bc')
    version('1.7.7', sha256='087640ebcf7fbcfe8e2717a0b9528fff89c52fcf69fa2a18cc2b538008098f97')
    version('1.7.6', sha256='07cf5d4f184394ec0a9aa657dd4c13ea682c52a1ab4da2fb176cb2d5501101e8')
    version('1.7.5', sha256='4338c6cab8af8dee6cdfd54e6218bd0533785f552c6162bb083f8dd28bf8fbbe')
    version('1.7.4', sha256='10dcd0677e80727e572a1e462193e51a5fde3e023b99e144b2ee1a469835f769')
    version('1.7.3', sha256='1cfcad14054039ba97c22531888796cb9369e6353f257aacaad34fda956ada53')

    variant('build_type', default='RelWithDebInfo',
            description='The build type to build',
            values=('Debug', 'Release', 'RelWithDebInfo',
                    'MinSizeRel', 'Coverage'))
    variant('cxxstd',
            default='default',
            values=('default', '98', '11', '14', '17'),
            multi=False,
            description='Use the specified C++ standard when building.')

    depends_on('cmake@3.1:', type='build')
    depends_on('python', type='test')

    # Ref: https://github.com/open-source-parsers/jsoncpp/pull/1023
    # Released in 1.9.2, patch does not apply cleanly across releases.
    # May apply to more compilers in the future.
    @when('@:1.9.1 %clang@10.0.0:')
    def patch(self):
        """Make the int/double comparison explicit so clang >= 10 compiles it."""
        filter_file(
            'return d >= min && d <= max;',
            'return d >= static_cast<double>(min) && '
            'd <= static_cast<double>(max);',
            'src/lib_json/json_value.cpp')

    def cmake_args(self):
        """Assemble CMake arguments for the selected variants.

        The fused "| var/spack/.../package.py |" dataset residue on the
        original return line (which broke the syntax) has been removed.
        """
        args = ['-DBUILD_SHARED_LIBS=ON']
        cxxstd = self.spec.variants['cxxstd'].value
        if cxxstd != 'default':
            args.append('-DCMAKE_CXX_STANDARD={0}'.format(cxxstd))
        # The test suite needs python, so only build it when tests will run.
        if self.run_tests:
            args.append('-DJSONCPP_WITH_TESTS=ON')
        else:
            args.append('-DJSONCPP_WITH_TESTS=OFF')
        return args
from spack import *
class Jsoncpp(CMakePackage):
    """JsonCpp is a C++ library that allows manipulating JSON values,
    including serialization and deserialization to and from strings.
    It can also preserve existing comment in unserialization/serialization
    steps, making it a convenient format to store user input files."""

    homepage = "https://github.com/open-source-parsers/jsoncpp"
    url = "https://github.com/open-source-parsers/jsoncpp/archive/1.7.3.tar.gz"

    version('1.9.4', sha256='e34a628a8142643b976c7233ef381457efad79468c67cb1ae0b83a33d7493999')
    version('1.9.3', sha256='8593c1d69e703563d94d8c12244e2e18893eeb9a8a9f8aa3d09a327aa45c8f7d')
    version('1.9.2', sha256='77a402fb577b2e0e5d0bdc1cf9c65278915cdb25171e3452c68b6da8a561f8f0')
    version('1.9.1', sha256='c7b40f5605dd972108f503f031b20186f5e5bca2b65cd4b8bd6c3e4ba8126697')
    version('1.9.0', sha256='bdd3ba9ed1f110b3eb57474d9094e90ab239b93b4803b4f9b1722c281e85a4ac')
    version('1.8.4', sha256='c49deac9e0933bcb7044f08516861a2d560988540b23de2ac1ad443b219afdb6')
    version('1.8.3', sha256='3671ba6051e0f30849942cc66d1798fdf0362d089343a83f704c09ee7156604f')
    version('1.8.2', sha256='811f5aee20df2ef0868a73a976ec6f9aab61f4ca71c66eddf38094b2b3078eef')
    version('1.8.1', sha256='858db2faf348f89fdf1062bd3e79256772e897e7f17df73e0624edf004f2f9ac')
    version('1.8.0', sha256='5deb2462cbf0c0121c9d6c9823ec72fe71417e34242e3509bc7c003d526465bc')
    version('1.7.7', sha256='087640ebcf7fbcfe8e2717a0b9528fff89c52fcf69fa2a18cc2b538008098f97')
    version('1.7.6', sha256='07cf5d4f184394ec0a9aa657dd4c13ea682c52a1ab4da2fb176cb2d5501101e8')
    version('1.7.5', sha256='4338c6cab8af8dee6cdfd54e6218bd0533785f552c6162bb083f8dd28bf8fbbe')
    version('1.7.4', sha256='10dcd0677e80727e572a1e462193e51a5fde3e023b99e144b2ee1a469835f769')
    version('1.7.3', sha256='1cfcad14054039ba97c22531888796cb9369e6353f257aacaad34fda956ada53')

    variant('build_type', default='RelWithDebInfo',
            description='The build type to build',
            values=('Debug', 'Release', 'RelWithDebInfo',
                    'MinSizeRel', 'Coverage'))
    variant('cxxstd',
            default='default',
            values=('default', '98', '11', '14', '17'),
            multi=False,
            description='Use the specified C++ standard when building.')

    depends_on('cmake@3.1:', type='build')
    depends_on('python', type='test')

    # Ref: https://github.com/open-source-parsers/jsoncpp/pull/1023
    # Released in 1.9.2, patch does not apply cleanly across releases.
    # May apply to more compilers in the future.
    @when('@:1.9.1 %clang@10.0.0:')
    def patch(self):
        """Make the int/double comparison explicit so clang >= 10 compiles it."""
        filter_file(
            'return d >= min && d <= max;',
            'return d >= static_cast<double>(min) && '
            'd <= static_cast<double>(max);',
            'src/lib_json/json_value.cpp')

    def cmake_args(self):
        """Assemble CMake arguments for the selected variants.

        The fused "| 0.775265 | 0.314774" dataset-score residue on the
        original return line has been removed.
        """
        args = ['-DBUILD_SHARED_LIBS=ON']
        cxxstd = self.spec.variants['cxxstd'].value
        if cxxstd != 'default':
            args.append('-DCMAKE_CXX_STANDARD={0}'.format(cxxstd))
        # The test suite needs python, so only build it when tests will run.
        if self.run_tests:
            args.append('-DJSONCPP_WITH_TESTS=ON')
        else:
            args.append('-DJSONCPP_WITH_TESTS=OFF')
        return args
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from colour.plotting import CONSTANTS_COLOUR_STYLE, override_style, render
from colour.utilities import as_float_array
from colour_hdri.exposure import adjust_exposure
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2015-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'plot_radiance_image_strip',
]
@override_style()
def plot_radiance_image_strip(
        image,
        count=5,
        ev_steps=-2,
        cctf_encoding=CONSTANTS_COLOUR_STYLE.colour.colourspace.cctf_encoding,
        **kwargs):
    """
    Plots given HDRI / radiance image as strip of images of varying exposure.

    Parameters
    ----------
    image : array_like
        HDRI / radiance image to plot.
    count : int, optional
        Strip images count.
    ev_steps : numeric, optional
        Exposure variation for each image of the strip.
    cctf_encoding : callable, optional
        Encoding colour component transfer function / opto-electronic
        transfer function used for plotting.

    Other Parameters
    ----------------
    \\**kwargs : dict, optional
        {:func:`colour.plotting.display`},
        Please refer to the documentation of the previously listed definition.

    Returns
    -------
    tuple
        Current figure and axes.
    """
    image = as_float_array(image)

    # One column per exposure step, with no gaps between the sub-images.
    grid = matplotlib.gridspec.GridSpec(1, count)
    grid.update(wspace=0, hspace=0)

    height, width, _channel = image.shape
    for i in range(count):
        ev = i * ev_steps
        axis = plt.subplot(grid[i])
        # Exposure-adjust, encode, then clip to the displayable [0, 1] range.
        axis.imshow(np.clip(cctf_encoding(adjust_exposure(image, ev)), 0, 1))
        # Label each sub-image with its exposure value in the bottom-left.
        axis.text(
            width * 0.05,
            height - height * 0.05,
            'EV {0}'.format(ev),
            color=(1, 1, 1))
        axis.set_xticks([])
        axis.set_yticks([])
        axis.set_aspect('equal')

    # NOTE: the dataset residue "| colour_hdri/plotting/radiance.py | import
    # matplotlib" that had been fused onto this return line was removed.
    return render(**kwargs)
import matplotlib.pyplot as plt
import numpy as np
from colour.plotting import CONSTANTS_COLOUR_STYLE, override_style, render
from colour.utilities import as_float_array
from colour_hdri.exposure import adjust_exposure
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2015-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'plot_radiance_image_strip',
]
@override_style()
def plot_radiance_image_strip(
        image,
        count=5,
        ev_steps=-2,
        cctf_encoding=CONSTANTS_COLOUR_STYLE.colour.colourspace.cctf_encoding,
        **kwargs):
    """
    Plots given HDRI / radiance image as strip of images of varying exposure.

    Parameters
    ----------
    image : array_like
        HDRI / radiance image to plot.
    count : int, optional
        Strip images count.
    ev_steps : numeric, optional
        Exposure variation for each image of the strip.
    cctf_encoding : callable, optional
        Encoding colour component transfer function / opto-electronic
        transfer function used for plotting.

    Other Parameters
    ----------------
    \\**kwargs : dict, optional
        {:func:`colour.plotting.display`},
        Please refer to the documentation of the previously listed definition.

    Returns
    -------
    tuple
        Current figure and axes.
    """
    image = as_float_array(image)

    # One column per exposure step, with no gaps between the sub-images.
    grid = matplotlib.gridspec.GridSpec(1, count)
    grid.update(wspace=0, hspace=0)

    height, width, _channel = image.shape
    for i in range(count):
        ev = i * ev_steps
        axis = plt.subplot(grid[i])
        # Exposure-adjust, encode, then clip to the displayable [0, 1] range.
        axis.imshow(np.clip(cctf_encoding(adjust_exposure(image, ev)), 0, 1))
        # Label each sub-image with its exposure value in the bottom-left.
        axis.text(
            width * 0.05,
            height - height * 0.05,
            'EV {0}'.format(ev),
            color=(1, 1, 1))
        axis.set_xticks([])
        axis.set_yticks([])
        axis.set_aspect('equal')

    # NOTE: the dataset-score residue "| 0.859855 | 0.361982" that had been
    # fused onto this return line was removed.
    return render(**kwargs)
import tracta_ml.pool_maintenance as pm
import numpy as np
import collections
def converge(parents, best_parent):
    """Relative gap between the best fitness and the pool's mean fitness.

    A value near zero means the pool has collapsed onto a single fitness.
    """
    pool_mean = np.mean([parent.fitness['fit_mod'] for parent in parents])
    gap = best_parent.fitness['fit_mod'] - pool_mean
    return abs(gap) / pool_mean
def model_tuner(X, Y, mod, param_dict, cv, scoring, verbose,
                look_back=1000, n_gen=2000, known_best=None):
    """Tune *mod*'s hyper-parameters with a generational genetic algorithm.

    Parameters
    ----------
    X, Y : training data and targets, passed through to fitness evaluation.
    mod : estimator whose hyper-parameters are searched.
    param_dict : mapping of hyper-parameter name -> candidate values.
    cv, scoring : cross-validation splitter and scoring specification.
    verbose : when truthy, print per-generation progress.
    look_back : generations without improvement before early stopping.
    n_gen : maximum number of generations.
    known_best : optional previously-found individual used to seed the pool.

    Returns
    -------
    tuple
        ``(best_parent, monitor)`` where *monitor* holds per-generation
        fitness histories ('Model_Fitness', 'Feature_Fitness', 'Stdev').
    """
    '''Initializing parent pool'''
    pool_size = 5
    if known_best is not None:
        # Re-order param_dict so its key order matches the gene layout of
        # the known solution.
        inter_param_dict = collections.OrderedDict()
        for i in range(len(known_best.hpGene)):
            key = [j for j in known_best.param_list.keys()][i]
            value = param_dict[key]
            inter_param_dict[key] = value
        param_dict = inter_param_dict
    else:
        param_dict = collections.OrderedDict(param_dict)
    # Over-sample random parents, keep only the fittest pool_size of them.
    parents = pm.gen_random_parents(X, Y, mod, param_dict, cv, scoring, 10*pool_size)
    parents.sort(reverse=True)
    parents = parents[0:pool_size]
    if type(known_best) == type(parents[0]):
        # Seed the pool with the known solution.
        parents[0] = known_best
    best_parent = parents[0]
    previous_best = best_parent
    best_mod_fit = []
    best_feat_fit = []
    best_stdev_fit = []
    lb_cntr = 0
    restart_count = 0
    for gen_cnt in range(n_gen):
        '''Crossover Pool'''
        crossover_pool = pm.crossover_pool(parents, X, Y, mod, cv, scoring, pool_size-1)
        parents = crossover_pool + [best_parent]
        '''Elitism for next generation'''
        parents.sort(reverse=True)
        best_parent = parents[0]
        '''Checking for Nominal convergence'''
        # Pool fitness has collapsed: restart with fresh random parents,
        # keeping the elite individual.
        if converge(parents, parents[0]) < 10**-6:
            restart_count += 1
            parents = pm.gen_random_parents(X, Y, mod, param_dict, cv, scoring, pool_size-1)
            parents = parents + [best_parent]
            parents.sort(reverse=True)
            best_parent = parents[0]
        current_best = best_parent
        '''Checking for Solution convergence'''
        # Stop once the best individual has been stagnant for `look_back`
        # consecutive generations.
        if previous_best == current_best:
            lb_cntr += 1
        else:
            lb_cntr = 0
        if lb_cntr >= look_back:
            break
        previous_best = current_best
        best_mod_fit.append(best_parent.fitness['fit_mod'])
        best_feat_fit.append(best_parent.fitness['fit_feat'])
        best_stdev_fit.append(best_parent.fitness['fit_stdev'])
        if verbose:
            print("Iteration -", gen_cnt+1, "complete", "......",
                  "best_score : {:.4f}, score_stdev: {:.4f}, feat_fitness: {:.4f}".
                  format(best_mod_fit[-1], best_stdev_fit[-1], best_feat_fit[-1]))
    monitor = {'Model_Fitness': best_mod_fit,
               'Feature_Fitness': best_feat_fit,
               'Stdev': best_stdev_fit}
    # NOTE: the dataset residue "| tracta_ml/evolve.py | import ..." that had
    # been fused onto this return line was removed.
    return best_parent, monitor
import numpy as np
import collections
def converge(parents, best_parent):
    """Relative gap between the best fitness and the pool's mean fitness.

    A value near zero means the pool has collapsed onto a single fitness.
    """
    pool_mean = np.mean([parent.fitness['fit_mod'] for parent in parents])
    gap = best_parent.fitness['fit_mod'] - pool_mean
    return abs(gap) / pool_mean
def model_tuner(X, Y, mod, param_dict, cv, scoring, verbose,
                look_back=1000, n_gen=2000, known_best=None):
    """Tune *mod*'s hyper-parameters with a generational genetic algorithm.

    Parameters
    ----------
    X, Y : training data and targets, passed through to fitness evaluation.
    mod : estimator whose hyper-parameters are searched.
    param_dict : mapping of hyper-parameter name -> candidate values.
    cv, scoring : cross-validation splitter and scoring specification.
    verbose : when truthy, print per-generation progress.
    look_back : generations without improvement before early stopping.
    n_gen : maximum number of generations.
    known_best : optional previously-found individual used to seed the pool.

    Returns
    -------
    tuple
        ``(best_parent, monitor)`` where *monitor* holds per-generation
        fitness histories ('Model_Fitness', 'Feature_Fitness', 'Stdev').
    """
    '''Initializing parent pool'''
    pool_size = 5
    if known_best is not None:
        # Re-order param_dict so its key order matches the gene layout of
        # the known solution.
        inter_param_dict = collections.OrderedDict()
        for i in range(len(known_best.hpGene)):
            key = [j for j in known_best.param_list.keys()][i]
            value = param_dict[key]
            inter_param_dict[key] = value
        param_dict = inter_param_dict
    else:
        param_dict = collections.OrderedDict(param_dict)
    # Over-sample random parents, keep only the fittest pool_size of them.
    parents = pm.gen_random_parents(X, Y, mod, param_dict, cv, scoring, 10*pool_size)
    parents.sort(reverse=True)
    parents = parents[0:pool_size]
    if type(known_best) == type(parents[0]):
        # Seed the pool with the known solution.
        parents[0] = known_best
    best_parent = parents[0]
    previous_best = best_parent
    best_mod_fit = []
    best_feat_fit = []
    best_stdev_fit = []
    lb_cntr = 0
    restart_count = 0
    for gen_cnt in range(n_gen):
        '''Crossover Pool'''
        crossover_pool = pm.crossover_pool(parents, X, Y, mod, cv, scoring, pool_size-1)
        parents = crossover_pool + [best_parent]
        '''Elitism for next generation'''
        parents.sort(reverse=True)
        best_parent = parents[0]
        '''Checking for Nominal convergence'''
        # Pool fitness has collapsed: restart with fresh random parents,
        # keeping the elite individual.
        if converge(parents, parents[0]) < 10**-6:
            restart_count += 1
            parents = pm.gen_random_parents(X, Y, mod, param_dict, cv, scoring, pool_size-1)
            parents = parents + [best_parent]
            parents.sort(reverse=True)
            best_parent = parents[0]
        current_best = best_parent
        '''Checking for Solution convergence'''
        # Stop once the best individual has been stagnant for `look_back`
        # consecutive generations.
        if previous_best == current_best:
            lb_cntr += 1
        else:
            lb_cntr = 0
        if lb_cntr >= look_back:
            break
        previous_best = current_best
        best_mod_fit.append(best_parent.fitness['fit_mod'])
        best_feat_fit.append(best_parent.fitness['fit_feat'])
        best_stdev_fit.append(best_parent.fitness['fit_stdev'])
        if verbose:
            print("Iteration -", gen_cnt+1, "complete", "......",
                  "best_score : {:.4f}, score_stdev: {:.4f}, feat_fitness: {:.4f}".
                  format(best_mod_fit[-1], best_stdev_fit[-1], best_feat_fit[-1]))
    monitor = {'Model_Fitness': best_mod_fit,
               'Feature_Fitness': best_feat_fit,
               'Stdev': best_stdev_fit}
    # NOTE: the dataset-score residue "| 0.268654 | 0.283986" that had been
    # fused onto this return line was removed.
    return best_parent, monitor
from rest_framework.decorators import detail_route
from rest_framework.response import Response
from common import permissions
from .models import System, Station, Commodity, StationCommodity
from .serializers import CommoditySerializer, StationSerializer, \
SystemSerializer, MinimizedSystemSerializer, StationCommoditySerializer
import django_filters
from common.views import WrappedModelViewSet, wrap_response
# Create your views here.
class SystemViewSet(WrappedModelViewSet):
    """CRUD endpoints for System, plus station-list and minimized detail routes."""
    permission_classes = (permissions.IsAdminOrReadOnly,)
    queryset = System.objects.all()
    serializer_class = SystemSerializer
    search_fields = ('name',)
    template_name = 'frontend/system/instance.html'
    list_template_name = 'frontend/system/list.html'

    @detail_route()
    def stations(self, request, *args, **kwargs):
        """
        A route to display only the stations this System contains.
        :param request:
        :param pk:
        :return:
        """
        current_system = self.get_object()
        station_qs = Station.objects.filter(system=current_system)
        station_data = StationSerializer(
            station_qs, context={'request': request}, many=True).data
        response = Response(
            {'results': station_data},
            template_name='frontend/system/list_station.html')
        return wrap_response(response)

    @detail_route()
    def min(self, request, *args, **kwargs):
        """
        A route to display the minimized System view.
        :param request:
        :param pk:
        :return:
        """
        payload = MinimizedSystemSerializer(
            self.get_object(), context={'request': request}).data
        payload['min'] = True
        return wrap_response(Response(payload))
class StationViewSet(WrappedModelViewSet):
    """REST endpoints for Station; read-only unless the user is an admin."""
    class StationFilter(django_filters.FilterSet):
        """Query-string filtering for the station list route."""
        # 'lt' lookup: ?distance_to_star=N returns stations with distance < N.
        # NOTE(review): lookup_type is the pre-1.0 django-filter kwarg
        # (later renamed lookup_expr) — confirm the pinned dependency version.
        distance_to_star = django_filters.NumberFilter(lookup_type='lt')
        class Meta:
            model = Station
            fields = ('distance_to_star',)
    permission_classes = (permissions.IsAdminOrReadOnly,)
    queryset = Station.objects.all()
    serializer_class = StationSerializer
    filter_class = StationFilter
    # Free-text ?search= matches against the station name.
    search_fields = ('name', )
    template_name = 'frontend/station/instance.html'
    list_template_name = 'frontend/station/list.html'
class CommodityViewSet(WrappedModelViewSet):
    """REST endpoints for Commodity; read-only unless the user is an admin."""
    class CommodityFilter(django_filters.FilterSet):
        """Query-string filtering for the commodity list route."""
        # 'lt' lookup: ?average_price=N returns commodities priced below N.
        average_price = django_filters.NumberFilter(lookup_type='lt')
        # Case-insensitive substring match on the commodity name.
        name = django_filters.CharFilter(lookup_type='icontains')
        class Meta:
            model = Commodity
            fields = ('average_price', 'name')
    permission_classes = (permissions.IsAdminOrReadOnly,)
    queryset = Commodity.objects.all()
    serializer_class = CommoditySerializer
    filter_class = CommodityFilter
    search_fields = ('name',)
    template_name = 'frontend/commodity/instance.html'
    list_template_name = 'frontend/commodity/list.html'
class StationCommodityViewSet(WrappedModelViewSet):
    """REST endpoints for per-station commodity market data.

    The dataset residue "| elitedata/views.py | from rest_framework..." that
    had been fused onto the original search_fields line was removed.
    """
    class StationCommodityFilter(django_filters.FilterSet):
        """Declarative filters: exact matches plus lt/gt price & volume ranges."""
        class Meta:
            model = StationCommodity
            fields = {
                'station': ['exact'],
                'commodity': ['exact'],
                'supply_level': ['exact'],
                'demand_level': ['exact'],
                'buy_price': ['lt', 'gt'],
                'sell_price': ['lt', 'gt'],
                'supply': ['lt', 'gt'],
                'demand': ['lt', 'gt'],
            }
    permission_classes = (permissions.IsAdminOrReadOnly,)
    queryset = StationCommodity.objects.all()
    serializer_class = StationCommoditySerializer
    template_name = 'frontend/station_commodity/instance.html'
    list_template_name = 'frontend/station_commodity/list.html'
    filter_class = StationCommodityFilter
    # Free-text ?search= spans commodity, station and category names.
    search_fields = ('commodity__name', 'station__name', 'commodity__category_name')
from rest_framework.response import Response
from common import permissions
from .models import System, Station, Commodity, StationCommodity
from .serializers import CommoditySerializer, StationSerializer, \
SystemSerializer, MinimizedSystemSerializer, StationCommoditySerializer
import django_filters
from common.views import WrappedModelViewSet, wrap_response
# Create your views here.
class SystemViewSet(WrappedModelViewSet):
    """CRUD endpoints for System, plus station-list and minimized detail routes."""
    permission_classes = (permissions.IsAdminOrReadOnly,)
    queryset = System.objects.all()
    serializer_class = SystemSerializer
    search_fields = ('name',)
    template_name = 'frontend/system/instance.html'
    list_template_name = 'frontend/system/list.html'

    @detail_route()
    def stations(self, request, *args, **kwargs):
        """
        A route to display only the stations this System contains.
        :param request:
        :param pk:
        :return:
        """
        current_system = self.get_object()
        station_qs = Station.objects.filter(system=current_system)
        station_data = StationSerializer(
            station_qs, context={'request': request}, many=True).data
        response = Response(
            {'results': station_data},
            template_name='frontend/system/list_station.html')
        return wrap_response(response)

    @detail_route()
    def min(self, request, *args, **kwargs):
        """
        A route to display the minimized System view.
        :param request:
        :param pk:
        :return:
        """
        payload = MinimizedSystemSerializer(
            self.get_object(), context={'request': request}).data
        payload['min'] = True
        return wrap_response(Response(payload))
class StationViewSet(WrappedModelViewSet):
    """REST endpoints for Station; read-only unless the user is an admin."""
    class StationFilter(django_filters.FilterSet):
        """Query-string filtering for the station list route."""
        # 'lt' lookup: ?distance_to_star=N returns stations with distance < N.
        # NOTE(review): lookup_type is the pre-1.0 django-filter kwarg
        # (later renamed lookup_expr) — confirm the pinned dependency version.
        distance_to_star = django_filters.NumberFilter(lookup_type='lt')
        class Meta:
            model = Station
            fields = ('distance_to_star',)
    permission_classes = (permissions.IsAdminOrReadOnly,)
    queryset = Station.objects.all()
    serializer_class = StationSerializer
    filter_class = StationFilter
    # Free-text ?search= matches against the station name.
    search_fields = ('name', )
    template_name = 'frontend/station/instance.html'
    list_template_name = 'frontend/station/list.html'
class CommodityViewSet(WrappedModelViewSet):
    """REST endpoints for Commodity; read-only unless the user is an admin."""
    class CommodityFilter(django_filters.FilterSet):
        """Query-string filtering for the commodity list route."""
        # 'lt' lookup: ?average_price=N returns commodities priced below N.
        average_price = django_filters.NumberFilter(lookup_type='lt')
        # Case-insensitive substring match on the commodity name.
        name = django_filters.CharFilter(lookup_type='icontains')
        class Meta:
            model = Commodity
            fields = ('average_price', 'name')
    permission_classes = (permissions.IsAdminOrReadOnly,)
    queryset = Commodity.objects.all()
    serializer_class = CommoditySerializer
    filter_class = CommodityFilter
    search_fields = ('name',)
    template_name = 'frontend/commodity/instance.html'
    list_template_name = 'frontend/commodity/list.html'
class StationCommodityViewSet(WrappedModelViewSet):
    """REST endpoints for per-station commodity market data.

    The dataset-score residue "| 0.733643 | 0.097777" that had been fused
    onto the original search_fields line was removed.
    """
    class StationCommodityFilter(django_filters.FilterSet):
        """Declarative filters: exact matches plus lt/gt price & volume ranges."""
        class Meta:
            model = StationCommodity
            fields = {
                'station': ['exact'],
                'commodity': ['exact'],
                'supply_level': ['exact'],
                'demand_level': ['exact'],
                'buy_price': ['lt', 'gt'],
                'sell_price': ['lt', 'gt'],
                'supply': ['lt', 'gt'],
                'demand': ['lt', 'gt'],
            }
    permission_classes = (permissions.IsAdminOrReadOnly,)
    queryset = StationCommodity.objects.all()
    serializer_class = StationCommoditySerializer
    template_name = 'frontend/station_commodity/instance.html'
    list_template_name = 'frontend/station_commodity/list.html'
    filter_class = StationCommodityFilter
    # Free-text ?search= spans commodity, station and category names.
    search_fields = ('commodity__name', 'station__name', 'commodity__category_name')
import unittest
from bson import SON
from mongoengine import *
from mongoengine.pymongo_support import list_collection_names
from tests.utils import MongoDBTestCase
class TestDelta(MongoDBTestCase):
def setUp(self):
super(TestDelta, self).setUp()
class Person(Document):
name = StringField()
age = IntField()
non_field = True
meta = {"allow_inheritance": True}
self.Person = Person
def tearDown(self):
for collection in list_collection_names(self.db):
self.db.drop_collection(collection)
def test_delta(self):
self.delta(Document)
self.delta(DynamicDocument)
@staticmethod
def delta(DocClass):
class Doc(DocClass):
string_field = StringField()
int_field = IntField()
dict_field = DictField()
list_field = ListField()
Doc.drop_collection()
doc = Doc()
doc.save()
doc = Doc.objects.first()
assert doc._get_changed_fields() == []
assert doc._delta() == ({}, {})
doc.string_field = "hello"
assert doc._get_changed_fields() == ["string_field"]
assert doc._delta() == ({"string_field": "hello"}, {})
doc._changed_fields = []
doc.int_field = 1
assert doc._get_changed_fields() == ["int_field"]
assert doc._delta() == ({"int_field": 1}, {})
doc._changed_fields = []
dict_value = {"hello": "world", "ping": "pong"}
doc.dict_field = dict_value
assert doc._get_changed_fields() == ["dict_field"]
assert doc._delta() == ({"dict_field": dict_value}, {})
doc._changed_fields = []
list_value = ["1", 2, {"hello": "world"}]
doc.list_field = list_value
assert doc._get_changed_fields() == ["list_field"]
assert doc._delta() == ({"list_field": list_value}, {})
# Test unsetting
doc._changed_fields = []
doc.dict_field = {}
assert doc._get_changed_fields() == ["dict_field"]
assert doc._delta() == ({}, {"dict_field": 1})
doc._changed_fields = []
doc.list_field = []
assert doc._get_changed_fields() == ["list_field"]
assert doc._delta() == ({}, {"list_field": 1})
def test_delta_recursive(self):
self.delta_recursive(Document, EmbeddedDocument)
self.delta_recursive(DynamicDocument, EmbeddedDocument)
self.delta_recursive(Document, DynamicEmbeddedDocument)
self.delta_recursive(DynamicDocument, DynamicEmbeddedDocument)
def delta_recursive(self, DocClass, EmbeddedClass):
class Embedded(EmbeddedClass):
id = StringField()
string_field = StringField()
int_field = IntField()
dict_field = DictField()
list_field = ListField()
class Doc(DocClass):
string_field = StringField()
int_field = IntField()
dict_field = DictField()
list_field = ListField()
embedded_field = EmbeddedDocumentField(Embedded)
Doc.drop_collection()
doc = Doc()
doc.save()
doc = Doc.objects.first()
assert doc._get_changed_fields() == []
assert doc._delta() == ({}, {})
embedded_1 = Embedded()
embedded_1.id = "010101"
embedded_1.string_field = "hello"
embedded_1.int_field = 1
embedded_1.dict_field = {"hello": "world"}
embedded_1.list_field = ["1", 2, {"hello": "world"}]
doc.embedded_field = embedded_1
assert doc._get_changed_fields() == ["embedded_field"]
embedded_delta = {
"id": "010101",
"string_field": "hello",
"int_field": 1,
"dict_field": {"hello": "world"},
"list_field": ["1", 2, {"hello": "world"}],
}
assert doc.embedded_field._delta() == (embedded_delta, {})
assert doc._delta() == ({"embedded_field": embedded_delta}, {})
doc.save()
doc = doc.reload(10)
doc.embedded_field.dict_field = {}
assert doc._get_changed_fields() == ["embedded_field.dict_field"]
assert doc.embedded_field._delta() == ({}, {"dict_field": 1})
assert doc._delta() == ({}, {"embedded_field.dict_field": 1})
doc.save()
doc = doc.reload(10)
assert doc.embedded_field.dict_field == {}
doc.embedded_field.list_field = []
assert doc._get_changed_fields() == ["embedded_field.list_field"]
assert doc.embedded_field._delta() == ({}, {"list_field": 1})
assert doc._delta() == ({}, {"embedded_field.list_field": 1})
doc.save()
doc = doc.reload(10)
assert doc.embedded_field.list_field == []
embedded_2 = Embedded()
embedded_2.string_field = "hello"
embedded_2.int_field = 1
embedded_2.dict_field = {"hello": "world"}
embedded_2.list_field = ["1", 2, {"hello": "world"}]
doc.embedded_field.list_field = ["1", 2, embedded_2]
assert doc._get_changed_fields() == ["embedded_field.list_field"]
assert doc.embedded_field._delta() == (
{
"list_field": [
"1",
2,
{
"_cls": "Embedded",
"string_field": "hello",
"dict_field": {"hello": "world"},
"int_field": 1,
"list_field": ["1", 2, {"hello": "world"}],
},
]
},
{},
)
assert doc._delta() == (
{
"embedded_field.list_field": [
"1",
2,
{
"_cls": "Embedded",
"string_field": "hello",
"dict_field": {"hello": "world"},
"int_field": 1,
"list_field": ["1", 2, {"hello": "world"}],
},
]
},
{},
)
doc.save()
doc = doc.reload(10)
assert doc.embedded_field.list_field[0] == "1"
assert doc.embedded_field.list_field[1] == 2
for k in doc.embedded_field.list_field[2]._fields:
assert doc.embedded_field.list_field[2][k] == embedded_2[k]
doc.embedded_field.list_field[2].string_field = "world"
assert doc._get_changed_fields() == ["embedded_field.list_field.2.string_field"]
assert doc.embedded_field._delta() == (
{"list_field.2.string_field": "world"},
{},
)
assert doc._delta() == (
{"embedded_field.list_field.2.string_field": "world"},
{},
)
doc.save()
doc = doc.reload(10)
assert doc.embedded_field.list_field[2].string_field == "world"
# Test multiple assignments
doc.embedded_field.list_field[2].string_field = "hello world"
doc.embedded_field.list_field[2] = doc.embedded_field.list_field[2]
assert doc._get_changed_fields() == ["embedded_field.list_field.2"]
assert doc.embedded_field._delta() == (
{
"list_field.2": {
"_cls": "Embedded",
"string_field": "hello world",
"int_field": 1,
"list_field": ["1", 2, {"hello": "world"}],
"dict_field": {"hello": "world"},
}
},
{},
)
assert doc._delta() == (
{
"embedded_field.list_field.2": {
"_cls": "Embedded",
"string_field": "hello world",
"int_field": 1,
"list_field": ["1", 2, {"hello": "world"}],
"dict_field": {"hello": "world"},
}
},
{},
)
doc.save()
doc = doc.reload(10)
assert doc.embedded_field.list_field[2].string_field == "hello world"
# Test list native methods
doc.embedded_field.list_field[2].list_field.pop(0)
assert doc._delta() == (
{"embedded_field.list_field.2.list_field": [2, {"hello": "world"}]},
{},
)
doc.save()
doc = doc.reload(10)
doc.embedded_field.list_field[2].list_field.append(1)
assert doc._delta() == (
{"embedded_field.list_field.2.list_field": [2, {"hello": "world"}, 1]},
{},
)
doc.save()
doc = doc.reload(10)
assert doc.embedded_field.list_field[2].list_field == [2, {"hello": "world"}, 1]
doc.embedded_field.list_field[2].list_field.sort(key=str)
doc.save()
doc = doc.reload(10)
assert doc.embedded_field.list_field[2].list_field == [1, 2, {"hello": "world"}]
del doc.embedded_field.list_field[2].list_field[2]["hello"]
assert doc._delta() == (
{},
{"embedded_field.list_field.2.list_field.2.hello": 1},
)
doc.save()
doc = doc.reload(10)
del doc.embedded_field.list_field[2].list_field
assert doc._delta() == ({}, {"embedded_field.list_field.2.list_field": 1})
doc.save()
doc = doc.reload(10)
doc.dict_field["Embedded"] = embedded_1
doc.save()
doc = doc.reload(10)
doc.dict_field["Embedded"].string_field = "Hello World"
assert doc._get_changed_fields() == ["dict_field.Embedded.string_field"]
assert doc._delta() == ({"dict_field.Embedded.string_field": "Hello World"}, {})
def test_circular_reference_deltas(self):
self.circular_reference_deltas(Document, Document)
self.circular_reference_deltas(Document, DynamicDocument)
self.circular_reference_deltas(DynamicDocument, Document)
self.circular_reference_deltas(DynamicDocument, DynamicDocument)
def circular_reference_deltas(self, DocClass1, DocClass2):
class Person(DocClass1):
name = StringField()
owns = ListField(ReferenceField("Organization"))
class Organization(DocClass2):
name = StringField()
owner = ReferenceField("Person")
Person.drop_collection()
Organization.drop_collection()
person = Person(name="owner").save()
organization = Organization(name="company").save()
person.owns.append(organization)
organization.owner = person
person.save()
organization.save()
p = Person.objects[0].select_related()
o = Organization.objects.first()
assert p.owns[0] == o
assert o.owner == p
def test_circular_reference_deltas_2(self):
self.circular_reference_deltas_2(Document, Document)
self.circular_reference_deltas_2(Document, DynamicDocument)
self.circular_reference_deltas_2(DynamicDocument, Document)
self.circular_reference_deltas_2(DynamicDocument, DynamicDocument)
def circular_reference_deltas_2(self, DocClass1, DocClass2, dbref=True):
class Person(DocClass1):
name = StringField()
owns = ListField(ReferenceField("Organization", dbref=dbref))
employer = ReferenceField("Organization", dbref=dbref)
class Organization(DocClass2):
name = StringField()
owner = ReferenceField("Person", dbref=dbref)
employees = ListField(ReferenceField("Person", dbref=dbref))
Person.drop_collection()
Organization.drop_collection()
person = Person(name="owner").save()
employee = Person(name="employee").save()
organization = Organization(name="company").save()
person.owns.append(organization)
organization.owner = person
organization.employees.append(employee)
employee.employer = organization
person.save()
organization.save()
employee.save()
p = Person.objects.get(name="owner")
e = Person.objects.get(name="employee")
o = Organization.objects.first()
assert p.owns[0] == o
assert o.owner == p
assert e.employer == o
return person, organization, employee
def test_delta_db_field(self):
self.delta_db_field(Document)
self.delta_db_field(DynamicDocument)
def delta_db_field(self, DocClass):
class Doc(DocClass):
string_field = StringField(db_field="db_string_field")
int_field = IntField(db_field="db_int_field")
dict_field = DictField(db_field="db_dict_field")
list_field = ListField(db_field="db_list_field")
Doc.drop_collection()
doc = Doc()
doc.save()
doc = Doc.objects.first()
assert doc._get_changed_fields() == []
assert doc._delta() == ({}, {})
doc.string_field = "hello"
assert doc._get_changed_fields() == ["db_string_field"]
assert doc._delta() == ({"db_string_field": "hello"}, {})
doc._changed_fields = []
doc.int_field = 1
assert doc._get_changed_fields() == ["db_int_field"]
assert doc._delta() == ({"db_int_field": 1}, {})
doc._changed_fields = []
dict_value = {"hello": "world", "ping": "pong"}
doc.dict_field = dict_value
assert doc._get_changed_fields() == ["db_dict_field"]
assert doc._delta() == ({"db_dict_field": dict_value}, {})
doc._changed_fields = []
list_value = ["1", 2, {"hello": "world"}]
doc.list_field = list_value
assert doc._get_changed_fields() == ["db_list_field"]
assert doc._delta() == ({"db_list_field": list_value}, {})
# Test unsetting
doc._changed_fields = []
doc.dict_field = {}
assert doc._get_changed_fields() == ["db_dict_field"]
assert doc._delta() == ({}, {"db_dict_field": 1})
doc._changed_fields = []
doc.list_field = []
assert doc._get_changed_fields() == ["db_list_field"]
assert doc._delta() == ({}, {"db_list_field": 1})
# Test it saves that data
doc = Doc()
doc.save()
doc.string_field = "hello"
doc.int_field = 1
doc.dict_field = {"hello": "world"}
doc.list_field = ["1", 2, {"hello": "world"}]
doc.save()
doc = doc.reload(10)
assert doc.string_field == "hello"
assert doc.int_field == 1
assert doc.dict_field == {"hello": "world"}
assert doc.list_field == ["1", 2, {"hello": "world"}]
def test_delta_recursive_db_field_on_doc_and_embeddeddoc(self):
    """Recursive db_field deltas: static Document + static EmbeddedDocument."""
    self.delta_recursive_db_field(Document, EmbeddedDocument)

def test_delta_recursive_db_field_on_doc_and_dynamicembeddeddoc(self):
    """Recursive db_field deltas: static Document + dynamic embedded doc."""
    self.delta_recursive_db_field(Document, DynamicEmbeddedDocument)

def test_delta_recursive_db_field_on_dynamicdoc_and_embeddeddoc(self):
    """Recursive db_field deltas: dynamic Document + static embedded doc."""
    self.delta_recursive_db_field(DynamicDocument, EmbeddedDocument)

def test_delta_recursive_db_field_on_dynamicdoc_and_dynamicembeddeddoc(self):
    """Recursive db_field deltas: dynamic Document + dynamic embedded doc."""
    self.delta_recursive_db_field(DynamicDocument, DynamicEmbeddedDocument)
@staticmethod
def delta_recursive_db_field(DocClass, EmbeddedClass):
    """Recursive delta tracking when every field declares a custom db_field.

    Dotted delta paths (e.g. ``db_embedded_field.db_list_field.2``) must be
    built from db_field names, not attribute names.
    """

    class Embedded(EmbeddedClass):
        string_field = StringField(db_field="db_string_field")
        int_field = IntField(db_field="db_int_field")
        dict_field = DictField(db_field="db_dict_field")
        list_field = ListField(db_field="db_list_field")

    class Doc(DocClass):
        string_field = StringField(db_field="db_string_field")
        int_field = IntField(db_field="db_int_field")
        dict_field = DictField(db_field="db_dict_field")
        list_field = ListField(db_field="db_list_field")
        embedded_field = EmbeddedDocumentField(
            Embedded, db_field="db_embedded_field"
        )

    Doc.drop_collection()
    doc = Doc()
    doc.save()

    doc = Doc.objects.first()
    assert doc._get_changed_fields() == []
    assert doc._delta() == ({}, {})

    # Assigning a whole embedded document marks only the top-level field.
    embedded_1 = Embedded()
    embedded_1.string_field = "hello"
    embedded_1.int_field = 1
    embedded_1.dict_field = {"hello": "world"}
    embedded_1.list_field = ["1", 2, {"hello": "world"}]
    doc.embedded_field = embedded_1

    assert doc._get_changed_fields() == ["db_embedded_field"]

    embedded_delta = {
        "db_string_field": "hello",
        "db_int_field": 1,
        "db_dict_field": {"hello": "world"},
        "db_list_field": ["1", 2, {"hello": "world"}],
    }
    assert doc.embedded_field._delta() == (embedded_delta, {})
    assert doc._delta() == ({"db_embedded_field": embedded_delta}, {})

    doc.save()
    doc = doc.reload(10)

    # Emptying a nested dict yields a removal keyed by the dotted db path.
    doc.embedded_field.dict_field = {}
    assert doc._get_changed_fields() == ["db_embedded_field.db_dict_field"]
    assert doc.embedded_field._delta() == ({}, {"db_dict_field": 1})
    assert doc._delta() == ({}, {"db_embedded_field.db_dict_field": 1})
    doc.save()
    doc = doc.reload(10)
    assert doc.embedded_field.dict_field == {}
    assert doc._get_changed_fields() == []

    doc.embedded_field.list_field = []
    assert doc._get_changed_fields() == ["db_embedded_field.db_list_field"]
    assert doc.embedded_field._delta() == ({}, {"db_list_field": 1})
    assert doc._delta() == ({}, {"db_embedded_field.db_list_field": 1})
    doc.save()
    doc = doc.reload(10)
    assert doc.embedded_field.list_field == []

    # A list containing a nested embedded document serializes it (with _cls).
    embedded_2 = Embedded()
    embedded_2.string_field = "hello"
    embedded_2.int_field = 1
    embedded_2.dict_field = {"hello": "world"}
    embedded_2.list_field = ["1", 2, {"hello": "world"}]

    doc.embedded_field.list_field = ["1", 2, embedded_2]
    assert doc._get_changed_fields() == ["db_embedded_field.db_list_field"]
    assert doc.embedded_field._delta() == (
        {
            "db_list_field": [
                "1",
                2,
                {
                    "_cls": "Embedded",
                    "db_string_field": "hello",
                    "db_dict_field": {"hello": "world"},
                    "db_int_field": 1,
                    "db_list_field": ["1", 2, {"hello": "world"}],
                },
            ]
        },
        {},
    )
    assert doc._delta() == (
        {
            "db_embedded_field.db_list_field": [
                "1",
                2,
                {
                    "_cls": "Embedded",
                    "db_string_field": "hello",
                    "db_dict_field": {"hello": "world"},
                    "db_int_field": 1,
                    "db_list_field": ["1", 2, {"hello": "world"}],
                },
            ]
        },
        {},
    )
    doc.save()
    assert doc._get_changed_fields() == []
    doc = doc.reload(10)

    assert doc.embedded_field.list_field[0] == "1"
    assert doc.embedded_field.list_field[1] == 2
    for k in doc.embedded_field.list_field[2]._fields:
        assert doc.embedded_field.list_field[2][k] == embedded_2[k]

    # Changing a field of a list element yields an index-qualified path.
    doc.embedded_field.list_field[2].string_field = "world"
    assert doc._get_changed_fields() == [
        "db_embedded_field.db_list_field.2.db_string_field"
    ]
    assert doc.embedded_field._delta() == (
        {"db_list_field.2.db_string_field": "world"},
        {},
    )
    assert doc._delta() == (
        {"db_embedded_field.db_list_field.2.db_string_field": "world"},
        {},
    )
    doc.save()
    doc = doc.reload(10)
    assert doc.embedded_field.list_field[2].string_field == "world"

    # Test multiple assignments: re-assigning the element collapses the
    # delta onto the element index itself.
    doc.embedded_field.list_field[2].string_field = "hello world"
    doc.embedded_field.list_field[2] = doc.embedded_field.list_field[2]
    assert doc._get_changed_fields() == ["db_embedded_field.db_list_field.2"]
    assert doc.embedded_field._delta() == (
        {
            "db_list_field.2": {
                "_cls": "Embedded",
                "db_string_field": "hello world",
                "db_int_field": 1,
                "db_list_field": ["1", 2, {"hello": "world"}],
                "db_dict_field": {"hello": "world"},
            }
        },
        {},
    )
    assert doc._delta() == (
        {
            "db_embedded_field.db_list_field.2": {
                "_cls": "Embedded",
                "db_string_field": "hello world",
                "db_int_field": 1,
                "db_list_field": ["1", 2, {"hello": "world"}],
                "db_dict_field": {"hello": "world"},
            }
        },
        {},
    )
    doc.save()
    doc = doc.reload(10)
    assert doc.embedded_field.list_field[2].string_field == "hello world"

    # Test list native methods
    doc.embedded_field.list_field[2].list_field.pop(0)
    assert doc._delta() == (
        {
            "db_embedded_field.db_list_field.2.db_list_field": [
                2,
                {"hello": "world"},
            ]
        },
        {},
    )
    doc.save()
    doc = doc.reload(10)

    doc.embedded_field.list_field[2].list_field.append(1)
    assert doc._delta() == (
        {
            "db_embedded_field.db_list_field.2.db_list_field": [
                2,
                {"hello": "world"},
                1,
            ]
        },
        {},
    )
    doc.save()
    doc = doc.reload(10)
    assert doc.embedded_field.list_field[2].list_field == [2, {"hello": "world"}, 1]

    doc.embedded_field.list_field[2].list_field.sort(key=str)
    doc.save()
    doc = doc.reload(10)
    assert doc.embedded_field.list_field[2].list_field == [1, 2, {"hello": "world"}]

    del doc.embedded_field.list_field[2].list_field[2]["hello"]
    assert doc._delta() == (
        {},
        {"db_embedded_field.db_list_field.2.db_list_field.2.hello": 1},
    )
    doc.save()
    doc = doc.reload(10)

    assert doc._delta() == ({}, {})
    del doc.embedded_field.list_field[2].list_field
    assert doc._delta() == (
        {},
        {"db_embedded_field.db_list_field.2.db_list_field": 1},
    )
def test_delta_for_dynamic_documents(self):
    """Dynamic fields appear in deltas; deleting one before save removes it."""

    class Person(DynamicDocument):
        name = StringField()
        meta = {"allow_inheritance": True}

    Person.drop_collection()

    p = Person(name="James", age=34)
    assert p._delta() == (
        SON([("_cls", "Person"), ("name", "James"), ("age", 34)]),
        {},
    )

    # Adding then deleting a dynamic attribute leaves the delta unchanged.
    p.doc = 123
    del p.doc
    assert p._delta() == (
        SON([("_cls", "Person"), ("name", "James"), ("age", 34)]),
        {},
    )

    p = Person()
    p.name = "Dean"
    p.age = 22
    p.save()

    p.age = 24
    assert p.age == 24
    assert p._get_changed_fields() == ["age"]
    assert p._delta() == ({"age": 24}, {})

    # Same behaviour on a document fetched from the database.
    p = Person.objects(age=22).get()
    p.age = 24
    assert p.age == 24
    assert p._get_changed_fields() == ["age"]
    assert p._delta() == ({"age": 24}, {})

    p.save()
    assert 1 == Person.objects(age=24).count()
def test_dynamic_delta(self):
    """Delta tracking for ad-hoc fields on a fully dynamic document."""

    class Doc(DynamicDocument):
        pass

    Doc.drop_collection()
    doc = Doc()
    doc.save()

    doc = Doc.objects.first()
    assert doc._get_changed_fields() == []
    assert doc._delta() == ({}, {})

    doc.string_field = "hello"
    assert doc._get_changed_fields() == ["string_field"]
    assert doc._delta() == ({"string_field": "hello"}, {})

    doc._changed_fields = []
    doc.int_field = 1
    assert doc._get_changed_fields() == ["int_field"]
    assert doc._delta() == ({"int_field": 1}, {})

    doc._changed_fields = []
    dict_value = {"hello": "world", "ping": "pong"}
    doc.dict_field = dict_value
    assert doc._get_changed_fields() == ["dict_field"]
    assert doc._delta() == ({"dict_field": dict_value}, {})

    doc._changed_fields = []
    list_value = ["1", 2, {"hello": "world"}]
    doc.list_field = list_value
    assert doc._get_changed_fields() == ["list_field"]
    assert doc._delta() == ({"list_field": list_value}, {})

    # Test unsetting: empty containers become removal deltas.
    doc._changed_fields = []
    doc.dict_field = {}
    assert doc._get_changed_fields() == ["dict_field"]
    assert doc._delta() == ({}, {"dict_field": 1})

    doc._changed_fields = []
    doc.list_field = []
    assert doc._get_changed_fields() == ["list_field"]
    assert doc._delta() == ({}, {"list_field": 1})
def test_delta_with_dbref_true(self):
    """Appending to a reference list is detected when dbref=True."""
    person, organization, employee = self.circular_reference_deltas_2(
        Document, Document, True
    )
    # Mutating a *referenced* document must not dirty the referrer.
    employee.name = "test"

    assert organization._get_changed_fields() == []

    updates, removals = organization._delta()
    assert removals == {}
    assert updates == {}

    organization.employees.append(person)
    updates, removals = organization._delta()
    assert removals == {}
    assert "employees" in updates
def test_delta_with_dbref_false(self):
    """Appending to a reference list is detected when dbref=False."""
    person, organization, employee = self.circular_reference_deltas_2(
        Document, Document, False
    )
    # Mutating a *referenced* document must not dirty the referrer.
    employee.name = "test"

    assert organization._get_changed_fields() == []

    updates, removals = organization._delta()
    assert removals == {}
    assert updates == {}

    organization.employees.append(person)
    updates, removals = organization._delta()
    assert removals == {}
    assert "employees" in updates
def test_nested_nested_fields_mark_as_changed(self):
    """A change inside two nested MapFields marks the full dotted path."""

    class EmbeddedDoc(EmbeddedDocument):
        name = StringField()

    class MyDoc(Document):
        subs = MapField(MapField(EmbeddedDocumentField(EmbeddedDoc)))
        name = StringField()

    MyDoc.drop_collection()

    MyDoc(name="testcase1", subs={"a": {"b": EmbeddedDoc(name="foo")}}).save()

    mydoc = MyDoc.objects.first()
    subdoc = mydoc.subs["a"]["b"]
    subdoc.name = "bar"

    assert subdoc._get_changed_fields() == ["name"]
    assert mydoc._get_changed_fields() == ["subs.a.b.name"]

    mydoc._clear_changed_fields()
    assert mydoc._get_changed_fields() == []
def test_nested_nested_fields_db_field_set__gets_mark_as_changed_and_cleaned(self):
    """Changed-field paths use db_field names for embedded documents."""

    class EmbeddedDoc(EmbeddedDocument):
        name = StringField(db_field="db_name")

    class MyDoc(Document):
        embed = EmbeddedDocumentField(EmbeddedDoc, db_field="db_embed")
        name = StringField(db_field="db_name")

    MyDoc.drop_collection()

    MyDoc(name="testcase1", embed=EmbeddedDoc(name="foo")).save()

    # In-place change on the embedded doc: dotted db_field path.
    mydoc = MyDoc.objects.first()
    mydoc.embed.name = "foo1"
    assert mydoc.embed._get_changed_fields() == ["db_name"]
    assert mydoc._get_changed_fields() == ["db_embed.db_name"]

    # Re-assigning the whole embedded doc: only the top-level db_field.
    mydoc = MyDoc.objects.first()
    embed = EmbeddedDoc(name="foo2")
    embed.name = "bar"
    mydoc.embed = embed
    assert embed._get_changed_fields() == ["db_name"]
    assert mydoc._get_changed_fields() == ["db_embed"]

    mydoc._clear_changed_fields()
    assert mydoc._get_changed_fields() == []
def test_lower_level_mark_as_changed(self):
    """A change inside a newly-added map entry stays collapsed on the entry."""

    class EmbeddedDoc(EmbeddedDocument):
        name = StringField()

    class MyDoc(Document):
        subs = MapField(EmbeddedDocumentField(EmbeddedDoc))

    MyDoc.drop_collection()
    MyDoc().save()

    mydoc = MyDoc.objects.first()
    mydoc.subs["a"] = EmbeddedDoc()
    assert mydoc._get_changed_fields() == ["subs.a"]

    subdoc = mydoc.subs["a"]
    subdoc.name = "bar"

    # The parent path "subs.a" already covers the nested name change.
    assert subdoc._get_changed_fields() == ["name"]
    assert mydoc._get_changed_fields() == ["subs.a"]
    mydoc.save()

    mydoc._clear_changed_fields()
    assert mydoc._get_changed_fields() == []
def test_upper_level_mark_as_changed(self):
    """Re-assigning a map entry supersedes a nested field change."""

    class EmbeddedDoc(EmbeddedDocument):
        name = StringField()

    class MyDoc(Document):
        subs = MapField(EmbeddedDocumentField(EmbeddedDoc))

    MyDoc.drop_collection()
    MyDoc(subs={"a": EmbeddedDoc(name="foo")}).save()

    mydoc = MyDoc.objects.first()
    subdoc = mydoc.subs["a"]
    subdoc.name = "bar"

    assert subdoc._get_changed_fields() == ["name"]
    assert mydoc._get_changed_fields() == ["subs.a.name"]

    # Overwriting the entry collapses the path from "subs.a.name" to "subs.a".
    mydoc.subs["a"] = EmbeddedDoc()
    assert mydoc._get_changed_fields() == ["subs.a"]
    mydoc.save()

    mydoc._clear_changed_fields()
    assert mydoc._get_changed_fields() == []
def test_referenced_object_changed_attributes(self):
    """Ensures that when you save a new reference to a field, the referenced object isn't altered"""

    class Organization(Document):
        name = StringField()

    class User(Document):
        name = StringField()
        org = ReferenceField("Organization", required=True)

    Organization.drop_collection()
    User.drop_collection()

    org1 = Organization(name="Org 1")
    org1.save()

    org2 = Organization(name="Org 2")
    org2.save()

    user = User(name="Fred", org=org1)
    user.save()

    org1.reload()
    org2.reload()
    user.reload()
    assert org1.name == "Org 1"
    assert org2.name == "Org 2"
    assert user.name == "Fred"

    user.name = "Harold"
    user.org = org2

    org2.name = "New Org 2"
    assert org2.name == "New Org 2"

    # Saving the referrer must not clobber the referenced doc's local change.
    user.save()
    org2.save()

    assert org2.name == "New Org 2"
    org2.reload()
    assert org2.name == "New Org 2"
def test_delta_for_nested_map_fields(self):
    """Delta paths descend through MapField/ListField/Reference inside embedded docs."""

    class UInfoDocument(Document):
        phone = StringField()

    class EmbeddedRole(EmbeddedDocument):
        type = StringField()

    class EmbeddedUser(EmbeddedDocument):
        name = StringField()
        roles = MapField(field=EmbeddedDocumentField(EmbeddedRole))
        rolist = ListField(field=EmbeddedDocumentField(EmbeddedRole))
        info = ReferenceField(UInfoDocument)

    class Doc(Document):
        users = MapField(field=EmbeddedDocumentField(EmbeddedUser))
        num = IntField(default=-1)

    Doc.drop_collection()

    doc = Doc(num=1)
    doc.users["007"] = EmbeddedUser(name="Agent007")
    doc.save()

    uinfo = UInfoDocument(phone="79089269066")
    uinfo.save()

    d = Doc.objects(num=1).first()
    d.users["007"]["roles"]["666"] = EmbeddedRole(type="superadmin")
    d.users["007"]["rolist"].append(EmbeddedRole(type="oops"))
    d.users["007"]["info"] = uinfo
    delta = d._delta()
    # delta[0] holds the update dict; simplified from `assert True == (...)`.
    assert "users.007.roles.666" in delta[0]
    assert "users.007.rolist" in delta[0]
    assert "users.007.info" in delta[0]
    assert "superadmin" == delta[0]["users.007.roles.666"]["type"]
    assert "oops" == delta[0]["users.007.rolist"][0]["type"]
    assert uinfo.id == delta[0]["users.007.info"]
if __name__ == "__main__":
unittest.main() | tests/document/test_delta.py | import unittest
from bson import SON
from mongoengine import *
from mongoengine.pymongo_support import list_collection_names
from tests.utils import MongoDBTestCase
class TestDelta(MongoDBTestCase):
def setUp(self):
    """Define the inheritable Person document shared by the delta tests."""
    super(TestDelta, self).setUp()

    class Person(Document):
        name = StringField()
        age = IntField()
        # Plain class attribute, deliberately NOT a mongoengine field.
        non_field = True

        meta = {"allow_inheritance": True}

    self.Person = Person
def tearDown(self):
    """Drop every collection created during a test run."""
    for collection in list_collection_names(self.db):
        self.db.drop_collection(collection)
def test_delta(self):
    """Run the core delta checks for both Document and DynamicDocument."""
    self.delta(Document)
    self.delta(DynamicDocument)
@staticmethod
def delta(DocClass):
    """Core delta behaviour for simple field types (no custom db_field)."""

    class Doc(DocClass):
        string_field = StringField()
        int_field = IntField()
        dict_field = DictField()
        list_field = ListField()

    Doc.drop_collection()
    doc = Doc()
    doc.save()

    # Freshly loaded document has no pending changes.
    doc = Doc.objects.first()
    assert doc._get_changed_fields() == []
    assert doc._delta() == ({}, {})

    doc.string_field = "hello"
    assert doc._get_changed_fields() == ["string_field"]
    assert doc._delta() == ({"string_field": "hello"}, {})

    doc._changed_fields = []
    doc.int_field = 1
    assert doc._get_changed_fields() == ["int_field"]
    assert doc._delta() == ({"int_field": 1}, {})

    doc._changed_fields = []
    dict_value = {"hello": "world", "ping": "pong"}
    doc.dict_field = dict_value
    assert doc._get_changed_fields() == ["dict_field"]
    assert doc._delta() == ({"dict_field": dict_value}, {})

    doc._changed_fields = []
    list_value = ["1", 2, {"hello": "world"}]
    doc.list_field = list_value
    assert doc._get_changed_fields() == ["list_field"]
    assert doc._delta() == ({"list_field": list_value}, {})

    # Test unsetting: empty containers become removal deltas.
    doc._changed_fields = []
    doc.dict_field = {}
    assert doc._get_changed_fields() == ["dict_field"]
    assert doc._delta() == ({}, {"dict_field": 1})

    doc._changed_fields = []
    doc.list_field = []
    assert doc._get_changed_fields() == ["list_field"]
    assert doc._delta() == ({}, {"list_field": 1})
def test_delta_recursive(self):
    """Exercise recursive deltas for every Document/EmbeddedDocument combo."""
    self.delta_recursive(Document, EmbeddedDocument)
    self.delta_recursive(DynamicDocument, EmbeddedDocument)
    self.delta_recursive(Document, DynamicEmbeddedDocument)
    self.delta_recursive(DynamicDocument, DynamicEmbeddedDocument)
def delta_recursive(self, DocClass, EmbeddedClass):
    """Recursive delta tracking through an embedded document and its lists."""

    class Embedded(EmbeddedClass):
        id = StringField()
        string_field = StringField()
        int_field = IntField()
        dict_field = DictField()
        list_field = ListField()

    class Doc(DocClass):
        string_field = StringField()
        int_field = IntField()
        dict_field = DictField()
        list_field = ListField()
        embedded_field = EmbeddedDocumentField(Embedded)

    Doc.drop_collection()
    doc = Doc()
    doc.save()

    doc = Doc.objects.first()
    assert doc._get_changed_fields() == []
    assert doc._delta() == ({}, {})

    # Assigning a whole embedded document marks only the top-level field.
    embedded_1 = Embedded()
    embedded_1.id = "010101"
    embedded_1.string_field = "hello"
    embedded_1.int_field = 1
    embedded_1.dict_field = {"hello": "world"}
    embedded_1.list_field = ["1", 2, {"hello": "world"}]
    doc.embedded_field = embedded_1

    assert doc._get_changed_fields() == ["embedded_field"]

    embedded_delta = {
        "id": "010101",
        "string_field": "hello",
        "int_field": 1,
        "dict_field": {"hello": "world"},
        "list_field": ["1", 2, {"hello": "world"}],
    }
    assert doc.embedded_field._delta() == (embedded_delta, {})
    assert doc._delta() == ({"embedded_field": embedded_delta}, {})

    doc.save()
    doc = doc.reload(10)

    # Emptying a nested dict yields a removal keyed by the dotted path.
    doc.embedded_field.dict_field = {}
    assert doc._get_changed_fields() == ["embedded_field.dict_field"]
    assert doc.embedded_field._delta() == ({}, {"dict_field": 1})
    assert doc._delta() == ({}, {"embedded_field.dict_field": 1})
    doc.save()
    doc = doc.reload(10)
    assert doc.embedded_field.dict_field == {}

    doc.embedded_field.list_field = []
    assert doc._get_changed_fields() == ["embedded_field.list_field"]
    assert doc.embedded_field._delta() == ({}, {"list_field": 1})
    assert doc._delta() == ({}, {"embedded_field.list_field": 1})
    doc.save()
    doc = doc.reload(10)
    assert doc.embedded_field.list_field == []

    # A list containing a nested embedded document serializes it (with _cls).
    embedded_2 = Embedded()
    embedded_2.string_field = "hello"
    embedded_2.int_field = 1
    embedded_2.dict_field = {"hello": "world"}
    embedded_2.list_field = ["1", 2, {"hello": "world"}]

    doc.embedded_field.list_field = ["1", 2, embedded_2]
    assert doc._get_changed_fields() == ["embedded_field.list_field"]
    assert doc.embedded_field._delta() == (
        {
            "list_field": [
                "1",
                2,
                {
                    "_cls": "Embedded",
                    "string_field": "hello",
                    "dict_field": {"hello": "world"},
                    "int_field": 1,
                    "list_field": ["1", 2, {"hello": "world"}],
                },
            ]
        },
        {},
    )
    assert doc._delta() == (
        {
            "embedded_field.list_field": [
                "1",
                2,
                {
                    "_cls": "Embedded",
                    "string_field": "hello",
                    "dict_field": {"hello": "world"},
                    "int_field": 1,
                    "list_field": ["1", 2, {"hello": "world"}],
                },
            ]
        },
        {},
    )
    doc.save()
    doc = doc.reload(10)

    assert doc.embedded_field.list_field[0] == "1"
    assert doc.embedded_field.list_field[1] == 2
    for k in doc.embedded_field.list_field[2]._fields:
        assert doc.embedded_field.list_field[2][k] == embedded_2[k]

    # Changing a field of a list element yields an index-qualified path.
    doc.embedded_field.list_field[2].string_field = "world"
    assert doc._get_changed_fields() == ["embedded_field.list_field.2.string_field"]
    assert doc.embedded_field._delta() == (
        {"list_field.2.string_field": "world"},
        {},
    )
    assert doc._delta() == (
        {"embedded_field.list_field.2.string_field": "world"},
        {},
    )
    doc.save()
    doc = doc.reload(10)
    assert doc.embedded_field.list_field[2].string_field == "world"

    # Test multiple assignments: re-assigning the element collapses the
    # delta onto the element index itself.
    doc.embedded_field.list_field[2].string_field = "hello world"
    doc.embedded_field.list_field[2] = doc.embedded_field.list_field[2]
    assert doc._get_changed_fields() == ["embedded_field.list_field.2"]
    assert doc.embedded_field._delta() == (
        {
            "list_field.2": {
                "_cls": "Embedded",
                "string_field": "hello world",
                "int_field": 1,
                "list_field": ["1", 2, {"hello": "world"}],
                "dict_field": {"hello": "world"},
            }
        },
        {},
    )
    assert doc._delta() == (
        {
            "embedded_field.list_field.2": {
                "_cls": "Embedded",
                "string_field": "hello world",
                "int_field": 1,
                "list_field": ["1", 2, {"hello": "world"}],
                "dict_field": {"hello": "world"},
            }
        },
        {},
    )
    doc.save()
    doc = doc.reload(10)
    assert doc.embedded_field.list_field[2].string_field == "hello world"

    # Test list native methods
    doc.embedded_field.list_field[2].list_field.pop(0)
    assert doc._delta() == (
        {"embedded_field.list_field.2.list_field": [2, {"hello": "world"}]},
        {},
    )
    doc.save()
    doc = doc.reload(10)

    doc.embedded_field.list_field[2].list_field.append(1)
    assert doc._delta() == (
        {"embedded_field.list_field.2.list_field": [2, {"hello": "world"}, 1]},
        {},
    )
    doc.save()
    doc = doc.reload(10)
    assert doc.embedded_field.list_field[2].list_field == [2, {"hello": "world"}, 1]

    doc.embedded_field.list_field[2].list_field.sort(key=str)
    doc.save()
    doc = doc.reload(10)
    assert doc.embedded_field.list_field[2].list_field == [1, 2, {"hello": "world"}]

    del doc.embedded_field.list_field[2].list_field[2]["hello"]
    assert doc._delta() == (
        {},
        {"embedded_field.list_field.2.list_field.2.hello": 1},
    )
    doc.save()
    doc = doc.reload(10)

    del doc.embedded_field.list_field[2].list_field
    assert doc._delta() == ({}, {"embedded_field.list_field.2.list_field": 1})
    doc.save()
    doc = doc.reload(10)

    # Embedded documents stored inside a plain DictField are also tracked.
    doc.dict_field["Embedded"] = embedded_1
    doc.save()
    doc = doc.reload(10)

    doc.dict_field["Embedded"].string_field = "Hello World"
    assert doc._get_changed_fields() == ["dict_field.Embedded.string_field"]
    assert doc._delta() == ({"dict_field.Embedded.string_field": "Hello World"}, {})
def test_circular_reference_deltas(self):
    """Circular references save correctly for every document-class combo."""
    self.circular_reference_deltas(Document, Document)
    self.circular_reference_deltas(Document, DynamicDocument)
    self.circular_reference_deltas(DynamicDocument, Document)
    self.circular_reference_deltas(DynamicDocument, DynamicDocument)
def circular_reference_deltas(self, DocClass1, DocClass2):
    """Build a Person<->Organization reference cycle and verify it round-trips."""

    class Person(DocClass1):
        name = StringField()
        owns = ListField(ReferenceField("Organization"))

    class Organization(DocClass2):
        name = StringField()
        owner = ReferenceField("Person")

    Person.drop_collection()
    Organization.drop_collection()

    person = Person(name="owner").save()
    organization = Organization(name="company").save()

    # Wire the cycle after both documents exist, then persist both sides.
    person.owns.append(organization)
    organization.owner = person

    person.save()
    organization.save()

    p = Person.objects[0].select_related()
    o = Organization.objects.first()
    assert p.owns[0] == o
    assert o.owner == p
def test_circular_reference_deltas_2(self):
    """Three-way reference cycles save correctly for every class combo."""
    self.circular_reference_deltas_2(Document, Document)
    self.circular_reference_deltas_2(Document, DynamicDocument)
    self.circular_reference_deltas_2(DynamicDocument, Document)
    self.circular_reference_deltas_2(DynamicDocument, DynamicDocument)
def circular_reference_deltas_2(self, DocClass1, DocClass2, dbref=True):
    """Build a person/org/employee reference cycle and verify it round-trips.

    Returns (person, organization, employee) so callers can make further
    delta assertions (see the dbref tests).
    """

    class Person(DocClass1):
        name = StringField()
        owns = ListField(ReferenceField("Organization", dbref=dbref))
        employer = ReferenceField("Organization", dbref=dbref)

    class Organization(DocClass2):
        name = StringField()
        owner = ReferenceField("Person", dbref=dbref)
        employees = ListField(ReferenceField("Person", dbref=dbref))

    Person.drop_collection()
    Organization.drop_collection()

    person = Person(name="owner").save()
    employee = Person(name="employee").save()
    organization = Organization(name="company").save()

    # Wire the cycle after all documents exist, then persist every side.
    person.owns.append(organization)
    organization.owner = person
    organization.employees.append(employee)
    employee.employer = organization

    person.save()
    organization.save()
    employee.save()

    p = Person.objects.get(name="owner")
    e = Person.objects.get(name="employee")
    o = Organization.objects.first()
    assert p.owns[0] == o
    assert o.owner == p
    assert e.employer == o

    return person, organization, employee
def test_delta_db_field(self):
self.delta_db_field(Document)
self.delta_db_field(DynamicDocument)
def delta_db_field(self, DocClass):
class Doc(DocClass):
string_field = StringField(db_field="db_string_field")
int_field = IntField(db_field="db_int_field")
dict_field = DictField(db_field="db_dict_field")
list_field = ListField(db_field="db_list_field")
Doc.drop_collection()
doc = Doc()
doc.save()
doc = Doc.objects.first()
assert doc._get_changed_fields() == []
assert doc._delta() == ({}, {})
doc.string_field = "hello"
assert doc._get_changed_fields() == ["db_string_field"]
assert doc._delta() == ({"db_string_field": "hello"}, {})
doc._changed_fields = []
doc.int_field = 1
assert doc._get_changed_fields() == ["db_int_field"]
assert doc._delta() == ({"db_int_field": 1}, {})
doc._changed_fields = []
dict_value = {"hello": "world", "ping": "pong"}
doc.dict_field = dict_value
assert doc._get_changed_fields() == ["db_dict_field"]
assert doc._delta() == ({"db_dict_field": dict_value}, {})
doc._changed_fields = []
list_value = ["1", 2, {"hello": "world"}]
doc.list_field = list_value
assert doc._get_changed_fields() == ["db_list_field"]
assert doc._delta() == ({"db_list_field": list_value}, {})
# Test unsetting
doc._changed_fields = []
doc.dict_field = {}
assert doc._get_changed_fields() == ["db_dict_field"]
assert doc._delta() == ({}, {"db_dict_field": 1})
doc._changed_fields = []
doc.list_field = []
assert doc._get_changed_fields() == ["db_list_field"]
assert doc._delta() == ({}, {"db_list_field": 1})
# Test it saves that data
doc = Doc()
doc.save()
doc.string_field = "hello"
doc.int_field = 1
doc.dict_field = {"hello": "world"}
doc.list_field = ["1", 2, {"hello": "world"}]
doc.save()
doc = doc.reload(10)
assert doc.string_field == "hello"
assert doc.int_field == 1
assert doc.dict_field == {"hello": "world"}
assert doc.list_field == ["1", 2, {"hello": "world"}]
def test_delta_recursive_db_field_on_doc_and_embeddeddoc(self):
self.delta_recursive_db_field(Document, EmbeddedDocument)
def test_delta_recursive_db_field_on_doc_and_dynamicembeddeddoc(self):
self.delta_recursive_db_field(Document, DynamicEmbeddedDocument)
def test_delta_recursive_db_field_on_dynamicdoc_and_embeddeddoc(self):
self.delta_recursive_db_field(DynamicDocument, EmbeddedDocument)
def test_delta_recursive_db_field_on_dynamicdoc_and_dynamicembeddeddoc(self):
self.delta_recursive_db_field(DynamicDocument, DynamicEmbeddedDocument)
@staticmethod
def delta_recursive_db_field(DocClass, EmbeddedClass):
class Embedded(EmbeddedClass):
string_field = StringField(db_field="db_string_field")
int_field = IntField(db_field="db_int_field")
dict_field = DictField(db_field="db_dict_field")
list_field = ListField(db_field="db_list_field")
class Doc(DocClass):
string_field = StringField(db_field="db_string_field")
int_field = IntField(db_field="db_int_field")
dict_field = DictField(db_field="db_dict_field")
list_field = ListField(db_field="db_list_field")
embedded_field = EmbeddedDocumentField(
Embedded, db_field="db_embedded_field"
)
Doc.drop_collection()
doc = Doc()
doc.save()
doc = Doc.objects.first()
assert doc._get_changed_fields() == []
assert doc._delta() == ({}, {})
embedded_1 = Embedded()
embedded_1.string_field = "hello"
embedded_1.int_field = 1
embedded_1.dict_field = {"hello": "world"}
embedded_1.list_field = ["1", 2, {"hello": "world"}]
doc.embedded_field = embedded_1
assert doc._get_changed_fields() == ["db_embedded_field"]
embedded_delta = {
"db_string_field": "hello",
"db_int_field": 1,
"db_dict_field": {"hello": "world"},
"db_list_field": ["1", 2, {"hello": "world"}],
}
assert doc.embedded_field._delta() == (embedded_delta, {})
assert doc._delta() == ({"db_embedded_field": embedded_delta}, {})
doc.save()
doc = doc.reload(10)
doc.embedded_field.dict_field = {}
assert doc._get_changed_fields() == ["db_embedded_field.db_dict_field"]
assert doc.embedded_field._delta() == ({}, {"db_dict_field": 1})
assert doc._delta() == ({}, {"db_embedded_field.db_dict_field": 1})
doc.save()
doc = doc.reload(10)
assert doc.embedded_field.dict_field == {}
assert doc._get_changed_fields() == []
doc.embedded_field.list_field = []
assert doc._get_changed_fields() == ["db_embedded_field.db_list_field"]
assert doc.embedded_field._delta() == ({}, {"db_list_field": 1})
assert doc._delta() == ({}, {"db_embedded_field.db_list_field": 1})
doc.save()
doc = doc.reload(10)
assert doc.embedded_field.list_field == []
embedded_2 = Embedded()
embedded_2.string_field = "hello"
embedded_2.int_field = 1
embedded_2.dict_field = {"hello": "world"}
embedded_2.list_field = ["1", 2, {"hello": "world"}]
doc.embedded_field.list_field = ["1", 2, embedded_2]
assert doc._get_changed_fields() == ["db_embedded_field.db_list_field"]
assert doc.embedded_field._delta() == (
{
"db_list_field": [
"1",
2,
{
"_cls": "Embedded",
"db_string_field": "hello",
"db_dict_field": {"hello": "world"},
"db_int_field": 1,
"db_list_field": ["1", 2, {"hello": "world"}],
},
]
},
{},
)
assert doc._delta() == (
{
"db_embedded_field.db_list_field": [
"1",
2,
{
"_cls": "Embedded",
"db_string_field": "hello",
"db_dict_field": {"hello": "world"},
"db_int_field": 1,
"db_list_field": ["1", 2, {"hello": "world"}],
},
]
},
{},
)
doc.save()
assert doc._get_changed_fields() == []
doc = doc.reload(10)
assert doc.embedded_field.list_field[0] == "1"
assert doc.embedded_field.list_field[1] == 2
for k in doc.embedded_field.list_field[2]._fields:
assert doc.embedded_field.list_field[2][k] == embedded_2[k]
doc.embedded_field.list_field[2].string_field = "world"
assert doc._get_changed_fields() == [
"db_embedded_field.db_list_field.2.db_string_field"
]
assert doc.embedded_field._delta() == (
{"db_list_field.2.db_string_field": "world"},
{},
)
assert doc._delta() == (
{"db_embedded_field.db_list_field.2.db_string_field": "world"},
{},
)
doc.save()
doc = doc.reload(10)
assert doc.embedded_field.list_field[2].string_field == "world"
# Test multiple assignments
doc.embedded_field.list_field[2].string_field = "hello world"
doc.embedded_field.list_field[2] = doc.embedded_field.list_field[2]
assert doc._get_changed_fields() == ["db_embedded_field.db_list_field.2"]
assert doc.embedded_field._delta() == (
{
"db_list_field.2": {
"_cls": "Embedded",
"db_string_field": "hello world",
"db_int_field": 1,
"db_list_field": ["1", 2, {"hello": "world"}],
"db_dict_field": {"hello": "world"},
}
},
{},
)
assert doc._delta() == (
{
"db_embedded_field.db_list_field.2": {
"_cls": "Embedded",
"db_string_field": "hello world",
"db_int_field": 1,
"db_list_field": ["1", 2, {"hello": "world"}],
"db_dict_field": {"hello": "world"},
}
},
{},
)
doc.save()
doc = doc.reload(10)
assert doc.embedded_field.list_field[2].string_field == "hello world"
# Test list native methods
doc.embedded_field.list_field[2].list_field.pop(0)
assert doc._delta() == (
{
"db_embedded_field.db_list_field.2.db_list_field": [
2,
{"hello": "world"},
]
},
{},
)
doc.save()
doc = doc.reload(10)
doc.embedded_field.list_field[2].list_field.append(1)
assert doc._delta() == (
{
"db_embedded_field.db_list_field.2.db_list_field": [
2,
{"hello": "world"},
1,
]
},
{},
)
doc.save()
doc = doc.reload(10)
assert doc.embedded_field.list_field[2].list_field == [2, {"hello": "world"}, 1]
doc.embedded_field.list_field[2].list_field.sort(key=str)
doc.save()
doc = doc.reload(10)
assert doc.embedded_field.list_field[2].list_field == [1, 2, {"hello": "world"}]
del doc.embedded_field.list_field[2].list_field[2]["hello"]
assert doc._delta() == (
{},
{"db_embedded_field.db_list_field.2.db_list_field.2.hello": 1},
)
doc.save()
doc = doc.reload(10)
assert doc._delta() == ({}, {},)
del doc.embedded_field.list_field[2].list_field
assert doc._delta() == (
{},
{"db_embedded_field.db_list_field.2.db_list_field": 1},
)
def test_delta_for_dynamic_documents(self):
class Person(DynamicDocument):
name = StringField()
meta = {"allow_inheritance": True}
Person.drop_collection()
p = Person(name="James", age=34)
assert p._delta() == (
SON([("_cls", "Person"), ("name", "James"), ("age", 34)]),
{},
)
p.doc = 123
del p.doc
assert p._delta() == (
SON([("_cls", "Person"), ("name", "James"), ("age", 34)]),
{},
)
p = Person()
p.name = "Dean"
p.age = 22
p.save()
p.age = 24
assert p.age == 24
assert p._get_changed_fields() == ["age"]
assert p._delta() == ({"age": 24}, {})
p = Person.objects(age=22).get()
p.age = 24
assert p.age == 24
assert p._get_changed_fields() == ["age"]
assert p._delta() == ({"age": 24}, {})
p.save()
assert 1 == Person.objects(age=24).count()
def test_dynamic_delta(self):
class Doc(DynamicDocument):
pass
Doc.drop_collection()
doc = Doc()
doc.save()
doc = Doc.objects.first()
assert doc._get_changed_fields() == []
assert doc._delta() == ({}, {})
doc.string_field = "hello"
assert doc._get_changed_fields() == ["string_field"]
assert doc._delta() == ({"string_field": "hello"}, {})
doc._changed_fields = []
doc.int_field = 1
assert doc._get_changed_fields() == ["int_field"]
assert doc._delta() == ({"int_field": 1}, {})
doc._changed_fields = []
dict_value = {"hello": "world", "ping": "pong"}
doc.dict_field = dict_value
assert doc._get_changed_fields() == ["dict_field"]
assert doc._delta() == ({"dict_field": dict_value}, {})
doc._changed_fields = []
list_value = ["1", 2, {"hello": "world"}]
doc.list_field = list_value
assert doc._get_changed_fields() == ["list_field"]
assert doc._delta() == ({"list_field": list_value}, {})
# Test unsetting
doc._changed_fields = []
doc.dict_field = {}
assert doc._get_changed_fields() == ["dict_field"]
assert doc._delta() == ({}, {"dict_field": 1})
doc._changed_fields = []
doc.list_field = []
assert doc._get_changed_fields() == ["list_field"]
assert doc._delta() == ({}, {"list_field": 1})
def test_delta_with_dbref_true(self):
    """Delta tracking across a circular reference graph with dbref=True:
    mutating a referenced document must not dirty the referrer, while
    appending to its reference list must."""
    docs = self.circular_reference_deltas_2(Document, Document, True)
    person, organization, employee = docs

    # A change on the referenced employee leaves the organization clean.
    employee.name = "test"
    assert organization._get_changed_fields() == []
    changed, removed = organization._delta()
    assert changed == {}
    assert removed == {}

    # Appending a reference dirties exactly the "employees" field.
    organization.employees.append(person)
    changed, removed = organization._delta()
    assert removed == {}
    assert "employees" in changed
def test_delta_with_dbref_false(self):
    """Same contract as test_delta_with_dbref_true, but with plain ObjectId
    references (dbref=False)."""
    docs = self.circular_reference_deltas_2(Document, Document, False)
    person, organization, employee = docs

    # A change on the referenced employee leaves the organization clean.
    employee.name = "test"
    assert organization._get_changed_fields() == []
    changed, removed = organization._delta()
    assert changed == {}
    assert removed == {}

    # Appending a reference dirties exactly the "employees" field.
    organization.employees.append(person)
    changed, removed = organization._delta()
    assert removed == {}
    assert "employees" in changed
def test_nested_nested_fields_mark_as_changed(self):
    """A change inside a doubly-nested MapField is reported with its full
    dotted path on the root document."""

    class EmbeddedDoc(EmbeddedDocument):
        name = StringField()

    class MyDoc(Document):
        subs = MapField(MapField(EmbeddedDocumentField(EmbeddedDoc)))
        name = StringField()

    MyDoc.drop_collection()

    MyDoc(name="testcase1", subs={"a": {"b": EmbeddedDoc(name="foo")}}).save()

    mydoc = MyDoc.objects.first()
    subdoc = mydoc.subs["a"]["b"]
    subdoc.name = "bar"

    # The embedded doc reports the local field; the root reports the
    # fully-qualified path through both map levels.
    assert subdoc._get_changed_fields() == ["name"]
    assert mydoc._get_changed_fields() == ["subs.a.b.name"]

    mydoc._clear_changed_fields()
    assert mydoc._get_changed_fields() == []
def test_nested_nested_fields_db_field_set__gets_mark_as_changed_and_cleaned(self):
    """Changed-field paths use the db_field names (not the Python attribute
    names), both for a nested field change and for replacing a whole
    embedded document."""

    class EmbeddedDoc(EmbeddedDocument):
        name = StringField(db_field="db_name")

    class MyDoc(Document):
        embed = EmbeddedDocumentField(EmbeddedDoc, db_field="db_embed")
        name = StringField(db_field="db_name")

    MyDoc.drop_collection()
    MyDoc(name="testcase1", embed=EmbeddedDoc(name="foo")).save()

    # Mutating a field inside the embedded doc: paths are db_field-based.
    mydoc = MyDoc.objects.first()
    mydoc.embed.name = "foo1"
    assert mydoc.embed._get_changed_fields() == ["db_name"]
    assert mydoc._get_changed_fields() == ["db_embed.db_name"]

    # Replacing the embedded doc wholesale: the root tracks the embed
    # field itself, not the inner field path.
    mydoc = MyDoc.objects.first()
    embed = EmbeddedDoc(name="foo2")
    embed.name = "bar"
    mydoc.embed = embed
    assert embed._get_changed_fields() == ["db_name"]
    assert mydoc._get_changed_fields() == ["db_embed"]

    mydoc._clear_changed_fields()
    assert mydoc._get_changed_fields() == []
def test_lower_level_mark_as_changed(self):
    """Once a map entry is marked changed as a whole ("subs.a"), a later
    change inside it does not add a narrower path on the root."""

    class EmbeddedDoc(EmbeddedDocument):
        name = StringField()

    class MyDoc(Document):
        subs = MapField(EmbeddedDocumentField(EmbeddedDoc))

    MyDoc.drop_collection()
    MyDoc().save()

    mydoc = MyDoc.objects.first()
    mydoc.subs["a"] = EmbeddedDoc()
    assert mydoc._get_changed_fields() == ["subs.a"]

    subdoc = mydoc.subs["a"]
    subdoc.name = "bar"

    # The inner change is subsumed by the already-tracked "subs.a".
    assert subdoc._get_changed_fields() == ["name"]
    assert mydoc._get_changed_fields() == ["subs.a"]
    mydoc.save()

    mydoc._clear_changed_fields()
    assert mydoc._get_changed_fields() == []
def test_upper_level_mark_as_changed(self):
    """Replacing a whole map entry widens an existing narrower changed path
    ("subs.a.name") to the entry itself ("subs.a")."""

    class EmbeddedDoc(EmbeddedDocument):
        name = StringField()

    class MyDoc(Document):
        subs = MapField(EmbeddedDocumentField(EmbeddedDoc))

    MyDoc.drop_collection()
    MyDoc(subs={"a": EmbeddedDoc(name="foo")}).save()

    mydoc = MyDoc.objects.first()
    subdoc = mydoc.subs["a"]
    subdoc.name = "bar"

    assert subdoc._get_changed_fields() == ["name"]
    assert mydoc._get_changed_fields() == ["subs.a.name"]

    # Overwriting the entry replaces the narrower path with "subs.a".
    mydoc.subs["a"] = EmbeddedDoc()
    assert mydoc._get_changed_fields() == ["subs.a"]
    mydoc.save()

    mydoc._clear_changed_fields()
    assert mydoc._get_changed_fields() == []
def test_referenced_object_changed_attributes(self):
    """Ensures that when you save a new reference to a field, the referenced object isn't altered"""

    class Organization(Document):
        name = StringField()

    class User(Document):
        name = StringField()
        org = ReferenceField("Organization", required=True)

    # Start from empty collections.
    Organization.drop_collection()
    User.drop_collection()

    first_org = Organization(name="Org 1")
    first_org.save()
    second_org = Organization(name="Org 2")
    second_org.save()
    member = User(name="Fred", org=first_org)
    member.save()

    # Round-trip all three documents through the database.
    for persisted in (first_org, second_org, member):
        persisted.reload()
    assert first_org.name == "Org 1"
    assert second_org.name == "Org 2"
    assert member.name == "Fred"

    # Repoint the user at the second organization while that organization
    # carries a pending (unsaved) rename of its own.
    member.name = "Harold"
    member.org = second_org
    second_org.name = "New Org 2"
    assert second_org.name == "New Org 2"

    member.save()
    second_org.save()

    # The pending rename must survive the user's save and a reload.
    assert second_org.name == "New Org 2"
    second_org.reload()
    assert second_org.name == "New Org 2"
def test_delta_for_nested_map_fields(self):
    """Deltas for changes nested inside a MapField of embedded documents:
    a nested map entry, a nested list append, and a nested reference all
    appear under their dotted paths in the set-part of the delta."""

    class UInfoDocument(Document):
        phone = StringField()

    class EmbeddedRole(EmbeddedDocument):
        type = StringField()

    class EmbeddedUser(EmbeddedDocument):
        name = StringField()
        roles = MapField(field=EmbeddedDocumentField(EmbeddedRole))
        rolist = ListField(field=EmbeddedDocumentField(EmbeddedRole))
        info = ReferenceField(UInfoDocument)

    class Doc(Document):
        users = MapField(field=EmbeddedDocumentField(EmbeddedUser))
        num = IntField(default=-1)

    Doc.drop_collection()

    doc = Doc(num=1)
    doc.users["007"] = EmbeddedUser(name="Agent007")
    doc.save()

    uinfo = UInfoDocument(phone="79089269066")
    uinfo.save()

    d = Doc.objects(num=1).first()
    d.users["007"]["roles"]["666"] = EmbeddedRole(type="superadmin")
    d.users["007"]["rolist"].append(EmbeddedRole(type="oops"))
    d.users["007"]["info"] = uinfo
    delta = d._delta()
    # delta[0] is the set-part; each change shows up under its full path.
    assert True == ("users.007.roles.666" in delta[0])
    assert True == ("users.007.rolist" in delta[0])
    assert True == ("users.007.info" in delta[0])
    assert "superadmin" == delta[0]["users.007.roles.666"]["type"]
    assert "oops" == delta[0]["users.007.rolist"][0]["type"]
    # The reference is stored as the target's id.
    assert uinfo.id == delta[0]["users.007.info"]
if __name__ == "__main__":
    # Allow running this test module directly. (The trailing dataset-column
    # residue that was fused onto this line has been removed.)
    unittest.main()
import socket
from test import resolvesLocalhostFQDN
from unittest.mock import patch
import pytest
from urllib3 import connection_from_url
from urllib3.exceptions import ClosedPoolError, LocationValueError
from urllib3.poolmanager import PoolKey, PoolManager, key_fn_by_scheme
from urllib3.util import retry, timeout
class TestPoolManager:
    """Unit tests for urllib3's PoolManager: pool keying (scheme/host/port,
    case-insensitivity, custom key functions), pool kwargs merging and
    overrides, and lifecycle (clear / context manager).

    Note: the dataset residue fused onto the final line of this class
    ("| test/test_poolmanager.py | import socket") has been removed.
    """

    @resolvesLocalhostFQDN()
    def test_same_url(self):
        # Convince ourselves that normally we don't get the same object
        conn1 = connection_from_url("http://localhost:8081/foo")
        conn2 = connection_from_url("http://localhost:8081/bar")
        assert conn1 != conn2

        # Now try again using the PoolManager
        p = PoolManager(1)
        conn1 = p.connection_from_url("http://localhost:8081/foo")
        conn2 = p.connection_from_url("http://localhost:8081/bar")
        assert conn1 == conn2

        # Ensure that FQDNs are handled separately from relative domains
        p = PoolManager(2)
        conn1 = p.connection_from_url("http://localhost.:8081/foo")
        conn2 = p.connection_from_url("http://localhost:8081/bar")
        assert conn1 != conn2

    def test_many_urls(self):
        urls = [
            "http://localhost:8081/foo",
            "http://www.google.com/mail",
            "http://localhost:8081/bar",
            "https://www.google.com/",
            "https://www.google.com/mail",
            "http://yahoo.com",
            "http://bing.com",
            "http://yahoo.com/",
        ]

        connections = set()

        p = PoolManager(10)
        for url in urls:
            conn = p.connection_from_url(url)
            connections.add(conn)

        # 5 distinct (scheme, host, port) combinations above.
        assert len(connections) == 5

    def test_manager_clear(self):
        p = PoolManager(5)
        conn_pool = p.connection_from_url("http://google.com")
        assert len(p.pools) == 1

        conn = conn_pool._get_conn()

        p.clear()
        assert len(p.pools) == 0

        # A cleared pool refuses to hand out connections.
        with pytest.raises(ClosedPoolError):
            conn_pool._get_conn()

        conn_pool._put_conn(conn)

        with pytest.raises(ClosedPoolError):
            conn_pool._get_conn()

        assert len(p.pools) == 0

    @pytest.mark.parametrize("url", ["http://@", None])
    def test_nohost(self, url):
        p = PoolManager(5)
        with pytest.raises(LocationValueError):
            p.connection_from_url(url=url)

    def test_contextmanager(self):
        with PoolManager(1) as p:
            conn_pool = p.connection_from_url("http://google.com")
            assert len(p.pools) == 1
            conn = conn_pool._get_conn()

        # Leaving the context clears the manager and closes its pools.
        assert len(p.pools) == 0

        with pytest.raises(ClosedPoolError):
            conn_pool._get_conn()

        conn_pool._put_conn(conn)

        with pytest.raises(ClosedPoolError):
            conn_pool._get_conn()

        assert len(p.pools) == 0

    def test_http_pool_key_fields(self):
        """Assert the HTTPPoolKey fields are honored when selecting a pool."""
        connection_pool_kw = {
            "timeout": timeout.Timeout(3.14),
            "retries": retry.Retry(total=6, connect=2),
            "block": True,
            "source_address": "127.0.0.1",
        }
        p = PoolManager()
        conn_pools = [
            p.connection_from_url("http://example.com/"),
            p.connection_from_url("http://example.com:8000/"),
            p.connection_from_url("http://other.example.com/"),
        ]

        # Each changed keyword produces a new, distinct pool for the same URL.
        for key, value in connection_pool_kw.items():
            p.connection_pool_kw[key] = value
            conn_pools.append(p.connection_from_url("http://example.com/"))

        assert all(
            x is not y
            for i, x in enumerate(conn_pools)
            for j, y in enumerate(conn_pools)
            if i != j
        )
        assert all(isinstance(key, PoolKey) for key in p.pools.keys())

    def test_https_pool_key_fields(self):
        """Assert the HTTPSPoolKey fields are honored when selecting a pool."""
        connection_pool_kw = {
            "timeout": timeout.Timeout(3.14),
            "retries": retry.Retry(total=6, connect=2),
            "block": True,
            "source_address": "127.0.0.1",
            "key_file": "/root/totally_legit.key",
            "cert_file": "/root/totally_legit.crt",
            "cert_reqs": "CERT_REQUIRED",
            "ca_certs": "/root/path_to_pem",
            "ssl_version": "SSLv23_METHOD",
        }
        p = PoolManager()
        conn_pools = [
            p.connection_from_url("https://example.com/"),
            p.connection_from_url("https://example.com:4333/"),
            p.connection_from_url("https://other.example.com/"),
        ]
        # Asking for a connection pool with the same key should give us an
        # existing pool.
        dup_pools = []

        for key, value in connection_pool_kw.items():
            p.connection_pool_kw[key] = value
            conn_pools.append(p.connection_from_url("https://example.com/"))
            dup_pools.append(p.connection_from_url("https://example.com/"))

        assert all(
            x is not y
            for i, x in enumerate(conn_pools)
            for j, y in enumerate(conn_pools)
            if i != j
        )
        assert all(pool in conn_pools for pool in dup_pools)
        assert all(isinstance(key, PoolKey) for key in p.pools.keys())

    def test_default_pool_key_funcs_copy(self):
        """Assert each PoolManager gets a copy of ``pool_keys_by_scheme``."""
        p = PoolManager()
        assert p.key_fn_by_scheme == p.key_fn_by_scheme
        assert p.key_fn_by_scheme is not key_fn_by_scheme

    def test_pools_keyed_with_from_host(self):
        """Assert pools are still keyed correctly with connection_from_host."""
        ssl_kw = {
            "key_file": "/root/totally_legit.key",
            "cert_file": "/root/totally_legit.crt",
            "cert_reqs": "CERT_REQUIRED",
            "ca_certs": "/root/path_to_pem",
            "ssl_version": "SSLv23_METHOD",
        }
        p = PoolManager(5, **ssl_kw)
        conns = [p.connection_from_host("example.com", 443, scheme="https")]

        for k in ssl_kw:
            p.connection_pool_kw[k] = "newval"
            conns.append(p.connection_from_host("example.com", 443, scheme="https"))

        assert all(
            x is not y
            for i, x in enumerate(conns)
            for j, y in enumerate(conns)
            if i != j
        )

    def test_https_connection_from_url_case_insensitive(self):
        """Assert scheme case is ignored when pooling HTTPS connections."""
        p = PoolManager()
        pool = p.connection_from_url("https://example.com/")
        other_pool = p.connection_from_url("HTTPS://EXAMPLE.COM/")

        assert 1 == len(p.pools)
        assert pool is other_pool
        assert all(isinstance(key, PoolKey) for key in p.pools.keys())

    def test_https_connection_from_host_case_insensitive(self):
        """Assert scheme case is ignored when getting the https key class."""
        p = PoolManager()
        pool = p.connection_from_host("example.com", scheme="https")
        other_pool = p.connection_from_host("EXAMPLE.COM", scheme="HTTPS")

        assert 1 == len(p.pools)
        assert pool is other_pool
        assert all(isinstance(key, PoolKey) for key in p.pools.keys())

    def test_https_connection_from_context_case_insensitive(self):
        """Assert scheme case is ignored when getting the https key class."""
        p = PoolManager()
        context = {"scheme": "https", "host": "example.com", "port": "443"}
        other_context = {"scheme": "HTTPS", "host": "EXAMPLE.COM", "port": "443"}
        pool = p.connection_from_context(context)
        other_pool = p.connection_from_context(other_context)

        assert 1 == len(p.pools)
        assert pool is other_pool
        assert all(isinstance(key, PoolKey) for key in p.pools.keys())

    def test_http_connection_from_url_case_insensitive(self):
        """Assert scheme case is ignored when pooling HTTP connections."""
        p = PoolManager()
        pool = p.connection_from_url("http://example.com/")
        other_pool = p.connection_from_url("HTTP://EXAMPLE.COM/")

        assert 1 == len(p.pools)
        assert pool is other_pool
        assert all(isinstance(key, PoolKey) for key in p.pools.keys())

    def test_http_connection_from_host_case_insensitive(self):
        """Assert scheme case is ignored when getting the https key class."""
        p = PoolManager()
        pool = p.connection_from_host("example.com", scheme="http")
        other_pool = p.connection_from_host("EXAMPLE.COM", scheme="HTTP")

        assert 1 == len(p.pools)
        assert pool is other_pool
        assert all(isinstance(key, PoolKey) for key in p.pools.keys())

    def test_assert_hostname_and_fingerprint_flag(self):
        """Assert that pool manager can accept hostname and fingerprint flags."""
        fingerprint = "92:81:FE:85:F7:0C:26:60:EC:D6:B3:BF:93:CF:F9:71:CC:07:7D:0A"
        p = PoolManager(assert_hostname=True, assert_fingerprint=fingerprint)
        pool = p.connection_from_url("https://example.com/")
        assert 1 == len(p.pools)
        assert pool.assert_hostname
        assert fingerprint == pool.assert_fingerprint

    def test_http_connection_from_context_case_insensitive(self):
        """Assert scheme case is ignored when getting the https key class."""
        p = PoolManager()
        context = {"scheme": "http", "host": "example.com", "port": "8080"}
        other_context = {"scheme": "HTTP", "host": "EXAMPLE.COM", "port": "8080"}
        pool = p.connection_from_context(context)
        other_pool = p.connection_from_context(other_context)

        assert 1 == len(p.pools)
        assert pool is other_pool
        assert all(isinstance(key, PoolKey) for key in p.pools.keys())

    @patch("urllib3.poolmanager.PoolManager.connection_from_pool_key")
    def test_connection_from_context_strict_param(self, connection_from_pool_key):
        p = PoolManager()
        context = {
            "scheme": "http",
            "host": "example.com",
            "port": 8080,
            "strict": True,
        }
        with pytest.warns(DeprecationWarning) as records:
            p.connection_from_context(context)

        msg = (
            "The 'strict' parameter is no longer needed on Python 3+. "
            "This will raise an error in urllib3 v3.0.0."
        )
        assert any(record.message.args[0] == msg for record in records)

        # The deprecated key must be stripped before pool-key construction.
        _, kwargs = connection_from_pool_key.call_args
        assert kwargs["request_context"] == {
            "scheme": "http",
            "host": "example.com",
            "port": 8080,
        }

    def test_custom_pool_key(self):
        """Assert it is possible to define a custom key function."""
        p = PoolManager(10)

        p.key_fn_by_scheme["http"] = lambda x: tuple(x["key"])
        pool1 = p.connection_from_url(
            "http://example.com", pool_kwargs={"key": "value"}
        )
        pool2 = p.connection_from_url(
            "http://example.com", pool_kwargs={"key": "other"}
        )
        pool3 = p.connection_from_url(
            "http://example.com", pool_kwargs={"key": "value", "x": "y"}
        )

        assert 2 == len(p.pools)
        assert pool1 is pool3
        assert pool1 is not pool2

    def test_override_pool_kwargs_url(self):
        """Assert overriding pool kwargs works with connection_from_url."""
        p = PoolManager()
        pool_kwargs = {"retries": 100, "block": True}

        default_pool = p.connection_from_url("http://example.com/")
        override_pool = p.connection_from_url(
            "http://example.com/", pool_kwargs=pool_kwargs
        )

        assert retry.Retry.DEFAULT == default_pool.retries
        assert not default_pool.block

        assert 100 == override_pool.retries
        assert override_pool.block

    def test_override_pool_kwargs_host(self):
        """Assert overriding pool kwargs works with connection_from_host"""
        p = PoolManager()
        pool_kwargs = {"retries": 100, "block": True}

        default_pool = p.connection_from_host("example.com", scheme="http")
        override_pool = p.connection_from_host(
            "example.com", scheme="http", pool_kwargs=pool_kwargs
        )

        assert retry.Retry.DEFAULT == default_pool.retries
        assert not default_pool.block

        assert 100 == override_pool.retries
        assert override_pool.block

    def test_pool_kwargs_socket_options(self):
        """Assert passing socket options works with connection_from_host"""
        p = PoolManager(socket_options=[])
        override_opts = [
            (socket.SOL_SOCKET, socket.SO_REUSEADDR, 1),
            (socket.IPPROTO_TCP, socket.TCP_NODELAY, 1),
        ]
        pool_kwargs = {"socket_options": override_opts}

        default_pool = p.connection_from_host("example.com", scheme="http")
        override_pool = p.connection_from_host(
            "example.com", scheme="http", pool_kwargs=pool_kwargs
        )

        assert default_pool.conn_kw["socket_options"] == []
        assert override_pool.conn_kw["socket_options"] == override_opts

    def test_merge_pool_kwargs(self):
        """Assert _merge_pool_kwargs works in the happy case"""
        p = PoolManager(retries=100)
        merged = p._merge_pool_kwargs({"new_key": "value"})
        assert {"retries": 100, "new_key": "value"} == merged

    def test_merge_pool_kwargs_none(self):
        """Assert false-y values to _merge_pool_kwargs result in defaults"""
        p = PoolManager(retries=100)
        merged = p._merge_pool_kwargs({})
        assert p.connection_pool_kw == merged
        merged = p._merge_pool_kwargs(None)
        assert p.connection_pool_kw == merged

    def test_merge_pool_kwargs_remove_key(self):
        """Assert keys can be removed with _merge_pool_kwargs"""
        p = PoolManager(retries=100)
        merged = p._merge_pool_kwargs({"retries": None})
        assert "retries" not in merged

    def test_merge_pool_kwargs_invalid_key(self):
        """Assert removing invalid keys with _merge_pool_kwargs doesn't break"""
        p = PoolManager(retries=100)
        merged = p._merge_pool_kwargs({"invalid_key": None})
        assert p.connection_pool_kw == merged

    def test_pool_manager_no_url_absolute_form(self):
        """Valides we won't send a request with absolute form without a proxy"""
        p = PoolManager()
        assert p._proxy_requires_url_absolute_form("http://example.com") is False
        assert p._proxy_requires_url_absolute_form("https://example.com") is False
from test import resolvesLocalhostFQDN
from unittest.mock import patch
import pytest
from urllib3 import connection_from_url
from urllib3.exceptions import ClosedPoolError, LocationValueError
from urllib3.poolmanager import PoolKey, PoolManager, key_fn_by_scheme
from urllib3.util import retry, timeout
class TestPoolManager:
@resolvesLocalhostFQDN()
def test_same_url(self):
# Convince ourselves that normally we don't get the same object
conn1 = connection_from_url("http://localhost:8081/foo")
conn2 = connection_from_url("http://localhost:8081/bar")
assert conn1 != conn2
# Now try again using the PoolManager
p = PoolManager(1)
conn1 = p.connection_from_url("http://localhost:8081/foo")
conn2 = p.connection_from_url("http://localhost:8081/bar")
assert conn1 == conn2
# Ensure that FQDNs are handled separately from relative domains
p = PoolManager(2)
conn1 = p.connection_from_url("http://localhost.:8081/foo")
conn2 = p.connection_from_url("http://localhost:8081/bar")
assert conn1 != conn2
def test_many_urls(self):
urls = [
"http://localhost:8081/foo",
"http://www.google.com/mail",
"http://localhost:8081/bar",
"https://www.google.com/",
"https://www.google.com/mail",
"http://yahoo.com",
"http://bing.com",
"http://yahoo.com/",
]
connections = set()
p = PoolManager(10)
for url in urls:
conn = p.connection_from_url(url)
connections.add(conn)
assert len(connections) == 5
def test_manager_clear(self):
p = PoolManager(5)
conn_pool = p.connection_from_url("http://google.com")
assert len(p.pools) == 1
conn = conn_pool._get_conn()
p.clear()
assert len(p.pools) == 0
with pytest.raises(ClosedPoolError):
conn_pool._get_conn()
conn_pool._put_conn(conn)
with pytest.raises(ClosedPoolError):
conn_pool._get_conn()
assert len(p.pools) == 0
@pytest.mark.parametrize("url", ["http://@", None])
def test_nohost(self, url):
p = PoolManager(5)
with pytest.raises(LocationValueError):
p.connection_from_url(url=url)
def test_contextmanager(self):
with PoolManager(1) as p:
conn_pool = p.connection_from_url("http://google.com")
assert len(p.pools) == 1
conn = conn_pool._get_conn()
assert len(p.pools) == 0
with pytest.raises(ClosedPoolError):
conn_pool._get_conn()
conn_pool._put_conn(conn)
with pytest.raises(ClosedPoolError):
conn_pool._get_conn()
assert len(p.pools) == 0
def test_http_pool_key_fields(self):
"""Assert the HTTPPoolKey fields are honored when selecting a pool."""
connection_pool_kw = {
"timeout": timeout.Timeout(3.14),
"retries": retry.Retry(total=6, connect=2),
"block": True,
"source_address": "127.0.0.1",
}
p = PoolManager()
conn_pools = [
p.connection_from_url("http://example.com/"),
p.connection_from_url("http://example.com:8000/"),
p.connection_from_url("http://other.example.com/"),
]
for key, value in connection_pool_kw.items():
p.connection_pool_kw[key] = value
conn_pools.append(p.connection_from_url("http://example.com/"))
assert all(
x is not y
for i, x in enumerate(conn_pools)
for j, y in enumerate(conn_pools)
if i != j
)
assert all(isinstance(key, PoolKey) for key in p.pools.keys())
def test_https_pool_key_fields(self):
"""Assert the HTTPSPoolKey fields are honored when selecting a pool."""
connection_pool_kw = {
"timeout": timeout.Timeout(3.14),
"retries": retry.Retry(total=6, connect=2),
"block": True,
"source_address": "127.0.0.1",
"key_file": "/root/totally_legit.key",
"cert_file": "/root/totally_legit.crt",
"cert_reqs": "CERT_REQUIRED",
"ca_certs": "/root/path_to_pem",
"ssl_version": "SSLv23_METHOD",
}
p = PoolManager()
conn_pools = [
p.connection_from_url("https://example.com/"),
p.connection_from_url("https://example.com:4333/"),
p.connection_from_url("https://other.example.com/"),
]
# Asking for a connection pool with the same key should give us an
# existing pool.
dup_pools = []
for key, value in connection_pool_kw.items():
p.connection_pool_kw[key] = value
conn_pools.append(p.connection_from_url("https://example.com/"))
dup_pools.append(p.connection_from_url("https://example.com/"))
assert all(
x is not y
for i, x in enumerate(conn_pools)
for j, y in enumerate(conn_pools)
if i != j
)
assert all(pool in conn_pools for pool in dup_pools)
assert all(isinstance(key, PoolKey) for key in p.pools.keys())
def test_default_pool_key_funcs_copy(self):
"""Assert each PoolManager gets a copy of ``pool_keys_by_scheme``."""
p = PoolManager()
assert p.key_fn_by_scheme == p.key_fn_by_scheme
assert p.key_fn_by_scheme is not key_fn_by_scheme
def test_pools_keyed_with_from_host(self):
"""Assert pools are still keyed correctly with connection_from_host."""
ssl_kw = {
"key_file": "/root/totally_legit.key",
"cert_file": "/root/totally_legit.crt",
"cert_reqs": "CERT_REQUIRED",
"ca_certs": "/root/path_to_pem",
"ssl_version": "SSLv23_METHOD",
}
p = PoolManager(5, **ssl_kw)
conns = [p.connection_from_host("example.com", 443, scheme="https")]
for k in ssl_kw:
p.connection_pool_kw[k] = "newval"
conns.append(p.connection_from_host("example.com", 443, scheme="https"))
assert all(
x is not y
for i, x in enumerate(conns)
for j, y in enumerate(conns)
if i != j
)
def test_https_connection_from_url_case_insensitive(self):
"""Assert scheme case is ignored when pooling HTTPS connections."""
p = PoolManager()
pool = p.connection_from_url("https://example.com/")
other_pool = p.connection_from_url("HTTPS://EXAMPLE.COM/")
assert 1 == len(p.pools)
assert pool is other_pool
assert all(isinstance(key, PoolKey) for key in p.pools.keys())
def test_https_connection_from_host_case_insensitive(self):
"""Assert scheme case is ignored when getting the https key class."""
p = PoolManager()
pool = p.connection_from_host("example.com", scheme="https")
other_pool = p.connection_from_host("EXAMPLE.COM", scheme="HTTPS")
assert 1 == len(p.pools)
assert pool is other_pool
assert all(isinstance(key, PoolKey) for key in p.pools.keys())
def test_https_connection_from_context_case_insensitive(self):
"""Assert scheme case is ignored when getting the https key class."""
p = PoolManager()
context = {"scheme": "https", "host": "example.com", "port": "443"}
other_context = {"scheme": "HTTPS", "host": "EXAMPLE.COM", "port": "443"}
pool = p.connection_from_context(context)
other_pool = p.connection_from_context(other_context)
assert 1 == len(p.pools)
assert pool is other_pool
assert all(isinstance(key, PoolKey) for key in p.pools.keys())
def test_http_connection_from_url_case_insensitive(self):
"""Assert scheme case is ignored when pooling HTTP connections."""
p = PoolManager()
pool = p.connection_from_url("http://example.com/")
other_pool = p.connection_from_url("HTTP://EXAMPLE.COM/")
assert 1 == len(p.pools)
assert pool is other_pool
assert all(isinstance(key, PoolKey) for key in p.pools.keys())
def test_http_connection_from_host_case_insensitive(self):
"""Assert scheme case is ignored when getting the https key class."""
p = PoolManager()
pool = p.connection_from_host("example.com", scheme="http")
other_pool = p.connection_from_host("EXAMPLE.COM", scheme="HTTP")
assert 1 == len(p.pools)
assert pool is other_pool
assert all(isinstance(key, PoolKey) for key in p.pools.keys())
def test_assert_hostname_and_fingerprint_flag(self):
"""Assert that pool manager can accept hostname and fingerprint flags."""
fingerprint = "92:81:FE:85:F7:0C:26:60:EC:D6:B3:BF:93:CF:F9:71:CC:07:7D:0A"
p = PoolManager(assert_hostname=True, assert_fingerprint=fingerprint)
pool = p.connection_from_url("https://example.com/")
assert 1 == len(p.pools)
assert pool.assert_hostname
assert fingerprint == pool.assert_fingerprint
def test_http_connection_from_context_case_insensitive(self):
"""Assert scheme case is ignored when getting the https key class."""
p = PoolManager()
context = {"scheme": "http", "host": "example.com", "port": "8080"}
other_context = {"scheme": "HTTP", "host": "EXAMPLE.COM", "port": "8080"}
pool = p.connection_from_context(context)
other_pool = p.connection_from_context(other_context)
assert 1 == len(p.pools)
assert pool is other_pool
assert all(isinstance(key, PoolKey) for key in p.pools.keys())
@patch("urllib3.poolmanager.PoolManager.connection_from_pool_key")
def test_connection_from_context_strict_param(self, connection_from_pool_key):
p = PoolManager()
context = {
"scheme": "http",
"host": "example.com",
"port": 8080,
"strict": True,
}
with pytest.warns(DeprecationWarning) as records:
p.connection_from_context(context)
msg = (
"The 'strict' parameter is no longer needed on Python 3+. "
"This will raise an error in urllib3 v3.0.0."
)
assert any(record.message.args[0] == msg for record in records)
_, kwargs = connection_from_pool_key.call_args
assert kwargs["request_context"] == {
"scheme": "http",
"host": "example.com",
"port": 8080,
}
def test_custom_pool_key(self):
"""Assert it is possible to define a custom key function."""
p = PoolManager(10)
p.key_fn_by_scheme["http"] = lambda x: tuple(x["key"])
pool1 = p.connection_from_url(
"http://example.com", pool_kwargs={"key": "value"}
)
pool2 = p.connection_from_url(
"http://example.com", pool_kwargs={"key": "other"}
)
pool3 = p.connection_from_url(
"http://example.com", pool_kwargs={"key": "value", "x": "y"}
)
assert 2 == len(p.pools)
assert pool1 is pool3
assert pool1 is not pool2
def test_override_pool_kwargs_url(self):
"""Assert overriding pool kwargs works with connection_from_url."""
p = PoolManager()
pool_kwargs = {"retries": 100, "block": True}
default_pool = p.connection_from_url("http://example.com/")
override_pool = p.connection_from_url(
"http://example.com/", pool_kwargs=pool_kwargs
)
assert retry.Retry.DEFAULT == default_pool.retries
assert not default_pool.block
assert 100 == override_pool.retries
assert override_pool.block
def test_override_pool_kwargs_host(self):
"""Assert overriding pool kwargs works with connection_from_host"""
p = PoolManager()
pool_kwargs = {"retries": 100, "block": True}
default_pool = p.connection_from_host("example.com", scheme="http")
override_pool = p.connection_from_host(
"example.com", scheme="http", pool_kwargs=pool_kwargs
)
assert retry.Retry.DEFAULT == default_pool.retries
assert not default_pool.block
assert 100 == override_pool.retries
assert override_pool.block
def test_pool_kwargs_socket_options(self):
"""Assert passing socket options works with connection_from_host"""
p = PoolManager(socket_options=[])
override_opts = [
(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1),
(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1),
]
pool_kwargs = {"socket_options": override_opts}
default_pool = p.connection_from_host("example.com", scheme="http")
override_pool = p.connection_from_host(
"example.com", scheme="http", pool_kwargs=pool_kwargs
)
assert default_pool.conn_kw["socket_options"] == []
assert override_pool.conn_kw["socket_options"] == override_opts
def test_merge_pool_kwargs(self):
"""Assert _merge_pool_kwargs works in the happy case"""
p = PoolManager(retries=100)
merged = p._merge_pool_kwargs({"new_key": "value"})
assert {"retries": 100, "new_key": "value"} == merged
def test_merge_pool_kwargs_none(self):
"""Assert false-y values to _merge_pool_kwargs result in defaults"""
p = PoolManager(retries=100)
merged = p._merge_pool_kwargs({})
assert p.connection_pool_kw == merged
merged = p._merge_pool_kwargs(None)
assert p.connection_pool_kw == merged
def test_merge_pool_kwargs_remove_key(self):
"""Assert keys can be removed with _merge_pool_kwargs"""
p = PoolManager(retries=100)
merged = p._merge_pool_kwargs({"retries": None})
assert "retries" not in merged
def test_merge_pool_kwargs_invalid_key(self):
"""Assert removing invalid keys with _merge_pool_kwargs doesn't break"""
p = PoolManager(retries=100)
merged = p._merge_pool_kwargs({"invalid_key": None})
assert p.connection_pool_kw == merged
def test_pool_manager_no_url_absolute_form(self):
"""Valides we won't send a request with absolute form without a proxy"""
p = PoolManager()
assert p._proxy_requires_url_absolute_form("http://example.com") is False
assert p._proxy_requires_url_absolute_form("https://example.com") is False | 0.63307 | 0.486941 |
# Analyze difference between two PALADIN alignemnts
import argparse
import shlex
import core.main
from core.datastore import DataStore
def plugin_connect(definition):
    """Register the "difference" plugin with the PALADIN plugin framework.

    Fills in the framework-supplied *definition* object with this plugin's
    metadata, its dependency list, and the callbacks used for argument
    parsing and execution.
    """
    definition.name = "difference"
    definition.description = "Analyze relative differences between two PALADIN taxonomy reports"
    definition.version_major = 1
    definition.version_minor = 1
    definition.version_revision = 0
    # Requires the taxonomy plugin's reports as input.
    definition.dependencies = ["taxonomy"]

    definition.callback_args = difference_args
    # No initialization step needed for this plugin.
    # definition.callback_init = difference_init
    definition.callback_main = difference_main
def difference_args(subargs):
    """Parse the plugin's argument string.

    :param subargs: raw argument string passed by the pipeline; split with
        shlex so quoted paths survive.
    :returns: ``(namespace, unknown)`` tuple from ``parse_known_args``.
    """
    arg_parser = argparse.ArgumentParser(description="PALADIN Pipeline Plugins: Difference", prog="difference")
    arg_parser.add_argument("-1", dest="basis_files", metavar=("Taxonomy", "Sam", "Uniprot"), type=str, nargs=3, required=True, help="Basis files of comparison (Taxonomy, Sam, UniProt)")
    # Fixed: help text was missing its closing parenthesis.
    arg_parser.add_argument("-2", dest="compare_files", metavar=("Taxonomy", "Sam", "Uniprot"), type=str, nargs=3, required=True, help="Compared files of comparison (Taxonomy, Sam, UniProt)")
    arg_parser.add_argument("-c", dest="custom", type=str, default="", help="Species-parsing regex pattern for non-UniProt entries")
    arg_parser.add_argument("-s", dest="simple", action="store_true", help="Simple report, do not include mapped/unmapped read contributions (alters totals)")
    return arg_parser.parse_known_args(shlex.split(subargs))
def difference_main(args):
    """Plugin entry point: compare two PALADIN runs and render the result.

    :param args: ``(namespace, unknown)`` tuple produced by difference_args.
    """
    # Obtain taxonomy differences
    core.main.send_output("Comparing taxonomy reports...", "stderr")
    taxonomy_data, taxonomy_total = taxonomy_compare((args[0].basis_files[0], args[0].compare_files[0]))

    # Obtain SAM differences with Uniprot info for full report
    uniprot_data = list()
    # Initialize here so the name is always bound: in simple mode (-s) the
    # SAM/UniProt gathering below is skipped, and an uninitialized sam_data
    # would raise NameError at the combined_compare call.
    sam_data = dict()
    if not args[0].simple:
        core.main.send_output("Gathering SAM data from first alignment", "stderr")
        sam_entries1 = core.main.SamEntry.get_entries(args[0].basis_files[1], -1)
        core.main.send_output("Gathering SAM data from second alignment", "stderr")
        sam_entries2 = core.main.SamEntry.get_entries(args[0].compare_files[1], -1)
        core.main.send_output("Comparing SAM data...", "stderr")
        sam_data = sam_compare(sam_entries1, sam_entries2)

        # Get Uniprot entries
        core.main.send_output("Gathering UniProt data from first alignment", "stderr")
        uniprot_data.append(core.main.PaladinEntry.get_entries(args[0].basis_files[2], 0, args[0].custom))
        core.main.send_output("Gathering UniProt data from second alignment", "stderr")
        uniprot_data.append(core.main.PaladinEntry.get_entries(args[0].compare_files[2], 0, args[0].custom))

    core.main.send_output("Comparing alignments...", "stderr")
    combined_data = combined_compare(taxonomy_data, sam_data, uniprot_data, taxonomy_total)
    render_combined(combined_data, args[0].simple)
def taxonomy_compare(reports):
    """Compare two taxonomy report files.

    Returns (difference, total): difference maps each taxon to
    (count in second report) - (count in first report), with a taxon absent
    from one report contributing 0 on that side; total is the sum of counts
    in the second report.
    """
    counts = (dict(), dict())
    second_total = 0
    # Load both reports into memory, skipping each file's single header line
    for report_idx, path in enumerate(reports):
        with open(path, "r") as report_file:
            report_file.readline()
            for row in report_file:
                columns = row.rstrip().split("\t")
                counts[report_idx][columns[2]] = int(columns[0])
                if report_idx == 1:
                    second_total += int(columns[0])
    # Signed per-taxon difference over the union of both reports' taxa
    all_taxa = set(counts[0]) | set(counts[1])
    differences = {taxon: counts[1].get(taxon, 0) - counts[0].get(taxon, 0) for taxon in all_taxa}
    return differences, second_total
def sam_compare(entries1, entries2):
    """Compare two sets of SAM entries keyed by (read ID, occurrence index).

    Returns a dict mapping (reference1, reference2) pairs to the number of
    read occurrences whose references differ between the two alignments.
    An occurrence missing from one alignment is treated as unmapped ("*").
    """
    ret_diff = dict()
    # Use set 1 as the base reads. Bug fix: iterating every (read, index) key
    # re-walked the full supplementary chain once per key, double counting
    # reads that have supplementary hits; walk each distinct read once.
    for base_read in {entry[0] for entry in entries1}:
        # Iterate through possible supplementary hits until neither available
        read_idx = (base_read, 0)
        while read_idx in entries1 or read_idx in entries2:
            if read_idx not in entries1:
                ref1 = "*"
                ref2 = entries2[read_idx].reference
            elif read_idx not in entries2:
                ref1 = entries1[read_idx].reference
                ref2 = "*"
            else:
                ref1 = entries1[read_idx].reference
                ref2 = entries2[read_idx].reference
            # Note references that do not match
            if ref1 != ref2:
                diff_pair = (ref1, ref2)
                ret_diff[diff_pair] = ret_diff.get(diff_pair, 0) + 1
            read_idx = (read_idx[0], read_idx[1] + 1)
    return ret_diff
def combined_compare(taxonomy_entries, sam_entries, uniprot_entries, pass_total):
    """ Combined compare breaks down taxonomy report on per read basis, then aggregates

    taxonomy_entries: taxon name -> signed read-count difference (mutated:
        an "Unmapped" pseudo-taxon is added below).
    sam_entries: (ref1, ref2) -> count of reads whose references differ,
        as produced by sam_compare.
    uniprot_entries: two dicts (one per alignment) keyed by the accession
        taken from the last "|"-delimited field of a SAM reference.
    pass_total: normalization denominator (second report's read total).
    Returns taxon -> (contributor -> (pos_frac, neg_frac), overall_frac).
    """
    ret_entries = dict()
    # Calculate unmapped difference and add to taxonomy set
    unmapped_diff = 0
    for sam_entry in sam_entries:
        for set_idx in range(2):
            if sam_entry[set_idx] == "*":
                if set_idx == 0:
                    # Reads unmapped in the basis run still count toward the total
                    pass_total += 1
                unmapped_diff += [1, -1][set_idx]
    # Add unmapped entry to taxonomy list
    taxonomy_entries["Unmapped"] = unmapped_diff
    # Iterate through each taxonomy grouping
    for taxon_entry in taxonomy_entries:
        # Skip unchanged entries
        if taxonomy_entries[taxon_entry] == 0:
            continue
        # Prepare entry if new
        if taxon_entry not in ret_entries:
            ret_entries[taxon_entry] = (dict(), taxonomy_entries[taxon_entry] / pass_total)
        # Scan SAM entries for matching lineage
        for sam_entry in sam_entries:
            # Lookup species and lineage
            sam_records = list()
            lineage = list()
            for set_idx in range(2):
                if sam_entry[set_idx] == "*":
                    # Unmapped side: synthesize a placeholder record object
                    miss_record = type("miss_record", (object,), {})()
                    miss_record.species_id = "Unmapped"
                    miss_record.species_full = "Unmapped"
                    sam_records.append(miss_record)
                    lineage.append("Unmapped")
                else:
                    sam_records.append(uniprot_entries[set_idx][sam_entry[set_idx].split("|")[-1]])
                    species_id = sam_records[set_idx].species_id
                    # Lineage comes back as a single ";"-joined string (or None)
                    result = DataStore.get_entry("taxonomy").exec_query("lineage-lookup", [species_id]).fetchone()
                    if result:
                        lineage.append([x.strip() for x in result[0].split(";")])
                    else:
                        lineage.append("Unknown")
            for set_idx in range(2):
                # Attempt to match on species
                if sam_records[set_idx].species_full == taxon_entry:
                    sam_species = sam_records[1 - set_idx].species_full
                    sam_amount = [1, -1][set_idx] * sam_entries[sam_entry]
                    if sam_species not in ret_entries[taxon_entry][0]:
                        ret_entries[taxon_entry][0][sam_species] = (0, 0)
                    sam_parts = ret_entries[taxon_entry][0][sam_species]
                    # Accumulate positive and negative contributions separately
                    if sam_amount > 0:
                        ret_entries[taxon_entry][0][sam_species] = (sam_parts[0] + sam_amount, sam_parts[1])
                    else:
                        ret_entries[taxon_entry][0][sam_species] = (sam_parts[0], sam_parts[1] + sam_amount)
                    break
                # Attempt to match on lineage
                for rank_idx in range(len(lineage[set_idx])):
                    if lineage[set_idx][rank_idx] == taxon_entry:
                        # Clamp to the other lineage's deepest rank when shorter
                        compare_idx = rank_idx
                        if compare_idx >= len(lineage[1 - set_idx]):
                            compare_idx = len(lineage[1 - set_idx]) - 1
                        sam_lineage = lineage[1 - set_idx][compare_idx]
                        sam_amount = [1, -1][set_idx] * sam_entries[sam_entry]
                        if sam_lineage not in ret_entries[taxon_entry][0]:
                            ret_entries[taxon_entry][0][sam_lineage] = (0, 0)
                        sam_parts = ret_entries[taxon_entry][0][sam_lineage]
                        if sam_amount > 0:
                            ret_entries[taxon_entry][0][sam_lineage] = (sam_parts[0] + sam_amount, sam_parts[1])
                        else:
                            ret_entries[taxon_entry][0][sam_lineage] = (sam_parts[0], sam_parts[1] + sam_amount)
                        break
    # Calculate percentages
    for taxonomy_entry in ret_entries:
        for sam_entry in ret_entries[taxonomy_entry][0]:
            sam_parts = ret_entries[taxonomy_entry][0][sam_entry]
            ret_entries[taxonomy_entry][0][sam_entry] = (sam_parts[0] / pass_total, sam_parts[1] / pass_total)
    return ret_entries
def align_stats(entries1, entries2):
    """Summarize alignment changes between two runs.

    Returns a 3-tuple (different, added, removed): reads mapped in both runs
    but to different references, reads newly mapped in the second run, and
    reads no longer mapped in the second run. Assumes both dicts share keys.
    """
    totals = [0, 0, 0]  # different, added, removed
    for key in entries1:
        unmapped1 = entries1[key].flag & 0x4
        unmapped2 = entries2[key].flag & 0x4
        if unmapped1 == unmapped2:
            # Same mapped/unmapped status: count reference changes
            if entries1[key].reference != entries2[key].reference:
                totals[0] += 1
        elif unmapped1 > 0:
            # Unmapped in run 1, mapped in run 2
            totals[1] += 1
        else:
            # Mapped in run 1, unmapped in run 2
            totals[2] += 1
    return tuple(totals)
def render_combined(data, simple):
    """ Render combined report

    data: taxon -> (contributor dict, overall signed fraction) as produced by
        combined_compare; rows are emitted in descending |difference| order.
    simple: when True, only the per-taxon difference column is printed.
    """
    # Render appropriate header
    if simple:
        header = "Taxon\tDifference"
    else:
        header = "Taxon\tDifference\tContributor\tContribution (Pos)\tContribution (Neg)"
    core.main.send_output(header)
    # Largest absolute taxonomy shifts first
    sorted_taxa = sorted(data.items(), key=lambda item: abs(item[1][1]), reverse=True)
    for taxon_entry in sorted_taxa:
        if simple:
            core.main.send_output("{0}\t{1}".format(taxon_entry[0], taxon_entry[1][1]))
        else:
            # Sort SAM data (largest combined contribution first)
            sorted_sam = sorted(taxon_entry[1][0].items(), key=lambda item: abs(item[1][0] + item[1][1]), reverse=True)
            for sam_entry in sorted_sam:
                core.main.send_output("{0}\t{1}\t{2}\t{3}\t{4}".format(taxon_entry[0], taxon_entry[1][1], sam_entry[0], sam_entry[1][0], sam_entry[1][1])) | plugins/difference.py | # Analyze difference between two PALADIN alignemnts
import argparse
import shlex
import core.main
from core.datastore import DataStore
def plugin_connect(definition):
    """Register the difference plugin's metadata and callbacks with the host."""
    definition.name = "difference"
    definition.description = "Analyze relative differences between two PALADIN taxonomy reports"
    definition.version_major, definition.version_minor, definition.version_revision = 1, 1, 0
    definition.dependencies = ["taxonomy"]
    definition.callback_args = difference_args
    # This plugin needs no init step (callback_init intentionally left unset)
    definition.callback_main = difference_main
def difference_args(subargs):
    """Parse the plugin's sub-argument string; return (known_args, unknown_args)."""
    arg_parser = argparse.ArgumentParser(description="PALADIN Pipeline Plugins: Difference", prog="difference")
    arg_parser.add_argument("-1", dest="basis_files", metavar=("Taxonomy", "Sam", "Uniprot"), type=str, nargs=3, required=True, help="Basis files of comparison (Taxonomy, Sam, UniProt)")
    # Fixed: help string was missing its closing parenthesis
    arg_parser.add_argument("-2", dest="compare_files", metavar=("Taxonomy", "Sam", "Uniprot"), type=str, nargs=3, required=True, help="Compared files of comparison (Taxonomy, Sam, UniProt)")
    arg_parser.add_argument("-c", dest="custom", type=str, default="", help="Species-parsing regex pattern for non-UniProt entries")
    arg_parser.add_argument("-s", dest="simple", action="store_true", help="Simple report, do not include mapped/unmapped read contributions (alters totals)")
    return arg_parser.parse_known_args(shlex.split(subargs))
def difference_main(args):
    """Plugin entry point: compare two PALADIN runs and render a difference report.

    args is the (parsed_args, unknown_args) tuple produced by difference_args.
    With -s (simple) only taxonomy differences are reported; otherwise SAM and
    UniProt data are folded in for per-read contribution detail.
    """
    # Obtain taxonomy differences
    core.main.send_output("Comparing taxonomy reports...", "stderr")
    taxonomy_data, taxonomy_total = taxonomy_compare((args[0].basis_files[0], args[0].compare_files[0]))
    # Obtain SAM differences with Uniprot info for full report.
    # Bug fix: sam_data was previously unbound when -s was given, causing a
    # NameError at the combined_compare call below; default both containers.
    uniprot_data = list()
    sam_data = dict()
    if not args[0].simple:
        core.main.send_output("Gathering SAM data from first alignment", "stderr")
        sam_entries1 = core.main.SamEntry.get_entries(args[0].basis_files[1], -1)
        core.main.send_output("Gathering SAM data from second alignment", "stderr")
        sam_entries2 = core.main.SamEntry.get_entries(args[0].compare_files[1], -1)
        core.main.send_output("Comparing SAM data...", "stderr")
        sam_data = sam_compare(sam_entries1, sam_entries2)
        # Get Uniprot entries
        core.main.send_output("Gathering UniProt data from first alignment", "stderr")
        uniprot_data.append(core.main.PaladinEntry.get_entries(args[0].basis_files[2], 0, args[0].custom))
        core.main.send_output("Gathering UniProt data from second alignment", "stderr")
        uniprot_data.append(core.main.PaladinEntry.get_entries(args[0].compare_files[2], 0, args[0].custom))
    core.main.send_output("Comparing alignments...", "stderr")
    combined_data = combined_compare(taxonomy_data, sam_data, uniprot_data, taxonomy_total)
    render_combined(combined_data, args[0].simple)
def taxonomy_compare(reports):
    """Compare two taxonomy report files.

    Returns (difference, total): difference maps each taxon to
    (count in second report) - (count in first report), with a taxon absent
    from one report contributing 0 on that side; total is the sum of counts
    in the second report.
    """
    counts = (dict(), dict())
    second_total = 0
    # Load both reports into memory, skipping each file's single header line
    for report_idx, path in enumerate(reports):
        with open(path, "r") as report_file:
            report_file.readline()
            for row in report_file:
                columns = row.rstrip().split("\t")
                counts[report_idx][columns[2]] = int(columns[0])
                if report_idx == 1:
                    second_total += int(columns[0])
    # Signed per-taxon difference over the union of both reports' taxa
    all_taxa = set(counts[0]) | set(counts[1])
    differences = {taxon: counts[1].get(taxon, 0) - counts[0].get(taxon, 0) for taxon in all_taxa}
    return differences, second_total
def sam_compare(entries1, entries2):
    """Compare two sets of SAM entries keyed by (read ID, occurrence index).

    Returns a dict mapping (reference1, reference2) pairs to the number of
    read occurrences whose references differ between the two alignments.
    An occurrence missing from one alignment is treated as unmapped ("*").
    """
    ret_diff = dict()
    # Use set 1 as the base reads. Bug fix: iterating every (read, index) key
    # re-walked the full supplementary chain once per key, double counting
    # reads that have supplementary hits; walk each distinct read once.
    for base_read in {entry[0] for entry in entries1}:
        # Iterate through possible supplementary hits until neither available
        read_idx = (base_read, 0)
        while read_idx in entries1 or read_idx in entries2:
            if read_idx not in entries1:
                ref1 = "*"
                ref2 = entries2[read_idx].reference
            elif read_idx not in entries2:
                ref1 = entries1[read_idx].reference
                ref2 = "*"
            else:
                ref1 = entries1[read_idx].reference
                ref2 = entries2[read_idx].reference
            # Note references that do not match
            if ref1 != ref2:
                diff_pair = (ref1, ref2)
                ret_diff[diff_pair] = ret_diff.get(diff_pair, 0) + 1
            read_idx = (read_idx[0], read_idx[1] + 1)
    return ret_diff
def combined_compare(taxonomy_entries, sam_entries, uniprot_entries, pass_total):
    """ Combined compare breaks down taxonomy report on per read basis, then aggregates

    taxonomy_entries: taxon name -> signed read-count difference (mutated:
        an "Unmapped" pseudo-taxon is added below).
    sam_entries: (ref1, ref2) -> count of reads whose references differ,
        as produced by sam_compare.
    uniprot_entries: two dicts (one per alignment) keyed by the accession
        taken from the last "|"-delimited field of a SAM reference.
    pass_total: normalization denominator (second report's read total).
    Returns taxon -> (contributor -> (pos_frac, neg_frac), overall_frac).
    """
    ret_entries = dict()
    # Calculate unmapped difference and add to taxonomy set
    unmapped_diff = 0
    for sam_entry in sam_entries:
        for set_idx in range(2):
            if sam_entry[set_idx] == "*":
                if set_idx == 0:
                    # Reads unmapped in the basis run still count toward the total
                    pass_total += 1
                unmapped_diff += [1, -1][set_idx]
    # Add unmapped entry to taxonomy list
    taxonomy_entries["Unmapped"] = unmapped_diff
    # Iterate through each taxonomy grouping
    for taxon_entry in taxonomy_entries:
        # Skip unchanged entries
        if taxonomy_entries[taxon_entry] == 0:
            continue
        # Prepare entry if new
        if taxon_entry not in ret_entries:
            ret_entries[taxon_entry] = (dict(), taxonomy_entries[taxon_entry] / pass_total)
        # Scan SAM entries for matching lineage
        for sam_entry in sam_entries:
            # Lookup species and lineage
            sam_records = list()
            lineage = list()
            for set_idx in range(2):
                if sam_entry[set_idx] == "*":
                    # Unmapped side: synthesize a placeholder record object
                    miss_record = type("miss_record", (object,), {})()
                    miss_record.species_id = "Unmapped"
                    miss_record.species_full = "Unmapped"
                    sam_records.append(miss_record)
                    lineage.append("Unmapped")
                else:
                    sam_records.append(uniprot_entries[set_idx][sam_entry[set_idx].split("|")[-1]])
                    species_id = sam_records[set_idx].species_id
                    # Lineage comes back as a single ";"-joined string (or None)
                    result = DataStore.get_entry("taxonomy").exec_query("lineage-lookup", [species_id]).fetchone()
                    if result:
                        lineage.append([x.strip() for x in result[0].split(";")])
                    else:
                        lineage.append("Unknown")
            for set_idx in range(2):
                # Attempt to match on species
                if sam_records[set_idx].species_full == taxon_entry:
                    sam_species = sam_records[1 - set_idx].species_full
                    sam_amount = [1, -1][set_idx] * sam_entries[sam_entry]
                    if sam_species not in ret_entries[taxon_entry][0]:
                        ret_entries[taxon_entry][0][sam_species] = (0, 0)
                    sam_parts = ret_entries[taxon_entry][0][sam_species]
                    # Accumulate positive and negative contributions separately
                    if sam_amount > 0:
                        ret_entries[taxon_entry][0][sam_species] = (sam_parts[0] + sam_amount, sam_parts[1])
                    else:
                        ret_entries[taxon_entry][0][sam_species] = (sam_parts[0], sam_parts[1] + sam_amount)
                    break
                # Attempt to match on lineage
                for rank_idx in range(len(lineage[set_idx])):
                    if lineage[set_idx][rank_idx] == taxon_entry:
                        # Clamp to the other lineage's deepest rank when shorter
                        compare_idx = rank_idx
                        if compare_idx >= len(lineage[1 - set_idx]):
                            compare_idx = len(lineage[1 - set_idx]) - 1
                        sam_lineage = lineage[1 - set_idx][compare_idx]
                        sam_amount = [1, -1][set_idx] * sam_entries[sam_entry]
                        if sam_lineage not in ret_entries[taxon_entry][0]:
                            ret_entries[taxon_entry][0][sam_lineage] = (0, 0)
                        sam_parts = ret_entries[taxon_entry][0][sam_lineage]
                        if sam_amount > 0:
                            ret_entries[taxon_entry][0][sam_lineage] = (sam_parts[0] + sam_amount, sam_parts[1])
                        else:
                            ret_entries[taxon_entry][0][sam_lineage] = (sam_parts[0], sam_parts[1] + sam_amount)
                        break
    # Calculate percentages
    for taxonomy_entry in ret_entries:
        for sam_entry in ret_entries[taxonomy_entry][0]:
            sam_parts = ret_entries[taxonomy_entry][0][sam_entry]
            ret_entries[taxonomy_entry][0][sam_entry] = (sam_parts[0] / pass_total, sam_parts[1] / pass_total)
    return ret_entries
def align_stats(entries1, entries2):
    """Summarize alignment changes between two runs.

    Returns a 3-tuple (different, added, removed): reads mapped in both runs
    but to different references, reads newly mapped in the second run, and
    reads no longer mapped in the second run. Assumes both dicts share keys.
    """
    totals = [0, 0, 0]  # different, added, removed
    for key in entries1:
        unmapped1 = entries1[key].flag & 0x4
        unmapped2 = entries2[key].flag & 0x4
        if unmapped1 == unmapped2:
            # Same mapped/unmapped status: count reference changes
            if entries1[key].reference != entries2[key].reference:
                totals[0] += 1
        elif unmapped1 > 0:
            # Unmapped in run 1, mapped in run 2
            totals[1] += 1
        else:
            # Mapped in run 1, unmapped in run 2
            totals[2] += 1
    return tuple(totals)
def render_combined(data, simple):
    """ Render combined report

    data: taxon -> (contributor dict, overall signed fraction) as produced by
        combined_compare; rows are emitted in descending |difference| order.
    simple: when True, only the per-taxon difference column is printed.
    """
    # Render appropriate header
    if simple:
        header = "Taxon\tDifference"
    else:
        header = "Taxon\tDifference\tContributor\tContribution (Pos)\tContribution (Neg)"
    core.main.send_output(header)
    # Largest absolute taxonomy shifts first
    sorted_taxa = sorted(data.items(), key=lambda item: abs(item[1][1]), reverse=True)
    for taxon_entry in sorted_taxa:
        if simple:
            core.main.send_output("{0}\t{1}".format(taxon_entry[0], taxon_entry[1][1]))
        else:
            # Sort SAM data (largest combined contribution first)
            sorted_sam = sorted(taxon_entry[1][0].items(), key=lambda item: abs(item[1][0] + item[1][1]), reverse=True)
            for sam_entry in sorted_sam:
                core.main.send_output("{0}\t{1}\t{2}\t{3}\t{4}".format(taxon_entry[0], taxon_entry[1][1], sam_entry[0], sam_entry[1][0], sam_entry[1][1])) | 0.518059 | 0.321141
# NOTE(review): auto-generated IronPython stub describing the .NET type
# System.Windows.Forms.ListViewGroupCollection; every method body is a placeholder.
class ListViewGroupCollection(object, IList, ICollection, IEnumerable):
    """ Represents the collection of groups within a System.Windows.Forms.ListView control. """
    def Add(self, *__args):
        """
        Add(self: ListViewGroupCollection,key: str,headerText: str) -> ListViewGroup
        Adds a new System.Windows.Forms.ListViewGroup to the collection using the specified values to
        initialize the System.Windows.Forms.ListViewGroup.Name and
        System.Windows.Forms.ListViewGroup.Header properties
        key: The initial value of the System.Windows.Forms.ListViewGroup.Name property for the new group.
        headerText: The initial value of the System.Windows.Forms.ListViewGroup.Header property for the new group.
        Returns: The new System.Windows.Forms.ListViewGroup.
        Add(self: ListViewGroupCollection,group: ListViewGroup) -> int
        Adds the specified System.Windows.Forms.ListViewGroup to the collection.
        group: The System.Windows.Forms.ListViewGroup to add to the collection.
        Returns: The index of the group within the collection,or -1 if the group is already present in the
        collection.
        """
        pass
    def AddRange(self, groups):
        """
        AddRange(self: ListViewGroupCollection,groups: ListViewGroupCollection)
        Adds the groups in an existing System.Windows.Forms.ListViewGroupCollection to the collection.
        groups: A System.Windows.Forms.ListViewGroupCollection containing the groups to add to the collection.
        AddRange(self: ListViewGroupCollection,groups: Array[ListViewGroup])
        Adds an array of groups to the collection.
        groups: An array of type System.Windows.Forms.ListViewGroup that specifies the groups to add to the
        collection.
        """
        pass
    def Clear(self):
        """
        Clear(self: ListViewGroupCollection)
        Removes all groups from the collection.
        """
        pass
    def Contains(self, value):
        """
        Contains(self: ListViewGroupCollection,value: ListViewGroup) -> bool
        Determines whether the specified group is located in the collection.
        value: The System.Windows.Forms.ListViewGroup to locate in the collection.
        Returns: true if the group is in the collection; otherwise,false.
        """
        pass
    def CopyTo(self, array, index):
        """
        CopyTo(self: ListViewGroupCollection,array: Array,index: int)
        Copies the groups in the collection to a compatible one-dimensional System.Array,starting at
        the specified index of the target array.
        array: The System.Array to which the groups are copied.
        index: The first index within the array to which the groups are copied.
        """
        pass
    def GetEnumerator(self):
        """
        GetEnumerator(self: ListViewGroupCollection) -> IEnumerator
        Returns an enumerator used to iterate through the collection.
        Returns: An System.Collections.IEnumerator that represents the collection.
        """
        pass
    def IndexOf(self, value):
        """
        IndexOf(self: ListViewGroupCollection,value: ListViewGroup) -> int
        Returns the index of the specified System.Windows.Forms.ListViewGroup within the collection.
        value: The System.Windows.Forms.ListViewGroup to locate in the collection.
        Returns: The zero-based index of the group within the collection,or -1 if the group is not in the
        collection.
        """
        pass
    def Insert(self, index, group):
        """
        Insert(self: ListViewGroupCollection,index: int,group: ListViewGroup)
        Inserts the specified System.Windows.Forms.ListViewGroup into the collection at the specified
        index.
        index: The index within the collection at which to insert the group.
        group: The System.Windows.Forms.ListViewGroup to insert into the collection.
        """
        pass
    def Remove(self, group):
        """
        Remove(self: ListViewGroupCollection,group: ListViewGroup)
        Removes the specified System.Windows.Forms.ListViewGroup from the collection.
        group: The System.Windows.Forms.ListViewGroup to remove from the collection.
        """
        pass
    def RemoveAt(self, index):
        """
        RemoveAt(self: ListViewGroupCollection,index: int)
        Removes the System.Windows.Forms.ListViewGroup at the specified index within the collection.
        index: The index within the collection of the System.Windows.Forms.ListViewGroup to remove.
        """
        pass
    def __add__(self, *args):
        """ x.__add__(y) <==> x+yx.__add__(y) <==> x+y """
        pass
    def __contains__(self, *args):
        """
        __contains__(self: IList,value: object) -> bool
        Determines whether the System.Collections.IList contains a specific value.
        value: The object to locate in the System.Collections.IList.
        Returns: true if the System.Object is found in the System.Collections.IList; otherwise,false.
        """
        pass
    def __getitem__(self, *args):
        """ x.__getitem__(y) <==> x[y]x.__getitem__(y) <==> x[y] """
        pass
    def __init__(self, *args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __iter__(self, *args):
        """ __iter__(self: IEnumerable) -> object """
        pass
    def __len__(self, *args):
        """ x.__len__() <==> len(x) """
        pass
    def __repr__(self, *args):
        """ __repr__(self: object) -> str """
        pass
    def __setitem__(self, *args):
        """ x.__setitem__(i,y) <==> x[i]=x.__setitem__(i,y) <==> x[i]= """
        pass
    # Read-only stub property; the generated setter/deleter are inert placeholders.
    Count = property(lambda self: object(), lambda self, v: None, lambda self: None)
    """Gets the number of groups in the collection.
    Get: Count(self: ListViewGroupCollection) -> int
    """ | release/stubs.min/System/Windows/Forms/__init___parts/ListViewGroupCollection.py | class ListViewGroupCollection(object, IList, ICollection, IEnumerable):
""" Represents the collection of groups within a System.Windows.Forms.ListView control. """
def Add(self, *__args):
"""
Add(self: ListViewGroupCollection,key: str,headerText: str) -> ListViewGroup
Adds a new System.Windows.Forms.ListViewGroup to the collection using the specified values to
initialize the System.Windows.Forms.ListViewGroup.Name and
System.Windows.Forms.ListViewGroup.Header properties
key: The initial value of the System.Windows.Forms.ListViewGroup.Name property for the new group.
headerText: The initial value of the System.Windows.Forms.ListViewGroup.Header property for the new group.
Returns: The new System.Windows.Forms.ListViewGroup.
Add(self: ListViewGroupCollection,group: ListViewGroup) -> int
Adds the specified System.Windows.Forms.ListViewGroup to the collection.
group: The System.Windows.Forms.ListViewGroup to add to the collection.
Returns: The index of the group within the collection,or -1 if the group is already present in the
collection.
"""
pass
def AddRange(self, groups):
"""
AddRange(self: ListViewGroupCollection,groups: ListViewGroupCollection)
Adds the groups in an existing System.Windows.Forms.ListViewGroupCollection to the collection.
groups: A System.Windows.Forms.ListViewGroupCollection containing the groups to add to the collection.
AddRange(self: ListViewGroupCollection,groups: Array[ListViewGroup])
Adds an array of groups to the collection.
groups: An array of type System.Windows.Forms.ListViewGroup that specifies the groups to add to the
collection.
"""
pass
def Clear(self):
"""
Clear(self: ListViewGroupCollection)
Removes all groups from the collection.
"""
pass
def Contains(self, value):
"""
Contains(self: ListViewGroupCollection,value: ListViewGroup) -> bool
Determines whether the specified group is located in the collection.
value: The System.Windows.Forms.ListViewGroup to locate in the collection.
Returns: true if the group is in the collection; otherwise,false.
"""
pass
def CopyTo(self, array, index):
"""
CopyTo(self: ListViewGroupCollection,array: Array,index: int)
Copies the groups in the collection to a compatible one-dimensional System.Array,starting at
the specified index of the target array.
array: The System.Array to which the groups are copied.
index: The first index within the array to which the groups are copied.
"""
pass
def GetEnumerator(self):
"""
GetEnumerator(self: ListViewGroupCollection) -> IEnumerator
Returns an enumerator used to iterate through the collection.
Returns: An System.Collections.IEnumerator that represents the collection.
"""
pass
def IndexOf(self, value):
"""
IndexOf(self: ListViewGroupCollection,value: ListViewGroup) -> int
Returns the index of the specified System.Windows.Forms.ListViewGroup within the collection.
value: The System.Windows.Forms.ListViewGroup to locate in the collection.
Returns: The zero-based index of the group within the collection,or -1 if the group is not in the
collection.
"""
pass
def Insert(self, index, group):
"""
Insert(self: ListViewGroupCollection,index: int,group: ListViewGroup)
Inserts the specified System.Windows.Forms.ListViewGroup into the collection at the specified
index.
index: The index within the collection at which to insert the group.
group: The System.Windows.Forms.ListViewGroup to insert into the collection.
"""
pass
def Remove(self, group):
"""
Remove(self: ListViewGroupCollection,group: ListViewGroup)
Removes the specified System.Windows.Forms.ListViewGroup from the collection.
group: The System.Windows.Forms.ListViewGroup to remove from the collection.
"""
pass
def RemoveAt(self, index):
"""
RemoveAt(self: ListViewGroupCollection,index: int)
Removes the System.Windows.Forms.ListViewGroup at the specified index within the collection.
index: The index within the collection of the System.Windows.Forms.ListViewGroup to remove.
"""
pass
def __add__(self, *args):
""" x.__add__(y) <==> x+yx.__add__(y) <==> x+y """
pass
def __contains__(self, *args):
"""
__contains__(self: IList,value: object) -> bool
Determines whether the System.Collections.IList contains a specific value.
value: The object to locate in the System.Collections.IList.
Returns: true if the System.Object is found in the System.Collections.IList; otherwise,false.
"""
pass
def __getitem__(self, *args):
""" x.__getitem__(y) <==> x[y]x.__getitem__(y) <==> x[y] """
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self, *args):
""" __iter__(self: IEnumerable) -> object """
pass
def __len__(self, *args):
""" x.__len__() <==> len(x) """
pass
def __repr__(self, *args):
""" __repr__(self: object) -> str """
pass
def __setitem__(self, *args):
""" x.__setitem__(i,y) <==> x[i]=x.__setitem__(i,y) <==> x[i]= """
pass
Count = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the number of groups in the collection.
Get: Count(self: ListViewGroupCollection) -> int
""" | 0.776284 | 0.313492 |
from pyspedas.mms.mms_load_data import mms_load_data
from pyspedas.mms.print_vars import print_vars
@print_vars
def mms_load_aspoc(trange=['2015-10-16', '2015-10-17'], probe='1', data_rate='srvy',
level='l2', datatype='', varformat=None, varnames=[], get_support_data=False, suffix='', time_clip=False, no_update=False,
available=False, notplot=False, latest_version=False, major_version=False, min_version=None, cdf_version=None,
spdf=False, always_prompt=False):
"""
This function loads ASPOC data into tplot variables
Parameters:
trange : list of str
time range of interest [starttime, endtime] with the format
'YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']
probe : str or list of str
list of probes, valid values for MMS probes are ['1','2','3','4'].
data_rate : str or list of str
instrument data rates for ASPOC include 'srvy', 'sitl'. The
default is 'srvy'.
level : str
indicates level of data processing. the default if no level is specified is 'l2'
datatype : str or list of str
Valid datatypes for ASPOC are: ['asp1', 'asp2', 'aspoc']; default is 'aspoc'
get_support_data: bool
Data with an attribute "VAR_TYPE" with a value of "support_data"
will be loaded into tplot. By default, only loads in data with a
"VAR_TYPE" attribute of "data".
time_clip: bool
Data will be clipped to the exact trange specified by the trange keyword.
varformat: str
The file variable formats to load into tplot. Wildcard character
"*" is accepted. By default, all variables are loaded in.
varnames: list of str
List of variable names to load (if not specified,
all data variables are loaded)
suffix: str
The tplot variable names will be given this suffix. By default,
no suffix is added.
notplot: bool
If True, then data are returned in a hash table instead of
being stored in tplot variables (useful for debugging, and
access to multi-dimensional data products)
available: bool
If True, simply return the available data files (without downloading)
for the requested paramters
no_update: bool
Set this flag to preserve the original data. if not set and newer
data is found the existing data will be overwritten
cdf_version: str
Specify a specific CDF version # to load (e.g., cdf_version='4.3.0')
min_version: str
Specify a minimum CDF version # to load
latest_version: bool
Only grab the latest CDF version in the requested time interval
major_version: bool
Only open the latest major CDF version (e.g., X in vX.Y.Z) in the requested time interval
always_prompt: bool
Set this keyword to always prompt for the user's username and password;
useful if you accidently save an incorrect password, or if your SDC password has changed
spdf: bool
If True, download the data from the SPDF instead of the SDC
Returns:
List of tplot variables created.
"""
if suffix == '':
suffix = '_' + level
else:
suffix = '_' + level + suffix
tvars = mms_load_data(trange=trange, notplot=notplot, probe=probe, data_rate=data_rate, level=level, instrument='aspoc',
datatype=datatype, varformat=varformat, varnames=varnames, get_support_data=get_support_data, suffix=suffix,
time_clip=time_clip, no_update=no_update, available=available, latest_version=latest_version,
major_version=major_version, min_version=min_version, cdf_version=cdf_version, spdf=spdf,
always_prompt=always_prompt)
return tvars | pyspedas/mms/aspoc/aspoc.py | from pyspedas.mms.mms_load_data import mms_load_data
from pyspedas.mms.print_vars import print_vars
@print_vars
def mms_load_aspoc(trange=['2015-10-16', '2015-10-17'], probe='1', data_rate='srvy',
    level='l2', datatype='', varformat=None, varnames=None, get_support_data=False, suffix='', time_clip=False, no_update=False,
    available=False, notplot=False, latest_version=False, major_version=False, min_version=None, cdf_version=None,
    spdf=False, always_prompt=False):
    """
    This function loads ASPOC data into tplot variables

    Parameters:
        trange : list of str
            time range of interest [starttime, endtime] with the format
            ['YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
            ['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']
        probe : str or list of str
            list of probes, valid values for MMS probes are ['1','2','3','4'].
        data_rate : str or list of str
            instrument data rates for ASPOC include 'srvy', 'sitl'. The
            default is 'srvy'.
        level : str
            indicates level of data processing. the default if no level is specified is 'l2'
        datatype : str or list of str
            Valid datatypes for ASPOC are: ['asp1', 'asp2', 'aspoc']; default is 'aspoc'
        get_support_data: bool
            Data with an attribute "VAR_TYPE" with a value of "support_data"
            will be loaded into tplot. By default, only loads in data with a
            "VAR_TYPE" attribute of "data".
        time_clip: bool
            Data will be clipped to the exact trange specified by the trange keyword.
        varformat: str
            The file variable formats to load into tplot. Wildcard character
            "*" is accepted. By default, all variables are loaded in.
        varnames: list of str
            List of variable names to load (if not specified,
            all data variables are loaded)
        suffix: str
            The tplot variable names will be given this suffix. By default,
            no suffix is added.
        notplot: bool
            If True, then data are returned in a hash table instead of
            being stored in tplot variables (useful for debugging, and
            access to multi-dimensional data products)
        available: bool
            If True, simply return the available data files (without downloading)
            for the requested parameters
        no_update: bool
            Set this flag to preserve the original data. if not set and newer
            data is found the existing data will be overwritten
        cdf_version: str
            Specify a specific CDF version # to load (e.g., cdf_version='4.3.0')
        min_version: str
            Specify a minimum CDF version # to load
        latest_version: bool
            Only grab the latest CDF version in the requested time interval
        major_version: bool
            Only open the latest major CDF version (e.g., X in vX.Y.Z) in the requested time interval
        always_prompt: bool
            Set this keyword to always prompt for the user's username and password;
            useful if you accidentally save an incorrect password, or if your SDC password has changed
        spdf: bool
            If True, download the data from the SPDF instead of the SDC
    Returns:
        List of tplot variables created.
    """
    # Bug fix: varnames previously defaulted to a shared mutable list ([]);
    # default to None and materialize a fresh empty list per call instead.
    if varnames is None:
        varnames = []
    # Tag every loaded variable with the processing level (plus any user suffix)
    if suffix == '':
        suffix = '_' + level
    else:
        suffix = '_' + level + suffix
    tvars = mms_load_data(trange=trange, notplot=notplot, probe=probe, data_rate=data_rate, level=level, instrument='aspoc',
        datatype=datatype, varformat=varformat, varnames=varnames, get_support_data=get_support_data, suffix=suffix,
        time_clip=time_clip, no_update=no_update, available=available, latest_version=latest_version,
        major_version=major_version, min_version=min_version, cdf_version=cdf_version, spdf=spdf,
        always_prompt=always_prompt)
    return tvars | 0.634996 | 0.361841
from datetime import datetime, timedelta
from typing import Optional
from fastapi import Depends, FastAPI, HTTPException, status
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
from jose import JWTError, jwt
from passlib.context import CryptContext
from .models import UserModel
from .schemas import TokenPayload, User
from .settings import JWT_ALGORITHM, JWT_EXPIRE_HOURS, JWT_SECRET_KEY
# Password hashing context: bcrypt, with automatic handling of
# deprecated hash formats on verify.
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
# OAuth2 "password" flow; clients obtain bearer tokens from the /token endpoint.
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
# Shared 401 response raised whenever a bearer token cannot be validated.
credentials_exception = HTTPException(
    status_code=status.HTTP_401_UNAUTHORIZED,
    detail="Could not validate credentials",
    headers={"WWW-Authenticate": "Bearer"},
)
def verify_password(plain_password: str, hashed_password: str) -> bool:
    """Return True if *plain_password* matches the bcrypt *hashed_password*."""
    return pwd_context.verify(plain_password, hashed_password)
def get_password_hash(password: str) -> str:
    """Hash *password* with the configured bcrypt context."""
    return pwd_context.hash(password)
async def get_user_by_name(username: str):
    """Fetch the user record with the given *username* from the database."""
    return await UserModel.get(username=username)
async def authenticate_user(username: str, password: str):
    """Check a username/password pair.

    Returns the user record on success, or False when the user is
    unknown or the password does not match.
    """
    user = await get_user_by_name(username)
    # Short-circuit keeps the (slow) bcrypt check from running for
    # unknown users, exactly as the separate guards did before.
    if user and verify_password(password, user.hashed_password):
        return user
    return False
def create_access_token(data: dict, expires_delta: Optional[timedelta] = None):
    """Create a signed JWT carrying *data* plus an expiry ("exp") claim.

    Args:
        data: Claims to embed in the token (e.g. {"sub": username}).
        expires_delta: Optional custom lifetime.  When omitted, the
            JWT_EXPIRE_HOURS setting is used.

    Returns:
        The encoded JWT as a string.
    """
    to_encode = data.copy()
    if expires_delta is None:
        # Use the configured lifetime; JWT_EXPIRE_HOURS was imported but
        # previously unused while the fallback was a hard-coded 720 hours.
        expires_delta = timedelta(hours=JWT_EXPIRE_HOURS)
    to_encode.update({"exp": datetime.utcnow() + expires_delta})
    return jwt.encode(to_encode, JWT_SECRET_KEY, algorithm=JWT_ALGORITHM)
async def get_current_user(token: str = Depends(oauth2_scheme)):
    """FastAPI dependency: resolve the bearer *token* to a user record.

    Raises the shared 401 ``credentials_exception`` when the token cannot
    be decoded, lacks a "sub" claim, or names an unknown user.
    """
    try:
        payload = jwt.decode(token, JWT_SECRET_KEY, algorithms=[JWT_ALGORITHM])
        # The subject claim carries the username.
        username: str = payload.get("sub")
        if username is None:
            raise credentials_exception
        token_payload = TokenPayload(username=username)
    except JWTError:
        raise credentials_exception
    user = await get_user_by_name(username=token_payload.username)
    if user is None:
        raise credentials_exception
    return user
async def get_current_active_user(current_user: User = Depends(get_current_user)):
    """FastAPI dependency: return the authenticated user, rejecting inactive accounts."""
    if current_user.is_active:
        return current_user
    raise HTTPException(status_code=400, detail="Inactive user")
from typing import Optional
from fastapi import Depends, FastAPI, HTTPException, status
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
from jose import JWTError, jwt
from passlib.context import CryptContext
from .models import UserModel
from .schemas import TokenPayload, User
from .settings import JWT_ALGORITHM, JWT_EXPIRE_HOURS, JWT_SECRET_KEY
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
def verify_password(plain_password, hashed_password):
return pwd_context.verify(plain_password, hashed_password)
def get_password_hash(password):
return pwd_context.hash(password)
async def get_user_by_name(username: str):
return await UserModel.get(username=username)
async def authenticate_user(username: str, password: str):
user = await get_user_by_name(username)
if not user:
return False
if not verify_password(password, user.hashed_password):
return False
return user
def create_access_token(data: dict, expires_delta: Optional[timedelta] = None):
to_encode = data.copy()
if expires_delta:
expire = datetime.utcnow() + expires_delta
else:
expire = datetime.utcnow() + timedelta(hours=720)
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, JWT_SECRET_KEY, algorithm=JWT_ALGORITHM)
return encoded_jwt
async def get_current_user(token: str = Depends(oauth2_scheme)):
try:
payload = jwt.decode(token, JWT_SECRET_KEY, algorithms=[JWT_ALGORITHM])
username: str = payload.get("sub")
if username is None:
raise credentials_exception
token_payload = TokenPayload(username=username)
except JWTError:
raise credentials_exception
user = await get_user_by_name(username=token_payload.username)
if user is None:
raise credentials_exception
return user
async def get_current_active_user(current_user: User = Depends(get_current_user)):
if not current_user.is_active:
raise HTTPException(status_code=400, detail="Inactive user")
return current_user | 0.723114 | 0.080864 |
"""API handler for rendering descriptors of GRR data structures."""
from grr.lib import rdfvalue
from grr.lib.rdfvalues import structs as rdf_structs
from grr_response_proto.api import reflection_pb2
from grr.server.grr_response_server import aff4
from grr.server.grr_response_server.gui import api_call_handler_base
from grr.server.grr_response_server.gui import api_value_renderers
def _GetAllTypes():
  """Returns a name -> class map of every renderable type.

  NOTE: this file is Python 2 code (it relies on the ``long`` and
  ``unicode`` builtins).
  """
  # We have to provide info for python primitive types as well, as sometimes
  # they may be used within FlowState objects.
  all_types = rdfvalue.RDFValue.classes.copy()
  # We shouldn't render base RDFValue class.
  all_types.pop("RDFValue", None)
  for cls in [bool, int, float, long, str, unicode, list, tuple]:
    all_types[cls.__name__] = cls
  return all_types
class ApiGetRDFValueDescriptorArgs(rdf_structs.RDFProtoStruct):
  """Arguments for ApiGetRDFValueDescriptorHandler."""
  protobuf = reflection_pb2.ApiGetRDFValueDescriptorArgs
class ApiGetRDFValueDescriptorHandler(api_call_handler_base.ApiCallHandler):
  """Renders descriptor of a given RDFValue type."""

  args_type = ApiGetRDFValueDescriptorArgs
  result_type = api_value_renderers.ApiRDFValueDescriptor

  def Handle(self, args, token=None):
    """Looks up the requested type by name and builds its descriptor."""
    del token  # Unused.
    requested_class = _GetAllTypes()[args.type]
    return api_value_renderers.BuildTypeDescriptor(requested_class)
class ApiListRDFValueDescriptorsResult(rdf_structs.RDFProtoStruct):
  """Result holding descriptors of all renderable RDFValue types."""
  protobuf = reflection_pb2.ApiListRDFValueDescriptorsResult
  rdf_deps = [
      api_value_renderers.ApiRDFValueDescriptor,
  ]
class ApiListRDFValuesDescriptorsHandler(ApiGetRDFValueDescriptorHandler):
  """Renders descriptors of all available RDFValues."""

  args_type = None
  result_type = ApiListRDFValueDescriptorsResult

  def Handle(self, unused_args, token=None):
    """Builds descriptors for every known type, sorted by type name."""
    result = ApiListRDFValueDescriptorsResult()
    all_types = _GetAllTypes()
    for type_name in sorted(all_types):
      result.items.append(
          api_value_renderers.BuildTypeDescriptor(all_types[type_name]))
    return result
class ApiAff4AttributeDescriptor(rdf_structs.RDFProtoStruct):
  """Descriptor of a single AFF4 attribute."""
  protobuf = reflection_pb2.ApiAff4AttributeDescriptor
class ApiListAff4AttributeDescriptorsResult(rdf_structs.RDFProtoStruct):
  """Result holding descriptors of all registered AFF4 attributes."""
  protobuf = reflection_pb2.ApiListAff4AttributeDescriptorsResult
  rdf_deps = [
      ApiAff4AttributeDescriptor,
  ]
class ApiListAff4AttributeDescriptorsHandler(
    api_call_handler_base.ApiCallHandler):
  """Renders available aff4 attributes."""

  result_type = ApiListAff4AttributeDescriptorsResult

  def Handle(self, unused_args, token=None):
    """Lists every registered AFF4 attribute name, sorted alphabetically."""
    del token  # Unused.
    result = ApiListAff4AttributeDescriptorsResult()
    for attribute_name in sorted(aff4.Attribute.NAMES):
      result.items.append(ApiAff4AttributeDescriptor(name=attribute_name))
    return result
class ApiMethod(rdf_structs.RDFProtoStruct):
  """Description of a single annotated API router method."""
  protobuf = reflection_pb2.ApiMethod
  rdf_deps = [
      api_value_renderers.ApiRDFValueDescriptor,
  ]
class ApiListApiMethodsResult(rdf_structs.RDFProtoStruct):
  """Result holding descriptions of all API router methods."""
  protobuf = reflection_pb2.ApiListApiMethodsResult
  rdf_deps = [
      ApiMethod,
  ]
class ApiListApiMethodsHandler(api_call_handler_base.ApiCallHandler):
  """Renders HTTP API docs sources."""

  TYPE_URL_PATTERN = "type.googleapis.com/%s"

  result_type = ApiListApiMethodsResult

  def __init__(self, router):
    """Initializes the handler.

    Args:
      router: API router whose annotated methods will be listed.
    """
    # Call the base-class initializer (it was previously skipped).
    super(ApiListApiMethodsHandler, self).__init__()
    self.router = router

  def Handle(self, unused_args, token=None):
    """Builds an ApiListApiMethodsResult describing every router method."""
    router_methods = self.router.__class__.GetAnnotatedMethods()

    result = ApiListApiMethodsResult()
    for router_method in router_methods.values():
      api_method = ApiMethod(
          name=router_method.name,
          category=router_method.category,
          doc=router_method.doc,
          # Only the last registered (method, route) pair is reported.
          http_route=router_method.http_methods[-1][1],
          http_methods=[router_method.http_methods[-1][0]])

      if router_method.args_type:
        api_method.args_type_descriptor = (
            api_value_renderers.BuildTypeDescriptor(router_method.args_type))

      if router_method.result_type:
        if router_method.result_type == router_method.BINARY_STREAM_RESULT_TYPE:
          # Binary streams have no structured result type to describe.
          api_method.result_kind = api_method.ResultKind.BINARY_STREAM
        else:
          api_method.result_kind = api_method.ResultKind.VALUE
          api_method.result_type_descriptor = (
              api_value_renderers.BuildTypeDescriptor(router_method.result_type)
          )
      else:
        api_method.result_kind = api_method.ResultKind.NONE

      result.items.append(api_method)

    return result
from grr.lib import rdfvalue
from grr.lib.rdfvalues import structs as rdf_structs
from grr_response_proto.api import reflection_pb2
from grr.server.grr_response_server import aff4
from grr.server.grr_response_server.gui import api_call_handler_base
from grr.server.grr_response_server.gui import api_value_renderers
def _GetAllTypes():
# We have to provide info for python primitive types as well, as sometimes
# they may be used within FlowState objects.
all_types = rdfvalue.RDFValue.classes.copy()
# We shouldn't render base RDFValue class.
all_types.pop("RDFValue", None)
for cls in [bool, int, float, long, str, unicode, list, tuple]:
all_types[cls.__name__] = cls
return all_types
class ApiGetRDFValueDescriptorArgs(rdf_structs.RDFProtoStruct):
protobuf = reflection_pb2.ApiGetRDFValueDescriptorArgs
class ApiGetRDFValueDescriptorHandler(api_call_handler_base.ApiCallHandler):
"""Renders descriptor of a given RDFValue type."""
args_type = ApiGetRDFValueDescriptorArgs
result_type = api_value_renderers.ApiRDFValueDescriptor
def Handle(self, args, token=None):
_ = token
rdfvalue_class = _GetAllTypes()[args.type]
return api_value_renderers.BuildTypeDescriptor(rdfvalue_class)
class ApiListRDFValueDescriptorsResult(rdf_structs.RDFProtoStruct):
protobuf = reflection_pb2.ApiListRDFValueDescriptorsResult
rdf_deps = [
api_value_renderers.ApiRDFValueDescriptor,
]
class ApiListRDFValuesDescriptorsHandler(ApiGetRDFValueDescriptorHandler):
"""Renders descriptors of all available RDFValues."""
args_type = None
result_type = ApiListRDFValueDescriptorsResult
def Handle(self, unused_args, token=None):
result = ApiListRDFValueDescriptorsResult()
all_types = _GetAllTypes()
for cls_name in sorted(all_types):
cls = all_types[cls_name]
result.items.append(api_value_renderers.BuildTypeDescriptor(cls))
return result
class ApiAff4AttributeDescriptor(rdf_structs.RDFProtoStruct):
protobuf = reflection_pb2.ApiAff4AttributeDescriptor
class ApiListAff4AttributeDescriptorsResult(rdf_structs.RDFProtoStruct):
protobuf = reflection_pb2.ApiListAff4AttributeDescriptorsResult
rdf_deps = [
ApiAff4AttributeDescriptor,
]
class ApiListAff4AttributeDescriptorsHandler(
api_call_handler_base.ApiCallHandler):
"""Renders available aff4 attributes."""
result_type = ApiListAff4AttributeDescriptorsResult
def Handle(self, unused_args, token=None):
_ = token
result = ApiListAff4AttributeDescriptorsResult()
for name in sorted(aff4.Attribute.NAMES.keys()):
result.items.append(ApiAff4AttributeDescriptor(name=name))
return result
class ApiMethod(rdf_structs.RDFProtoStruct):
protobuf = reflection_pb2.ApiMethod
rdf_deps = [
api_value_renderers.ApiRDFValueDescriptor,
]
class ApiListApiMethodsResult(rdf_structs.RDFProtoStruct):
protobuf = reflection_pb2.ApiListApiMethodsResult
rdf_deps = [
ApiMethod,
]
class ApiListApiMethodsHandler(api_call_handler_base.ApiCallHandler):
"""Renders HTTP API docs sources."""
TYPE_URL_PATTERN = "type.googleapis.com/%s"
result_type = ApiListApiMethodsResult
def __init__(self, router):
self.router = router
def Handle(self, unused_args, token=None):
router_methods = self.router.__class__.GetAnnotatedMethods()
result = ApiListApiMethodsResult()
for router_method in router_methods.values():
api_method = ApiMethod(
name=router_method.name,
category=router_method.category,
doc=router_method.doc,
http_route=router_method.http_methods[-1][1],
http_methods=[router_method.http_methods[-1][0]])
if router_method.args_type:
api_method.args_type_descriptor = (
api_value_renderers.BuildTypeDescriptor(router_method.args_type))
if router_method.result_type:
if router_method.result_type == router_method.BINARY_STREAM_RESULT_TYPE:
api_method.result_kind = api_method.ResultKind.BINARY_STREAM
else:
api_method.result_kind = api_method.ResultKind.VALUE
api_method.result_type_descriptor = (
api_value_renderers.BuildTypeDescriptor(router_method.result_type)
)
else:
api_method.result_kind = api_method.ResultKind.NONE
result.items.append(api_method)
return result | 0.855021 | 0.156201 |
import numpy as np
import tensorflow as tf
import os
import cv2
from scipy.misc import imresize
from PIL import Image, ImageOps
import random
import sys
from sklearn.utils import shuffle
def crop_to_square(image, upsampling):
    """
    Return a square version of *image*.

    Args:
        image: 2-D numpy array (grayscale) -- assumed 2-D, since the
            downsampling branch takes min(image.shape); TODO confirm
            callers never pass color arrays.
        upsampling: if True, pad the short side with black pixels so no
            data is lost; if False, crop the long side symmetrically.

    Returns:
        A square numpy array.

    Fixes over the previous version: the padding branch computed a PIL
    image but fell through to an (incorrect, negative-index) array slice,
    leaving the real padding code unreachable after the return; the crop
    branch could also return a non-square result when the size difference
    was odd.
    """
    if image.shape[0] == image.shape[1]:
        return image
    if upsampling:
        # PIL's crop() zero-fills regions outside the image, so cropping
        # with negative offsets pads the short side with black.
        img = Image.fromarray(image)
        target_side = max(img.size)
        horizontal_padding = (target_side - img.size[0]) // 2
        vertical_padding = (target_side - img.size[1]) // 2
        img = img.crop((-horizontal_padding,
                        -vertical_padding,
                        target_side - horizontal_padding,
                        target_side - vertical_padding))
        return np.array(img)
    # Downsampling: crop the long side symmetrically around the center.
    target_side = min(image.shape)
    horizontal_padding = int((image.shape[0] - target_side) / 2)
    vertical_padding = int((image.shape[1] - target_side) / 2)
    return image[horizontal_padding:horizontal_padding + target_side,
                 vertical_padding:vertical_padding + target_side]
def extract_n_preprocess_dicom(path, size):
    """
    Extract DICOM image from path with preprocessing to size.

    Loads the file at *path* with OpenCV, converts it to grayscale,
    pads it to a square and resizes it to (size, size) using Lanczos
    interpolation.

    NOTE(review): scipy.misc.imresize was removed in SciPy >= 1.3; this
    function requires an older SciPy (with Pillow) to run -- confirm the
    pinned environment.
    """
    ds = cv2.imread(path)
    ds = cv2.cvtColor(ds, cv2.COLOR_BGR2GRAY)
    ds = crop_to_square(ds, upsampling=True)
    ds = imresize(ds, (size,size), "lanczos")
    return ds
def extract_image(path):
    """Load the image at *path* and return it as a grayscale array."""
    return cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY)
def augment_image_pair(image1, image2, size, output_path1, output_path2):
    """
    Apply the SAME random flip/pad/rotate/crop to both images and save them.

    Useful for augmenting an image together with its label/mask so the
    two stay aligned.  Results are written as (size x size) grayscale
    images to *output_path1* / *output_path2*.

    NOTE: the order of the random.* calls below is part of the behavior
    when the RNG is seeded -- do not reorder them.
    """
    image1 = Image.fromarray(image1).convert('L')
    image2 = Image.fromarray(image2).convert('L')
    # Draw all random parameters up front so both images share them.
    offset = random.randint(0, 100)
    rotate = random.randint(-30,30)
    min_val = random.randint(0, offset+1)
    # Flip (applied with probability 1/3: of randint(1, 3) only 2 is even)
    if random.randint(1,3) % 2 == 0:
        image1 = image1.transpose(Image.FLIP_LEFT_RIGHT)
        image2 = image2.transpose(Image.FLIP_LEFT_RIGHT)
    # Add offset (black border of `offset` pixels on every side)
    image1 = ImageOps.expand(image1, offset)
    image2 = ImageOps.expand(image2, offset)
    # Rotate
    image1 = image1.rotate(rotate)
    image2 = image2.rotate(rotate)
    # Crop
    image1 = image1.crop((min_val, min_val, min_val+size, min_val+size))
    image2 = image2.crop((min_val, min_val, min_val+size, min_val+size))
    # Save
    image1.save(output_path1)
    image2.save(output_path2)
def extract_images(paths):
    """
    Load every image in *paths* as a grayscale array.

    Delegates to extract_image() instead of duplicating its
    imread/cvtColor sequence, so the loading logic lives in one place.
    """
    return [extract_image(path) for path in paths]
def check_and_create_dir(dir_path):
    """
    Create directory *dir_path* (including parents) if it does not exist.

    Uses exist_ok=True, which also removes the check-then-create race
    present in the previous isdir()-guarded version.
    """
    os.makedirs(dir_path, exist_ok=True)
def extract_image_path(folders, extension="png"):
    """
    Recursively collect file paths under *folders* whose lowercased
    name contains ".<extension>" (substring match, case-insensitive).
    """
    token = "." + extension
    matches = []
    for folder in folders:
        for dir_name, _, file_names in os.walk(folder):
            matches.extend(
                os.path.join(dir_name, file_name)
                for file_name in file_names
                if token in file_name.lower())
    return matches
def extract_n_normalize_image(path):
    """Load the image at *path* as grayscale floats scaled into [0, 1]."""
    gray = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY)
    return gray.astype(float) / 255
def get_batch(batch_size, size, x_filenames, y_filenames):
    """Sample a random batch of image pairs from the given file lists.

    Shuffles the (x, y) path lists together, takes the first
    *batch_size* pairs, and returns two arrays shaped
    (batch_size, size, size, 1) with pixel values in [0, 1].
    """
    xs, ys = shuffle(x_filenames, y_filenames)
    xs, ys = xs[:batch_size], ys[:batch_size]
    x_batch = np.reshape(
        np.array([extract_n_normalize_image(p) for p in xs]),
        (batch_size, size, size, 1))
    y_batch = np.reshape(
        np.array([extract_n_normalize_image(p) for p in ys]),
        (batch_size, size, size, 1))
    return (x_batch, y_batch)
def print_train_steps(current_step, total_steps):
    """Render an in-place console progress bar for training steps."""
    filled = int(current_step / (total_steps * 0.05))
    percent = int(float(current_step) * 100 / total_steps)
    bar = "=" * filled + " " * (20 - filled)
    sys.stdout.write(
        "\r[{}] ---- Step {}/{} ----- {}%".format(
            bar, current_step, total_steps, percent))
    sys.stdout.flush()
import tensorflow as tf
import os
import cv2
from scipy.misc import imresize
from PIL import Image, ImageOps
import random
import sys
from sklearn.utils import shuffle
def crop_to_square(image, upsampling):
"""
Crop image to square
"""
if image.shape[0] == image.shape[1]:
return image
if upsampling:
img = Image.fromarray(image)
target_side = max(img.size)
horizontal_padding = (target_side - img.size[0]) / 2
vertical_padding = (target_side - img.size[1]) / 2
start = [-horizontal_padding, -vertical_padding]
width = img.size[0] + horizontal_padding
height = img.size[1] + vertical_padding
else:
target_side = min(image.shape)
horizontal_padding = int((image.shape[0] - target_side) / 2)
vertical_padding = int((image.shape[1] - target_side) / 2)
start = [horizontal_padding, vertical_padding]
width = image.shape[0] - horizontal_padding
height = image.shape[1] - vertical_padding
return image[start[0]:width, start[1]:height]
img = img.crop((start[0], start[1], width, height))
return np.array(img)
def extract_n_preprocess_dicom(path, size):
"""
Extract DICOM image from path with preprocessing to size
"""
ds = cv2.imread(path)
ds = cv2.cvtColor(ds, cv2.COLOR_BGR2GRAY)
ds = crop_to_square(ds, upsampling=True)
ds = imresize(ds, (size,size), "lanczos")
return ds
def extract_image(path):
"""
Extract DICOM image from path
"""
ds = cv2.imread(path)
ds = cv2.cvtColor(ds, cv2.COLOR_BGR2GRAY)
return ds
def augment_image_pair(image1, image2, size, output_path1, output_path2):
"""
Augment image pair
"""
image1 = Image.fromarray(image1).convert('L')
image2 = Image.fromarray(image2).convert('L')
offset = random.randint(0, 100)
rotate = random.randint(-30,30)
min_val = random.randint(0, offset+1)
# Flip
if random.randint(1,3) % 2 == 0:
image1 = image1.transpose(Image.FLIP_LEFT_RIGHT)
image2 = image2.transpose(Image.FLIP_LEFT_RIGHT)
# Add offset
image1 = ImageOps.expand(image1, offset)
image2 = ImageOps.expand(image2, offset)
# Rotate
image1 = image1.rotate(rotate)
image2 = image2.rotate(rotate)
# Crop
image1 = image1.crop((min_val, min_val, min_val+size, min_val+size))
image2 = image2.crop((min_val, min_val, min_val+size, min_val+size))
# Save
image1.save(output_path1)
image2.save(output_path2)
def extract_images(paths):
"""
Extract images from paths
"""
images = []
for path in paths:
ds = cv2.imread(path)
ds = cv2.cvtColor(ds, cv2.COLOR_BGR2GRAY)
images.append(ds)
return images
def check_and_create_dir(dir_path):
"""
Check and create directory path
"""
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
def extract_image_path(folders, extension="png"):
"""
Extract image paths with extension from folders
"""
images = []
for folder in folders:
for dirName, subdirList, fileList in os.walk(folder):
for filename in fileList:
if "." + extension in filename.lower():
images.append(os.path.join(dirName,filename))
return images
def extract_n_normalize_image(path):
"""
Extract DICOM image from path
"""
ds = cv2.imread(path)
ds = cv2.cvtColor(ds, cv2.COLOR_BGR2GRAY)
return ds.astype(float)/255
def get_batch(batch_size, size, x_filenames, y_filenames):
X, y = shuffle(x_filenames, y_filenames)
X = X[:batch_size]
y = y[:batch_size]
X_images = []
y_images = []
for i in range(len(X)):
X_images.append(extract_n_normalize_image(X[i]))
y_images.append(extract_n_normalize_image(y[i]))
X_images = np.reshape(np.array(X_images), (batch_size, size, size, 1))
y_images = np.reshape(np.array(y_images), (batch_size, size, size, 1))
return (X_images, y_images)
def print_train_steps(current_step, total_steps):
point = int(current_step / (total_steps * 0.05))
sys.stdout.write("\r[" + "=" * point + " " * (20 - point) + "] ---- Step {}/{} ----- ".format(current_step, total_steps) + str(int(float(current_step) * 100 / total_steps)) + "%")
sys.stdout.flush() | 0.38943 | 0.409162 |
from __future__ import unicode_literals
from functools import reduce
from django.conf import settings
from django.conf.urls import include, url
from django.views.i18n import JavaScriptCatalog
from django.contrib.auth import views as auth_views
from django.contrib.auth.decorators import login_required
from ckeditor_uploader import views as cku_views
from rest_framework.documentation import include_docs_urls
from modoboa.admin.views.user import forward
from modoboa.core import signals as core_signals
from modoboa.core import views as core_views
from modoboa.core.extensions import exts_pool
API_TITLE = 'Modoboa API'

urlpatterns = [
    url(r'^jsi18n/$', JavaScriptCatalog.as_view(), name='javascript-catalog'),
    url(r'^ckeditor/upload/', login_required(cku_views.upload),
        name="ckeditor_upload"),
    url(r'^ckeditor/browse/', login_required(cku_views.browse),
        name="ckeditor_browse"),

    url('', include('modoboa.core.urls', namespace="core")),
    url('^user/forward/', forward, name="user_forward"),
    url('admin/', include('modoboa.admin.urls', namespace="admin")),
    url(r'^transports/',
        include("modoboa.transport.urls", namespace="transport")),

    # No namespace
    url(r'^accounts/password_reset/$', core_views.password_reset,
        name="password_reset"),
    url(r'^accounts/password_reset/done/$', auth_views.password_reset_done,
        name="password_reset_done"),
    # Token pattern restored to Django's canonical password-reset regex;
    # the previous line contained a corrupted placeholder ("<KEY>") and an
    # unbalanced group, which raises re.error at import time.
    url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',  # noqa
        auth_views.password_reset_confirm, name="password_reset_confirm"),
    url(r'^reset/done/$', auth_views.password_reset_complete,
        name="password_reset_complete"),
]
# Load every installed Modoboa extension and mount its URL patterns.
exts_pool.load_all()

urlpatterns += exts_pool.get_urls()

# Let extensions contribute extra routes to the user-preferences area;
# the signal returns (receiver, routes) pairs that are flattened here.
extra_routes = core_signals.extra_uprefs_routes.send(sender="urls")
if extra_routes:
    extra_routes = reduce(
        lambda a, b: a + b, [route[1] for route in extra_routes])
    urlpatterns += extra_routes

# API urls
urlpatterns += [
    url("^docs/api/", include_docs_urls(title=API_TITLE, public=False)),
    url("^api/v1/", include("modoboa.urls_api", namespace="api")),
]

if settings.DEBUG:
    # Serve static and media files directly -- development only.
    from django.contrib.staticfiles.urls import staticfiles_urlpatterns
    from django.conf.urls.static import static

    urlpatterns += staticfiles_urlpatterns()
    urlpatterns += static(
        settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
    )
from functools import reduce
from django.conf import settings
from django.conf.urls import include, url
from django.views.i18n import JavaScriptCatalog
from django.contrib.auth import views as auth_views
from django.contrib.auth.decorators import login_required
from ckeditor_uploader import views as cku_views
from rest_framework.documentation import include_docs_urls
from modoboa.admin.views.user import forward
from modoboa.core import signals as core_signals
from modoboa.core import views as core_views
from modoboa.core.extensions import exts_pool
API_TITLE = 'Modoboa API'
urlpatterns = [
url(r'^jsi18n/$', JavaScriptCatalog.as_view(), name='javascript-catalog'),
url(r'^ckeditor/upload/', login_required(cku_views.upload),
name="ckeditor_upload"),
url(r'^ckeditor/browse/', login_required(cku_views.browse),
name="ckeditor_browse"),
url('', include('modoboa.core.urls', namespace="core")),
url('^user/forward/', forward, name="user_forward"),
url('admin/', include('modoboa.admin.urls', namespace="admin")),
url(r'^transports/',
include("modoboa.transport.urls", namespace="transport")),
# No namespace
url(r'^accounts/password_reset/$', core_views.password_reset,
name="password_reset"),
url(r'^accounts/password_reset/done/$', auth_views.password_reset_done,
name="password_reset_done"),
url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-<KEY>1,13}-[0-9A-Za-z]{1,20})/$', # noqa
auth_views.password_reset_confirm, name="password_reset_confirm"),
url(r'^reset/done/$', auth_views.password_reset_complete,
name="password_reset_complete"),
]
exts_pool.load_all()
urlpatterns += exts_pool.get_urls()
extra_routes = core_signals.extra_uprefs_routes.send(sender="urls")
if extra_routes:
extra_routes = reduce(
lambda a, b: a + b, [route[1] for route in extra_routes])
urlpatterns += extra_routes
# API urls
urlpatterns += [
url("^docs/api/", include_docs_urls(title=API_TITLE, public=False)),
url("^api/v1/", include("modoboa.urls_api", namespace="api")),
]
if settings.DEBUG:
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf.urls.static import static
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(
settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
) | 0.395951 | 0.06951 |
from darknet_config_generator.common import *
""" Learning Rate Decay Policies """
class ScheduledLRDecay:
    """
    Learning rate decay policy that rescales the LR at fixed iteration steps.
    """

    def __init__(self, lr_decay_schedule=None):
        """
        Args:
            lr_decay_schedule: mapping of iteration -> LR scale factor.
                Defaults to {400000: 0.1, 450000: 0.1}.  The default is
                created per call to avoid the shared-mutable-default
                pitfall of the previous dict literal default.
        """
        if lr_decay_schedule is None:
            lr_decay_schedule = {400000: 0.1, 450000: 0.1}
        self.__HEADER__= '# LR Policy'
        self.policy = LearningRateDecayPolicy.SCHEDULED
        self.lr_decay_schedule = lr_decay_schedule

    def export(self, file_obj):
        """ exports learning rate decay policy """
        file_obj.write(f'{NL}')
        file_obj.write(f'{self.__HEADER__}{NL}')
        file_obj.write(f'policy={self.policy}{NL}')
        file_obj.write(f'steps={list_to_str(self.lr_decay_schedule.keys())}{NL}')
        file_obj.write(f'scales={list_to_str(self.lr_decay_schedule.values())}{NL}')
"""Network Optimization """
class YOLOOptimizer:
    """Darknet training/optimization hyper-parameters for a YOLO network."""

    def __init__(self, learning_rate:float=0.001, batch_size=64, subdivisions=64, num_gpus:int=2,
                 policy=LearningRateDecayPolicy.SCHEDULED, momentum=0.9, lr_decay=0.0005,
                 lr_decay_schedule=None, burn_in:int=1000, batches_per_class=2000, num_classes=80):
        """
        Args:
            lr_decay_schedule: mapping of iteration -> LR scale factor.
                Defaults to {400000: 0.1, 450000: 0.1}; created per call
                instead of as a shared mutable default argument.
        """
        if lr_decay_schedule is None:
            lr_decay_schedule = {400000: 0.1, 450000: 0.1}
        self.__HEADER__= '# Optimization Parameters'
        self.batch = batch_size
        self.subdivisions = subdivisions
        self.num_gpus = num_gpus
        # Burn-in (LR warm-up) length is scaled by the number of GPUs.
        self.burn_in = burn_in * self.num_gpus
        self.max_batches = batches_per_class * num_classes
        self.policy = policy
        self.learning_rate = learning_rate
        self.lr_decay = lr_decay
        self.momentum = momentum
        self.lr_decay_schedule = lr_decay_schedule

    def export(self, file_obj):
        """exports the optimizer to the given file object"""
        file_obj.write(f'{NL}')
        file_obj.write(f'{self.__HEADER__}{NL}')
        file_obj.write(f'batch={self.batch}{NL}')
        file_obj.write(f'subdivisions={self.subdivisions}{NL}')
        file_obj.write(f'decay={self.lr_decay}{NL}')
        file_obj.write(f'learning_rate={self.learning_rate}{NL}')
        file_obj.write(f'momentum={self.momentum}{NL}')
        file_obj.write(f'burn_in={self.burn_in}{NL}')
        file_obj.write(f'max_batches={self.max_batches}{NL}')
        file_obj.write(f'policy={self.policy}{NL}')
        file_obj.write(f'steps={list_to_str(self.lr_decay_schedule.keys())}{NL}')
        file_obj.write(f'scales={list_to_str(self.lr_decay_schedule.values())}{NL}')
        file_obj.write(NL)
""" Learning Rate Decay Policies """
class ScheduledLRDecay:
"""
Learning rate decay policy
"""
def __init__(self, lr_decay_schedule={400000:0.1, 450000:0.1}):
self.__HEADER__= '# LR Policy'
self.policy = LearningRateDecayPolicy.SCHEDULED
self.lr_decay_schedule = lr_decay_schedule
def export(self, file_obj):
""" exports learning rate decay policy """
file_obj.write(f'{NL}')
file_obj.write(f'{self.__HEADER__}{NL}')
file_obj.write(f'policy={self.policy}{NL}')
file_obj.write(f'steps={list_to_str(self.lr_decay_schedule.keys())}{NL}')
file_obj.write(f'scales={list_to_str(self.lr_decay_schedule.values())}{NL}')
"""Network Optimization """
class YOLOOptimizer:
def __init__(self, learning_rate:float=0.001, batch_size=64, subdivisions=64, num_gpus:int=2,
policy=LearningRateDecayPolicy.SCHEDULED, momentum=0.9, lr_decay=0.0005,
lr_decay_schedule={400000:0.1, 450000:0.1}, burn_in:int=1000, batches_per_class=2000, num_classes=80):
self.__HEADER__= '# Optimization Parameters'
self.batch = batch_size
self.subdivisions = subdivisions
self.num_gpus = num_gpus
self.burn_in = burn_in * self.num_gpus
self.max_batches = batches_per_class * num_classes
self.policy = policy
self.learning_rate = learning_rate
self.lr_decay = lr_decay
self.momentum = momentum
self.lr_decay_schedule = lr_decay_schedule
def export(self, file_obj):
"""exports the optimizer to the given file object"""
file_obj.write(f'{NL}')
file_obj.write(f'{self.__HEADER__}{NL}')
file_obj.write(f'batch={self.batch}{NL}')
file_obj.write(f'subdivisions={self.subdivisions}{NL}')
file_obj.write(f'decay={self.lr_decay}{NL}')
file_obj.write(f'learning_rate={self.learning_rate}{NL}')
file_obj.write(f'momentum={self.momentum}{NL}')
file_obj.write(f'burn_in={self.burn_in}{NL}')
file_obj.write(f'max_batches={self.max_batches}{NL}')
file_obj.write(f'policy={self.policy}{NL}')
file_obj.write(f'steps={list_to_str(self.lr_decay_schedule.keys())}{NL}')
file_obj.write(f'scales={list_to_str(self.lr_decay_schedule.values())}{NL}')
file_obj.write(NL) | 0.84338 | 0.227662 |
# pylama: ignore=W0611
from exceptions import ImproperlyConfigured
import os
import dash_core_components as dcc
import dash_html_components as html
import plotly.plotly as py
import plotly.graph_objs as go
from flask import Flask
from dash import Dash
from dash.dependencies import Input, Output, State
from dotenv import load_dotenv
# "DYNO" is set by the Heroku runtime; use it to pick the environment.
if "DYNO" in os.environ:
    # Heroku-specific config
    debug = False
else:
    # Development-mode: set debug to true and load from .env file
    debug = True
    dotenv_path = os.path.join(os.path.dirname(__file__), ".env")
    load_dotenv(dotenv_path)

# Sign in to plotly; fail fast at import time if credentials are missing.
try:
    py.sign_in(os.environ["PLOTLY_USERNAME"], os.environ["PLOTLY_API_KEY"])
except KeyError:
    raise ImproperlyConfigured("Plotly credentials not set in .env")

# app init: Dash mounted on an explicit Flask server.
app_name = "Car Cost Calculator"
server = Flask(app_name)
try:
    server.secret_key = os.environ["SECRET_KEY"]
except KeyError:
    raise ImproperlyConfigured("SECRET KEY not set in .env:")
app = Dash(name=app_name, server=server)
app.title = app_name

# External assets registered with the Dash app below (Bootstrap theme,
# Font Awesome icons, classic Dash stylesheet).
external_js = []
external_css = [
    "https://stackpath.bootstrapcdn.com/bootswatch/3.3.7/flatly/bootstrap.min.css",
    # "https://stackpath.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css",
    "https://stackpath.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.min.css",
    "https://codepen.io/chriddyp/pen/bWLwgP.css",
]
def create_header():
    """Build the fixed top navigation bar showing the app name.

    Returns:
        Dash HTML Object: The Dash HTML object representing the page header.
    """
    brand = html.Div([app_name], className="navbar-brand navbar-left")
    container = html.Div([brand], className="container")
    return html.Header(
        html.Nav(
            [container],
            className="navbar navbar-default navbar-fixed-top",
        )
    )
def create_form_group(label, control):
    """Wrap a label and its input control in a Bootstrap form-group div."""
    return html.Div([label, control], className="form-group")
def create_content():
    """Build the main page body: an input column and a chart column.

    Returns:
        Dash HTML Div with the form controls (left, col-md-4) and a
        placeholder graph (right, col-md-8) inside a Bootstrap row.
    """
    # input controls
    inputs = html.Div(
        [
            html.Div(
                [
                    create_form_group(
                        html.Label("Purchase Price"),
                        dcc.Input(type="number", min=1e3, max=2e5, step=1000, value=35000),
                    ),
                    create_form_group(
                        html.Label("Fuel Economy (L/100km)"),
                        dcc.Input(type="number", min=0, max=30, step=0.1, value=6.0),
                    ),
                    create_form_group(
                        html.Label("KM per Year"),
                        dcc.Input(type="number", min=0, max=2e5, step=5000, value=15000),
                    ),
                    create_form_group(
                        html.Label("Age at Purchase"),
                        dcc.Input(type="number", min=0, max=30, step=1, value=0),
                    ),
                    create_form_group(
                        html.Label("Depreciation Rate (first 3 years)"),
                        dcc.Input(type="number", min=0, max=100, step=5, value=19),
                    ),
                    create_form_group(
                        html.Label("Depreciation Rate (after 3 years)"),
                        dcc.Input(type="number", min=0, max=100, step=5, value=10),
                    ),
                ],
                className="col-md-4",
            ),
            # html.Div(
            #     [
            #     ],
            #     className="col-md-2",
            # ),
        ]
    )
    # outputs
    # NOTE(review): the graph shows hard-coded sample data; no callback
    # updates it yet (see the TODO at module level).
    outputs = html.Div(
        [
            dcc.Graph(
                id="graph-0",
                figure={
                    "data": [
                        {"x": [1, 2, 3], "y": [4, 1, 2], "type": "bar", "name": "SF"},
                        {
                            "x": [1, 2, 3],
                            "y": [2, 4, 5],
                            "type": "bar",
                            "name": u"Montréal",
                        },
                    ],
                    "layout": {"title": "Dash Data Visualization"},
                },
            )
        ],
        className="col-md-8 text-justify",
    )
    content = html.Div(
        [html.Div([inputs, outputs], className="row")],
        id="main-content",
        className="container",
    )
    return content
def create_footer():
    """Build the fixed bottom bar with app version and copyright notice."""
    footer = html.Footer(
        [
            html.Div(
                [
                    html.P(
                        [
                            html.Span(
                                "{0}, version 0.1.0".format(app_name),
                                className="text-muted",
                            )
                        ],
                        className="navbar-text pull-left footer-text",
                    ),
                    html.P(
                        [
                            html.Span(className="fa fa-copyright text-muted"),
                            html.Span(" 2018, jugglindan", className="text-muted"),
                        ],
                        className="navbar-text pull-right footer-text",
                    ),
                ]
            )
        ],
        id="main-footer",
        className="navbar navbar-default navbar-fixed-bottom",
    )
    return footer
def serve_layout():
    """Assemble the full page from its three sections: header, content, footer."""
    sections = [create_header(), create_content(), create_footer()]
    return html.Div(sections, className="container-fluid")
# Assign the layout *function* (not its result) so Dash re-invokes it on each
# page load.
app.layout = serve_layout
# Register CDN-hosted assets declared above.
for js in external_js:
    app.scripts.append_script({"external_url": js})
for css in external_css:
    app.css.append_css({"external_url": css})
# TODO: callbacks
if __name__ == "__main__":
port = int(os.environ.get("PORT", 5000))
app.run_server(debug=debug, port=port, threaded=True) | ccc_gui/app.py |
# pylama: ignore=W0611
from exceptions import ImproperlyConfigured
import os
import dash_core_components as dcc
import dash_html_components as html
import plotly.plotly as py
import plotly.graph_objs as go
from flask import Flask
from dash import Dash
from dash.dependencies import Input, Output, State
from dotenv import load_dotenv
# Environment detection: Heroku sets the DYNO variable, so its presence
# distinguishes a deployed dyno from a local development checkout.
if "DYNO" in os.environ:
    # Heroku-specific config
    debug = False
else:
    # Development-mode: set debug to true and load from .env file
    debug = True
    dotenv_path = os.path.join(os.path.dirname(__file__), ".env")
    load_dotenv(dotenv_path)
# Sign in to plotly
# NOTE(review): plotly.plotly (`py`) is the legacy cloud-hosted API; confirm it
# is still needed, since figures below are rendered by Dash locally.
try:
    py.sign_in(os.environ["PLOTLY_USERNAME"], os.environ["PLOTLY_API_KEY"])
except KeyError:
    raise ImproperlyConfigured("Plotly credentials not set in .env")
# app init
app_name = "Car Cost Calculator"
server = Flask(app_name)
try:
    # Flask requires a secret key for session signing; fail fast when missing.
    server.secret_key = os.environ["SECRET_KEY"]
except KeyError:
    raise ImproperlyConfigured("SECRET KEY not set in .env:")
app = Dash(name=app_name, server=server)
app.title = app_name
# Extra assets fetched from CDNs at render time.
external_js = []
external_css = [
    "https://stackpath.bootstrapcdn.com/bootswatch/3.3.7/flatly/bootstrap.min.css",
    # "https://stackpath.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css",
    "https://stackpath.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.min.css",
    "https://codepen.io/chriddyp/pen/bWLwgP.css",
]
def create_header():
    """Creates the page header.

    Returns:
        Dash HTML Object: The Dash HTML object representing the page header —
        a fixed-top Bootstrap navbar showing the application name as its brand.
    """
    header = html.Header(
        html.Nav(
            [
                html.Div(
                    [html.Div([app_name], className="navbar-brand navbar-left")],
                    className="container",
                )
            ],
            className="navbar navbar-default navbar-fixed-top",
        )
    )
    return header
def create_form_group(label, control):
    """Pair a label with its input control inside a Bootstrap ``form-group``."""
    return html.Div([label, control], className="form-group")
def create_content():
    """Build the page body: numeric input controls in the left column and a
    demo bar chart in the right column.

    Returns:
        Dash HTML Object: the ``main-content`` container div.
    """
    # input controls
    inputs = html.Div(
        [
            html.Div(
                [
                    create_form_group(
                        html.Label("Purchase Price"),
                        dcc.Input(type="number", min=1e3, max=2e5, step=1000, value=35000),
                    ),
                    create_form_group(
                        html.Label("Fuel Economy (L/100km)"),
                        dcc.Input(type="number", min=0, max=30, step=0.1, value=6.0),
                    ),
                    create_form_group(
                        html.Label("KM per Year"),
                        dcc.Input(type="number", min=0, max=2e5, step=5000, value=15000),
                    ),
                    create_form_group(
                        html.Label("Age at Purchase"),
                        dcc.Input(type="number", min=0, max=30, step=1, value=0),
                    ),
                    create_form_group(
                        html.Label("Depreciation Rate (first 3 years)"),
                        dcc.Input(type="number", min=0, max=100, step=5, value=19),
                    ),
                    create_form_group(
                        html.Label("Depreciation Rate (after 3 years)"),
                        dcc.Input(type="number", min=0, max=100, step=5, value=10),
                    ),
                ],
                className="col-md-4",
            ),
            # html.Div(
            #     [
            #     ],
            #     className="col-md-2",
            # ),
        ]
    )
    # outputs
    outputs = html.Div(
        [
            dcc.Graph(
                id="graph-0",
                figure={
                    "data": [
                        {"x": [1, 2, 3], "y": [4, 1, 2], "type": "bar", "name": "SF"},
                        {
                            "x": [1, 2, 3],
                            "y": [2, 4, 5],
                            "type": "bar",
                            "name": u"Montréal",
                        },
                    ],
                    "layout": {"title": "Dash Data Visualization"},
                },
            )
        ],
        className="col-md-8 text-justify",
    )
    content = html.Div(
        [html.Div([inputs, outputs], className="row")],
        id="main-content",
        className="container",
    )
    return content
def create_footer():
    """Build the fixed-bottom footer (version string left, copyright right).

    Returns:
        Dash HTML Object: the ``main-footer`` element.
    """
    footer = html.Footer(
        [
            html.Div(
                [
                    html.P(
                        [
                            html.Span(
                                "{0}, version 0.1.0".format(app_name),
                                className="text-muted",
                            )
                        ],
                        className="navbar-text pull-left footer-text",
                    ),
                    html.P(
                        [
                            html.Span(className="fa fa-copyright text-muted"),
                            html.Span(" 2018, jugglindan", className="text-muted"),
                        ],
                        className="navbar-text pull-right footer-text",
                    ),
                ]
            )
        ],
        id="main-footer",
        className="navbar navbar-default navbar-fixed-bottom",
    )
    return footer
def serve_layout():
    """Compose header, content, and footer into the full-page layout.

    Returns:
        Dash HTML Object: full-width div wrapping all page sections.
    """
    layout = html.Div(
        [create_header(), create_content(), create_footer()],
        className="container-fluid",
    )
    return layout
# Assign the layout function itself so Dash calls it on every page load.
app.layout = serve_layout
# Attach the CDN-hosted scripts and stylesheets declared above.
for js in external_js:
    app.scripts.append_script({"external_url": js})
for css in external_css:
    app.css.append_css({"external_url": css})
# TODO: callbacks
if __name__ == "__main__":
port = int(os.environ.get("PORT", 5000))
app.run_server(debug=debug, port=port, threaded=True) | 0.477311 | 0.120568 |
from abstract.instruccion import *
from tools.tabla_tipos import *
from tools.tabla_simbolos import *
from error.errores import *
from instruccion.P_Key import *
from tools.console_text import *
from storage import jsonMode as funciones
class altertb_drop(instruccion):
    def __init__(self,alterdrop, ID, line, column, num_nodo):
        """Build an ``ALTER TABLE ... DROP`` instruction node.

        Args:
            alterdrop: Drop target kind ('constraint' or 'column' — compared
                case-insensitively in ejecutar).
            ID: Name of the constraint or column to drop.
            line, column: Source position, forwarded to the base instruction.
            num_nodo: Base id used to number the AST nodes created here.
        """
        super().__init__(line, column)
        self.alterdrop = alterdrop
        self.ID = ID
        # AST node for ALTER DROP (was: "Nodo ALTER DROP")
        self.nodo = nodo_AST('DROP', num_nodo)
        self.nodo.hijos.append(nodo_AST('DROP',num_nodo+1))
        self.nodo.hijos.append(nodo_AST(alterdrop, num_nodo + 2))
        self.nodo.hijos.append(nodo_AST(ID, num_nodo + 3))
        # Grammar report row (was: "Gramatica"), rendered later as HTML.
        self.grammar_ = '<TR><TD> OP_ALTER ::= DROP ' + alterdrop + ID + ' </TD><TD> OP_ALTER = new altertb_drop(' + alterdrop + ', ' + ID + '); </TD></TR>\n'
def ejecutar(self, tb_id):
try:
if self.alterdrop.lower() == 'constraint':
#Buscar constraint entre las columnas
db_id = get_actual_use()
retorno_drop = 1
if db_id == '':
retorno_drop = 2
#Extraer todas las columnas
columnas_tb = ts.get_cols(db_id, tb_id)
if columnas_tb == None:
retorno_drop = 3
#Recorrer la columna
for columna_item in columnas_tb:
if columna_item.condiciones != None:
count_restricciones = 0
for restriccion in columna_item.condiciones:
try:
if restriccion.constraint.dato == self.ID:
ts.delete_restriccion(db_id, tb_id, columna_item.id_, count_restricciones)
retorno_drop = 0
if isinstance(restriccion, P_Key):
retorno_drop = funciones.alterDropPK(db_id, tb_id)
# Valor de retorno: 0 operación exitosa, 1 error en la operación, 2 database no existente, 3 table no existente, 4 pk no existente.
except:
pass
count_restricciones += 1
if retorno_drop == 0:
add_text('Se eliminó la restricción ' + self.ID + '\n')
elif retorno_drop == 1:
errores.append(nodo_error(self.line, self.column, 'ERROR - No se pudo eliminar la restriccion ' + self.ID, 'Semántico'))
add_text('ERROR - No se pudo eliminar la restriccion ' + self.ID + '\n')
elif retorno_drop == 2:
errores.append(nodo_error(self.line, self.column, 'ERROR - No se encuentra la base de datos', 'Semántico'))
add_text('ERROR - No se encuentra la base de datos\n')
elif retorno_drop == 3:
errores.append(nodo_error(self.line, self.column, 'ERROR - No se encuentra la tabla: ' + tb_id, 'Semántico'))
add_text('ERROR - No se encuentra la tabla: ' + tb_id + '\n')
elif retorno_drop == 4:
errores.append(nodo_error(self.line, self.column, 'ERROR - No existe llave primaria: ' + self.ID, 'Semántico'))
add_text('ERROR - No existe llave primaria: ' + self.ID + '\n')
elif self.alterdrop.lower() == 'column':
db_id = get_actual_use()
index_col = ts.get_pos_col(db_id, tb_id, self.ID)
drop_col = funciones.alterDropColumn(db_id, tb_id, index_col)
# Valor de retorno: 0 operación exitosa, 1 error en la operación, 2 database no existente, 3 table no existente, 4 llave no puede eliminarse o tabla quedarse sin columnas, 5 columna fuera de límites.
if drop_col == 0:
ts.delete_col(db_id, tb_id, self.ID)
add_text('Columna ' + self.ID + ' se eliminó correctamente.\n')
elif drop_col == 1:
errores.append(nodo_error(self.line, self.column, 'ERROR - No se pudo eliminar la columna: ' + self.ID, 'Semántico'))
add_text('ERROR - No se pudo eliminar la columna: ' + self.ID + '\n')
elif drop_col == 2:
errores.append(nodo_error(self.line, self.column, 'ERROR - No se encontró la base de datos: ' + db_id, 'Semántico'))
add_text('ERROR - No se encontró la base de datos: ' + db_id + '\n')
elif drop_col == 3:
errores.append(nodo_error(self.line, self.column, 'ERROR - No se encontró la tabla: ' + tb_id, 'Semántico'))
add_text('ERROR - No se encontró la tabla: ' + tb_id + '\n')
elif drop_col == 4:
if ts.count_columns(db_id, tb_id) == 1:
errores.append(nodo_error(self.line, self.column, 'ERROR - No puede eliminar la unica columna de la tabla', 'Semántico'))
add_text('ERROR - No puede eliminar la unica columna de la tabla\n')
else:
errores.append(nodo_error(self.line, self.column, 'ERROR - No se puede eliminar una llave primaria ' + self.ID, 'Semántico'))
add_text('ERROR - No se puede eliminar una llave primaria: ' + self.ID + '\n')
elif drop_col == 5:
errores.append(nodo_error(self.line, self.column, 'ERROR - Columnas fuera de indice', 'Semántico'))
add_text('ERROR - Columnas fuera de indice\n')
except:
errores.append(nodo_error(self.line, self.column, 'ERROR - No se puede hacer la instruccion drop', 'Semántico'))
add_text('ERROR - No se puede hacer la instruccion drop\n') | parser/team23/instruccion/altertb_drop.py | from abstract.instruccion import *
from tools.tabla_tipos import *
from tools.tabla_simbolos import *
from error.errores import *
from instruccion.P_Key import *
from tools.console_text import *
from storage import jsonMode as funciones
class altertb_drop(instruccion):
    def __init__(self,alterdrop, ID, line, column, num_nodo):
        """Construct the ALTER ... DROP instruction, its AST nodes, and the
        grammar-trace HTML row.

        Args:
            alterdrop: Drop target kind ('constraint' or 'column').
            ID: Name of the constraint or column to drop.
            line, column: Source position for error reporting.
            num_nodo: Base id for numbering the AST nodes below.
        """
        super().__init__(line, column)
        self.alterdrop = alterdrop
        self.ID = ID
        # AST node for ALTER DROP (was: "Nodo ALTER DROP")
        self.nodo = nodo_AST('DROP', num_nodo)
        self.nodo.hijos.append(nodo_AST('DROP',num_nodo+1))
        self.nodo.hijos.append(nodo_AST(alterdrop, num_nodo + 2))
        self.nodo.hijos.append(nodo_AST(ID, num_nodo + 3))
        # Grammar trace (was: "Gramatica").
        self.grammar_ = '<TR><TD> OP_ALTER ::= DROP ' + alterdrop + ID + ' </TD><TD> OP_ALTER = new altertb_drop(' + alterdrop + ', ' + ID + '); </TD></TR>\n'
def ejecutar(self, tb_id):
try:
if self.alterdrop.lower() == 'constraint':
#Buscar constraint entre las columnas
db_id = get_actual_use()
retorno_drop = 1
if db_id == '':
retorno_drop = 2
#Extraer todas las columnas
columnas_tb = ts.get_cols(db_id, tb_id)
if columnas_tb == None:
retorno_drop = 3
#Recorrer la columna
for columna_item in columnas_tb:
if columna_item.condiciones != None:
count_restricciones = 0
for restriccion in columna_item.condiciones:
try:
if restriccion.constraint.dato == self.ID:
ts.delete_restriccion(db_id, tb_id, columna_item.id_, count_restricciones)
retorno_drop = 0
if isinstance(restriccion, P_Key):
retorno_drop = funciones.alterDropPK(db_id, tb_id)
# Valor de retorno: 0 operación exitosa, 1 error en la operación, 2 database no existente, 3 table no existente, 4 pk no existente.
except:
pass
count_restricciones += 1
if retorno_drop == 0:
add_text('Se eliminó la restricción ' + self.ID + '\n')
elif retorno_drop == 1:
errores.append(nodo_error(self.line, self.column, 'ERROR - No se pudo eliminar la restriccion ' + self.ID, 'Semántico'))
add_text('ERROR - No se pudo eliminar la restriccion ' + self.ID + '\n')
elif retorno_drop == 2:
errores.append(nodo_error(self.line, self.column, 'ERROR - No se encuentra la base de datos', 'Semántico'))
add_text('ERROR - No se encuentra la base de datos\n')
elif retorno_drop == 3:
errores.append(nodo_error(self.line, self.column, 'ERROR - No se encuentra la tabla: ' + tb_id, 'Semántico'))
add_text('ERROR - No se encuentra la tabla: ' + tb_id + '\n')
elif retorno_drop == 4:
errores.append(nodo_error(self.line, self.column, 'ERROR - No existe llave primaria: ' + self.ID, 'Semántico'))
add_text('ERROR - No existe llave primaria: ' + self.ID + '\n')
elif self.alterdrop.lower() == 'column':
db_id = get_actual_use()
index_col = ts.get_pos_col(db_id, tb_id, self.ID)
drop_col = funciones.alterDropColumn(db_id, tb_id, index_col)
# Valor de retorno: 0 operación exitosa, 1 error en la operación, 2 database no existente, 3 table no existente, 4 llave no puede eliminarse o tabla quedarse sin columnas, 5 columna fuera de límites.
if drop_col == 0:
ts.delete_col(db_id, tb_id, self.ID)
add_text('Columna ' + self.ID + ' se eliminó correctamente.\n')
elif drop_col == 1:
errores.append(nodo_error(self.line, self.column, 'ERROR - No se pudo eliminar la columna: ' + self.ID, 'Semántico'))
add_text('ERROR - No se pudo eliminar la columna: ' + self.ID + '\n')
elif drop_col == 2:
errores.append(nodo_error(self.line, self.column, 'ERROR - No se encontró la base de datos: ' + db_id, 'Semántico'))
add_text('ERROR - No se encontró la base de datos: ' + db_id + '\n')
elif drop_col == 3:
errores.append(nodo_error(self.line, self.column, 'ERROR - No se encontró la tabla: ' + tb_id, 'Semántico'))
add_text('ERROR - No se encontró la tabla: ' + tb_id + '\n')
elif drop_col == 4:
if ts.count_columns(db_id, tb_id) == 1:
errores.append(nodo_error(self.line, self.column, 'ERROR - No puede eliminar la unica columna de la tabla', 'Semántico'))
add_text('ERROR - No puede eliminar la unica columna de la tabla\n')
else:
errores.append(nodo_error(self.line, self.column, 'ERROR - No se puede eliminar una llave primaria ' + self.ID, 'Semántico'))
add_text('ERROR - No se puede eliminar una llave primaria: ' + self.ID + '\n')
elif drop_col == 5:
errores.append(nodo_error(self.line, self.column, 'ERROR - Columnas fuera de indice', 'Semántico'))
add_text('ERROR - Columnas fuera de indice\n')
except:
errores.append(nodo_error(self.line, self.column, 'ERROR - No se puede hacer la instruccion drop', 'Semántico'))
add_text('ERROR - No se puede hacer la instruccion drop\n') | 0.24599 | 0.110447 |
import config
from utils import Utils
class Wenker:
@staticmethod
def create_transcribe_media(app, client):
req, resp = app.op['get_tasks'](limit=20000)
tasks = client.request((req, resp)).data
for t in tasks:
media = {
'source_id': '',
'path': '',
'name': '',
'f_type': 'raster-image'
}
if('SheetNumber' in t.info):
i = t.info['SheetNumber'] + 'a.jpg'
media['source_id'] = t.id
media['path'] = './static/upload/' + i
media['name'] = i
req, resp = app.op['create_media'](media=media)
m_save = client.request((req, resp)).data
print(t.id, m_save)
else:
continue
    @staticmethod
    def create_translate_tasks(app, client, rows):
        """Create one free-text translation task per input row.

        Args:
            app: API app exposing prepared operations via ``app.op``.
            client: client used to execute the prepared (request, response) pair.
            rows: iterable of mappings; each must provide a 'Sentence' key.
        """
        t = "<TEXT>"  # placeholder title applied to every task
        tasks = []
        for r in rows:
            task = {
                # hard-coded id of the target translation project
                'project_id': '507b3f89-aff1-4fa3-8f28-9c8399811539',
                'sequence': 0,
                'info': {},
                'required': True,
                'title': t,
                'content': {"question": {"text": r['Sentence'], "type": "text"}, "answers": [{"type": "text", "placeholder": "", "choices": []}]}
            }
            tasks.append(task)
        # Submit all tasks in one batched create call.
        req, resp = app.op['create_tasks'](tasks=tasks)
        task_save = client.request((req, resp)).data
        print(task_save)
@staticmethod
def create_transcribe_tasks(app, client, rows):
tasks = []
t = "Bitte übertragen Sie jeden nummerierten Satz in das entsprechende Textfeld."
for r in rows:
task = {
'project_id': 'e4b5ebc5-47a2-430b-84a9-a03b1d4dda34',
'sequence': 0,
'info': {},
'required': True,
'title': t,
'content': {"question": {"text": t, "type": "single_file"}}
}
task_answers = []
for i in range(1, 41):
task_answers.append(
{"type": "text", "placeholder": "{}".format(i), "choices": []})
task['content']['answers'] = task_answers
img_name = r['SheetNumber'] + '*a.jp2'
img = Utils.list_images(config.ROOT_DIR, img_name)
if img is not None:
r['path'] = './static/upload/' + img.replace(config.ROOT_DIR, '')
task['info'] = r
tasks.append(task)
print(len(tasks))
req, resp = app.op['create_tasks'](tasks=tasks)
task_save = client.request((req, resp)).data
print(task_save) | general/tasks_importer/wenker.py | import config
from utils import Utils
class Wenker:
    @staticmethod
    def create_transcribe_media(app, client):
        """Register an upload image as media for every task with a 'SheetNumber'.

        Fetches up to 20000 tasks and, for each task whose info carries a
        'SheetNumber', creates a raster-image media record pointing at
        './static/upload/<SheetNumber>a.jpg'.
        """
        req, resp = app.op['get_tasks'](limit=20000)
        tasks = client.request((req, resp)).data
        for t in tasks:
            # Template record; filled in only when a sheet number is present.
            media = {
                'source_id': '',
                'path': '',
                'name': '',
                'f_type': 'raster-image'
            }
            if('SheetNumber' in t.info):
                i = t.info['SheetNumber'] + 'a.jpg'
                media['source_id'] = t.id
                media['path'] = './static/upload/' + i
                media['name'] = i
                req, resp = app.op['create_media'](media=media)
                m_save = client.request((req, resp)).data
                print(t.id, m_save)
            else:
                continue
    @staticmethod
    def create_translate_tasks(app, client, rows):
        """Batch-create a free-text translation task for each row's 'Sentence'."""
        t = "<TEXT>"
        tasks = []
        for r in rows:
            task = {
                'project_id': '507b3f89-aff1-4fa3-8f28-9c8399811539',
                'sequence': 0,
                'info': {},
                'required': True,
                'title': t,
                'content': {"question": {"text": r['Sentence'], "type": "text"}, "answers": [{"type": "text", "placeholder": "", "choices": []}]}
            }
            tasks.append(task)
        req, resp = app.op['create_tasks'](tasks=tasks)
        task_save = client.request((req, resp)).data
        print(task_save)
@staticmethod
def create_transcribe_tasks(app, client, rows):
tasks = []
t = "Bitte übertragen Sie jeden nummerierten Satz in das entsprechende Textfeld."
for r in rows:
task = {
'project_id': 'e4b5ebc5-47a2-430b-84a9-a03b1d4dda34',
'sequence': 0,
'info': {},
'required': True,
'title': t,
'content': {"question": {"text": t, "type": "single_file"}}
}
task_answers = []
for i in range(1, 41):
task_answers.append(
{"type": "text", "placeholder": "{}".format(i), "choices": []})
task['content']['answers'] = task_answers
img_name = r['SheetNumber'] + '*a.jp2'
img = Utils.list_images(config.ROOT_DIR, img_name)
if img is not None:
r['path'] = './static/upload/' + img.replace(config.ROOT_DIR, '')
task['info'] = r
tasks.append(task)
print(len(tasks))
req, resp = app.op['create_tasks'](tasks=tasks)
task_save = client.request((req, resp)).data
print(task_save) | 0.128484 | 0.148973 |
import os
from telethon import events
from telethon.tl import functions
from uniborg.util import admin_cmd
@borg.on(admin_cmd("pbio (.*)")) # pylint:disable=E0602
async def _(event):
    """``.pbio <text>`` — set the logged-in account's profile bio to <text>."""
    if event.fwd_from:
        return
    bio = event.pattern_match.group(1)
    try:
        await borg(functions.account.UpdateProfileRequest( # pylint:disable=E0602
            about=bio
        ))
        await event.edit("Succesfully changed my profile bio")
    except Exception as e: # pylint:disable=C0103,W0703
        # Show the API error in-chat instead of crashing the handler.
        await event.edit(str(e))
@borg.on(admin_cmd("pname ((.|\n)*)")) # pylint:disable=E0602,W0703
async def _(event):
    """``.pname <first>[ backslash-n <last>]`` — update the display name.

    The argument is split on a literal two-character backslash-n sequence
    typed by the user (not an actual newline).
    """
    if event.fwd_from:
        return
    names = event.pattern_match.group(1)
    first_name = names
    last_name = ""
    # "\\n" here is the literal characters backslash + n in the message text.
    if "\\n" in names:
        first_name, last_name = names.split("\\n", 1)
    try:
        await borg(functions.account.UpdateProfileRequest( # pylint:disable=E0602
            first_name=first_name,
            last_name=last_name
        ))
        await event.edit("My name was changed successfully")
    except Exception as e: # pylint:disable=C0103,W0703
        await event.edit(str(e))
@borg.on(admin_cmd("ppic")) # pylint:disable=E0602
async def _(event):
if event.fwd_from:
return
reply_message = await event.get_reply_message()
await event.edit("Downloading Profile Picture to my local ...")
if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY): # pylint:disable=E0602
os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY) # pylint:disable=E0602
photo = None
try:
photo = await borg.download_media( # pylint:disable=E0602
reply_message,
Config.TMP_DOWNLOAD_DIRECTORY # pylint:disable=E0602
)
except Exception as e: # pylint:disable=C0103,W0703
await event.edit(str(e))
else:
if photo:
await event.edit("now, Uploading to @Telegram ...")
file = await borg.upload_file(photo) # pylint:disable=E0602
try:
await borg(functions.photos.UploadProfilePhotoRequest( # pylint:disable=E0602
file
))
except Exception as e: # pylint:disable=C0103,W0703
await event.edit(str(e))
else:
await event.edit("My profile picture was succesfully changed")
try:
os.remove(photo)
except Exception as e: # pylint:disable=C0103,W0703
logger.warn(str(e)) # pylint:disable=E0602 | stdplugins/account_profile.py |
import os
from telethon import events
from telethon.tl import functions
from uniborg.util import admin_cmd
@borg.on(admin_cmd("pbio (.*)")) # pylint:disable=E0602
async def _(event):
    """Set the account's "about" text from the command argument."""
    if event.fwd_from:
        return
    bio = event.pattern_match.group(1)
    try:
        await borg(functions.account.UpdateProfileRequest( # pylint:disable=E0602
            about=bio
        ))
        await event.edit("Succesfully changed my profile bio")
    except Exception as e: # pylint:disable=C0103,W0703
        await event.edit(str(e))
@borg.on(admin_cmd("pname ((.|\n)*)")) # pylint:disable=E0602,W0703
async def _(event):
    """Update first/last display name; a literal backslash-n splits the two."""
    if event.fwd_from:
        return
    names = event.pattern_match.group(1)
    first_name = names
    last_name = ""
    # Split on the literal characters backslash + n, not a real newline.
    if "\\n" in names:
        first_name, last_name = names.split("\\n", 1)
    try:
        await borg(functions.account.UpdateProfileRequest( # pylint:disable=E0602
            first_name=first_name,
            last_name=last_name
        ))
        await event.edit("My name was changed successfully")
    except Exception as e: # pylint:disable=C0103,W0703
        await event.edit(str(e))
@borg.on(admin_cmd("ppic")) # pylint:disable=E0602
async def _(event):
if event.fwd_from:
return
reply_message = await event.get_reply_message()
await event.edit("Downloading Profile Picture to my local ...")
if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY): # pylint:disable=E0602
os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY) # pylint:disable=E0602
photo = None
try:
photo = await borg.download_media( # pylint:disable=E0602
reply_message,
Config.TMP_DOWNLOAD_DIRECTORY # pylint:disable=E0602
)
except Exception as e: # pylint:disable=C0103,W0703
await event.edit(str(e))
else:
if photo:
await event.edit("now, Uploading to @Telegram ...")
file = await borg.upload_file(photo) # pylint:disable=E0602
try:
await borg(functions.photos.UploadProfilePhotoRequest( # pylint:disable=E0602
file
))
except Exception as e: # pylint:disable=C0103,W0703
await event.edit(str(e))
else:
await event.edit("My profile picture was succesfully changed")
try:
os.remove(photo)
except Exception as e: # pylint:disable=C0103,W0703
logger.warn(str(e)) # pylint:disable=E0602 | 0.207375 | 0.076064 |
import logging
import random
import uuid
import time
import zmq
from include.functions_pb2 import *
from include.serializer import *
from include import server_utils as sutils
from include.shared import *
from . import utils
sys_random = random.SystemRandom()
def call_function(func_call_socket, pusher_cache, executors, key_ip_map,
                  executor_status_map, running_counts, backoff):
    """Dispatch one FunctionCall to an executor thread and ack the caller.

    Picks an executor with _pick_node (preferring nodes that already cache the
    call's referenced keys), forwards the serialized call, marks the thread
    busy, and replies with the response id.
    """
    call = FunctionCall()
    call.ParseFromString(func_call_socket.recv())
    # Assign a response id if the caller did not supply one.
    if not call.HasField('resp_id'):
        call.resp_id = str(uuid.uuid4())
    # Deserialize args and keep only KVS references; they drive locality-aware
    # placement in _pick_node.
    refs = list(filter(lambda arg: type(arg) == FluentReference,
                map(lambda arg: get_serializer(arg.type).load(arg.body),
                    call.args)))
    ip, tid = _pick_node(executors, key_ip_map, refs, running_counts, backoff)
    sckt = pusher_cache.get(utils._get_exec_address(ip, tid))
    sckt.send(call.SerializeToString())
    # Remove the chosen thread from the available set until it reports back.
    executors.discard((ip, tid))
    executor_status_map[(ip, tid)] = time.time()
    r = GenericResponse()
    r.success = True
    r.response_id = call.resp_id
    func_call_socket.send(r.SerializeToString())
def call_dag(call, pusher_cache, dags, func_locations, key_ip_map,
             running_counts, backoff):
    """Build and dispatch a DagSchedule for a DAG call.

    Assigns each function in the DAG to an executor location, ships the
    schedule to every location's queue, then fires synthetic BEGIN triggers at
    the DAG's source functions.

    Returns:
        The generated schedule id (str).
    """
    dag, sources = dags[call.name]
    schedule = DagSchedule()
    schedule.id = str(uuid.uuid4())
    schedule.dag.CopyFrom(dag)
    schedule.consistency = NORMAL
    if call.HasField('response_address'):
        schedule.response_address = call.response_address
    for fname in dag.functions:
        locations = func_locations[fname]
        args = call.function_args[fname].args
        # Locality-aware placement: prefer nodes caching the referenced keys.
        refs = list(filter(lambda arg: type(arg) == FluentReference,
                    map(lambda arg: get_serializer(arg.type).load(arg.body),
                        args)))
        loc = _pick_node(locations, key_ip_map, refs, running_counts, backoff)
        schedule.locations[fname] = loc[0] + ':' + str(loc[1])
        # copy over arguments into the dag schedule
        arg_list = schedule.arguments[fname]
        arg_list.args.extend(args)
    for func in schedule.locations:
        loc = schedule.locations[func].split(':')
        ip = utils._get_queue_address(loc[0], loc[1])
        schedule.target_function = func
        triggers = sutils._get_dag_predecessors(dag, func)
        # Functions with no upstream dependencies are triggered by BEGIN.
        if len(triggers) == 0:
            triggers.append('BEGIN')
        schedule.ClearField('triggers')
        schedule.triggers.extend(triggers)
        sckt = pusher_cache.get(ip)
        sckt.send(schedule.SerializeToString())
    for source in sources:
        # Kick off the DAG by firing BEGIN at each source function.
        trigger = DagTrigger()
        trigger.id = schedule.id
        trigger.source = 'BEGIN'
        trigger.target_function = source
        ip = sutils._get_dag_trigger_address(schedule.locations[source])
        sckt = pusher_cache.get(ip)
        sckt.send(trigger.SerializeToString())
    return schedule.id
def _pick_node(valid_executors, key_ip_map, refs, running_counts, backoff):
    """Choose an (ip, tid) executor for a request.

    Selection:
      1. Start from all valid executors; drop backed-off threads and threads
         with more than 1000 recorded recent dispatches, never emptying the
         candidate pool below one entry.
      2. Among survivors, favor the IP that caches the most referenced keys,
         picking a random thread on that IP.
      3. With 20% probability — or when no referenced key is cached anywhere —
         fall back to a uniformly random candidate to avoid hot-spotting.

    Side effect: records the dispatch timestamp in running_counts.

    Args:
        valid_executors: iterable of (ip, tid) candidate pairs.
        key_ip_map: map from KVS key to the list of IPs caching that key.
        refs: KVS references from the call's arguments (objects with a .key).
        running_counts: map from executor to a set of recent dispatch times.
        backoff: executors that have asked not to receive new work.

    Returns:
        The selected (ip, tid) tuple.
    """
    executors = set(valid_executors)

    # Drop executors that requested backoff, but never empty the pool.
    for executor in backoff:
        if len(executors) > 1:
            executors.discard(executor)

    # Drop overloaded executors; shuffle first so the same executor is not
    # always the one spared by the len() > 1 guard.
    # FIX: the original ran this whole pruning pass (backoff + overload) twice
    # back to back — verbatim copy-paste duplication; once is sufficient.
    keys = list(running_counts.keys())
    sys_random.shuffle(keys)
    for key in keys:
        if len(running_counts[key]) > 1000 and len(executors) > 1:
            executors.discard(key)

    executor_ips = [e[0] for e in executors]

    # Count, per IP, how many of the referenced keys it already caches.
    arg_map = {}
    for ref in refs:
        if ref.key in key_ip_map:
            ips = key_ip_map[ref.key]
            for ip in ips:
                # only choose this cached node if its a valid executor for our
                # purposes
                if ip in executor_ips:
                    if ip not in arg_map:
                        arg_map[ip] = 0
                    arg_map[ip] += 1

    max_ip = None
    max_count = 0
    for ip in arg_map.keys():
        if arg_map[ip] > max_count:
            max_count = arg_map[ip]
            max_ip = ip

    # pick a random thread from our potential executors that is on that IP
    # address; we also route some requests to a random valid node
    if max_ip:
        candidates = list(filter(lambda e: e[0] == max_ip, executors))
        max_ip = sys_random.choice(candidates)

    # This only happens if max_ip is never set, and that means that
    # there were no machines with any of the keys cached. In this case,
    # we pick a random IP that was in the set of IPs that was running
    # most recently.
    if not max_ip or sys_random.random() < 0.20:
        max_ip = sys_random.sample(executors, 1)[0]

    # NOTE(review): these per-executor timestamp sets grow without bound —
    # nothing prunes them; confirm whether they are cleared elsewhere.
    if max_ip not in running_counts:
        running_counts[max_ip] = set()
    running_counts[max_ip].add(time.time())

    return max_ip
import logging
import random
import uuid
import time
import zmq
from include.functions_pb2 import *
from include.serializer import *
from include import server_utils as sutils
from include.shared import *
from . import utils
sys_random = random.SystemRandom()
def call_function(func_call_socket, pusher_cache, executors, key_ip_map,
                  executor_status_map, running_counts, backoff):
    """Parse a FunctionCall off the socket, place it on an executor thread
    chosen by _pick_node, mark that thread busy, and ack with the response id.
    """
    call = FunctionCall()
    call.ParseFromString(func_call_socket.recv())
    if not call.HasField('resp_id'):
        call.resp_id = str(uuid.uuid4())
    # Extract only the KVS references from the arguments for placement.
    refs = list(filter(lambda arg: type(arg) == FluentReference,
                map(lambda arg: get_serializer(arg.type).load(arg.body),
                    call.args)))
    ip, tid = _pick_node(executors, key_ip_map, refs, running_counts, backoff)
    sckt = pusher_cache.get(utils._get_exec_address(ip, tid))
    sckt.send(call.SerializeToString())
    executors.discard((ip, tid))
    executor_status_map[(ip, tid)] = time.time()
    r = GenericResponse()
    r.success = True
    r.response_id = call.resp_id
    func_call_socket.send(r.SerializeToString())
def call_dag(call, pusher_cache, dags, func_locations, key_ip_map,
             running_counts, backoff):
    """Place every DAG function on an executor, ship the schedule to each
    location, fire BEGIN triggers at the sources, and return the schedule id.
    """
    dag, sources = dags[call.name]
    schedule = DagSchedule()
    schedule.id = str(uuid.uuid4())
    schedule.dag.CopyFrom(dag)
    schedule.consistency = NORMAL
    if call.HasField('response_address'):
        schedule.response_address = call.response_address
    for fname in dag.functions:
        locations = func_locations[fname]
        args = call.function_args[fname].args
        refs = list(filter(lambda arg: type(arg) == FluentReference,
                    map(lambda arg: get_serializer(arg.type).load(arg.body),
                        args)))
        loc = _pick_node(locations, key_ip_map, refs, running_counts, backoff)
        schedule.locations[fname] = loc[0] + ':' + str(loc[1])
        # copy over arguments into the dag schedule
        arg_list = schedule.arguments[fname]
        arg_list.args.extend(args)
    for func in schedule.locations:
        loc = schedule.locations[func].split(':')
        ip = utils._get_queue_address(loc[0], loc[1])
        schedule.target_function = func
        triggers = sutils._get_dag_predecessors(dag, func)
        if len(triggers) == 0:
            triggers.append('BEGIN')
        schedule.ClearField('triggers')
        schedule.triggers.extend(triggers)
        sckt = pusher_cache.get(ip)
        sckt.send(schedule.SerializeToString())
    for source in sources:
        trigger = DagTrigger()
        trigger.id = schedule.id
        trigger.source = 'BEGIN'
        trigger.target_function = source
        ip = sutils._get_dag_trigger_address(schedule.locations[source])
        sckt = pusher_cache.get(ip)
        sckt.send(trigger.SerializeToString())
    return schedule.id
def _pick_node(valid_executors, key_ip_map, refs, running_counts, backoff):
    """Choose an (ip, tid) executor, preferring nodes caching the referenced
    keys, after pruning backed-off and overloaded threads.

    Records the dispatch timestamp in running_counts and returns the chosen
    (ip, tid) tuple. With 20% probability (or when nothing relevant is cached)
    a uniformly random candidate is used instead, to avoid hot-spotting.
    """
    executors = set(valid_executors)

    # Drop executors that requested backoff, but never empty the pool.
    for executor in backoff:
        if len(executors) > 1:
            executors.discard(executor)

    # Drop executors with more than 1000 recorded recent dispatches; shuffle
    # so the same executor is not always the one spared by the guard.
    # FIX: the original duplicated this entire pruning pass verbatim
    # (copy-paste); running it once is sufficient.
    keys = list(running_counts.keys())
    sys_random.shuffle(keys)
    for key in keys:
        if len(running_counts[key]) > 1000 and len(executors) > 1:
            executors.discard(key)

    executor_ips = [e[0] for e in executors]

    # Tally, per IP, how many of the referenced keys it already caches.
    arg_map = {}
    for ref in refs:
        if ref.key in key_ip_map:
            ips = key_ip_map[ref.key]
            for ip in ips:
                # only choose this cached node if its a valid executor for our
                # purposes
                if ip in executor_ips:
                    if ip not in arg_map:
                        arg_map[ip] = 0
                    arg_map[ip] += 1

    max_ip = None
    max_count = 0
    for ip in arg_map.keys():
        if arg_map[ip] > max_count:
            max_count = arg_map[ip]
            max_ip = ip

    # pick a random thread from our potential executors that is on that IP
    # address; we also route some requests to a random valid node
    if max_ip:
        candidates = list(filter(lambda e: e[0] == max_ip, executors))
        max_ip = sys_random.choice(candidates)

    # This only happens if max_ip is never set, and that means that
    # there were no machines with any of the keys cached. In this case,
    # we pick a random IP that was in the set of IPs that was running
    # most recently.
    if not max_ip or sys_random.random() < 0.20:
        max_ip = sys_random.sample(executors, 1)[0]

    if max_ip not in running_counts:
        running_counts[max_ip] = set()
    running_counts[max_ip].add(time.time())

    return max_ip
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import argparse
import os
from abc import ABC, abstractmethod
from datetime import datetime
from typing import Any, Dict, List
import pandas as pd
import parlai.utils.logging as logging
# Defining the class only if Mephisto is installed, since it relies on Mephisto
try:
from mephisto.abstractions.databases.local_database import LocalMephistoDB
from mephisto.data_model.unit import Unit
from mephisto.tools.data_browser import DataBrowser as MephistoDataBrowser
except ImportError:
pass
class AbstractResultsCompiler(ABC):
    """
    Abstract class for compiling results of crowdsourcing runs.

    Currently only provides utility attributes/methods for analyzing turn
    annotations. Subclasses implement compile_results() to produce the final
    dataframe; compile_and_save_results() writes it to disk in the format
    chosen by --results-format.
    """

    @classmethod
    def setup_args(cls):
        """Return an argument parser for output folder and results format."""
        parser = argparse.ArgumentParser('Compile crowdsourcing results')
        parser.add_argument(
            '--output-folder', type=str, help='Folder to save output files to'
        )
        parser.add_argument(
            '--results-format',
            type=str,
            choices=['csv', 'json'],
            default='csv',
            help='Output format for results data',
        )
        return parser

    def __init__(self, opt: Dict[str, Any]):
        self.output_folder = opt.get('output_folder')
        self.results_format = opt['results_format']

    def get_results_path_base(self) -> str:
        """
        Return the save path for the results file, not including the file extension.

        The filename embeds the concrete subclass name and a timestamp.
        """
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f'{self.__class__.__name__}__{timestamp}'
        return os.path.join(self.output_folder, filename)

    @abstractmethod
    def compile_results(self) -> pd.DataFrame:
        """
        Method for returning the final results dataframe.

        Each row of the dataframe consists of one utterance of one conversation.
        """

    def compile_and_save_results(self):
        """
        Compile results and save them.

        Results will be saved in the format given by --results-format.
        """
        result_df = self.compile_results()
        results_path = f'{self.get_results_path_base()}.{self.results_format}'
        os.makedirs(self.output_folder, exist_ok=True)
        if self.results_format == 'csv':
            result_df.to_csv(results_path, index=False)
        elif self.results_format == 'json':
            # Reset the index first so every row carries a unique index value.
            result_df.reset_index().to_json(results_path)
        else:
            raise ValueError(
                f'Results save format of "{self.results_format}" currently unsupported!'
            )
        print(f'Wrote results file to {results_path}.')
class AbstractTurnAnnotationResultsCompiler(AbstractResultsCompiler):
    """
    Results compiler subclass to provide utility code for turn annotations.

    Currently incompatible with Mephisto's DataBrowser: all subclasses load results
    files directly from disk.
    TODO: make all subclasses compatible with DataBrowser
    """

    @classmethod
    def setup_args(cls):
        parser = super().setup_args()
        parser.add_argument(
            '--results-folders', type=str, help='Comma-separated list of result folders'
        )
        parser.add_argument(
            '--problem-buckets',
            type=str,
            help='Comma-separated list of buckets used for annotation',
            default='bucket_0,bucket_1,bucket_2,bucket_3,bucket_4,none_all_good',
        )
        return parser

    def __init__(self, opt: Dict[str, Any]):
        super().__init__(opt)
        # Handle inputs. Use a truthiness check instead of `'results_folders' in
        # opt`: argparse always inserts the key with a None value when the flag
        # is omitted, and the previous membership test then crashed on
        # None.split(',').
        if opt.get('results_folders'):
            self.results_folders = opt['results_folders'].split(',')
        else:
            self.results_folders = None
        self.problem_buckets = opt['problem_buckets'].split(',')
        # Validate problem buckets
        if 'none_all_good' not in self.problem_buckets:
            # The code relies on a catchall "none" category if the user selects no other
            # annotation bucket
            raise ValueError(
                'There must be a "none_all_good" category in self.problem_buckets!'
            )
class AbstractDataBrowserResultsCompiler(AbstractResultsCompiler):
    """
    Provides interface for using Mephisto's DataBrowser, DB, and their methods.

    Uses Mephisto's DataBrowser to retrieve the work units and their data.
    """

    @classmethod
    def setup_args(cls):
        parser = super().setup_args()
        parser.add_argument(
            '--task-name', type=str, help='Name of the Mephisto task to open'
        )
        return parser

    def __init__(self, opt: Dict[str, Any]):
        super().__init__(opt)
        self.task_name = opt["task_name"]
        # Lazily-constructed singletons; see the getters below.
        self._mephisto_db = None
        self._mephisto_data_browser = None

    def get_mephisto_data_browser(self) -> MephistoDataBrowser:
        """Return (and cache) a DataBrowser backed by the local Mephisto DB."""
        if not self._mephisto_data_browser:
            self._mephisto_data_browser = MephistoDataBrowser(db=self.get_mephisto_db())
        return self._mephisto_data_browser

    def get_mephisto_db(self) -> LocalMephistoDB:
        """Return (and cache) the local Mephisto database handle."""
        if not self._mephisto_db:
            self._mephisto_db = LocalMephistoDB()
        return self._mephisto_db

    def get_worker_name(self, worker_id: str) -> str:
        """
        Gets the global (AWS) id of a worker from their Mephisto worker_id.
        """
        return self.get_mephisto_db().get_worker(worker_id)["worker_name"]

    def get_task_units(self, task_name: str) -> List[Unit]:
        """
        Retrieves the list of work units from the Mephisto task.
        """
        return self.get_mephisto_data_browser().get_units_for_task_name(task_name)

    def get_units_data(self, task_units: List[Unit]) -> List[dict]:
        """
        Retrieves task data for a list of Mephisto task units.
        """
        browser = self.get_mephisto_data_browser()
        task_data = []
        for unit in task_units:
            try:
                task_data.append(browser.get_data_from_unit(unit))
            except IndexError:
                # Units abandoned before any message was sent have no data.
                logging.warning(
                    f"Skipping unit {unit.db_id}. No message found for this unit."
                )
        return task_data
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import argparse
import os
from abc import ABC, abstractmethod
from datetime import datetime
from typing import Any, Dict, List
import pandas as pd
import parlai.utils.logging as logging
# Defining the class only if Mephisto is installed, since it relies on Mephisto
try:
from mephisto.abstractions.databases.local_database import LocalMephistoDB
from mephisto.data_model.unit import Unit
from mephisto.tools.data_browser import DataBrowser as MephistoDataBrowser
except ImportError:
pass
class AbstractResultsCompiler(ABC):
    """
    Abstract class for compiling results of crowdsourcing runs.
    Currently only provides utility attributes/methods for analyzing turn annotations.
    """
    @classmethod
    def setup_args(cls):
        """Return an ArgumentParser with the options shared by all compilers."""
        parser = argparse.ArgumentParser('Compile crowdsourcing results')
        parser.add_argument(
            '--output-folder', type=str, help='Folder to save output files to'
        )
        parser.add_argument(
            '--results-format',
            type=str,
            choices=['csv', 'json'],
            default='csv',
            help='Output format for results data',
        )
        return parser
    def __init__(self, opt: Dict[str, Any]):
        # output_folder may be absent from opt (hence .get); results_format is required.
        self.output_folder = opt.get('output_folder')
        self.results_format = opt['results_format']
    def get_results_path_base(self) -> str:
        """
        Return the save path for the results file, not including the file extension.
        """
        # Timestamped filename so repeated runs never overwrite each other.
        now = datetime.now()
        return os.path.join(
            self.output_folder,
            f'{self.__class__.__name__}__{now.strftime("%Y%m%d_%H%M%S")}',
        )
    @abstractmethod
    def compile_results(self) -> pd.DataFrame:
        """
        Method for returning the final results dataframe.
        Each row of the dataframe consists of one utterance of one conversation.
        """
    def compile_and_save_results(self):
        """
        Compile results and save them.
        Results will be saved in the format given by --results-format.
        """
        result_df = self.compile_results()
        results_path_base = self.get_results_path_base()
        results_path = f'{results_path_base}.{self.results_format}'
        # Create the output folder on demand so callers don't have to.
        os.makedirs(self.output_folder, exist_ok=True)
        if self.results_format == 'csv':
            result_df.to_csv(results_path, index=False)
        elif self.results_format == 'json':
            result_df.reset_index().to_json(results_path)
            # Reset the index to make each row have a unique index value
        else:
            raise ValueError(
                f'Results save format of "{self.results_format}" currently unsupported!'
            )
        print(f'Wrote results file to {results_path}.')
class AbstractTurnAnnotationResultsCompiler(AbstractResultsCompiler):
    """
    Results compiler subclass to provide utility code for turn annotations.
    Currently incompatible with Mephisto's DataBrowser: all subclasses load results
    files directly from disk.
    TODO: make all subclasses compatible with DataBrowser
    """
    @classmethod
    def setup_args(cls):
        """Extend the base parser with turn-annotation options."""
        parser = super().setup_args()
        parser.add_argument(
            '--results-folders', type=str, help='Comma-separated list of result folders'
        )
        parser.add_argument(
            '--problem-buckets',
            type=str,
            help='Comma-separated list of buckets used for annotation',
            default='bucket_0,bucket_1,bucket_2,bucket_3,bucket_4,none_all_good',
        )
        return parser
    def __init__(self, opt: Dict[str, Any]):
        super().__init__(opt)
        # Handle inputs
        # NOTE(review): when opt comes from argparse, 'results_folders' is present
        # but None if the flag was omitted, so this membership test passes and
        # None.split(',') raises. `if opt.get('results_folders'):` looks safer —
        # confirm against callers.
        if 'results_folders' in opt:
            self.results_folders = opt['results_folders'].split(',')
        else:
            self.results_folders = None
        self.problem_buckets = opt['problem_buckets'].split(',')
        # Validate problem buckets
        if 'none_all_good' not in self.problem_buckets:
            # The code relies on a catchall "none" category if the user selects no other
            # annotation bucket
            raise ValueError(
                'There must be a "none_all_good" category in self.problem_buckets!'
            )
class AbstractDataBrowserResultsCompiler(AbstractResultsCompiler):
    """
    Provides interface for using Mephisto's DataBrowser, DB, and their methods.

    Uses Mephisto's DataBrowser to retrieve the work units and their data.
    """

    @classmethod
    def setup_args(cls):
        parser = super().setup_args()
        parser.add_argument(
            '--task-name', type=str, help='Name of the Mephisto task to open'
        )
        return parser

    def __init__(self, opt: Dict[str, Any]):
        super().__init__(opt)
        self.task_name = opt["task_name"]
        # Lazily-constructed singletons; see the getters below.
        self._mephisto_db = None
        self._mephisto_data_browser = None

    def get_mephisto_data_browser(self) -> MephistoDataBrowser:
        """Return (and cache) a DataBrowser backed by the local Mephisto DB."""
        if not self._mephisto_data_browser:
            self._mephisto_data_browser = MephistoDataBrowser(db=self.get_mephisto_db())
        return self._mephisto_data_browser

    def get_mephisto_db(self) -> LocalMephistoDB:
        """Return (and cache) the local Mephisto database handle."""
        if not self._mephisto_db:
            self._mephisto_db = LocalMephistoDB()
        return self._mephisto_db

    def get_worker_name(self, worker_id: str) -> str:
        """
        Gets the global (AWS) id of a worker from their Mephisto worker_id.
        """
        return self.get_mephisto_db().get_worker(worker_id)["worker_name"]

    def get_task_units(self, task_name: str) -> List[Unit]:
        """
        Retrieves the list of work units from the Mephisto task.
        """
        return self.get_mephisto_data_browser().get_units_for_task_name(task_name)

    def get_units_data(self, task_units: List[Unit]) -> List[dict]:
        """
        Retrieves task data for a list of Mephisto task units.
        """
        browser = self.get_mephisto_data_browser()
        task_data = []
        for unit in task_units:
            try:
                task_data.append(browser.get_data_from_unit(unit))
            except IndexError:
                # Units abandoned before any message was sent have no data.
                logging.warning(
                    f"Skipping unit {unit.db_id}. No message found for this unit."
                )
        return task_data
from os import listdir, remove
from os.path import isdir, isfile
from pkg_resources import resource_filename, Requirement
from shutil import rmtree
from tempfile import TemporaryFile, TemporaryDirectory
from unittest import TestCase
import json
import search_google.api
class resultsTest(TestCase):
    """Integration tests for search_google.api.results (hits the live CSE API)."""

    def setUp(self):
        # Read the developer key and engine id from the packaged config file.
        file_path = resource_filename(Requirement.parse('search_google'), 'search_google/config.json')
        with open(file_path, 'r') as in_file:
            defaults = json.load(in_file)
        buildargs = {
            'serviceName': 'customsearch',
            'version': 'v1',
            'developerKey': defaults['build_developerKey']
        }
        cseargs = {
            'q': 'google',
            'num': 1,
            'fileType': 'png',
            'cx': defaults['cx']
        }
        self.results = search_google.api.results(buildargs, cseargs)
        # Reserve a temp-file path and a temp-dir path; neither exists afterwards.
        tempfile = TemporaryFile()
        self.tempfile = str(tempfile.name)
        tempfile.close()
        self.tempdir = str(TemporaryDirectory().name)

    def test_preview(self):
        # preview() prints to stdout and returns nothing.
        self.assertTrue(None == self.results.preview())

    def test_get_values(self):
        self.assertTrue(isinstance(self.results.get_values('items', 'link'), list))

    def test_links(self):
        # The links property mirrors get_values('items', 'link').
        self.assertTrue(self.results.get_values('items', 'link') == self.results.links)

    def test_save_links(self):
        # Truncate the file, save the links, then count one link per line.
        open(self.tempfile, 'w').close()
        self.results.save_links(self.tempfile)
        with open(self.tempfile) as f:
            self.assertTrue(len(f.readlines()) == 1)

    def test_save_metadata(self):
        open(self.tempfile, 'w').close()
        self.results.save_metadata(self.tempfile)
        with open(self.tempfile, 'r') as f:
            self.assertTrue(json.load(f) == self.results.metadata)

    def test_download_links(self):
        self.results.download_links(self.tempdir)
        file_count = len(listdir(self.tempdir))
        rmtree(self.tempdir)
        self.assertTrue(file_count == 1)

    def tearDown(self):
        # Remove whatever temp artifacts the individual test left behind.
        if isfile(self.tempfile):
            remove(self.tempfile)
        if isdir(self.tempdir):
            rmtree(self.tempdir)
from os import listdir, remove
from os.path import isdir, isfile
from pkg_resources import resource_filename, Requirement
from shutil import rmtree
from tempfile import TemporaryFile, TemporaryDirectory
from unittest import TestCase
import json
import search_google.api
class resultsTest(TestCase):
    """Integration tests for search_google.api.results (hits the live CSE API)."""

    def setUp(self):
        # Read the developer key and engine id from the packaged config file.
        file_path = resource_filename(Requirement.parse('search_google'), 'search_google/config.json')
        with open(file_path, 'r') as in_file:
            defaults = json.load(in_file)
        buildargs = {
            'serviceName': 'customsearch',
            'version': 'v1',
            'developerKey': defaults['build_developerKey']
        }
        cseargs = {
            'q': 'google',
            'num': 1,
            'fileType': 'png',
            'cx': defaults['cx']
        }
        self.results = search_google.api.results(buildargs, cseargs)
        # Reserve a temp-file path and a temp-dir path; neither exists afterwards.
        tempfile = TemporaryFile()
        self.tempfile = str(tempfile.name)
        tempfile.close()
        self.tempdir = str(TemporaryDirectory().name)

    def test_preview(self):
        # preview() prints to stdout and returns nothing.
        self.assertTrue(None == self.results.preview())

    def test_get_values(self):
        self.assertTrue(isinstance(self.results.get_values('items', 'link'), list))

    def test_links(self):
        # The links property mirrors get_values('items', 'link').
        self.assertTrue(self.results.get_values('items', 'link') == self.results.links)

    def test_save_links(self):
        # Truncate the file, save the links, then count one link per line.
        open(self.tempfile, 'w').close()
        self.results.save_links(self.tempfile)
        with open(self.tempfile) as f:
            self.assertTrue(len(f.readlines()) == 1)

    def test_save_metadata(self):
        open(self.tempfile, 'w').close()
        self.results.save_metadata(self.tempfile)
        with open(self.tempfile, 'r') as f:
            self.assertTrue(json.load(f) == self.results.metadata)

    def test_download_links(self):
        self.results.download_links(self.tempdir)
        file_count = len(listdir(self.tempdir))
        rmtree(self.tempdir)
        self.assertTrue(file_count == 1)

    def tearDown(self):
        # Remove whatever temp artifacts the individual test left behind.
        if isfile(self.tempfile):
            remove(self.tempfile)
        if isdir(self.tempdir):
            rmtree(self.tempdir)
import os
import sys
import urllib.request
from io import BytesIO
from urllib.request import urlopen

import telegram
import youtube_dl
from flask import Flask, request, send_file

from fsm import TocMachine
# NOTE(security): the bot token was hard-coded in source. Prefer the
# TELEGRAM_API_TOKEN environment variable; the literal is kept only as a
# backward-compatible fallback for existing deployments and should be rotated.
API_TOKEN = os.environ.get(
    'TELEGRAM_API_TOKEN', '356618024:AAElDMxCEDCCodg27fr-ewUtoNFmmruPi3s'
)
# Public HTTPS endpoint Telegram will POST updates to (overridable via env).
WEBHOOK_URL = os.environ.get('WEBHOOK_URL', 'https://07567b0d.ngrok.io/hook')
app = Flask(__name__)
bot = telegram.Bot(token=API_TOKEN)
def _advance(source, dest, condition):
    """Build a standard 'advance' transition guarded by an is_going_to_* condition."""
    return {
        'trigger': 'advance',
        'source': source,
        'dest': dest,
        'conditions': 'is_going_to_state' + condition,
    }


# (source, dest, condition-suffix) for every transition, in the same order as the
# original hand-written list of dict literals (order matters: the transitions
# library evaluates conditions in insertion order). A condition of None marks the
# single unguarded 'go_back' transition out of the terminal state.
# Legend: 1->food, 2->music, 3->chat, 9->yes, 19->no, 102->back;
# 5->breakfast, 25->lunch, 45->dinner, 65->late-night supper.
_TRANSITION_SPEC = [
    # entry menu
    ('user', 'state1000', '1000'),
    ('state1000', 'state1', '1'),
    ('state1000', 'state2', '2'),
    ('state1000', 'state3', '3'),
    # food: choose a meal (each meal can go back to the food menu)
    ('user', 'state1', '1'),
    ('state1', 'state5', '5'),
    ('state5', 'state1', '102'),
    ('state1', 'state25', '25'),
    ('state25', 'state1', '102'),
    ('state1', 'state45', '45'),
    ('state45', 'state1', '102'),
    ('state1', 'state65', '65'),
    ('state65', 'state1', '102'),
    # per-meal menu: yes -> menu state, no -> stay, back, back-to-food
    ('state5', 'state9', '9'),
    ('state5', 'state5', '19'),
    ('state9', 'state5', '102'),
    ('state9', 'state1', '1'),
    ('state25', 'state29', '9'),
    ('state25', 'state25', '19'),
    ('state29', 'state25', '102'),
    ('state29', 'state1', '1'),
    ('state45', 'state49', '9'),
    ('state45', 'state45', '19'),
    ('state49', 'state45', '102'),
    ('state49', 'state1', '1'),
    ('state65', 'state69', '9'),
    ('state65', 'state65', '19'),
    ('state69', 'state65', '102'),
    ('state69', 'state1', '1'),
    # per-meal information (and back)
    ('state9', 'state13', '9'),
    ('state13', 'state9', '102'),
    ('state29', 'state23', '9'),
    ('state23', 'state29', '102'),
    ('state49', 'state33', '9'),
    ('state33', 'state49', '102'),
    ('state69', 'state43', '9'),
    ('state43', 'state69', '102'),
    # information -> back to the food menu
    ('state13', 'state1', '1'),
    ('state23', 'state1', '1'),
    ('state33', 'state1', '1'),
    ('state43', 'state1', '1'),
    # information -> finish
    ('state13', 'state100', '19'),
    ('state23', 'state100', '19'),
    ('state33', 'state100', '19'),
    ('state43', 'state100', '19'),
    # finish -> back to start (the one unguarded 'go_back' trigger)
    ('state100', 'user', None),
    # music
    ('user', 'state2', '2'),
    ('state2', 'state6', '9'),
    ('state2', 'user', '19'),
    ('state6', 'state10', '9'),
    ('state6', 'state6', '19'),
    ('state10', 'state6', '9'),
    ('state10', 'state100', '19'),
    # chat: homework / other / friend branches
    ('user', 'state3', '3'),
    ('state3', 'state7', '7'),
    ('state7', 'state11', '7'),
    ('state7', 'state171', '17'),
    ('state7', 'state271', '27'),
    ('state11', 'state15', '7'),
    ('state11', 'state172', '17'),
    ('state11', 'state272', '27'),
    ('state3', 'state17', '17'),
    ('state17', 'state15', '7'),
    ('state17', 'state171', '17'),
    ('state17', 'state271', '27'),
    ('state171', 'state15', '7'),
    ('state171', 'state172', '17'),
    ('state171', 'state272', '27'),
    ('state3', 'state27', '27'),
    ('state27', 'state11', '7'),
    ('state27', 'state171', '17'),
    ('state27', 'state271', '27'),
    ('state271', 'state15', '7'),
    ('state271', 'state172', '17'),
    ('state271', 'state272', '27'),
    # chat wrap-up
    ('state15', 'state77', '78'),
    ('state172', 'state77', '78'),
    ('state272', 'state77', '78'),
    ('state77', 'state78', '78'),
    ('state78', 'state100', '19'),
    ('state78', 'state3', '9'),
]

machine = TocMachine(
    states=[
        'user',
        'state1000',
        'state1',
        'state2',
        'state3',
        'state5',
        'state25',
        'state45',
        'state65',
        'state6',
        'state7',
        'state17',
        'state27',
        'state171',
        'state172',
        'state271',
        'state272',
        'state77',
        'state78',
        'state9',
        'state29',
        'state49',
        'state69',
        'state10',
        'state11',
        'state13',
        'state23',
        'state33',
        'state43',
        'state15',
        'state100',
    ],
    # Expand the compact table into the same transition dicts (same order) that
    # were previously ~600 lines of copy-pasted literals.
    transitions=[
        _advance(src, dest, cond)
        if cond is not None
        else {'trigger': 'go_back', 'source': src, 'dest': dest}
        for src, dest, cond in _TRANSITION_SPEC
    ],
    initial='user',
    auto_transitions=False,
    show_conditions=True,
)
def _set_webhook():
    """Register WEBHOOK_URL with Telegram; exit the process if registration fails."""
    if bot.set_webhook(WEBHOOK_URL):
        print('Your webhook URL has been set to "{}"'.format(WEBHOOK_URL))
    else:
        print('Webhook setup failed')
        sys.exit(1)
@app.route('/hook', methods=['POST'])
def webhook_handler():
    """Telegram webhook endpoint: parse the incoming update and feed it to the FSM."""
    update = telegram.Update.de_json(request.get_json(force=True), bot)
    # Every update is routed through the state machine's 'advance' trigger.
    machine.advance(update)
    return 'ok'
@app.route('/show-fsm', methods=['GET'])
def show_fsm():
    """Render the current state-machine graph as a PNG image."""
    image = BytesIO()
    machine.graph.draw(image, prog='dot', format='png')
    image.seek(0)
    return send_file(image, attachment_filename='fsm.png', mimetype='image/png')
def main():
    """Point Telegram at our webhook, serve, then dump the FSM diagram on shutdown."""
    _set_webhook()
    app.run()
    # Only reached after the Flask server stops.
    machine.get_graph().draw('my_state_diagram.png', prog='dot')


if __name__ == "__main__":
    main()
from io import BytesIO
import telegram
import youtube_dl
from flask import Flask, request, send_file
from fsm import TocMachine
import urllib.request
from urllib.request import urlopen
# NOTE(security/review): the bot token and webhook URL are hard-coded; consider
# loading them from the environment so the secret is not committed to source.
API_TOKEN = '356618024:AAElDMxCEDCCodg27fr-ewUtoNFmmruPi3s'
WEBHOOK_URL = 'https://07567b0d.ngrok.io/hook'
# Flask app that receives webhook callbacks, and the bot client used to reply.
app = Flask(__name__)
bot = telegram.Bot(token=API_TOKEN)
machine = TocMachine(
states=[
'user',
'state1000',
'state1',
'state2',
'state3',
'state5',
'state25',
'state45',
'state65',
'state6',
'state7',
'state17',
'state27',
'state171',
'state172',
'state271',
'state272',
'state77',
'state78',
'state9',
'state29',
'state49',
'state69',
'state10',
'state11',
'state13',
'state23',
'state33',
'state43',
'state15',
'state100'
],
transitions=[
{
'trigger': 'advance',
'source': 'user',
'dest': 'state1000',
'conditions': 'is_going_to_state1000'
},
{
'trigger': 'advance',
'source': 'state1000',
'dest': 'state1',
'conditions': 'is_going_to_state1'
},
{
'trigger': 'advance',
'source': 'state1000',
'dest': 'state2',
'conditions': 'is_going_to_state2'
},
{
'trigger': 'advance',
'source': 'state1000',
'dest': 'state3',
'conditions': 'is_going_to_state3'
},
#******food**********************************
# 1->food, 9->yes, 19->no, 102->back
# 5->breakfast, 25->lunch, 45->dinner, 65->lunch
{
'trigger': 'advance',
'source': 'user',
'dest': 'state1',
'conditions': 'is_going_to_state1'
},
#*********************************************
#breakfast
{
'trigger': 'advance',
'source': 'state1',
'dest': 'state5',
'conditions': 'is_going_to_state5'
},
#breakfast_back
{
'trigger': 'advance',
'source': 'state5',
'dest': 'state1',
'conditions': 'is_going_to_state102'
},
#lunch
{
'trigger': 'advance',
'source': 'state1',
'dest': 'state25',
'conditions': 'is_going_to_state25'
},
#lunch_back
{
'trigger': 'advance',
'source': 'state25',
'dest': 'state1',
'conditions': 'is_going_to_state102'
},
#dinner
{
'trigger': 'advance',
'source': 'state1',
'dest': 'state45',
'conditions': 'is_going_to_state45'
},
#dinner_back
{
'trigger': 'advance',
'source': 'state45',
'dest': 'state1',
'conditions': 'is_going_to_state102'
},
#late-night supper
{
'trigger': 'advance',
'source': 'state1',
'dest': 'state65',
'conditions': 'is_going_to_state65'
},
#late-night supper_back
{
'trigger': 'advance',
'source': 'state65',
'dest': 'state1',
'conditions': 'is_going_to_state102'
},
#*********************************************
#*********************************************
#breakfast_menu
{
'trigger': 'advance',
'source': 'state5',
'dest': 'state9',
'conditions': 'is_going_to_state9'
},
#breakfast_not_menu
{
'trigger': 'advance',
'source': 'state5',
'dest': 'state5',
'conditions': 'is_going_to_state19'
},
#breakfast_back
{
'trigger': 'advance',
'source': 'state9',
'dest': 'state5',
'conditions': 'is_going_to_state102'
},
#breakfast_back_food
{
'trigger': 'advance',
'source': 'state9',
'dest': 'state1',
'conditions': 'is_going_to_state1'
},
#lunch_menu
{
'trigger': 'advance',
'source': 'state25',
'dest': 'state29',
'conditions': 'is_going_to_state9'
},
#lunch_not_menu
{
'trigger': 'advance',
'source': 'state25',
'dest': 'state25',
'conditions': 'is_going_to_state19'
},
#lunch_back
{
'trigger': 'advance',
'source': 'state29',
'dest': 'state25',
'conditions': 'is_going_to_state102'
},
#lunch_back_food
{
'trigger': 'advance',
'source': 'state29',
'dest': 'state1',
'conditions': 'is_going_to_state1'
},
#dinner_menu
{
'trigger': 'advance',
'source': 'state45',
'dest': 'state49',
'conditions': 'is_going_to_state9'
},
#dinner_not_menu
{
'trigger': 'advance',
'source': 'state45',
'dest': 'state45',
'conditions': 'is_going_to_state19'
},
#dinner_back
{
'trigger': 'advance',
'source': 'state49',
'dest': 'state45',
'conditions': 'is_going_to_state102'
},
#dinner_back_food
{
'trigger': 'advance',
'source': 'state49',
'dest': 'state1',
'conditions': 'is_going_to_state1'
},
#late-night supper_menu
{
'trigger': 'advance',
'source': 'state65',
'dest': 'state69',
'conditions': 'is_going_to_state9'
},
#late-night supper_not_menu
{
'trigger': 'advance',
'source': 'state65',
'dest': 'state65',
'conditions': 'is_going_to_state19'
},
#late-night supper_back
{
'trigger': 'advance',
'source': 'state69',
'dest': 'state65',
'conditions': 'is_going_to_state102'
},
#late-night supper_back_food
{
'trigger': 'advance',
'source': 'state69',
'dest': 'state1',
'conditions': 'is_going_to_state1'
},
#*********************************************
#*********************************************
#breakfast_information
{
'trigger': 'advance',
'source': 'state9',
'dest': 'state13',
'conditions': 'is_going_to_state9'
},
#breakfast_back
{
'trigger': 'advance',
'source': 'state13',
'dest': 'state9',
'conditions': 'is_going_to_state102'
},
#lunch_information
{
'trigger': 'advance',
'source': 'state29',
'dest': 'state23',
'conditions': 'is_going_to_state9'
},
#lunch_back
{
'trigger': 'advance',
'source': 'state23',
'dest': 'state29',
'conditions': 'is_going_to_state102'
},
#dinner_information
{
'trigger': 'advance',
'source': 'state49',
'dest': 'state33',
'conditions': 'is_going_to_state9'
},
#dinner_back
{
'trigger': 'advance',
'source': 'state33',
'dest': 'state49',
'conditions': 'is_going_to_state102'
},
#late-night supper_information
{
'trigger': 'advance',
'source': 'state69',
'dest': 'state43',
'conditions': 'is_going_to_state9'
},
#late-night supper_back
{
'trigger': 'advance',
'source': 'state43',
'dest': 'state69',
'conditions': 'is_going_to_state102'
},
#*********************************************
#*********************************************
#breakfast_food
{
'trigger': 'advance',
'source': 'state13',
'dest': 'state1',
'conditions': 'is_going_to_state1'
},
#lunch_food
{
'trigger': 'advance',
'source': 'state23',
'dest': 'state1',
'conditions': 'is_going_to_state1'
},
#dinner_food
{
'trigger': 'advance',
'source': 'state33',
'dest': 'state1',
'conditions': 'is_going_to_state1'
},
#late-night supper_food
{
'trigger': 'advance',
'source': 'state43',
'dest': 'state1',
'conditions': 'is_going_to_state1'
},
#*********************************************
#*********************************************
#breakfast_together
{
'trigger': 'advance',
'source': 'state13',
'dest': 'state100',
'conditions': 'is_going_to_state19'
},
#lunch_together
{
'trigger': 'advance',
'source': 'state23',
'dest': 'state100',
'conditions': 'is_going_to_state19'
},
#dinner_together
{
'trigger': 'advance',
'source': 'state33',
'dest': 'state100',
'conditions': 'is_going_to_state19'
},
#late-night supper_together
{
'trigger': 'advance',
'source': 'state43',
'dest': 'state100',
'conditions': 'is_going_to_state19'
},
#*********************************************
#breakfast,lunch,dinner,late-night supper back to finish
{
'trigger': 'go_back',
'source': 'state100',
'dest': 'user',
},
#*********************************************
#*********************************************
#music
{
'trigger': 'advance',
'source': 'user',
'dest': 'state2',
'conditions': 'is_going_to_state2'
},
#*********************************************
#music_information_yes
{
'trigger': 'advance',
'source': 'state2',
'dest': 'state6',
'conditions': 'is_going_to_state9'
},
#music_information_no
{
'trigger': 'advance',
'source': 'state2',
'dest': 'user',
'conditions': 'is_going_to_state19'
},
#*********************************************
#music_play_yes
{
'trigger': 'advance',
'source': 'state6',
'dest': 'state10',
'conditions': 'is_going_to_state9'
},
#music_play_no
{
'trigger': 'advance',
'source': 'state6',
'dest': 'state6',
'conditions': 'is_going_to_state19'
},
#*********************************************
#music_replay_yes
{
'trigger': 'advance',
'source': 'state10',
'dest': 'state6',
'conditions': 'is_going_to_state9'
},
#music_replay_no
{
'trigger': 'advance',
'source': 'state10',
'dest': 'state100',
'conditions': 'is_going_to_state19'
},
#*********************************************
#*********************************************
#chat
{
'trigger': 'advance',
'source': 'user',
'dest': 'state3',
'conditions': 'is_going_to_state3'
},
#*********************************************
#chat_hw
{
'trigger': 'advance',
'source': 'state3',
'dest': 'state7',
'conditions': 'is_going_to_state7'
},
#*********************************************
#chat_hw_1
{
'trigger': 'advance',
'source': 'state7',
'dest': 'state11',
'conditions': 'is_going_to_state7'
},
{
'trigger': 'advance',
'source': 'state7',
'dest': 'state171',
'conditions': 'is_going_to_state17'
},
{
'trigger': 'advance',
'source': 'state7',
'dest': 'state271',
'conditions': 'is_going_to_state27'
},
#*********************************************
#chat_hw_2
{
'trigger': 'advance',
'source': 'state11',
'dest': 'state15',
'conditions': 'is_going_to_state7'
},
{
'trigger': 'advance',
'source': 'state11',
'dest': 'state172',
'conditions': 'is_going_to_state17'
},
{
'trigger': 'advance',
'source': 'state11',
'dest': 'state272',
'conditions': 'is_going_to_state27'
},
#*********************************************
#chat_other
{
'trigger': 'advance',
'source': 'state3',
'dest': 'state17',
'conditions': 'is_going_to_state17'
},
#*********************************************
#chat_other_1
{
'trigger': 'advance',
'source': 'state17',
'dest': 'state15',
'conditions': 'is_going_to_state7'
},
{
'trigger': 'advance',
'source': 'state17',
'dest': 'state171',
'conditions': 'is_going_to_state17'
},
{
'trigger': 'advance',
'source': 'state17',
'dest': 'state271',
'conditions': 'is_going_to_state27'
},
#*********************************************
#chat_other_2
{
'trigger': 'advance',
'source': 'state171',
'dest': 'state15',
'conditions': 'is_going_to_state7'
},
{
'trigger': 'advance',
'source': 'state171',
'dest': 'state172',
'conditions': 'is_going_to_state17'
},
{
'trigger': 'advance',
'source': 'state171',
'dest': 'state272',
'conditions': 'is_going_to_state27'
},
#*********************************************
#chat_freind
{
'trigger': 'advance',
'source': 'state3',
'dest': 'state27',
'conditions': 'is_going_to_state27'
},
#*********************************************
#chat_freind_1
{
'trigger': 'advance',
'source': 'state27',
'dest': 'state11',
'conditions': 'is_going_to_state7'
},
{
'trigger': 'advance',
'source': 'state27',
'dest': 'state171',
'conditions': 'is_going_to_state17'
},
{
'trigger': 'advance',
'source': 'state27',
'dest': 'state271',
'conditions': 'is_going_to_state27'
},
#*********************************************
#chat_freind_2
{
'trigger': 'advance',
'source': 'state271',
'dest': 'state15',
'conditions': 'is_going_to_state7'
},
{
'trigger': 'advance',
'source': 'state271',
'dest': 'state172',
'conditions': 'is_going_to_state17'
},
{
'trigger': 'advance',
'source': 'state271',
'dest': 'state272',
'conditions': 'is_going_to_state27'
},
#*********************************************
#*********************************************
#chat_last
{
'trigger': 'advance',
'source': 'state15',
'dest': 'state77',
'conditions': 'is_going_to_state78'
},
{
'trigger': 'advance',
'source': 'state172',
'dest': 'state77',
'conditions': 'is_going_to_state78'
},
{
'trigger': 'advance',
'source': 'state272',
'dest': 'state77',
'conditions': 'is_going_to_state78'
},
#*********************************************
#*********************************************
#chat_last_last
{
'trigger': 'advance',
'source': 'state77',
'dest': 'state78',
'conditions': 'is_going_to_state78'
},
#*********************************************
#chat_last_last_last
{
'trigger': 'advance',
'source': 'state78',
'dest': 'state100',
'conditions': 'is_going_to_state19'
},
{
'trigger': 'advance',
'source': 'state78',
'dest': 'state3',
'conditions': 'is_going_to_state9'
},
#*********************************************
],
initial='user',
auto_transitions=False,
show_conditions=True,
)
def _set_webhook():
    """Register WEBHOOK_URL with the Telegram bot; exit the process on failure."""
    ok = bot.set_webhook(WEBHOOK_URL)
    if ok:
        print('Your webhook URL has been set to "{}"'.format(WEBHOOK_URL))
    else:
        print('Webhook setup failed')
        sys.exit(1)
@app.route('/hook', methods=['POST'])
def webhook_handler():
    """Receive a Telegram update via webhook and feed it to the state machine."""
    payload = request.get_json(force=True)
    incoming_update = telegram.Update.de_json(payload, bot)
    machine.advance(incoming_update)
    return 'ok'
@app.route('/show-fsm', methods=['GET'])
def show_fsm():
    """Render the current finite-state machine graph as a PNG and serve it."""
    buf = BytesIO()
    machine.graph.draw(buf, prog='dot', format='png')
    buf.seek(0)
    return send_file(buf, attachment_filename='fsm.png', mimetype='image/png')
if __name__ == "__main__":
    _set_webhook()
    app.run()
    # NOTE: only runs after the Flask server stops; dumps the FSM diagram.
    # (Removed trailing dataset-table residue that made this line a syntax error.)
    machine.get_graph().draw('my_state_diagram.png', prog='dot')
import os
import torch
import cv2
import util.io
from glob import glob
from torchvision.transforms import Compose
from dpt.load_models import load_model
from dpt.transforms import Resize, PrepareForNet
def run(input_path,
        model_path,
        model_type="dpt_hybrid",
        optimize=True,
        save_png=True,
        save_npy=True):
    """Run MonoDepthNN to compute depth maps for a Cityscapes-style tree.

    Walks input_path/{leftImg8bit,rightImg8bit}/{train,val,test}/*/*.png and
    writes per-image depth maps into sibling "leftDepth"/"rightDepth" trees.

    Args:
        input_path (str): root of the Cityscapes-style input folder
        model_path (str): path to saved model weights
        model_type (str): network variant name understood by load_model
        optimize (bool): on CUDA, use channels-last memory format and fp16
        save_png (bool): write 16-bit PNG depth maps
        save_npy (bool): write raw .npy depth arrays
    """
    print("initialize")

    # select device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("device: %s" % device)

    model, net_w, net_h, normalization = load_model(model_type, model_path)

    # Resize to the network's input resolution, then normalize.
    transform = Compose(
        [
            Resize(
                net_w,
                net_h,
                resize_target=None,
                keep_aspect_ratio=True,
                ensure_multiple_of=32,  # network requires dims divisible by 32
                resize_method="minimal",
                image_interpolation_method=cv2.INTER_CUBIC,
            ),
            normalization,
            PrepareForNet(),
        ]
    )

    model.eval()

    # Half precision + channels-last only pays off on CUDA.
    if optimize and device == torch.device("cuda"):
        model = model.to(memory_format=torch.channels_last)
        model = model.half()
    model.to(device)

    # get input images
    img_names = []
    for side in ["leftImg8bit", "rightImg8bit"]:
        for split in ["train", "val", "test"]:
            img_names += glob(os.path.join(input_path, side, split, "*/*.png"))
    num_images = len(img_names)

    # create output folders structure (mirrors split/city layout of inputs)
    left_depth_dir = os.path.join(input_path, "leftDepth")
    right_depth_dir = os.path.join(input_path, "rightDepth")
    os.makedirs(left_depth_dir, exist_ok=True)
    os.makedirs(right_depth_dir, exist_ok=True)
    splits_cities = {}
    for split in ["train", "val", "test"]:
        all_splits = glob(os.path.join(input_path, "leftImg8bit", split, "*"))
        splits_cities[split] = [os.path.basename(s) for s in all_splits]
    for split in ["train", "val", "test"]:
        cities = splits_cities[split]
        for city in cities:
            os.makedirs(os.path.join(left_depth_dir, split, city), exist_ok=True)
            os.makedirs(os.path.join(right_depth_dir, split, city), exist_ok=True)

    print("start processing")
    for ind, img_name in enumerate(img_names):
        if os.path.isdir(img_name):
            continue
        print(" processing {} ({}/{})".format(img_name, ind + 1, num_images))

        img = util.io.read_image(img_name)
        img_input = transform({"image": img})["image"]

        # compute
        with torch.no_grad():
            sample = torch.from_numpy(img_input).to(device).unsqueeze(0)
            if optimize and device == torch.device("cuda"):
                sample = sample.to(memory_format=torch.channels_last)
                sample = sample.half()
            # Call the module, not model.forward, so registered hooks still fire.
            prediction = model(sample)
            # Upsample the prediction back to the original image resolution.
            prediction = (
                torch.nn.functional.interpolate(
                    prediction.unsqueeze(1),
                    size=img.shape[:2],
                    mode="bilinear",
                    align_corners=False,
                )
                .squeeze()
                .cpu()
                .numpy()
            )
        # KITTI/NYU variants predict in scaled units — TODO confirm exact units.
        if model_type == "dpt_hybrid_kitti":
            prediction *= 256
        if model_type == "dpt_hybrid_nyu":
            prediction *= 1000.0

        # output: map the input path into the corresponding depth folder
        output_name = img_name.replace("leftImg8bit", "leftDepth").replace("rightImg8bit", "rightDepth")
        output_name = os.path.splitext(output_name)[0]
        if save_png:
            util.io.write_depth(output_name, prediction, bits=2)
        if save_npy:
            util.io.write_npy(output_name, prediction)

    print("finished")
if __name__ == "__main__":
    parser = util.io.get_parser()
    parser.set_defaults(optimize=True)
    parser.set_defaults(absolute_depth=False)
    args = parser.parse_args()

    # Default weight files per model variant.
    default_models = {
        "midas_v21": "weights/midas_v21-f6b98070.pt",
        "dpt_large": "weights/dpt_large-midas-2f21e586.pt",
        "dpt_hybrid": "weights/dpt_hybrid-midas-501f0c75.pt",
        "dpt_hybrid_kitti": "weights/dpt_hybrid_kitti-cb926ef4.pt",
        "dpt_hybrid_nyu": "weights/dpt_hybrid_nyu-2ce69ec7.pt",
    }

    if args.model_weights is None:
        args.model_weights = default_models[args.model_type]

    # set torch options
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True

    # compute depth maps
    # (Removed trailing dataset-table residue that corrupted the closing line.)
    run(
        args.input_path,
        args.model_weights,
        args.model_type,
        args.optimize,
    )
import torch
import cv2
import util.io
from glob import glob
from torchvision.transforms import Compose
from dpt.load_models import load_model
from dpt.transforms import Resize, PrepareForNet
def run(input_path,
        model_path,
        model_type="dpt_hybrid",
        optimize=True,
        save_png=True,
        save_npy=True):
    """Run MonoDepthNN to compute depth maps.

    Walks input_path/{leftImg8bit,rightImg8bit}/{train,val,test}/*/*.png and
    writes per-image depth maps into sibling "leftDepth"/"rightDepth" trees.

    Args:
        input_path (str): root of the Cityscapes-style input folder
        model_path (str): path to saved model weights
        model_type (str): network variant name understood by load_model
        optimize (bool): on CUDA, use channels-last memory format and fp16
        save_png (bool): write 16-bit PNG depth maps
        save_npy (bool): write raw .npy depth arrays
    """
    print("initialize")

    # select device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("device: %s" % device)

    model, net_w, net_h, normalization = load_model(model_type, model_path)

    # Resize to the network's input resolution, then normalize.
    transform = Compose(
        [
            Resize(
                net_w,
                net_h,
                resize_target=None,
                keep_aspect_ratio=True,
                ensure_multiple_of=32,  # network requires dims divisible by 32
                resize_method="minimal",
                image_interpolation_method=cv2.INTER_CUBIC,
            ),
            normalization,
            PrepareForNet(),
        ]
    )

    model.eval()

    # Half precision + channels-last only applies on CUDA.
    if optimize == True and device == torch.device("cuda"):
        model = model.to(memory_format=torch.channels_last)
        model = model.half()
    model.to(device)

    # get input images
    img_names = []
    for side in ["leftImg8bit", "rightImg8bit"]:
        for split in ["train", "val", "test"]:
            img_names += glob(os.path.join(input_path, side, split, "*/*.png"))
    num_images = len(img_names)

    # create output folders structure (mirrors split/city layout of inputs)
    left_depth_dir = os.path.join(input_path, "leftDepth")
    right_depth_dir = os.path.join(input_path, "rightDepth")
    os.makedirs(left_depth_dir, exist_ok=True)
    os.makedirs(right_depth_dir, exist_ok=True)
    splits_cities = {}
    for split in ["train", "val", "test"]:
        all_splits = glob(os.path.join(input_path, "leftImg8bit", split, "*"))
        splits_cities[split] = [os.path.basename(s) for s in all_splits]
    for split in ["train", "val", "test"]:
        cities = splits_cities[split]
        for city in cities:
            os.makedirs(os.path.join(left_depth_dir, split, city), exist_ok=True)
            os.makedirs(os.path.join(right_depth_dir, split, city), exist_ok=True)

    print("start processing")
    for ind, img_name in enumerate(img_names):
        if os.path.isdir(img_name):
            continue
        print(" processing {} ({}/{})".format(img_name, ind + 1, num_images))

        img = util.io.read_image(img_name)
        img_input = transform({"image": img})["image"]

        # compute
        with torch.no_grad():
            sample = torch.from_numpy(img_input).to(device).unsqueeze(0)
            if optimize == True and device == torch.device("cuda"):
                sample = sample.to(memory_format=torch.channels_last)
                sample = sample.half()
            prediction = model.forward(sample)
            # Upsample the prediction back to the original image resolution.
            prediction = (
                torch.nn.functional.interpolate(
                    prediction.unsqueeze(1),
                    size=img.shape[:2],
                    mode="bilinear",
                    align_corners=False,
                )
                .squeeze()
                .cpu()
                .numpy()
            )
        # KITTI/NYU variants predict in scaled units — TODO confirm exact units.
        if model_type == "dpt_hybrid_kitti":
            prediction *= 256
        if model_type == "dpt_hybrid_nyu":
            prediction *= 1000.0

        # output: map the input path into the corresponding depth folder
        output_name = img_name.replace("leftImg8bit", "leftDepth").replace("rightImg8bit", "rightDepth")
        output_name = os.path.splitext(output_name)[0]
        if save_png:
            util.io.write_depth(output_name, prediction, bits=2)
        if save_npy:
            util.io.write_npy(output_name, prediction)

    print("finished")
if __name__ == "__main__":
    parser = util.io.get_parser()
    parser.set_defaults(optimize=True)
    parser.set_defaults(absolute_depth=False)
    args = parser.parse_args()

    # Default weight files per model variant.
    default_models = {
        "midas_v21": "weights/midas_v21-f6b98070.pt",
        "dpt_large": "weights/dpt_large-midas-2f21e586.pt",
        "dpt_hybrid": "weights/dpt_hybrid-midas-501f0c75.pt",
        "dpt_hybrid_kitti": "weights/dpt_hybrid_kitti-cb926ef4.pt",
        "dpt_hybrid_nyu": "weights/dpt_hybrid_nyu-2ce69ec7.pt",
    }

    if args.model_weights is None:
        args.model_weights = default_models[args.model_type]

    # set torch options
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True

    # compute depth maps
    # (Removed trailing dataset-table residue that corrupted the closing line.)
    run(
        args.input_path,
        args.model_weights,
        args.model_type,
        args.optimize,
    )
import numpy as np
import gp.base as base
import gp.numeric as numeric
from gp.numeric import correlation_between_distinct_sets_from_covariance
from gp.moment_matching.numpy.moment_matching_minimum import (
calculate_cumulative_min_moments,
get_next_cumulative_min_moments,
)
class SequentialMomentMatchingEI(base.SequentialMomentMatchingBase):
    """Sequential moment-matching Expected Improvement acquisition.

    Scores candidate points by the expected improvement of the
    moment-matched cumulative minimum over the already-selected points.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Calculate and save the cumulative minima means and standard deviations
        (
            self.selected_cumulative_min_means,
            self.selected_cumulative_min_std,
            self.selected_points_alpha_pdfs,
            self.selected_points_alpha_cdfs,
        ) = calculate_cumulative_min_moments(
            means=self.selected_y_mean.T,
            stds=self.selected_y_std.T,
            corr_matrix=self.selected_y_correlation[None, :, :],
        )  # Shape (n_selected, 1) each

    def evaluate(self, x: np.ndarray) -> np.ndarray:
        """Return the expected improvement for each candidate row of x.

        Args:
            x: Candidate locations, one per row.
        Returns:
            Array of shape (n_candidates, 1) with EI values.
        """
        candidate_mean, candidate_variance = self.model.predict(x)  # Two arrays of shape (n_candidates, 1)
        candidate_std: np.ndarray = np.sqrt(candidate_variance)  # Array of shape (n_candidates, 1)
        # Covariance between candidates and selected points. Shape (n_candidates, n_selected)
        covariance_to_collected: np.ndarray = self.model.get_covariance_between_points(x, self.selected_x)
        # Get the minimum as observed so far
        y_min = self.model.Y.min()
        # Get the correlation from new points to selected:
        correlation = correlation_between_distinct_sets_from_covariance(
            covariance=covariance_to_collected,
            std1=candidate_std.ravel(),
            std2=self.selected_y_std.ravel(),
        )  # Shape (n_candidates, n_selected)
        # If D points already selected, the index of next point to be selected will be D
        next_point_idx = self.selected_y_mean.shape[0]
        cumulative_min_mean, cumulative_min_std, *_ = get_next_cumulative_min_moments(
            next_output_idx=next_point_idx,
            mean=candidate_mean.ravel(),
            std=candidate_std.ravel(),
            prev_stds=self.selected_y_std.T,
            corr_to_next=correlation,
            theta_means=self.selected_cumulative_min_means,
            theta_stds=self.selected_cumulative_min_std,
            alpha_pdfs=self.selected_points_alpha_pdfs,
            alpha_cdfs=self.selected_points_alpha_cdfs,
        )  # Shape (num_candidates) for both
        # Calculate the Expected Improvement
        expected_improvement = numeric.expected_improvement(
            y_min=y_min, mean=cumulative_min_mean, standard_deviation=cumulative_min_std
        )  # Shape (num_candidates)
        return expected_improvement[:, None]

    @property
    def has_gradients(self) -> bool:
        """Gradients of this acquisition are not implemented."""
        # (Removed trailing dataset-table residue that corrupted this line.)
        return False
import gp.base as base
import gp.numeric as numeric
from gp.numeric import correlation_between_distinct_sets_from_covariance
from gp.moment_matching.numpy.moment_matching_minimum import (
calculate_cumulative_min_moments,
get_next_cumulative_min_moments,
)
class SequentialMomentMatchingEI(base.SequentialMomentMatchingBase):
    """Sequential moment-matching Expected Improvement acquisition.

    Scores candidate points by the expected improvement of the
    moment-matched cumulative minimum over the already-selected points.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Calculate and save the cumulative minima means and standard deviations
        (
            self.selected_cumulative_min_means,
            self.selected_cumulative_min_std,
            self.selected_points_alpha_pdfs,
            self.selected_points_alpha_cdfs,
        ) = calculate_cumulative_min_moments(
            means=self.selected_y_mean.T,
            stds=self.selected_y_std.T,
            corr_matrix=self.selected_y_correlation[None, :, :],
        )  # Shape (n_selected, 1) each

    def evaluate(self, x: np.ndarray) -> np.ndarray:
        """Return the expected improvement for each candidate row of x.

        Args:
            x: Candidate locations, one per row.
        Returns:
            Array of shape (n_candidates, 1) with EI values.
        """
        candidate_mean, candidate_variance = self.model.predict(x)  # Two arrays of shape (n_candidates, 1)
        candidate_std: np.ndarray = np.sqrt(candidate_variance)  # Array of shape (n_candidates, 1)
        # Covariance between candidates and selected points. Shape (n_candidates, n_selected)
        covariance_to_collected: np.ndarray = self.model.get_covariance_between_points(x, self.selected_x)
        # Get the minimum as observed so far
        y_min = self.model.Y.min()
        # Get the correlation from new points to selected:
        correlation = correlation_between_distinct_sets_from_covariance(
            covariance=covariance_to_collected,
            std1=candidate_std.ravel(),
            std2=self.selected_y_std.ravel(),
        )  # Shape (n_candidates, n_selected)
        # If D points already selected, the index of next point to be selected will be D
        next_point_idx = self.selected_y_mean.shape[0]
        cumulative_min_mean, cumulative_min_std, *_ = get_next_cumulative_min_moments(
            next_output_idx=next_point_idx,
            mean=candidate_mean.ravel(),
            std=candidate_std.ravel(),
            prev_stds=self.selected_y_std.T,
            corr_to_next=correlation,
            theta_means=self.selected_cumulative_min_means,
            theta_stds=self.selected_cumulative_min_std,
            alpha_pdfs=self.selected_points_alpha_pdfs,
            alpha_cdfs=self.selected_points_alpha_cdfs,
        )  # Shape (num_candidates) for both
        # Calculate the Expected Improvement
        expected_improvement = numeric.expected_improvement(
            y_min=y_min, mean=cumulative_min_mean, standard_deviation=cumulative_min_std
        )  # Shape (num_candidates)
        return expected_improvement[:, None]

    @property
    def has_gradients(self) -> bool:
        """Gradients of this acquisition are not implemented."""
        return False
import itertools
import datetime
from collections import defaultdict
import sqlalchemy as sa
from werkzeug import exceptions
from ggrc import db
from ggrc.models import relationship, inflector
from ggrc.rbac import permissions
from ggrc.services import signals
from ggrc.utils.log_event import log_event
# TODO: Clonning of Audit should be done with MultiClonable mixin and
# SingleClonable should be removed
class SingleClonable(object):
    """Old Clonable mixin.

    Deprecated; use the MultiClonable mixin for new cloning code.
    """

    __lazy_init__ = True
    # Related-object types that may be cloned together with the base object.
    CLONEABLE_CHILDREN = {}
    # NOTE(review): class-level mutable dict — shared across all instances.
    _operation_data = {}

    @classmethod
    def init(cls, model):
        """Lazy-init hook: wire the clone signal handlers onto *model*."""
        cls.set_handlers(model)

    @classmethod
    def set_handlers(cls, model):
        """Set up handlers for cloning"""
        # pylint: disable=unused-argument, unused-variable
        @signals.Restful.collection_posted.connect_via(model)
        def handle_model_clone(sender, objects=None, sources=None):
            """Process cloning of objects"""
            # NOTE: itertools.izip implies this module targets Python 2.
            for obj, src in itertools.izip(objects, sources):
                if src.get("operation") == "clone":
                    options = src.get("cloneOptions")
                    mapped_objects = options.get("mappedObjects", [])
                    source_id = int(options.get("sourceObjectId"))
                    obj.clone(
                        source_id=source_id,
                        mapped_objects={obj for obj in mapped_objects
                                        if obj in model.CLONEABLE_CHILDREN})

        @signals.Restful.model_posted_after_commit.connect_via(model)
        def handle_scope_clone(sender, obj=None, src=None, service=None,
                               event=None):
            """Process cloning of objects"""
            if src.get("operation") == "clone":
                # Local import — presumably avoids a circular import; verify.
                from ggrc.snapshotter import clone_scope
                options = src.get("cloneOptions")
                source_id = int(options.get("sourceObjectId"))
                base_object = model.query.get(source_id)
                clone_scope(base_object, obj, event)

    def generate_attribute(self, attribute):
        """Generate a new unique attribute value as a copy of the original."""
        attr = getattr(self, attribute)

        def count_values(key, value):
            # Number of existing rows already using this candidate value.
            return self.query.filter_by(**{key: value}).count()

        # Probe "<attr> - copy 1", "<attr> - copy 2", ... until unused.
        i = 1
        generated_attr_value = "{0} - copy {1}".format(attr, i)
        while count_values(attribute, generated_attr_value):
            i += 1
            generated_attr_value = "{0} - copy {1}".format(attr, i)
        return generated_attr_value

    def clone_custom_attribute_values(self, obj):
        """Copy object's custom attribute values"""
        ca_values = obj.custom_attribute_values
        for value in ca_values:
            value._clone(self)  # pylint: disable=protected-access

    def update_attrs(self, values):
        """Set each key/value pair from *values* as an attribute on self."""
        for key, value in values.items():
            setattr(self, key, value)
# TODO: This should be renamed to Clonable when Audit clone logic will be
# refactored and SingleClonable will be removed
class MultiClonable(object):
    """Clonable mixin"""

    CLONEABLE_CHILDREN = {}  # Types of related objects to clone with base one
    RETURN_OBJ_JSON = False  # Return json for created object

    @classmethod
    def _parse_query(cls, query):
        """Parse cloning parameters from input query.

        Args:
            query: Dict with cloning parameters.
        Returns:
            Tuple that include list objects to clone, destination object and
            list of possible mapped types (source_objs, destination, mapped_types).
        """
        if not query:
            raise exceptions.BadRequest()
        source_ids = query.get("sourceObjectIds", [])
        if not source_ids:
            raise exceptions.BadRequest("sourceObjectIds parameter wasn't provided")
        source_objs = cls.query.options(
            sa.orm.subqueryload('custom_attribute_definitions'),
            sa.orm.subqueryload('custom_attribute_values'),
        ).filter(cls.id.in_(source_ids)).all()
        dest_query = query.get("destination", {})
        destination = None
        if dest_query and dest_query.get("type") and dest_query.get("id"):
            destination_cls = inflector.get_model(dest_query.get("type"))
            destination = destination_cls.query.filter_by(
                id=dest_query.get("id")
            ).first()
        # Keep only mapped types this class actually supports cloning for.
        mapped_types = {
            type_ for type_ in query.get("mappedObjects", [])
            if type_ in cls.CLONEABLE_CHILDREN
        }
        return source_objs, destination, mapped_types

    @classmethod
    def handle_model_clone(cls, query):
        """Process cloning of objects.

        Args:
            query: Dict with parameters for cloning procedure. It should have
                following structure:
                {
                    "sourceObjectIds": [1, 2],
                    "destination": {"type": "Audit", "id": 2},  # optional
                    "mappedObjects": []  # optional
                }.
        Returns:
            Response with status code 200 in case of success and 400 if provided
            parameters are invalid.
        """
        source_objs, destination, mapped_types = cls._parse_query(query)

        clonned_objs = {}
        for source_obj in source_objs:
            # NOTE(review): destination.context_id is read even when no
            # destination was supplied — confirm callers always provide one.
            if (
                not permissions.is_allowed_read_for(source_obj) or
                not permissions.is_allowed_create(
                    source_obj.type, source_obj.id, destination.context_id
                )
            ):
                raise exceptions.Forbidden()
            clonned_objs[source_obj] = cls._copy_obj(source_obj, destination)

        # Clone related objects and attach them to the corresponding copies.
        for target, mapped_obj in cls._collect_mapped(source_objs, mapped_types):
            clonned_objs[mapped_obj] = cls._copy_obj(mapped_obj, target)

        cls._set_parent_context(clonned_objs.values(), destination)
        db.session.flush()
        for source, clonned in clonned_objs.items():
            cls._clone_cads(source, clonned)

        if clonned_objs:
            db.session.add(log_event(db.session, flush=False))
        db.session.commit()

        from ggrc.query import views
        collections = []
        if cls.RETURN_OBJ_JSON:
            # NOTE(review): iterates the dict keys (source objects), so the
            # JSON is built from the sources, not the clones — confirm intent.
            for obj in clonned_objs:
                collections.append(
                    views.build_collection_representation(cls, obj.log_json())
                )
        return views.json_success_response(collections, datetime.datetime.utcnow())

    def _clone(self, target=None):
        """Create a copy of self.

        This method should be overridden for class that implement Clonable mixin.

        Args:
            target: Destination object where clonned object should be created.
        Returns:
            Instance of object copy.
        """
        raise NotImplementedError()

    @classmethod
    def _copy_obj(cls, source, target=None):
        """Make object copy of source into target as destination.

        Source will be cloned and mapped to target if it's provided.

        Args:
            source: Object that should be clonned.
            target: Destination for coppied object.
        Returns:
            Cloned object.
        """
        # pylint: disable=protected-access
        clonned_object = source._clone(target)
        if target:
            db.session.add(relationship.Relationship(
                source=target,
                destination=clonned_object,
            ))
        return clonned_object

    @classmethod
    def _clone_cads(cls, source, target):
        """Clone CADs from source to target.

        Args:
            source: Object with CADs.
            target: Object in which CADs should be copied.
        """
        for cad in source.custom_attribute_definitions:
            # Copy only local CADs
            if cad.definition_id:
                # pylint: disable=protected-access
                cad._clone(target)

    @classmethod
    def _collect_mapped(cls, source_objs, mapped_types):
        """Collect mapped objects.

        Args:
            source_objs: List of objects for which mapped should be collected.
            mapped_types: List of possible types of mapped objects.
        Returns:
            List of tuples containing source and mapped object
            [(source1, mapped1), (source2, mapped2), ...].
        """
        if not mapped_types:
            return []
        source_ids = {obj.id: obj for obj in source_objs}
        # Relationships are undirected here: search both orientations and
        # union the results into (source_id, mapped_type, mapped_id) rows.
        related_data = db.session.query(
            relationship.Relationship.source_id,
            relationship.Relationship.destination_type,
            relationship.Relationship.destination_id,
        ).filter(
            relationship.Relationship.source_type == cls.__name__,
            relationship.Relationship.source_id.in_(source_ids),
            relationship.Relationship.destination_type.in_(mapped_types)
        ).union_all(
            db.session.query(
                relationship.Relationship.destination_id,
                relationship.Relationship.source_type,
                relationship.Relationship.source_id,
            ).filter(
                relationship.Relationship.destination_type == cls.__name__,
                relationship.Relationship.destination_id.in_(source_ids),
                relationship.Relationship.source_type.in_(mapped_types)
            )
        ).all()
        related_objs = cls.load_objs(related_data)
        source_related_objs = []
        for src_id, rel_type, rel_id in related_data:
            source_related_objs.append(
                (source_ids[src_id], related_objs[rel_type][rel_id])
            )
        return source_related_objs

    @classmethod
    def load_objs(cls, data):
        """Load objects by their ids and types.

        Args:
            data: List of stubs [(_, type, id),] for objects to load.
        Returns:
            Dict with object type and id as keys and instance as value.
        """
        # Combine ids of one type together to load in one query
        type_ids = defaultdict(set)
        for _, type_, id_ in data:
            type_ids[type_].add(id_)

        type_id_objs = defaultdict(dict)
        # We can't load all objects with different types in one step, so we
        # load them for each type separately
        for type_, ids in type_ids.items():
            related_model = inflector.get_model(type_)
            related_query = related_model.query.options(
                sa.orm.subqueryload('custom_attribute_definitions'),
            ).filter(related_model.id.in_(ids))
            for related in related_query:
                type_id_objs[type_][related.id] = related
        return type_id_objs

    @classmethod
    def _set_parent_context(cls, objs, parent):
        """Set up parent context to child objects.

        Args:
            objs: List of objects where context should be changed.
            parent: Parent object which determine context for children.
        """
        if not getattr(parent, "context_id", None):
            return
        for clonned in objs:
            # (Removed trailing dataset-table residue that corrupted this line.)
            clonned.context_id = parent.context_id
import datetime
from collections import defaultdict
import sqlalchemy as sa
from werkzeug import exceptions
from ggrc import db
from ggrc.models import relationship, inflector
from ggrc.rbac import permissions
from ggrc.services import signals
from ggrc.utils.log_event import log_event
# TODO: Clonning of Audit should be done with MultiClonable mixin and
# SingleClonable should be removed
class SingleClonable(object):
    """Old Clonable mixin.

    Deprecated; use the MultiClonable mixin for new cloning code.
    """

    __lazy_init__ = True
    CLONEABLE_CHILDREN = {}
    _operation_data = {}

    @classmethod
    def init(cls, model):
        """Lazy-init hook: wire the clone signal handlers onto *model*."""
        cls.set_handlers(model)

    @classmethod
    def set_handlers(cls, model):
        """Attach REST signal handlers that perform cloning for *model*."""
        # pylint: disable=unused-argument, unused-variable
        @signals.Restful.collection_posted.connect_via(model)
        def handle_model_clone(sender, objects=None, sources=None):
            """Clone each posted object whose source payload requests it."""
            for obj, src in itertools.izip(objects, sources):
                if src.get("operation") != "clone":
                    continue
                options = src.get("cloneOptions")
                requested = options.get("mappedObjects", [])
                new_source_id = int(options.get("sourceObjectId"))
                obj.clone(
                    source_id=new_source_id,
                    mapped_objects={
                        candidate for candidate in requested
                        if candidate in model.CLONEABLE_CHILDREN
                    },
                )

        @signals.Restful.model_posted_after_commit.connect_via(model)
        def handle_scope_clone(sender, obj=None, src=None, service=None,
                               event=None):
            """Clone the snapshot scope once the new object is committed."""
            if src.get("operation") != "clone":
                return
            from ggrc.snapshotter import clone_scope
            options = src.get("cloneOptions")
            base_object = model.query.get(int(options.get("sourceObjectId")))
            clone_scope(base_object, obj, event)

    def generate_attribute(self, attribute):
        """Return a unique "<value> - copy N" variant of *attribute*."""
        base_value = getattr(self, attribute)

        def is_taken(candidate_value):
            return self.query.filter_by(**{attribute: candidate_value}).count()

        suffix = 1
        candidate = "{0} - copy {1}".format(base_value, suffix)
        while is_taken(candidate):
            suffix += 1
            candidate = "{0} - copy {1}".format(base_value, suffix)
        return candidate

    def clone_custom_attribute_values(self, obj):
        """Copy custom attribute values from *obj* onto self."""
        for ca_value in obj.custom_attribute_values:
            ca_value._clone(self)  # pylint: disable=protected-access

    def update_attrs(self, values):
        """Assign every key/value pair in *values* as an attribute on self."""
        for name, value in values.items():
            setattr(self, name, value)
# TODO: This should be renamed to Clonable when Audit clone logic will be
# refactored and SingleClonable will be removed
class MultiClonable(object):
"""Clonable mixin"""
CLONEABLE_CHILDREN = {} # Types of related objects to clone with base one
RETURN_OBJ_JSON = False # Return json for created object
@classmethod
def _parse_query(cls, query):
"""Parse cloning parameters from input query.
Args:
query: Dict with cloning parameters.
Returns:
Tuple that include list objects to clone, destination object and
list of possible mapped types (source_objs, destination, mapped_types).
"""
if not query:
raise exceptions.BadRequest()
source_ids = query.get("sourceObjectIds", [])
if not source_ids:
raise exceptions.BadRequest("sourceObjectIds parameter wasn't provided")
source_objs = cls.query.options(
sa.orm.subqueryload('custom_attribute_definitions'),
sa.orm.subqueryload('custom_attribute_values'),
).filter(cls.id.in_(source_ids)).all()
dest_query = query.get("destination", {})
destination = None
if dest_query and dest_query.get("type") and dest_query.get("id"):
destination_cls = inflector.get_model(dest_query.get("type"))
destination = destination_cls.query.filter_by(
id=dest_query.get("id")
).first()
mapped_types = {
type_ for type_ in query.get("mappedObjects", [])
if type_ in cls.CLONEABLE_CHILDREN
}
return source_objs, destination, mapped_types
@classmethod
def handle_model_clone(cls, query):
    """Process cloning of objects.

    Args:
        query: Dict with parameters for the cloning procedure. It should
            have the following structure:
            {
                "sourceObjectIds": [1, 2],
                "destination": {"type": "Audit", "id": 2},  # optional
                "mappedObjects": []  # optional
            }.

    Returns:
        Response with status code 200 in case of success and 400 if the
        provided parameters are invalid.

    Raises:
        exceptions.Forbidden: If the user may not read a source object or
            create its copy in the destination context.
    """
    source_objs, destination, mapped_types = cls._parse_query(query)
    cloned_objs = {}
    # "destination" is optional, so guard the context lookup against None
    # (the original code dereferenced destination.context_id unconditionally
    # and crashed when no destination was supplied).
    destination_context_id = destination.context_id if destination else None
    for source_obj in source_objs:
        if (
            not permissions.is_allowed_read_for(source_obj) or
            not permissions.is_allowed_create(
                source_obj.type, source_obj.id, destination_context_id
            )
        ):
            raise exceptions.Forbidden()
        cloned_objs[source_obj] = cls._copy_obj(source_obj, destination)
    for target, mapped_obj in cls._collect_mapped(source_objs, mapped_types):
        cloned_objs[mapped_obj] = cls._copy_obj(mapped_obj, target)
    cls._set_parent_context(cloned_objs.values(), destination)
    # Flush so the clones get ids before their CADs are copied.
    db.session.flush()
    for source, cloned in cloned_objs.items():
        cls._clone_cads(source, cloned)
    if cloned_objs:
        db.session.add(log_event(db.session, flush=False))
        db.session.commit()
    from ggrc.query import views
    collections = []
    if cls.RETURN_OBJ_JSON:
        for obj in cloned_objs:
            collections.append(
                views.build_collection_representation(cls, obj.log_json())
            )
    return views.json_success_response(collections, datetime.datetime.utcnow())
def _clone(self, target=None):
    """Create a copy of self.

    This method should be overridden by any class that implements the
    Clonable mixin; the base implementation always raises.

    Args:
        target: Destination object where the cloned object should be created.

    Returns:
        Instance of the object copy.

    Raises:
        NotImplementedError: Always, in this base implementation.
    """
    raise NotImplementedError()
@classmethod
def _copy_obj(cls, source, target=None):
    """Make an object copy of source with target as its destination.

    Source is cloned and, when a target is provided, mapped to it through a
    new Relationship added to the session.

    Args:
        source: Object that should be cloned.
        target: Optional destination for the copied object.

    Returns:
        Cloned object.
    """
    # pylint: disable=protected-access
    clonned_object = source._clone(target)
    if target:
        # Map the freshly created clone to its destination object.
        db.session.add(relationship.Relationship(
            source=target,
            destination=clonned_object,
        ))
    return clonned_object
@classmethod
def _clone_cads(cls, source, target):
    """Clone custom attribute definitions (CADs) from source to target.

    Args:
        source: Object with CADs.
        target: Object into which the CADs should be copied.
    """
    for cad in source.custom_attribute_definitions:
        # Copy only local CADs (those with definition_id set); presumably
        # global definitions need no per-object copy -- TODO confirm.
        if cad.definition_id:
            # pylint: disable=protected-access
            cad._clone(target)
@classmethod
def _collect_mapped(cls, source_objs, mapped_types):
    """Collect objects mapped to the given source objects.

    Relationships are searched in both directions (source -> destination
    and destination -> source); the mirror query reorders its columns so
    each row is normalized to (our id, mapped type, mapped id).

    Args:
        source_objs: List of objects for which mapped objects should be
            collected.
        mapped_types: Collection of possible types of mapped objects.

    Returns:
        List of tuples containing source and mapped object
        [(source1, mapped1), (source2, mapped2), ...].
    """
    if not mapped_types:
        return []
    # Map id -> source object for O(1) lookup when pairing rows back up.
    source_ids = {obj.id: obj for obj in source_objs}
    related_data = db.session.query(
        relationship.Relationship.source_id,
        relationship.Relationship.destination_type,
        relationship.Relationship.destination_id,
    ).filter(
        relationship.Relationship.source_type == cls.__name__,
        relationship.Relationship.source_id.in_(source_ids),
        relationship.Relationship.destination_type.in_(mapped_types)
    ).union_all(
        # Mirror query: the cloneable object may sit on either side of a
        # relationship; columns are reordered to match the first query's
        # (our_id, mapped_type, mapped_id) layout.
        db.session.query(
            relationship.Relationship.destination_id,
            relationship.Relationship.source_type,
            relationship.Relationship.source_id,
        ).filter(
            relationship.Relationship.destination_type == cls.__name__,
            relationship.Relationship.destination_id.in_(source_ids),
            relationship.Relationship.source_type.in_(mapped_types)
        )
    ).all()
    related_objs = cls.load_objs(related_data)
    source_related_objs = []
    for src_id, rel_type, rel_id in related_data:
        source_related_objs.append(
            (source_ids[src_id], related_objs[rel_type][rel_id])
        )
    return source_related_objs
@classmethod
def load_objs(cls, data):
    """Load objects by their ids and types.

    Args:
        data: List of stubs [(_, type, id), ...] for objects to load; the
            first element of each stub is ignored.

    Returns:
        Nested dict {type: {id: instance}} of the loaded objects.
    """
    # Combine ids of one type together to load them in one query.
    type_ids = defaultdict(set)
    for _, type_, id_ in data:
        type_ids[type_].add(id_)
    type_id_objs = defaultdict(dict)
    # We can't load all objects with different types in one step, so we
    # load them for each type separately.
    for type_, ids in type_ids.items():
        related_model = inflector.get_model(type_)
        related_query = related_model.query.options(
            # Eager-load CADs; presumably to avoid per-object queries when
            # the clones' CADs are copied later.
            sa.orm.subqueryload('custom_attribute_definitions'),
        ).filter(related_model.id.in_(ids))
        for related in related_query:
            type_id_objs[type_][related.id] = related
    return type_id_objs
@classmethod
def _set_parent_context(cls, objs, parent):
    """Propagate the parent's context to the given child objects.

    Args:
        objs: Iterable of objects whose context should be changed.
        parent: Parent object that determines the context for its children.
    """
    parent_context_id = getattr(parent, "context_id", None)
    if not parent_context_id:
        return
    for child in objs:
        child.context_id = parent_context_id
from test_gc01 import test_gc_base
from wiredtiger import stat
from wtdataset import SimpleDataSet
# test_gc04.py
# Test that checkpoint must not clean the pages that are not obsolete.
class test_gc04(test_gc_base):
    """Check that checkpoint does not clean up pages that are not obsolete."""
    conn_config = 'cache_size=50MB,statistics=(all)'

    def get_stat(self, stat):
        """Return the current value of one connection statistic."""
        stat_cursor = self.session.open_cursor('statistics:')
        val = stat_cursor[stat][2]
        stat_cursor.close()
        return val

    def checkpoint_and_check_cc_stats(self):
        """Checkpoint, then assert that checkpoint cleanup visited pages but
        neither evicted nor removed any (nothing is obsolete because oldest
        is pinned at timestamp 1)."""
        self.session.checkpoint()
        self.assertEqual(self.get_stat(stat.conn.cc_pages_evict), 0)
        self.assertEqual(self.get_stat(stat.conn.cc_pages_removed), 0)
        self.assertGreater(self.get_stat(stat.conn.cc_pages_visited), 0)

    def test_gc(self):
        nrows = 10000

        # Create a table.
        uri = "table:gc04"
        ds = SimpleDataSet(self, uri, 0, key_format="i", value_format="S")
        ds.populate()

        # Pin oldest and stable to timestamp 1 so no update becomes obsolete.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(1) +
            ',stable_timestamp=' + self.timestamp_str(1))

        bigvalue = "aaaaa" * 100
        bigvalue2 = "ddddd" * 100
        # Alternate full-table updates with checkpoints; each checkpoint
        # must populate the history store without discarding live pages.
        self.large_updates(uri, bigvalue, ds, nrows, 10)
        self.large_updates(uri, bigvalue2, ds, nrows, 20)
        self.checkpoint_and_check_cc_stats()

        self.large_updates(uri, bigvalue, ds, nrows, 30)
        self.checkpoint_and_check_cc_stats()

        self.large_updates(uri, bigvalue2, ds, nrows, 40)
        self.checkpoint_and_check_cc_stats()

        self.large_updates(uri, bigvalue, ds, nrows, 50)
        self.large_updates(uri, bigvalue2, ds, nrows, 60)
        self.checkpoint_and_check_cc_stats()

        self.large_updates(uri, bigvalue, ds, nrows, 70)
        self.checkpoint_and_check_cc_stats()
if __name__ == '__main__':
wttest.run() | src/third_party/wiredtiger/test/suite/test_gc04.py |
from test_gc01 import test_gc_base
from wiredtiger import stat
from wtdataset import SimpleDataSet
# test_gc04.py
# Test that checkpoint must not clean the pages that are not obsolete.
class test_gc04(test_gc_base):
    """Check that checkpoint does not clean up pages that are not obsolete."""
    conn_config = 'cache_size=50MB,statistics=(all)'

    def get_stat(self, stat):
        """Return the current value of one connection statistic."""
        stat_cursor = self.session.open_cursor('statistics:')
        val = stat_cursor[stat][2]
        stat_cursor.close()
        return val

    def checkpoint_and_check_cc_stats(self):
        """Checkpoint, then assert that checkpoint cleanup visited pages but
        neither evicted nor removed any (nothing is obsolete because oldest
        is pinned at timestamp 1)."""
        self.session.checkpoint()
        self.assertEqual(self.get_stat(stat.conn.cc_pages_evict), 0)
        self.assertEqual(self.get_stat(stat.conn.cc_pages_removed), 0)
        self.assertGreater(self.get_stat(stat.conn.cc_pages_visited), 0)

    def test_gc(self):
        nrows = 10000

        # Create a table.
        uri = "table:gc04"
        ds = SimpleDataSet(self, uri, 0, key_format="i", value_format="S")
        ds.populate()

        # Pin oldest and stable to timestamp 1 so no update becomes obsolete.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(1) +
            ',stable_timestamp=' + self.timestamp_str(1))

        bigvalue = "aaaaa" * 100
        bigvalue2 = "ddddd" * 100
        # Alternate full-table updates with checkpoints; each checkpoint
        # must populate the history store without discarding live pages.
        self.large_updates(uri, bigvalue, ds, nrows, 10)
        self.large_updates(uri, bigvalue2, ds, nrows, 20)
        self.checkpoint_and_check_cc_stats()

        self.large_updates(uri, bigvalue, ds, nrows, 30)
        self.checkpoint_and_check_cc_stats()

        self.large_updates(uri, bigvalue2, ds, nrows, 40)
        self.checkpoint_and_check_cc_stats()

        self.large_updates(uri, bigvalue, ds, nrows, 50)
        self.large_updates(uri, bigvalue2, ds, nrows, 60)
        self.checkpoint_and_check_cc_stats()

        self.large_updates(uri, bigvalue, ds, nrows, 70)
        self.checkpoint_and_check_cc_stats()
if __name__ == '__main__':
wttest.run() | 0.671471 | 0.372648 |
from pipeline.c3dgenrator import *
from tensorflow.keras.layers import Input
from nets.extract_1 import *
from tensorflow.python.keras.engine.training import Model
from tensorflow.python.keras.layers.wrappers import TimeDistributed
from nets.RoiPoolingConv import *
from pipeline.SPN import *
from helper import losses
import time
import tensorflow.keras.backend as bk
import tensorflow as tf
# --- Hyper-parameters / model construction --------------------------------
num_frames = 5    # frames per input clip
num_anchors = 15  # SPN anchors per spatial location

# Data generator over the configured video set.
gen = C3DGenerator(config.video_info)

# Input clip: 5 frames of 144x144 RGB images.
input_tensor = Input((5, 144, 144, 3))
pre_roi = extract_layer(input_tensor)

# Per-frame RPN heads: per-anchor objectness scores and 4 box-regression
# values per anchor.
x_class = TimeDistributed(Conv2D(num_anchors, (1, 1), activation='sigmoid', kernel_initializer='uniform',
                                 name='rpn_out_class'))(pre_roi)
x_regr = TimeDistributed(Conv2D(4 * num_anchors, (1, 1), activation='linear', kernel_initializer='zero',
                                name='rpn_out_regress'))(pre_roi)

# SPN helper that produces ground-truth anchor tags for 144x144 frames.
spn_helper = SPN(7, (144, 144))

m = Model([input_tensor], [x_class, x_regr])
m.compile(optimizer='sgd', loss=[losses.rpn_loss_cls(num_anchors), losses.rpn_loss_reg(num_anchors)])
m.summary(160)

# --- Training loop --------------------------------------------------------
for i in range(500):
    time1 = time.time()
    img, label = gen.next()
    spn_cls = []
    spn_reg = []
    time2 = time.time()
    # Build ground-truth classification/regression tags for every frame.
    for j in range(num_frames):
        cur_spn_cls, cur_spn_reg, cur_box_cls, cur_box_raw, cur_spn_cls_valid = spn_helper.cal_gt_tags(label[0][j])
        spn_cls.append(cur_spn_cls)
        spn_reg.append(cur_spn_reg)
    # Add the leading batch dimension expected by the model.
    spn_cls = np.expand_dims(np.array(spn_cls), 0)
    spn_reg = np.expand_dims(np.array(spn_reg), 0)
    loss = m.train_on_batch(img, [spn_cls, spn_reg])
    y_cls, y_rpn = m.predict_on_batch(img)
    # NOTE(review): spn_cls appears to pack a validity mask in the first
    # num_anchors channels and the class signal in the rest -- confirm
    # against SPN.cal_gt_tags.
    anchor_valid = tf.convert_to_tensor(spn_cls[:, :, :, :, :num_anchors], dtype=float)
    anchor_signal = tf.convert_to_tensor(spn_cls[:, :, :, :, num_anchors:], dtype=float)
    valid_loss = bk.sum(anchor_valid * bk.binary_crossentropy(y_cls, anchor_signal))
    n_cls = bk.sum(1e-4 + anchor_valid[:, :, :, :, :num_anchors])
    # NOTE(review): 'lo' recomputes the cls loss manually but is unused --
    # presumably a left-over sanity check.
    lo = valid_loss / n_cls
    print(loss)
    time3 = time.time()
    print('gen label time: {0}. traingin time: {1}'.format(time2 - time1, time3 - time2))
from tensorflow.keras.layers import Input
from nets.extract_1 import *
from tensorflow.python.keras.engine.training import Model
from tensorflow.python.keras.layers.wrappers import TimeDistributed
from nets.RoiPoolingConv import *
from pipeline.SPN import *
from helper import losses
import time
import tensorflow.keras.backend as bk
import tensorflow as tf
# --- Hyper-parameters / model construction --------------------------------
num_frames = 5    # frames per input clip
num_anchors = 15  # SPN anchors per spatial location

# Data generator over the configured video set.
gen = C3DGenerator(config.video_info)

# Input clip: 5 frames of 144x144 RGB images.
input_tensor = Input((5, 144, 144, 3))
pre_roi = extract_layer(input_tensor)

# Per-frame RPN heads: per-anchor objectness scores and 4 box-regression
# values per anchor.
x_class = TimeDistributed(Conv2D(num_anchors, (1, 1), activation='sigmoid', kernel_initializer='uniform',
                                 name='rpn_out_class'))(pre_roi)
x_regr = TimeDistributed(Conv2D(4 * num_anchors, (1, 1), activation='linear', kernel_initializer='zero',
                                name='rpn_out_regress'))(pre_roi)

# SPN helper that produces ground-truth anchor tags for 144x144 frames.
spn_helper = SPN(7, (144, 144))

m = Model([input_tensor], [x_class, x_regr])
m.compile(optimizer='sgd', loss=[losses.rpn_loss_cls(num_anchors), losses.rpn_loss_reg(num_anchors)])
m.summary(160)

# --- Training loop --------------------------------------------------------
for i in range(500):
    time1 = time.time()
    img, label = gen.next()
    spn_cls = []
    spn_reg = []
    time2 = time.time()
    # Build ground-truth classification/regression tags for every frame.
    for j in range(num_frames):
        cur_spn_cls, cur_spn_reg, cur_box_cls, cur_box_raw, cur_spn_cls_valid = spn_helper.cal_gt_tags(label[0][j])
        spn_cls.append(cur_spn_cls)
        spn_reg.append(cur_spn_reg)
    # Add the leading batch dimension expected by the model.
    spn_cls = np.expand_dims(np.array(spn_cls), 0)
    spn_reg = np.expand_dims(np.array(spn_reg), 0)
    loss = m.train_on_batch(img, [spn_cls, spn_reg])
    y_cls, y_rpn = m.predict_on_batch(img)
    # NOTE(review): spn_cls appears to pack a validity mask in the first
    # num_anchors channels and the class signal in the rest -- confirm
    # against SPN.cal_gt_tags.
    anchor_valid = tf.convert_to_tensor(spn_cls[:, :, :, :, :num_anchors], dtype=float)
    anchor_signal = tf.convert_to_tensor(spn_cls[:, :, :, :, num_anchors:], dtype=float)
    valid_loss = bk.sum(anchor_valid * bk.binary_crossentropy(y_cls, anchor_signal))
    n_cls = bk.sum(1e-4 + anchor_valid[:, :, :, :, :num_anchors])
    # NOTE(review): 'lo' recomputes the cls loss manually but is unused --
    # presumably a left-over sanity check.
    lo = valid_loss / n_cls
    print(loss)
    time3 = time.time()
    print('gen label time: {0}. traingin time: {1}'.format(time2 - time1, time3 - time2))
"""Unit tests for check_grid_match function."""
import unittest
import numpy as np
from iris.tests import IrisTest
from improver.metadata.utilities import create_coordinate_hash
from improver.spotdata.build_spotdata_cube import build_spotdata_cube
from improver.spotdata.spot_extraction import check_grid_match
from ...set_up_test_cubes import set_up_variable_cube
class Test_check_grid_match(IrisTest):
    """Test the check_grid_match function."""

    # Message raised by check_grid_match on any grid/hash mismatch; kept in
    # one place so the two mismatch tests cannot drift apart.
    GRID_MISMATCH_MSG = (
        "Cubes do not share or originate from the same grid, so cannot "
        "be used together."
    )

    def setUp(self):
        """Set up cubes for use in testing."""
        data = np.ones(9).reshape(3, 3).astype(np.float32)
        self.reference_cube = set_up_variable_cube(data, spatial_grid="equalarea")
        self.cube1 = self.reference_cube.copy()
        self.cube2 = self.reference_cube.copy()
        self.unmatched_cube = set_up_variable_cube(data, spatial_grid="latlon")
        self.diagnostic_cube_hash = create_coordinate_hash(self.reference_cube)

        neighbours = np.array([[[0.0, 0.0, 0.0]]])
        altitudes = np.array([0])
        latitudes = np.array([0])
        longitudes = np.array([0])
        wmo_ids = np.array([0])
        grid_attributes = ["x_index", "y_index", "vertical_displacement"]
        neighbour_methods = ["nearest"]
        self.neighbour_cube = build_spotdata_cube(
            neighbours,
            "grid_neighbours",
            1,
            altitudes,
            latitudes,
            longitudes,
            wmo_ids,
            grid_attributes=grid_attributes,
            neighbour_methods=neighbour_methods,
        )
        self.neighbour_cube.attributes["model_grid_hash"] = self.diagnostic_cube_hash

    def test_matching_grids(self):
        """Test a case in which the grids match. There is no assert
        statement as this test is successful if no exception is raised."""
        cubes = [self.reference_cube, self.cube1, self.cube2]
        check_grid_match(cubes)

    def test_non_matching_grids(self):
        """Test a case in which a cube with an unmatching grid is included in
        the comparison, raising a ValueError."""
        cubes = [self.reference_cube, self.cube1, self.unmatched_cube]
        with self.assertRaisesRegex(ValueError, self.GRID_MISMATCH_MSG):
            check_grid_match(cubes)

    def test_using_model_grid_hash(self):
        """Test a case in which one of the cubes is a spotdata cube without a
        spatial grid. This cube includes a model_grid_hash to indicate on which
        grid the neighbours were found."""
        cubes = [self.reference_cube, self.neighbour_cube, self.cube2]
        check_grid_match(cubes)

    def test_using_model_grid_hash_reordered_cubes(self):
        """Test as above but using the neighbour_cube as the first in the list
        so that it acts as the reference for all the other cubes."""
        cubes = [self.neighbour_cube, self.reference_cube, self.cube2]
        check_grid_match(cubes)

    def test_multiple_model_grid_hash_cubes(self):
        """Test that a check works when all the cubes passed to the function
        have model_grid_hashes."""
        self.cube1.attributes["model_grid_hash"] = self.diagnostic_cube_hash
        cubes = [self.neighbour_cube, self.cube1]
        check_grid_match(cubes)

    def test_mismatched_model_grid_hash_cubes(self):
        """Test that a check works when all the cubes passed to the function
        have model_grid_hashes and these do not match."""
        self.cube1.attributes["model_grid_hash"] = "123"
        cubes = [self.neighbour_cube, self.cube1]
        with self.assertRaisesRegex(ValueError, self.GRID_MISMATCH_MSG):
            check_grid_match(cubes)
if __name__ == "__main__":
unittest.main() | improver_tests/spotdata/spotdata/test_check_grid_match.py | """Unit tests for check_grid_match function."""
import unittest
import numpy as np
from iris.tests import IrisTest
from improver.metadata.utilities import create_coordinate_hash
from improver.spotdata.build_spotdata_cube import build_spotdata_cube
from improver.spotdata.spot_extraction import check_grid_match
from ...set_up_test_cubes import set_up_variable_cube
class Test_check_grid_match(IrisTest):
    """Test the check_grid_match function."""

    # Message raised by check_grid_match on any grid/hash mismatch; kept in
    # one place so the two mismatch tests cannot drift apart.
    GRID_MISMATCH_MSG = (
        "Cubes do not share or originate from the same grid, so cannot "
        "be used together."
    )

    def setUp(self):
        """Set up cubes for use in testing."""
        data = np.ones(9).reshape(3, 3).astype(np.float32)
        self.reference_cube = set_up_variable_cube(data, spatial_grid="equalarea")
        self.cube1 = self.reference_cube.copy()
        self.cube2 = self.reference_cube.copy()
        self.unmatched_cube = set_up_variable_cube(data, spatial_grid="latlon")
        self.diagnostic_cube_hash = create_coordinate_hash(self.reference_cube)

        neighbours = np.array([[[0.0, 0.0, 0.0]]])
        altitudes = np.array([0])
        latitudes = np.array([0])
        longitudes = np.array([0])
        wmo_ids = np.array([0])
        grid_attributes = ["x_index", "y_index", "vertical_displacement"]
        neighbour_methods = ["nearest"]
        self.neighbour_cube = build_spotdata_cube(
            neighbours,
            "grid_neighbours",
            1,
            altitudes,
            latitudes,
            longitudes,
            wmo_ids,
            grid_attributes=grid_attributes,
            neighbour_methods=neighbour_methods,
        )
        self.neighbour_cube.attributes["model_grid_hash"] = self.diagnostic_cube_hash

    def test_matching_grids(self):
        """Test a case in which the grids match. There is no assert
        statement as this test is successful if no exception is raised."""
        cubes = [self.reference_cube, self.cube1, self.cube2]
        check_grid_match(cubes)

    def test_non_matching_grids(self):
        """Test a case in which a cube with an unmatching grid is included in
        the comparison, raising a ValueError."""
        cubes = [self.reference_cube, self.cube1, self.unmatched_cube]
        with self.assertRaisesRegex(ValueError, self.GRID_MISMATCH_MSG):
            check_grid_match(cubes)

    def test_using_model_grid_hash(self):
        """Test a case in which one of the cubes is a spotdata cube without a
        spatial grid. This cube includes a model_grid_hash to indicate on which
        grid the neighbours were found."""
        cubes = [self.reference_cube, self.neighbour_cube, self.cube2]
        check_grid_match(cubes)

    def test_using_model_grid_hash_reordered_cubes(self):
        """Test as above but using the neighbour_cube as the first in the list
        so that it acts as the reference for all the other cubes."""
        cubes = [self.neighbour_cube, self.reference_cube, self.cube2]
        check_grid_match(cubes)

    def test_multiple_model_grid_hash_cubes(self):
        """Test that a check works when all the cubes passed to the function
        have model_grid_hashes."""
        self.cube1.attributes["model_grid_hash"] = self.diagnostic_cube_hash
        cubes = [self.neighbour_cube, self.cube1]
        check_grid_match(cubes)

    def test_mismatched_model_grid_hash_cubes(self):
        """Test that a check works when all the cubes passed to the function
        have model_grid_hashes and these do not match."""
        self.cube1.attributes["model_grid_hash"] = "123"
        cubes = [self.neighbour_cube, self.cube1]
        with self.assertRaisesRegex(ValueError, self.GRID_MISMATCH_MSG):
            check_grid_match(cubes)
if __name__ == "__main__":
unittest.main() | 0.814717 | 0.621541 |
from root.config.main import sIze, np
from screws.freeze.main import FrozenOnly
class _3dCSCG_Mesh_DO_FIND(FrozenOnly):
    """Lookup ("find") helpers for a 3d CSCG mesh."""

    def __init__(self, DO):
        # Keep references to the parent DO wrapper and its mesh, then freeze
        # the instance so no further attributes can be added.
        self._DO_ = DO
        self._mesh_ = DO._mesh_
        self._freeze_self_()

    def region_name_of_element(self, i):
        """Find the region of the i-th element.

        Parameters
        ----------
        i : int
            Global number of the mesh element.

        Returns
        -------
        str or None
            The region name, or None when ``i`` exceeds every accumulation
            bound.
        """
        region_name = None
        # _num_elements_accumulation_ maps cumulative element counts to
        # region names; the first bound greater than i gives the region.
        for num_elements_accumulation in self._mesh_._num_elements_accumulation_:
            if i < num_elements_accumulation:
                region_name = self._mesh_._num_elements_accumulation_[num_elements_accumulation]
                break
        return region_name

    def region_name_and_local_indices_of_element(self, i):
        """Find the region name and in-region local indices of element ``i``.

        Parameters
        ----------
        i : int
            Global number of the mesh element.

        Returns
        -------
        tuple
            Whatever the mesh's private lookup returns; used below as
            (region_name, local_indices).
        """
        return self._mesh_.___PRIVATE_do_find_region_name_and_local_indices_of_element___(i)

    def reference_origin_and_size_of_element_of_given_local_indices(self, region_name, local_indices):
        """Origin and size, in the reference region [0, 1]^ndim, of the
        element addressed by (region_name, local_indices).

        Returns
        -------
        (origin, delta) : tuple of tuples
            Per-axis origin coordinates and extents.
        """
        origin = [None for _ in range(self._mesh_.ndim)]
        delta = [None for _ in range(self._mesh_.ndim)]
        for i in range(self._mesh_.ndim):
            # spacing holds the element's starting coordinate along axis i;
            # ratio holds its extent along that axis.
            origin[i] = self._mesh_._element_spacing_[region_name][i][local_indices[i]]
            delta[i] = self._mesh_._element_ratio_[region_name][i][local_indices[i]]
        return tuple(origin), tuple(delta)

    def reference_origin_and_size_of_element(self, i):
        """
        Find the origin, the UL corner(2D), NWB corner (3D), and the size of
        the ith element in the reference regions [0,1]^ndim.
        """
        region_name, local_indices = self.region_name_and_local_indices_of_element(i)
        return self.reference_origin_and_size_of_element_of_given_local_indices(
            region_name, local_indices)

    def slave_of_element(self, i: int) -> int:
        """Find the core rank of mesh element #i.

        Parameters
        ----------
        i : int
            The number of the mesh element.

        Returns
        -------
        midCore1 : int
            The core the mesh element #`i` is in.
        """
        DISTRI = self._mesh_._element_distribution_
        if isinstance(i, str): i = int(i)
        # For few cores (or a mesh not spread over all cores) a linear scan
        # is cheap enough.
        if sIze <= 6 or not self._mesh_.___is_occupying_all_cores___:
            for nC in range(sIze):
                if i in DISTRI[nC]: return nC
            raise Exception()
        # Otherwise bisect the candidate core range; the max/min comparisons
        # rely on element numbers increasing with the core rank.
        midCore0 = 0
        midCore1 = sIze // 2
        midCore2 = sIze
        while i not in DISTRI[midCore1] and midCore1 - midCore0 > 2 and midCore2 - midCore1 > 2:
            if i > max(DISTRI[midCore1]):
                midCore0 = midCore1
                midCore1 = (midCore0 + midCore2) // 2
            elif i < min(DISTRI[midCore1]):
                midCore2 = midCore1
                midCore1 = (midCore0 + midCore2) // 2
            else:
                raise Exception
        # Finish with a linear scan over the remaining small range.
        if i in DISTRI[midCore1]:
            return midCore1
        elif i > np.max(DISTRI[midCore1]):
            # NOTE(review): if i is in none of the scanned cores the method
            # falls through and implicitly returns None -- confirm callers
            # never pass an undistributed element number.
            for noCore in range(midCore1, midCore2):
                if i in DISTRI[noCore]: return noCore
        elif i < np.min(DISTRI[midCore1]):
            for noCore in range(midCore0, midCore1):
                if i in DISTRI[noCore]: return noCore
        else:
            raise Exception

    def element_attach_to_region_side(self, region, side_name):
        """Collect the global numbers of elements attached to one side of a
        region.

        :param str region: Region name.
        :param str side_name: One of 'N', 'S', 'W', 'E', 'B', 'F'.
        :return: Array of element global numbers on that side.
        """
        EGN1 = self._mesh_.___PRIVATE_generate_element_global_numbering_for_region___(region)
        # Each side fixes one axis of the 3d numbering array at its first
        # (N/W/B) or last (S/E/F) index.
        if side_name == 'N':
            elements = EGN1[ 0, :, :]
        elif side_name == 'S':
            elements = EGN1[-1, :, :]
        elif side_name == 'W':
            elements = EGN1[ :, 0, :]
        elif side_name == 'E':
            elements = EGN1[ :,-1, :]
        elif side_name == 'B':
            elements = EGN1[ :, :, 0]
        elif side_name == 'F':
            elements = EGN1[ :, :,-1]
        else:
            raise Exception()
        return elements
from screws.freeze.main import FrozenOnly
class _3dCSCG_Mesh_DO_FIND(FrozenOnly):
    """Lookup ("find") helpers for a 3d CSCG mesh."""

    def __init__(self, DO):
        # Keep references to the parent DO wrapper and its mesh, then freeze
        # the instance so no further attributes can be added.
        self._DO_ = DO
        self._mesh_ = DO._mesh_
        self._freeze_self_()

    def region_name_of_element(self, i):
        """Find the region of the i-th element.

        Parameters
        ----------
        i : int
            Global number of the mesh element.

        Returns
        -------
        str or None
            The region name, or None when ``i`` exceeds every accumulation
            bound.
        """
        region_name = None
        # _num_elements_accumulation_ maps cumulative element counts to
        # region names; the first bound greater than i gives the region.
        for num_elements_accumulation in self._mesh_._num_elements_accumulation_:
            if i < num_elements_accumulation:
                region_name = self._mesh_._num_elements_accumulation_[num_elements_accumulation]
                break
        return region_name

    def region_name_and_local_indices_of_element(self, i):
        """Find the region name and in-region local indices of element ``i``.

        Parameters
        ----------
        i : int
            Global number of the mesh element.

        Returns
        -------
        tuple
            Whatever the mesh's private lookup returns; used below as
            (region_name, local_indices).
        """
        return self._mesh_.___PRIVATE_do_find_region_name_and_local_indices_of_element___(i)

    def reference_origin_and_size_of_element_of_given_local_indices(self, region_name, local_indices):
        """Origin and size, in the reference region [0, 1]^ndim, of the
        element addressed by (region_name, local_indices).

        Returns
        -------
        (origin, delta) : tuple of tuples
            Per-axis origin coordinates and extents.
        """
        origin = [None for _ in range(self._mesh_.ndim)]
        delta = [None for _ in range(self._mesh_.ndim)]
        for i in range(self._mesh_.ndim):
            # spacing holds the element's starting coordinate along axis i;
            # ratio holds its extent along that axis.
            origin[i] = self._mesh_._element_spacing_[region_name][i][local_indices[i]]
            delta[i] = self._mesh_._element_ratio_[region_name][i][local_indices[i]]
        return tuple(origin), tuple(delta)

    def reference_origin_and_size_of_element(self, i):
        """
        Find the origin, the UL corner(2D), NWB corner (3D), and the size of
        the ith element in the reference regions [0,1]^ndim.
        """
        region_name, local_indices = self.region_name_and_local_indices_of_element(i)
        return self.reference_origin_and_size_of_element_of_given_local_indices(
            region_name, local_indices)

    def slave_of_element(self, i: int) -> int:
        """Find the core rank of mesh element #i.

        Parameters
        ----------
        i : int
            The number of the mesh element.

        Returns
        -------
        midCore1 : int
            The core the mesh element #`i` is in.
        """
        DISTRI = self._mesh_._element_distribution_
        if isinstance(i, str): i = int(i)
        # For few cores (or a mesh not spread over all cores) a linear scan
        # is cheap enough.
        if sIze <= 6 or not self._mesh_.___is_occupying_all_cores___:
            for nC in range(sIze):
                if i in DISTRI[nC]: return nC
            raise Exception()
        # Otherwise bisect the candidate core range; the max/min comparisons
        # rely on element numbers increasing with the core rank.
        midCore0 = 0
        midCore1 = sIze // 2
        midCore2 = sIze
        while i not in DISTRI[midCore1] and midCore1 - midCore0 > 2 and midCore2 - midCore1 > 2:
            if i > max(DISTRI[midCore1]):
                midCore0 = midCore1
                midCore1 = (midCore0 + midCore2) // 2
            elif i < min(DISTRI[midCore1]):
                midCore2 = midCore1
                midCore1 = (midCore0 + midCore2) // 2
            else:
                raise Exception
        # Finish with a linear scan over the remaining small range.
        if i in DISTRI[midCore1]:
            return midCore1
        elif i > np.max(DISTRI[midCore1]):
            # NOTE(review): if i is in none of the scanned cores the method
            # falls through and implicitly returns None -- confirm callers
            # never pass an undistributed element number.
            for noCore in range(midCore1, midCore2):
                if i in DISTRI[noCore]: return noCore
        elif i < np.min(DISTRI[midCore1]):
            for noCore in range(midCore0, midCore1):
                if i in DISTRI[noCore]: return noCore
        else:
            raise Exception

    def element_attach_to_region_side(self, region, side_name):
        """Collect the global numbers of elements attached to one side of a
        region.

        :param str region: Region name.
        :param str side_name: One of 'N', 'S', 'W', 'E', 'B', 'F'.
        :return: Array of element global numbers on that side.
        """
        EGN1 = self._mesh_.___PRIVATE_generate_element_global_numbering_for_region___(region)
        # Each side fixes one axis of the 3d numbering array at its first
        # (N/W/B) or last (S/E/F) index.
        if side_name == 'N':
            elements = EGN1[ 0, :, :]
        elif side_name == 'S':
            elements = EGN1[-1, :, :]
        elif side_name == 'W':
            elements = EGN1[ :, 0, :]
        elif side_name == 'E':
            elements = EGN1[ :,-1, :]
        elif side_name == 'B':
            elements = EGN1[ :, :, 0]
        elif side_name == 'F':
            elements = EGN1[ :, :,-1]
        else:
            raise Exception()
        return elements
"""Monitor learning rate during training."""
from composer.core import Callback, State
from composer.loggers import Logger
__all__ = ["LRMonitor"]
class LRMonitor(Callback):
    """Logs the learning rate.

    This callback iterates over all optimizers and their parameter groups to
    log the learning rate under the ``lr-{OPTIMIZER_NAME}/group{GROUP_NUMBER}``
    key, one entry per optimizer parameter group.

    Example
        >>> from composer.callbacks import LRMonitor
        >>> # constructing trainer object with this callback
        >>> trainer = Trainer(
        ...     model=model,
        ...     train_dataloader=train_dataloader,
        ...     eval_dataloader=eval_dataloader,
        ...     optimizers=optimizer,
        ...     max_duration="1ep",
        ...     callbacks=[LRMonitor()],
        ... )

    +---------------------------------------------+---------------------------------------+
    | Key                                         | Logged data                           |
    +=============================================+=======================================+
    |                                             | Learning rate for each optimizer and  |
    | ``lr-{OPTIMIZER_NAME}/group{GROUP_NUMBER}`` | parameter group for that optimizer is |
    |                                             | logged to a separate key              |
    +---------------------------------------------+---------------------------------------+
    """

    def __init__(self) -> None:
        super().__init__()

    def batch_end(self, state: State, logger: Logger):
        assert state.optimizers is not None, "optimizers must be defined"
        for optimizer in state.optimizers:
            name = optimizer.__class__.__name__
            # Log each parameter group's lr exactly once. (The previous
            # implementation nested this loop inside ``for lr in lrs`` and
            # therefore logged every group len(lrs) times per batch.)
            for idx, group in enumerate(optimizer.param_groups):
                logger.data_batch({f'lr-{name}/group{idx}': group['lr']})
logger.data_batch({f'lr-{name}/group{idx}': lr}) | composer/callbacks/lr_monitor.py |
"""Monitor learning rate during training."""
from composer.core import Callback, State
from composer.loggers import Logger
__all__ = ["LRMonitor"]
class LRMonitor(Callback):
    """Logs the learning rate.

    This callback iterates over all optimizers and their parameter groups to
    log the learning rate under the ``lr-{OPTIMIZER_NAME}/group{GROUP_NUMBER}``
    key, one entry per optimizer parameter group.

    Example
        >>> from composer.callbacks import LRMonitor
        >>> # constructing trainer object with this callback
        >>> trainer = Trainer(
        ...     model=model,
        ...     train_dataloader=train_dataloader,
        ...     eval_dataloader=eval_dataloader,
        ...     optimizers=optimizer,
        ...     max_duration="1ep",
        ...     callbacks=[LRMonitor()],
        ... )

    +---------------------------------------------+---------------------------------------+
    | Key                                         | Logged data                           |
    +=============================================+=======================================+
    |                                             | Learning rate for each optimizer and  |
    | ``lr-{OPTIMIZER_NAME}/group{GROUP_NUMBER}`` | parameter group for that optimizer is |
    |                                             | logged to a separate key              |
    +---------------------------------------------+---------------------------------------+
    """

    def __init__(self) -> None:
        super().__init__()

    def batch_end(self, state: State, logger: Logger):
        assert state.optimizers is not None, "optimizers must be defined"
        for optimizer in state.optimizers:
            name = optimizer.__class__.__name__
            # Log each parameter group's lr exactly once. (The previous
            # implementation nested this loop inside ``for lr in lrs`` and
            # therefore logged every group len(lrs) times per batch.)
            for idx, group in enumerate(optimizer.param_groups):
                logger.data_batch({f'lr-{name}/group{idx}': group['lr']})
logger.data_batch({f'lr-{name}/group{idx}': lr}) | 0.921825 | 0.36424 |
import argparse
from argparse import RawTextHelpFormatter
import subprocess
from subprocess import PIPE
import os, sys, time
import logging
import re
from . import filterGraph
def checkStatus(args):
    """Validate the command-line arguments before the pipeline runs.

    Args:
        args: Parsed argparse namespace with ``ped``, ``pacbio``,
            ``illumina1`` and ``illumina2`` attributes.

    Raises:
        ValueError: If the number of PacBio files is inconsistent with the
            presence/absence of a PED file.
        FileNotFoundError: If any of the input files does not exist.
    """
    if sys.version_info.major != 3:
        logging.error('Please use Python3.x to run this pipeline.')
        # sys.exit(1) instead of bare exit(): explicit non-zero status and
        # safe outside the interactive interpreter.
        sys.exit(1)
    if args.ped is not None:
        if len(args.pacbio) != 3:
            raise ValueError('When PED specified, you should have three PACBIO '
                             'files in the order of MOM, DAD, CHILD.')
    else:
        if len(args.pacbio) != 1:
            raise ValueError('When no PED specified, you should have only one '
                             'PACBIO file.')
    if not os.path.exists(args.illumina1):
        raise FileNotFoundError('No such file or directory: %s' % args.illumina1)
    if not os.path.exists(args.illumina2):
        raise FileNotFoundError('No such file or directory: %s' % args.illumina2)
    for file in args.pacbio:
        if not os.path.exists(file):
            raise FileNotFoundError('No such file or directory: %s' % file)
def run_pipeline(args):
logging.basicConfig(level=logging.DEBUG)
logging.info('Using python version %s'%sys.version)
PID = os.getpid()
logging.info('PID %d'%PID)
whdenovoPath = '/'.join(sys.path[0].split('/')[:-1])
whdenovoPath = sys.path[0]
vg = "vg"
logging.info('Using vg at: %s'%vg)
graphaligner = "GraphAligner"
if args.output != None:
tempPath = os.getcwd() + '/' + args.output
else:
tempPath = os.getcwd() + '/temp_%d_%s'%(PID, time.strftime("%m%d%H%M%S"))
logging.info('Making temp directories.')
if not os.path.isdir(tempPath):
subprocess.call(['mkdir', tempPath])
subprocess.call(['mkdir', '%s/illumina'%tempPath])
subprocess.call(['mkdir', '%s/bc1'%tempPath])
else:
logging.error('Output directory %s already exits' % tempPath)
sys.exit(1)
# spades ADDED "-m 500" FOR LARGE GENOME
logging.info('bfc error correcting...')
subprocess.call('bfc -t %d -s %s %s 1> %s/cor.1.fq 2>> %s/bfc.log'%(args.t, args.size, args.illumina1, tempPath, tempPath), shell = True)
subprocess.call('bfc -t %d -s %s %s 1> %s/cor.2.fq 2>> %s/bfc.log'%(args.t, args.size, args.illumina2, tempPath, tempPath), shell = True)
logging.info('bfc log saved at %s/bfc.log'%tempPath)
logging.info('Running spades...')
spades_cmd = "spades.py -t %d -k %d -m 500 -1 %s/cor.1.fq -2 %s/cor.2.fq --only-assembler -o %s/illumina/" % (args.t, args.k, tempPath, tempPath, tempPath)
spades_cmd = spades_cmd.split()
a = subprocess.call(spades_cmd, shell=False, stdout=subprocess.PIPE)
if a != 0:
logging.error('Error while running spades. Error Code: %d'%a)
sys.exit(1)
logging.info('SPAdes log saved at %s/illumina/spades.log'%tempPath)
logging.info('Filtering graph...')
subprocess.call("grep -v '^P' %s/illumina/assembly_graph_with_scaffolds.gfa | awk -F'\\t' '{ if ($2 != $4) print $0}' | %s view --gfa-in - --vg | %s view -g - | awk -F'\\t' '{ if ($2 !=$4) print $0}' > %s/asm1.gfa" % (tempPath , vg, vg, tempPath), shell = True)
filterGraph.flt('%s/asm1.gfa'%tempPath, '%s/illumina/asm1.gfa'%tempPath)
#subprocess.call("python2 %s/whdenovo/printnodedegrees_gfa.py %s/asm1.gfa | awk -F' ' '{ if($2 > 70 || $2==0) printf \"%%s\\n\", $1 }' > %s/asm1.wrongnodes"%(whdenovoPath, tempPath, tempPath), shell = True)
#subprocess.call('python2 %s/whdenovo/remove_wrongnodes.py %s/asm1.wrongnodes %s/asm1.gfa %s/illumina/asm1.gfa'%(whdenovoPath, tempPath, tempPath, tempPath), shell = True)
logging.info('Running snarls...')
subprocess.call('%s view --gfa-in --vg %s/illumina/asm1.gfa > %s/illumina/asm1.vg' % (vg, tempPath, tempPath), shell = True)
subprocess.call('%s snarls -t -r %s/illumina/asm1.trans %s/illumina/asm1.vg > %s/illumina/asm1.snarls' % (vg, tempPath, tempPath, tempPath), shell = True)
logging.info('Aligning...')
if args.ped != None:
for i in range(3):
a = subprocess.call("%s -t %d -g %s/illumina/asm1.gfa -f %s -a %s/aln%d.gam --seeds-mum-count 100000 --seeds-mxm-length 10 -C 500000 -b 35" % (graphaligner, args.t, tempPath, args.pacbio[i], tempPath, i), shell = True, stdout=subprocess.PIPE)
if a != 0:
logging.error('Error while running GraphAligner. Exit Code:%d'%a)
sys.exit(2)
elif args.ped == None:
a = subprocess.call("%s -t %d -g %s/illumina/asm1.gfa -f %s -a %s/aln.gam --seeds-mum-count 100000 --seeds-mxm-length 10 -C 500000 -b 35" % (graphaligner, args.t, tempPath, args.pacbio[0], tempPath), shell = True, stdout=subprocess.PIPE)
if a != 0:
logging.error('Error while running GraphAligner. Exit Code: %d'%a)
sys.exit(a)
a = subprocess.call("ls %s/aln*gam | parallel '%s view -a {} > {}.json'"%(tempPath, vg), shell = True)
if a != 0:
logging.error('Error while converting GAM to JSON. Exit Code: %d'%a)
sys.exit(a)
logging.info('Partitioning...')
if args.ped != None:
subprocess.call("whatshap_tri phaseg reads %s %s/illumina/asm1.trans %s/aln0.gam.json %s/aln1.gam.json %s/aln2.gam.json -t %d -p %s/bc1/aln --lowc %d --high %d > %s/partition.log" % (whdenovoPath, args.ped, tempPath, tempPath, tempPath, tempPath, args.t, tempPath, args.lowc, args.highc, tempPath), shell = True)
subprocess.call("ls %s/bc1/*allreads | parallel -j%d \"awk '\\$3 == 1 {print \\$1}' {} > {}.hp1.reads\"" % (tempPath, args.t), shell = True)
subprocess.call("ls %s/bc1/*allreads | parallel -j%d \"awk '\\$3 == 0 {print \\$1}' {} > {}.hp0.reads\"" % (tempPath, args.t), shell = True)
subprocess.call("cat %s/bc1/*hp1.reads | sort | uniq > %s/HP1.reads" % (tempPath,tempPath), shell = True)
subprocess.call("cat %s/bc1/*hp0.reads | sort | uniq > %s/HP0.reads" % (tempPath,tempPath), shell = True)
logging.info('Partitioning finished. Read names of different haplotypes are saved in:')
print('%s/HP0.reads'%tempPath)
print('%s/HP1.reads'%tempPath)
else:
# TODO individual case
pass
def run_test(sth):
print(sth)
def add_arguments(parser):
arg = parser.add_argument
arg('--illumina1', metavar='FASTQ/FASTA', type=str,
required = True,
help='Illumina short-read data for right reads.')
arg('--illumina2', metavar='FASTQ/FASTA', type=str,
required = True,
help='Illumina short-read data for left reads.')
arg('--pacbio', metavar='FASTQ/FASTA', type=str, nargs='+',
required = True, help='PacBio long-read data. If no PED file specified, should only give one file. \nOtherwise, three \
files in the order of MOM, DAD, CHILD.')
arg('-p', '--ped', metavar='ped file', type=str,
required = False, help = "PED file.")
arg('-o', '--output', metavar = 'PATH',
required = False)
arg('-k', type = int, default = 77 , help = 'K-mer size setting for SPAdes, must be odd and no larger than 128. [77].')
arg('-s', '--size', type = str, required = True, help = 'Expected genome size, acceptible example: 50k, 24m, 2g.')
arg('-t', metavar = 'INT', type = int, default = 4, help = "Use multiprocessing in the algorithm, and some steps utilize GNU parallel. [4]")
arg('--lowc', metavar = 'INT', type = int, default = 5, help = 'Lowest threshold for coverage to support edges.')
arg('--highc', metavar = 'INT', type = int, default = 20, help = 'Highest threshold for coverage to detect repeats.')
def main(args):
checkStatus(args)
run_pipeline(args) | whdenovo/partition.py | import argparse
from argparse import RawTextHelpFormatter
import subprocess
from subprocess import PIPE
import os, sys, time
import logging
import re
from . import filterGraph
def checkStatus(args):
if sys.version_info.major != 3:
logging.error('Please use Python3.x to run this pipeline.')
exit()
if args.ped != None:
if len(args.pacbio) != 3:
raise ValueError('When PED specified, you should have three PACBIO '
'files in the order of MOM, DAD, CHILD.')
elif args.ped == None:
if len(args.pacbio) != 1:
raise ValueError('When no PED specified, you should have only one '
'PACBIO file.')
if not os.path.exists(args.illumina1):
raise FileNotFoundError('No such file or directory: %s' % args.illumina1)
if not os.path.exists(args.illumina2):
raise FileNotFoundError('No such file or directory: %s' % args.illumina2)
for file in args.pacbio:
if not os.path.exists(file):
raise FileNotFoundError('No such file or directory: %s'%file)
def run_pipeline(args):
logging.basicConfig(level=logging.DEBUG)
logging.info('Using python version %s'%sys.version)
PID = os.getpid()
logging.info('PID %d'%PID)
whdenovoPath = '/'.join(sys.path[0].split('/')[:-1])
whdenovoPath = sys.path[0]
vg = "vg"
logging.info('Using vg at: %s'%vg)
graphaligner = "GraphAligner"
if args.output != None:
tempPath = os.getcwd() + '/' + args.output
else:
tempPath = os.getcwd() + '/temp_%d_%s'%(PID, time.strftime("%m%d%H%M%S"))
logging.info('Making temp directories.')
if not os.path.isdir(tempPath):
subprocess.call(['mkdir', tempPath])
subprocess.call(['mkdir', '%s/illumina'%tempPath])
subprocess.call(['mkdir', '%s/bc1'%tempPath])
else:
logging.error('Output directory %s already exits' % tempPath)
sys.exit(1)
# spades ADDED "-m 500" FOR LARGE GENOME
logging.info('bfc error correcting...')
subprocess.call('bfc -t %d -s %s %s 1> %s/cor.1.fq 2>> %s/bfc.log'%(args.t, args.size, args.illumina1, tempPath, tempPath), shell = True)
subprocess.call('bfc -t %d -s %s %s 1> %s/cor.2.fq 2>> %s/bfc.log'%(args.t, args.size, args.illumina2, tempPath, tempPath), shell = True)
logging.info('bfc log saved at %s/bfc.log'%tempPath)
logging.info('Running spades...')
spades_cmd = "spades.py -t %d -k %d -m 500 -1 %s/cor.1.fq -2 %s/cor.2.fq --only-assembler -o %s/illumina/" % (args.t, args.k, tempPath, tempPath, tempPath)
spades_cmd = spades_cmd.split()
a = subprocess.call(spades_cmd, shell=False, stdout=subprocess.PIPE)
if a != 0:
logging.error('Error while running spades. Error Code: %d'%a)
sys.exit(1)
logging.info('SPAdes log saved at %s/illumina/spades.log'%tempPath)
logging.info('Filtering graph...')
subprocess.call("grep -v '^P' %s/illumina/assembly_graph_with_scaffolds.gfa | awk -F'\\t' '{ if ($2 != $4) print $0}' | %s view --gfa-in - --vg | %s view -g - | awk -F'\\t' '{ if ($2 !=$4) print $0}' > %s/asm1.gfa" % (tempPath , vg, vg, tempPath), shell = True)
filterGraph.flt('%s/asm1.gfa'%tempPath, '%s/illumina/asm1.gfa'%tempPath)
#subprocess.call("python2 %s/whdenovo/printnodedegrees_gfa.py %s/asm1.gfa | awk -F' ' '{ if($2 > 70 || $2==0) printf \"%%s\\n\", $1 }' > %s/asm1.wrongnodes"%(whdenovoPath, tempPath, tempPath), shell = True)
#subprocess.call('python2 %s/whdenovo/remove_wrongnodes.py %s/asm1.wrongnodes %s/asm1.gfa %s/illumina/asm1.gfa'%(whdenovoPath, tempPath, tempPath, tempPath), shell = True)
logging.info('Running snarls...')
subprocess.call('%s view --gfa-in --vg %s/illumina/asm1.gfa > %s/illumina/asm1.vg' % (vg, tempPath, tempPath), shell = True)
subprocess.call('%s snarls -t -r %s/illumina/asm1.trans %s/illumina/asm1.vg > %s/illumina/asm1.snarls' % (vg, tempPath, tempPath, tempPath), shell = True)
logging.info('Aligning...')
if args.ped != None:
for i in range(3):
a = subprocess.call("%s -t %d -g %s/illumina/asm1.gfa -f %s -a %s/aln%d.gam --seeds-mum-count 100000 --seeds-mxm-length 10 -C 500000 -b 35" % (graphaligner, args.t, tempPath, args.pacbio[i], tempPath, i), shell = True, stdout=subprocess.PIPE)
if a != 0:
logging.error('Error while running GraphAligner. Exit Code:%d'%a)
sys.exit(2)
elif args.ped == None:
a = subprocess.call("%s -t %d -g %s/illumina/asm1.gfa -f %s -a %s/aln.gam --seeds-mum-count 100000 --seeds-mxm-length 10 -C 500000 -b 35" % (graphaligner, args.t, tempPath, args.pacbio[0], tempPath), shell = True, stdout=subprocess.PIPE)
if a != 0:
logging.error('Error while running GraphAligner. Exit Code: %d'%a)
sys.exit(a)
a = subprocess.call("ls %s/aln*gam | parallel '%s view -a {} > {}.json'"%(tempPath, vg), shell = True)
if a != 0:
logging.error('Error while converting GAM to JSON. Exit Code: %d'%a)
sys.exit(a)
logging.info('Partitioning...')
if args.ped != None:
subprocess.call("whatshap_tri phaseg reads %s %s/illumina/asm1.trans %s/aln0.gam.json %s/aln1.gam.json %s/aln2.gam.json -t %d -p %s/bc1/aln --lowc %d --high %d > %s/partition.log" % (whdenovoPath, args.ped, tempPath, tempPath, tempPath, tempPath, args.t, tempPath, args.lowc, args.highc, tempPath), shell = True)
subprocess.call("ls %s/bc1/*allreads | parallel -j%d \"awk '\\$3 == 1 {print \\$1}' {} > {}.hp1.reads\"" % (tempPath, args.t), shell = True)
subprocess.call("ls %s/bc1/*allreads | parallel -j%d \"awk '\\$3 == 0 {print \\$1}' {} > {}.hp0.reads\"" % (tempPath, args.t), shell = True)
subprocess.call("cat %s/bc1/*hp1.reads | sort | uniq > %s/HP1.reads" % (tempPath,tempPath), shell = True)
subprocess.call("cat %s/bc1/*hp0.reads | sort | uniq > %s/HP0.reads" % (tempPath,tempPath), shell = True)
logging.info('Partitioning finished. Read names of different haplotypes are saved in:')
print('%s/HP0.reads'%tempPath)
print('%s/HP1.reads'%tempPath)
else:
# TODO individual case
pass
def run_test(sth):
print(sth)
def add_arguments(parser):
arg = parser.add_argument
arg('--illumina1', metavar='FASTQ/FASTA', type=str,
required = True,
help='Illumina short-read data for right reads.')
arg('--illumina2', metavar='FASTQ/FASTA', type=str,
required = True,
help='Illumina short-read data for left reads.')
arg('--pacbio', metavar='FASTQ/FASTA', type=str, nargs='+',
required = True, help='PacBio long-read data. If no PED file specified, should only give one file. \nOtherwise, three \
files in the order of MOM, DAD, CHILD.')
arg('-p', '--ped', metavar='ped file', type=str,
required = False, help = "PED file.")
arg('-o', '--output', metavar = 'PATH',
required = False)
arg('-k', type = int, default = 77 , help = 'K-mer size setting for SPAdes, must be odd and no larger than 128. [77].')
arg('-s', '--size', type = str, required = True, help = 'Expected genome size, acceptible example: 50k, 24m, 2g.')
arg('-t', metavar = 'INT', type = int, default = 4, help = "Use multiprocessing in the algorithm, and some steps utilize GNU parallel. [4]")
arg('--lowc', metavar = 'INT', type = int, default = 5, help = 'Lowest threshold for coverage to support edges.')
arg('--highc', metavar = 'INT', type = int, default = 20, help = 'Highest threshold for coverage to detect repeats.')
def main(args):
checkStatus(args)
run_pipeline(args) | 0.150185 | 0.058051 |
from collections import defaultdict
from itertools import product
N = 5
def evolve(grid):
new_grid = [l[:] for l in grid]
for x in range(N):
for y in range(N):
num_neighbors = 0
for nx, ny in [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]:
if not (0 <= nx < N and 0 <= ny < N):
continue
if grid[nx][ny] == "#":
num_neighbors += 1
if grid[x][y] == "#" and num_neighbors != 1:
new_grid[x][y] = "."
elif grid[x][y] != "#" and num_neighbors in (1, 2):
new_grid[x][y] = "#"
return new_grid
def neighbors_3d(x, y, z):
neighbors = set()
for dx, dy in [(-1, 0), (0, 1), (1, 0), (0, -1)]:
cx, cy = x + dx, y + dy
added = False
if not (0 <= cx < N):
neighbors.add((2 + dx, 2, z + 1))
added = True
if not (0 <= cy < N):
neighbors.add((2, 2 + dy, z + 1))
added = True
if cx == 2 and cy == 2:
added = True
for k in range(N):
nx = k if dx == 0 else (0 if dx == 1 else 4)
ny = k if dy == 0 else (0 if dy == 1 else 4)
neighbors.add((nx, ny, z - 1))
if not added:
neighbors.add((cx, cy, z))
return neighbors
def evolve2(input_grid):
grid = {}
for j, row in enumerate(input_grid):
for i, val in enumerate(row):
if (i, j) == (2, 2):
continue
grid[(i, j, 0)] = val
min_level, max_level = -1, 1
for _ in range(200):
next_grid = {}
for z in range(min_level, max_level + 1):
for (x, y) in product(range(N), range(N)):
if (x, y) == (2, 2):
continue
num_neighbors = sum(
1 for n in neighbors_3d(x, y, z) if grid.get(n, ".") == "#"
)
curr_val = grid.get((x, y, z), ".")
to_evolve = (curr_val == "#" and num_neighbors == 1) or (
curr_val == "." and (num_neighbors in (1, 2))
)
if to_evolve:
next_grid[(x, y, z)] = "#"
min_level = min(z - 1, min_level)
max_level = max(z + 1, max_level)
grid = next_grid
return sum(1 for v in grid.values() if v == "#")
def biodiversity(grid):
v = 1
total = 0
for x in range(N):
for y in range(N):
if grid[x][y] == "#":
total += v
v *= 2
return total
if __name__ == "__main__":
with open("24.txt") as f:
lines = f.readlines()
orig = [list(l.strip()) for l in lines]
grid = orig
seen = set("\n".join("".join(l) for l in grid))
while True:
grid = evolve(grid)
grid_str = "\n".join("".join(l) for l in grid)
if grid_str in seen:
print("Part 1: {}".format(biodiversity(grid)))
break
seen.add(grid_str)
grid = orig
print("Part 2: {}".format(evolve2(grid))) | Day20-25/24.py | from collections import defaultdict
from itertools import product
N = 5
def evolve(grid):
new_grid = [l[:] for l in grid]
for x in range(N):
for y in range(N):
num_neighbors = 0
for nx, ny in [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]:
if not (0 <= nx < N and 0 <= ny < N):
continue
if grid[nx][ny] == "#":
num_neighbors += 1
if grid[x][y] == "#" and num_neighbors != 1:
new_grid[x][y] = "."
elif grid[x][y] != "#" and num_neighbors in (1, 2):
new_grid[x][y] = "#"
return new_grid
def neighbors_3d(x, y, z):
neighbors = set()
for dx, dy in [(-1, 0), (0, 1), (1, 0), (0, -1)]:
cx, cy = x + dx, y + dy
added = False
if not (0 <= cx < N):
neighbors.add((2 + dx, 2, z + 1))
added = True
if not (0 <= cy < N):
neighbors.add((2, 2 + dy, z + 1))
added = True
if cx == 2 and cy == 2:
added = True
for k in range(N):
nx = k if dx == 0 else (0 if dx == 1 else 4)
ny = k if dy == 0 else (0 if dy == 1 else 4)
neighbors.add((nx, ny, z - 1))
if not added:
neighbors.add((cx, cy, z))
return neighbors
def evolve2(input_grid):
grid = {}
for j, row in enumerate(input_grid):
for i, val in enumerate(row):
if (i, j) == (2, 2):
continue
grid[(i, j, 0)] = val
min_level, max_level = -1, 1
for _ in range(200):
next_grid = {}
for z in range(min_level, max_level + 1):
for (x, y) in product(range(N), range(N)):
if (x, y) == (2, 2):
continue
num_neighbors = sum(
1 for n in neighbors_3d(x, y, z) if grid.get(n, ".") == "#"
)
curr_val = grid.get((x, y, z), ".")
to_evolve = (curr_val == "#" and num_neighbors == 1) or (
curr_val == "." and (num_neighbors in (1, 2))
)
if to_evolve:
next_grid[(x, y, z)] = "#"
min_level = min(z - 1, min_level)
max_level = max(z + 1, max_level)
grid = next_grid
return sum(1 for v in grid.values() if v == "#")
def biodiversity(grid):
v = 1
total = 0
for x in range(N):
for y in range(N):
if grid[x][y] == "#":
total += v
v *= 2
return total
if __name__ == "__main__":
with open("24.txt") as f:
lines = f.readlines()
orig = [list(l.strip()) for l in lines]
grid = orig
seen = set("\n".join("".join(l) for l in grid))
while True:
grid = evolve(grid)
grid_str = "\n".join("".join(l) for l in grid)
if grid_str in seen:
print("Part 1: {}".format(biodiversity(grid)))
break
seen.add(grid_str)
grid = orig
print("Part 2: {}".format(evolve2(grid))) | 0.560012 | 0.57069 |
import pytest
def test_client_setname(judge_command):
judge_command(
"CLIENT SETNAME foobar", {"command_value": "CLIENT SETNAME", "value": "foobar"}
)
def test_client_unblock(judge_command):
judge_command(
"CLIENT UNBLOCK 33 TIMEOUT",
{
"command_clientid_errorx": "CLIENT UNBLOCK",
"clientid": "33",
"error": "TIMEOUT",
},
)
judge_command(
"CLIENT UNBLOCK 33",
{"command_clientid_errorx": "CLIENT UNBLOCK", "clientid": "33"},
)
def test_flushdb(judge_command):
judge_command("FLUSHDB", {"command_asyncx": "FLUSHDB"})
judge_command("FLUSHDB async", {"command_asyncx": "FLUSHDB", "async": "async"})
judge_command("FLUSHDB ASYNC", {"command_asyncx": "FLUSHDB", "async": "ASYNC"})
judge_command("FLUSHALL ASYNC", {"command_asyncx": "FLUSHALL", "async": "ASYNC"})
def test_client_list(judge_command):
judge_command("client list", {"command_type_conntype_x": "client list"})
judge_command("client list TYPE REPLICA1", None)
judge_command(
"client list type master",
{
"command_type_conntype_x": "client list",
"type_const": "type",
"conntype": "master",
},
)
judge_command(
"client list TYPE REPLICA",
{
"command_type_conntype_x": "client list",
"type_const": "TYPE",
"conntype": "REPLICA",
},
)
def test_configset(judge_command):
judge_command(
"config set foo bar",
{"command_parameter_value": "config set", "parameter": "foo", "value": "bar"},
)
judge_command(
"config set requirepass ''",
{
"command_parameter_value": "config set",
"parameter": "requirepass",
"value": "''",
},
)
def test_shutdown(judge_command):
judge_command("shutdown save", {"command_shutdown": "shutdown", "shutdown": "save"})
judge_command(
"shutdown NOSAVE", {"command_shutdown": "shutdown", "shutdown": "NOSAVE"}
)
def test_clientpause(judge_command):
judge_command(
"client pause 3000", {"command_timeout": "client pause", "timeout": "3000"}
)
def test_client_reply(judge_command):
judge_command("client reply on", {"command_switch": "client reply", "switch": "on"})
def test_client_kill(judge_command):
judge_command(
"CLIENT KILL addr 127.0.0.1:12345 type pubsub",
{
"command_clientkill": "CLIENT KILL",
"addr": "addr",
"ip_port": "127.0.0.1:12345",
"type_const": "type",
"conntype": "pubsub",
},
)
judge_command(
"CLIENT KILL 127.0.0.1:12345 ",
{"command_clientkill": "CLIENT KILL", "ip_port": "127.0.0.1:12345"},
)
judge_command(
"CLIENT KILL id 123455 type pubsub skipme no",
{
"command_clientkill": "CLIENT KILL",
"const_id": "id",
"clientid": "123455",
"type_const": "type",
"conntype": "pubsub",
"skipme": "skipme",
"yes": "no",
},
)
@pytest.mark.xfail(reason="currently no support arbitrary ordered command args")
def test_client_kill_unordered_arguments(judge_command):
judge_command(
"CLIENT KILL type pubsub addr 127.0.0.1:12345",
{
"command_clientkill": "CLIENT KILL",
"addr": "addr",
"ip_port": "127.0.0.1:12345",
"type_const": "type",
"conntype": "pubsub",
},
) | tests/command_parse/test_server.py | import pytest
def test_client_setname(judge_command):
judge_command(
"CLIENT SETNAME foobar", {"command_value": "CLIENT SETNAME", "value": "foobar"}
)
def test_client_unblock(judge_command):
judge_command(
"CLIENT UNBLOCK 33 TIMEOUT",
{
"command_clientid_errorx": "CLIENT UNBLOCK",
"clientid": "33",
"error": "TIMEOUT",
},
)
judge_command(
"CLIENT UNBLOCK 33",
{"command_clientid_errorx": "CLIENT UNBLOCK", "clientid": "33"},
)
def test_flushdb(judge_command):
judge_command("FLUSHDB", {"command_asyncx": "FLUSHDB"})
judge_command("FLUSHDB async", {"command_asyncx": "FLUSHDB", "async": "async"})
judge_command("FLUSHDB ASYNC", {"command_asyncx": "FLUSHDB", "async": "ASYNC"})
judge_command("FLUSHALL ASYNC", {"command_asyncx": "FLUSHALL", "async": "ASYNC"})
def test_client_list(judge_command):
judge_command("client list", {"command_type_conntype_x": "client list"})
judge_command("client list TYPE REPLICA1", None)
judge_command(
"client list type master",
{
"command_type_conntype_x": "client list",
"type_const": "type",
"conntype": "master",
},
)
judge_command(
"client list TYPE REPLICA",
{
"command_type_conntype_x": "client list",
"type_const": "TYPE",
"conntype": "REPLICA",
},
)
def test_configset(judge_command):
judge_command(
"config set foo bar",
{"command_parameter_value": "config set", "parameter": "foo", "value": "bar"},
)
judge_command(
"config set requirepass ''",
{
"command_parameter_value": "config set",
"parameter": "requirepass",
"value": "''",
},
)
def test_shutdown(judge_command):
judge_command("shutdown save", {"command_shutdown": "shutdown", "shutdown": "save"})
judge_command(
"shutdown NOSAVE", {"command_shutdown": "shutdown", "shutdown": "NOSAVE"}
)
def test_clientpause(judge_command):
judge_command(
"client pause 3000", {"command_timeout": "client pause", "timeout": "3000"}
)
def test_client_reply(judge_command):
judge_command("client reply on", {"command_switch": "client reply", "switch": "on"})
def test_client_kill(judge_command):
judge_command(
"CLIENT KILL addr 127.0.0.1:12345 type pubsub",
{
"command_clientkill": "CLIENT KILL",
"addr": "addr",
"ip_port": "127.0.0.1:12345",
"type_const": "type",
"conntype": "pubsub",
},
)
judge_command(
"CLIENT KILL 127.0.0.1:12345 ",
{"command_clientkill": "CLIENT KILL", "ip_port": "127.0.0.1:12345"},
)
judge_command(
"CLIENT KILL id 123455 type pubsub skipme no",
{
"command_clientkill": "CLIENT KILL",
"const_id": "id",
"clientid": "123455",
"type_const": "type",
"conntype": "pubsub",
"skipme": "skipme",
"yes": "no",
},
)
@pytest.mark.xfail(reason="currently no support arbitrary ordered command args")
def test_client_kill_unordered_arguments(judge_command):
judge_command(
"CLIENT KILL type pubsub addr 127.0.0.1:12345",
{
"command_clientkill": "CLIENT KILL",
"addr": "addr",
"ip_port": "127.0.0.1:12345",
"type_const": "type",
"conntype": "pubsub",
},
) | 0.348978 | 0.205914 |
import textwrap
from nbsafety.data_model.code_cell import cells
from nbsafety.line_magics import _USAGE
from nbsafety.run_mode import FlowOrder, ExecutionMode, ExecutionSchedule
from nbsafety.singletons import kernel, nbs
from nbsafety.tracing.nbsafety_tracer import SafetyTracer
from test.utils import make_safety_fixture
# Reset dependency graph before each test
_safety_fixture, run_cell_ = make_safety_fixture()
def run_cell(cell, **kwargs):
# print()
# print('*******************************************')
# print('running', cell)
# print('*******************************************')
# print()
run_cell_(cell, **kwargs)
def test_show_usage():
run_cell("%safety not_a_real_subcommand")
cell1 = cells().from_id(1)
assert cell1.captured_output.stderr.strip() == _USAGE.strip(), (
"got %s" % cell1.captured_output.stderr
)
def test_show_deps_show_stale():
run_cell("x = 0")
run_cell("y = x + 1")
run_cell("logging.info(y)")
run_cell("%safety show_deps y")
cell4 = cells().from_id(4)
assert (
cell4.captured_output.stdout.strip()
== "Symbol y (defined cell: 2; last updated cell: 2) is dependent on {<x>} and is a parent of nothing"
), ("got %s" % cell4.captured_output)
run_cell("%safety show_stale")
cell5 = cells().from_id(5)
assert (
cell5.captured_output.stdout.strip()
== "No symbol has stale dependencies for now!"
), ("got %s" % cell5.captured_output)
run_cell("x = 42")
run_cell("%safety show_stale")
cell7 = cells().from_id(7)
assert (
cell7.captured_output.stdout.strip()
== "Symbol(s) with stale dependencies: {<y>}"
), ("got %s" % cell7.captured_output)
run_cell("y = x + 1")
run_cell("%safety show_stale")
cell9 = cells().from_id(9)
assert (
cell9.captured_output.stdout.strip()
== "No symbol has stale dependencies for now!"
), ("got %s" % cell9.captured_output)
def test_enable_disable_trace_messages():
assert not nbs().trace_messages_enabled
run_cell("%safety trace_messages enable")
assert nbs().trace_messages_enabled
run_cell("%safety trace_messages disable")
assert not nbs().trace_messages_enabled
def test_enable_disable_highlights():
assert nbs().mut_settings.highlights_enabled
run_cell("%safety nohls")
assert not nbs().mut_settings.highlights_enabled
run_cell("%safety hls")
assert nbs().mut_settings.highlights_enabled
run_cell("%safety highlights off")
assert not nbs().mut_settings.highlights_enabled
run_cell("%safety highlights on")
assert nbs().mut_settings.highlights_enabled
run_cell("%safety highlights disable")
assert not nbs().mut_settings.highlights_enabled
run_cell("%safety highlights enable")
def test_make_slice():
run_cell("x = 0")
run_cell("y = x + 1")
run_cell("x = 42")
run_cell("logging.info(y)")
run_cell("%safety slice 4")
cell5 = cells().from_id(5)
assert (
cell5.captured_output.stdout.strip()
== textwrap.dedent(
"""
# Cell 1
x = 0
# Cell 2
y = x + 1
# Cell 4
logging.info(y)
"""
).strip()
), ("got %s" % cell5.captured_output)
def test_set_exec_mode():
assert nbs().mut_settings.exec_mode == ExecutionMode.NORMAL
run_cell(f"%safety mode {ExecutionMode.REACTIVE.value}")
assert nbs().mut_settings.exec_mode == ExecutionMode.REACTIVE
run_cell(f"%safety mode {ExecutionMode.NORMAL.value}")
assert nbs().mut_settings.exec_mode == ExecutionMode.NORMAL
def test_set_exec_schedule_and_flow_order():
assert nbs().mut_settings.exec_schedule == ExecutionSchedule.LIVENESS_BASED
run_cell(f"%safety flow {FlowOrder.IN_ORDER.value}")
assert nbs().mut_settings.flow_order == FlowOrder.IN_ORDER
for schedule in ExecutionSchedule:
run_cell(f"%safety schedule {schedule.value}")
assert nbs().mut_settings.exec_schedule == schedule
run_cell(f"%safety schedule {ExecutionSchedule.LIVENESS_BASED.value}")
assert nbs().mut_settings.exec_schedule == ExecutionSchedule.LIVENESS_BASED
run_cell(f"%safety flow {FlowOrder.ANY_ORDER.value}")
assert nbs().mut_settings.flow_order == FlowOrder.ANY_ORDER
run_cell(f"%safety schedule {ExecutionSchedule.STRICT.value}")
# strict schedule only works for in_order semantics
assert nbs().mut_settings.exec_schedule == ExecutionSchedule.LIVENESS_BASED
def test_register_deregister_tracer():
assert SafetyTracer in kernel().registered_tracers
run_cell(f"%safety deregister {SafetyTracer.__module__}.{SafetyTracer.__name__}")
assert SafetyTracer not in kernel().registered_tracers
run_cell(f"%safety register {SafetyTracer.__module__}.{SafetyTracer.__name__}")
assert SafetyTracer in kernel().registered_tracers
def test_clear():
run_cell("%safety clear")
assert nbs().min_timestamp == nbs().cell_counter()
run_cell("x = 42")
assert nbs().min_timestamp == nbs().cell_counter() - 1
run_cell("%safety clear")
assert nbs().min_timestamp == nbs().cell_counter() | test/test_line_magics.py | import textwrap
from nbsafety.data_model.code_cell import cells
from nbsafety.line_magics import _USAGE
from nbsafety.run_mode import FlowOrder, ExecutionMode, ExecutionSchedule
from nbsafety.singletons import kernel, nbs
from nbsafety.tracing.nbsafety_tracer import SafetyTracer
from test.utils import make_safety_fixture
# Reset dependency graph before each test
_safety_fixture, run_cell_ = make_safety_fixture()
def run_cell(cell, **kwargs):
# print()
# print('*******************************************')
# print('running', cell)
# print('*******************************************')
# print()
run_cell_(cell, **kwargs)
def test_show_usage():
run_cell("%safety not_a_real_subcommand")
cell1 = cells().from_id(1)
assert cell1.captured_output.stderr.strip() == _USAGE.strip(), (
"got %s" % cell1.captured_output.stderr
)
def test_show_deps_show_stale():
run_cell("x = 0")
run_cell("y = x + 1")
run_cell("logging.info(y)")
run_cell("%safety show_deps y")
cell4 = cells().from_id(4)
assert (
cell4.captured_output.stdout.strip()
== "Symbol y (defined cell: 2; last updated cell: 2) is dependent on {<x>} and is a parent of nothing"
), ("got %s" % cell4.captured_output)
run_cell("%safety show_stale")
cell5 = cells().from_id(5)
assert (
cell5.captured_output.stdout.strip()
== "No symbol has stale dependencies for now!"
), ("got %s" % cell5.captured_output)
run_cell("x = 42")
run_cell("%safety show_stale")
cell7 = cells().from_id(7)
assert (
cell7.captured_output.stdout.strip()
== "Symbol(s) with stale dependencies: {<y>}"
), ("got %s" % cell7.captured_output)
run_cell("y = x + 1")
run_cell("%safety show_stale")
cell9 = cells().from_id(9)
assert (
cell9.captured_output.stdout.strip()
== "No symbol has stale dependencies for now!"
), ("got %s" % cell9.captured_output)
def test_enable_disable_trace_messages():
assert not nbs().trace_messages_enabled
run_cell("%safety trace_messages enable")
assert nbs().trace_messages_enabled
run_cell("%safety trace_messages disable")
assert not nbs().trace_messages_enabled
def test_enable_disable_highlights():
assert nbs().mut_settings.highlights_enabled
run_cell("%safety nohls")
assert not nbs().mut_settings.highlights_enabled
run_cell("%safety hls")
assert nbs().mut_settings.highlights_enabled
run_cell("%safety highlights off")
assert not nbs().mut_settings.highlights_enabled
run_cell("%safety highlights on")
assert nbs().mut_settings.highlights_enabled
run_cell("%safety highlights disable")
assert not nbs().mut_settings.highlights_enabled
run_cell("%safety highlights enable")
def test_make_slice():
run_cell("x = 0")
run_cell("y = x + 1")
run_cell("x = 42")
run_cell("logging.info(y)")
run_cell("%safety slice 4")
cell5 = cells().from_id(5)
assert (
cell5.captured_output.stdout.strip()
== textwrap.dedent(
"""
# Cell 1
x = 0
# Cell 2
y = x + 1
# Cell 4
logging.info(y)
"""
).strip()
), ("got %s" % cell5.captured_output)
def test_set_exec_mode():
assert nbs().mut_settings.exec_mode == ExecutionMode.NORMAL
run_cell(f"%safety mode {ExecutionMode.REACTIVE.value}")
assert nbs().mut_settings.exec_mode == ExecutionMode.REACTIVE
run_cell(f"%safety mode {ExecutionMode.NORMAL.value}")
assert nbs().mut_settings.exec_mode == ExecutionMode.NORMAL
def test_set_exec_schedule_and_flow_order():
assert nbs().mut_settings.exec_schedule == ExecutionSchedule.LIVENESS_BASED
run_cell(f"%safety flow {FlowOrder.IN_ORDER.value}")
assert nbs().mut_settings.flow_order == FlowOrder.IN_ORDER
for schedule in ExecutionSchedule:
run_cell(f"%safety schedule {schedule.value}")
assert nbs().mut_settings.exec_schedule == schedule
run_cell(f"%safety schedule {ExecutionSchedule.LIVENESS_BASED.value}")
assert nbs().mut_settings.exec_schedule == ExecutionSchedule.LIVENESS_BASED
run_cell(f"%safety flow {FlowOrder.ANY_ORDER.value}")
assert nbs().mut_settings.flow_order == FlowOrder.ANY_ORDER
run_cell(f"%safety schedule {ExecutionSchedule.STRICT.value}")
# strict schedule only works for in_order semantics
assert nbs().mut_settings.exec_schedule == ExecutionSchedule.LIVENESS_BASED
def test_register_deregister_tracer():
assert SafetyTracer in kernel().registered_tracers
run_cell(f"%safety deregister {SafetyTracer.__module__}.{SafetyTracer.__name__}")
assert SafetyTracer not in kernel().registered_tracers
run_cell(f"%safety register {SafetyTracer.__module__}.{SafetyTracer.__name__}")
assert SafetyTracer in kernel().registered_tracers
def test_clear():
run_cell("%safety clear")
assert nbs().min_timestamp == nbs().cell_counter()
run_cell("x = 42")
assert nbs().min_timestamp == nbs().cell_counter() - 1
run_cell("%safety clear")
assert nbs().min_timestamp == nbs().cell_counter() | 0.460289 | 0.405213 |
from requests_pkcs12 import Pkcs12Adapter
from pyravendb.commands.raven_commands import GetTopologyCommand, GetStatisticsCommand
from pyravendb.connection.requests_helpers import *
from pyravendb.custom_exceptions import exceptions
from OpenSSL import crypto
from pyravendb.data.document_conventions import DocumentConventions
from threading import Lock
from pyravendb.tools.utils import Utils
from datetime import datetime, timedelta
import time
import requests
import json
import hashlib
import errno
import os
import io
import logging
# NOTE(review): configuring the root logger at import time is a side effect on
# the host application; a module-level logger (logging.getLogger(__name__))
# would be less intrusive -- confirm before changing, callers may rely on it.
logging.basicConfig(filename='requests_executor_info.log', level=logging.DEBUG)
log = logging.getLogger()
# Directory (under the current working directory) used to cache topology files.
TOPOLOGY_FILES_DIR = os.path.join(os.getcwd(), "topology_files")
class RequestsExecutor(object):
def __new__(cls, *args, **kwargs):
instance = super(RequestsExecutor, cls).__new__(cls)
try:
os.makedirs(TOPOLOGY_FILES_DIR)
except OSError as e:
if e.errno != errno.EEXIST:
raise
return instance
@staticmethod
def initialize_certificate(certificate):
if not isinstance(certificate, dict):
return certificate, None
pfx = certificate["pfx"]
password = certificate.get("password", None)
adapter = Pkcs12Adapter(pkcs12_data=pfx, pkcs12_password=password)
return None, adapter
def __init__(self, database_name, certificate, conventions=None, **kwargs):
self._database_name = database_name
(self._certificate, self._adapter) = self.initialize_certificate(certificate)
self.topology_etag = kwargs.get("topology_etag", 0)
self._last_return_response = datetime.utcnow()
self.conventions = conventions if conventions is not None else DocumentConventions()
self._node_selector = kwargs.get("node_selector", None)
self._last_known_urls = None
self.headers = {"Accept": "application/json",
"Raven-Client-Version": "5.0.0.1"}
self.update_topology_lock = Lock()
self.update_timer_lock = Lock()
self.lock = Lock()
self._disable_topology_updates = kwargs.get("disable_topology_updates", False)
self._failed_nodes_timers = {}
self.update_topology_timer = None
self._first_topology_update = None
self.cluster_token = None
self.topology_nodes = None
self._closed = False
@property
def certificate(self):
return self._certificate
@staticmethod
def create(urls, database_name, certificate, conventions=None):
executor = RequestsExecutor(database_name, certificate, conventions)
executor.start_first_topology_thread(urls)
return executor
@staticmethod
def create_for_single_node(url, database_name, certificate):
topology = Topology(etag=-1, nodes=[ServerNode(url, database_name)])
return RequestsExecutor(database_name, certificate, node_selector=NodeSelector(topology), topology_etag=-2,
disable_topology_updates=True)
def start_first_topology_thread(self, urls):
self._first_topology_update = PropagatingThread(target=self.first_topology_update, args=(urls,), daemon=True)
self._first_topology_update.start()
def ensure_node_selector(self):
if self._first_topology_update and self._first_topology_update.is_alive():
self._first_topology_update.join()
if not self._node_selector:
self._node_selector = NodeSelector(Topology(etag=self.topology_etag, nodes=self.topology_nodes))
def get_preferred_node(self):
self.ensure_node_selector()
return self._node_selector.get_current_node()
def execute(self, raven_command, should_retry=True):
if not hasattr(raven_command, 'raven_command'):
raise ValueError("Not a valid command")
topology_update = self._first_topology_update
if not self._disable_topology_updates:
if topology_update is None or topology_update.is_alive():
try:
if topology_update is None:
with self.lock:
if self._first_topology_update is None:
if self._last_known_urls is None:
raise exceptions.InvalidOperationException(
"No known topology and no previously known one, cannot proceed, likely a bug")
self.start_first_topology_thread(self._last_known_urls)
topology_update = self._first_topology_update
topology_update.join()
except Exception as e:
log.debug(str(e))
with self.lock:
if self._first_topology_update == topology_update:
self._first_topology_update = None
raise
if self._node_selector is None:
raise exceptions.InvalidOperationException("A connection with the server could not be established",
"node_selector cannot be None, please check your connection "
"or supply a valid node_selector")
chosen_node = self._node_selector.get_current_node()
return self.execute_with_node(chosen_node, raven_command, should_retry)
def execute_with_node(self, chosen_node, raven_command, should_retry):
while True:
raven_command.create_request(chosen_node)
node_index = 0 if self._node_selector is None else self._node_selector.current_node_index
with requests.session() as session:
if self._adapter is not None:
session.mount("https://", self._adapter)
if raven_command.is_raft_request:
prefix = '&' if '?' in raven_command.url else '?'
raven_command.url += f"{prefix}raft-request-id={raven_command.raft_unique_request_id}"
raven_command.headers.update(self.headers)
if not self._disable_topology_updates:
raven_command.headers["Topology-Etag"] = "\"{0}\"".format(self.topology_etag)
data = raven_command.data
if data and not isinstance(data, io.BufferedIOBase):
data = json.dumps(raven_command.data, default=self.conventions.json_default_method)
if raven_command.files:
data = {"data": data}
start_time = time.time() * 1000
end_time = None
try:
response = session.request(raven_command.method, url=raven_command.url, data=data,
files=raven_command.files,
cert=self._certificate, headers=raven_command.headers,
stream=raven_command.use_stream)
except Exception as e:
end_time = time.time() * 1000
if not should_retry:
raise
if not self.handle_server_down(chosen_node, node_index, raven_command, e):
raise exceptions.AllTopologyNodesDownException(
"Tried to send request to all configured nodes in the topology, "
"all of them seem to be down or not responding.", e)
chosen_node = self._node_selector.get_current_node()
continue
finally:
if not end_time:
end_time = time.time() * 1000
elapsed_time = end_time - start_time
chosen_node.response_time = elapsed_time
if response is None:
raise ValueError("response is invalid.")
if response.status_code == 404:
return raven_command.set_response(None)
if response.status_code == 403:
if self._certificate is not None:
cert = self._certificate
if isinstance(cert, tuple):
(cert, _) = cert
with open(cert, 'rb') as pem:
cert = crypto.load_certificate(crypto.FILETYPE_PEM, pem.read())
name = str(cert.get_subject().get_components()[0][1])
raise exceptions.AuthorizationException(
"Forbidden access to " + chosen_node.database + "@" + chosen_node.url + ", " +
("a certificate is required." if self._certificate is None
else name + " does not have permission to access it or is unknown.") +
response.request.method + " " + response.request.path_url)
if response.status_code == 410:
if should_retry:
self.update_topology(chosen_node, True)
# TODO update this after build make fastest node selector
if response.status_code == 408 or response.status_code == 502 or response.status_code == 503 or response.status_code == 504:
if len(raven_command.failed_nodes) == 1:
node = list(raven_command.failed_nodes.keys())[0]
database_missing = response.headers.get("Database-Missing", None)
if database_missing:
raise exceptions.DatabaseDoesNotExistException(
"Database " + database_missing + " does not exists")
raise exceptions.UnsuccessfulRequestException(node.url, raven_command.failed_nodes[node])
try:
e = response.json()["Message"]
except ValueError:
e = None
if self.handle_server_down(chosen_node, node_index, raven_command, e):
chosen_node = self._node_selector.get_current_node()
continue
if response.status_code == 409:
# TODO: Conflict resolution
# current implementation is temporary
try:
response = response.json()
if "Error" in response:
raise Exception(response["Message"], response["Type"])
except ValueError:
raise response.raise_for_status()
if "Refresh-Topology" in response.headers:
self.update_topology(ServerNode(chosen_node.url, self._database_name))
self._last_return_response = datetime.utcnow()
return raven_command.set_response(response)
def first_topology_update(self, initial_urls):
error_list = []
for url in initial_urls:
try:
self.update_topology(ServerNode(url, self._database_name))
self.update_topology_timer = Utils.start_a_timer(60 * 5, self.update_topology_callback, daemon=True)
self.topology_nodes = self._node_selector.topology.nodes
return
except exceptions.DatabaseDoesNotExistException:
# Will happen on all node in the cluster, so errors immediately
self._last_known_urls = initial_urls
raise
except Exception as e:
if len(initial_urls) == 0:
self._last_known_urls = initial_urls
raise exceptions.InvalidOperationException("Cannot get topology from server: " + url, e)
error_list.append((url, e))
# Failed to update topology trying update from cache
for url in initial_urls:
if self.try_load_from_cache(url):
self.topology_nodes = self._node_selector.topology.nodes
return
self._last_known_urls = initial_urls
self.raise_exceptions(error_list)
def raise_exceptions(self, error_list):
raise exceptions.AggregateException("Failed to retrieve database topology from all known nodes", error_list)
def try_load_from_cache(self, url):
server_hash = hashlib.md5(
"{0}{1}".format(url, self._database_name).encode(
'utf-8')).hexdigest()
topology_file_path = os.path.join(TOPOLOGY_FILES_DIR, server_hash + ".raven-topology")
try:
with open(topology_file_path, 'r') as topology_file:
json_file = json.load(topology_file)
self._node_selector = NodeSelector(
Topology.convert_json_topology_to_entity(json_file))
self.topology_etag = -2
self.update_topology_timer = Utils.start_a_timer(60 * 5, self.update_topology_callback, daemon=True)
return True
except (FileNotFoundError, json.JSONDecodeError) as e:
log.info(e)
return False
def update_topology(self, node, force_update=False):
if self._closed:
return
if self.update_topology_lock.acquire(False):
try:
if self._closed:
return False
command = GetTopologyCommand()
response = self.execute_with_node(node, command, should_retry=False)
hash_name = hashlib.md5(
"{0}{1}".format(node.url, node.database).encode(
'utf-8')).hexdigest()
topology_file = os.path.join(TOPOLOGY_FILES_DIR, hash_name + ".raven-topology")
try:
with open(topology_file, 'w') as outfile:
json.dump(response, outfile, ensure_ascii=False)
except (IOError, json.JSONDecodeError):
pass
topology = Topology.convert_json_topology_to_entity(response)
if self._node_selector is None:
self._node_selector = NodeSelector(topology)
elif self._node_selector.on_update_topology(topology, force_update):
self.cancel_all_failed_nodes_timers()
self.topology_etag = self._node_selector.topology.etag
finally:
self.update_topology_lock.release()
else:
return False
def handle_server_down(self, chosen_node, node_index, command, e):
command.failed_nodes.update({chosen_node: {"url": command.url, "error": str(e), "type": type(e).__name__}})
node_selector = self._node_selector
if node_selector is None:
return False
if chosen_node not in self._failed_nodes_timers:
node_status = NodeStatus(self, node_index, chosen_node)
with self.update_timer_lock:
if self._failed_nodes_timers.get(chosen_node, None) is None:
self._failed_nodes_timers.update({chosen_node: node_status})
node_status.start_timer()
node_selector.on_failed_request(node_index)
current_node = node_selector.get_current_node()
if command.is_failed_with_node(current_node):
return False
return True
def cancel_all_failed_nodes_timers(self):
failed_nodes_timers = self._failed_nodes_timers
self._failed_nodes_timers.clear()
for _, timer in failed_nodes_timers.items():
timer.cancel()
timer.join()
def check_node_status(self, node_status):
if self._node_selector is not None:
nodes = self._node_selector.topology.nodes
if node_status.node_index >= len(nodes):
return
server_node = nodes[node_status.node_index]
if server_node is not node_status.node:
self.perform_health_check(server_node, node_status)
def perform_health_check(self, node, node_status):
command = GetStatisticsCommand(debug_tag="failure=check")
try:
self.execute_with_node(node, command, should_retry=False)
except Exception as e:
log.info("{0} is still down".format(node.cluster_tag), e)
failed_node_timer = self._failed_nodes_timers.get(node_status.node, None)
if failed_node_timer is not None:
failed_node_timer.start_timer()
return
failed_node_timer = self._failed_nodes_timers.pop(node_status.node, None)
if failed_node_timer:
del failed_node_timer
self._node_selector.restore_node_index(node_status.node_index)
def update_topology_callback(self):
time = datetime.utcnow()
if time - self._last_return_response <= timedelta(minutes=5):
return
try:
self.update_topology(self._node_selector.get_current_node())
except Exception as e:
log.info("Couldn't Update Topology from _updateTopologyTimer task", e)
def close(self):
if self._closed:
return
self._closed = True
self.cancel_all_failed_nodes_timers()
if self.update_topology_timer:
self.update_topology_timer.cancel() | pyravendb/connection/requests_executor.py | from requests_pkcs12 import Pkcs12Adapter
from pyravendb.commands.raven_commands import GetTopologyCommand, GetStatisticsCommand
from pyravendb.connection.requests_helpers import *
from pyravendb.custom_exceptions import exceptions
from OpenSSL import crypto
from pyravendb.data.document_conventions import DocumentConventions
from threading import Lock
from pyravendb.tools.utils import Utils
from datetime import datetime, timedelta
import time
import requests
import json
import hashlib
import errno
import os
import io
import logging
logging.basicConfig(filename='requests_executor_info.log', level=logging.DEBUG)
log = logging.getLogger()
TOPOLOGY_FILES_DIR = os.path.join(os.getcwd(), "topology_files")
class RequestsExecutor(object):
def __new__(cls, *args, **kwargs):
instance = super(RequestsExecutor, cls).__new__(cls)
try:
os.makedirs(TOPOLOGY_FILES_DIR)
except OSError as e:
if e.errno != errno.EEXIST:
raise
return instance
@staticmethod
def initialize_certificate(certificate):
if not isinstance(certificate, dict):
return certificate, None
pfx = certificate["pfx"]
password = certificate.get("password", None)
adapter = Pkcs12Adapter(pkcs12_data=pfx, pkcs12_password=password)
return None, adapter
def __init__(self, database_name, certificate, conventions=None, **kwargs):
self._database_name = database_name
(self._certificate, self._adapter) = self.initialize_certificate(certificate)
self.topology_etag = kwargs.get("topology_etag", 0)
self._last_return_response = datetime.utcnow()
self.conventions = conventions if conventions is not None else DocumentConventions()
self._node_selector = kwargs.get("node_selector", None)
self._last_known_urls = None
self.headers = {"Accept": "application/json",
"Raven-Client-Version": "5.0.0.1"}
self.update_topology_lock = Lock()
self.update_timer_lock = Lock()
self.lock = Lock()
self._disable_topology_updates = kwargs.get("disable_topology_updates", False)
self._failed_nodes_timers = {}
self.update_topology_timer = None
self._first_topology_update = None
self.cluster_token = None
self.topology_nodes = None
self._closed = False
@property
def certificate(self):
return self._certificate
@staticmethod
def create(urls, database_name, certificate, conventions=None):
executor = RequestsExecutor(database_name, certificate, conventions)
executor.start_first_topology_thread(urls)
return executor
@staticmethod
def create_for_single_node(url, database_name, certificate):
topology = Topology(etag=-1, nodes=[ServerNode(url, database_name)])
return RequestsExecutor(database_name, certificate, node_selector=NodeSelector(topology), topology_etag=-2,
disable_topology_updates=True)
def start_first_topology_thread(self, urls):
self._first_topology_update = PropagatingThread(target=self.first_topology_update, args=(urls,), daemon=True)
self._first_topology_update.start()
def ensure_node_selector(self):
if self._first_topology_update and self._first_topology_update.is_alive():
self._first_topology_update.join()
if not self._node_selector:
self._node_selector = NodeSelector(Topology(etag=self.topology_etag, nodes=self.topology_nodes))
def get_preferred_node(self):
self.ensure_node_selector()
return self._node_selector.get_current_node()
def execute(self, raven_command, should_retry=True):
if not hasattr(raven_command, 'raven_command'):
raise ValueError("Not a valid command")
topology_update = self._first_topology_update
if not self._disable_topology_updates:
if topology_update is None or topology_update.is_alive():
try:
if topology_update is None:
with self.lock:
if self._first_topology_update is None:
if self._last_known_urls is None:
raise exceptions.InvalidOperationException(
"No known topology and no previously known one, cannot proceed, likely a bug")
self.start_first_topology_thread(self._last_known_urls)
topology_update = self._first_topology_update
topology_update.join()
except Exception as e:
log.debug(str(e))
with self.lock:
if self._first_topology_update == topology_update:
self._first_topology_update = None
raise
if self._node_selector is None:
raise exceptions.InvalidOperationException("A connection with the server could not be established",
"node_selector cannot be None, please check your connection "
"or supply a valid node_selector")
chosen_node = self._node_selector.get_current_node()
return self.execute_with_node(chosen_node, raven_command, should_retry)
def execute_with_node(self, chosen_node, raven_command, should_retry):
while True:
raven_command.create_request(chosen_node)
node_index = 0 if self._node_selector is None else self._node_selector.current_node_index
with requests.session() as session:
if self._adapter is not None:
session.mount("https://", self._adapter)
if raven_command.is_raft_request:
prefix = '&' if '?' in raven_command.url else '?'
raven_command.url += f"{prefix}raft-request-id={raven_command.raft_unique_request_id}"
raven_command.headers.update(self.headers)
if not self._disable_topology_updates:
raven_command.headers["Topology-Etag"] = "\"{0}\"".format(self.topology_etag)
data = raven_command.data
if data and not isinstance(data, io.BufferedIOBase):
data = json.dumps(raven_command.data, default=self.conventions.json_default_method)
if raven_command.files:
data = {"data": data}
start_time = time.time() * 1000
end_time = None
try:
response = session.request(raven_command.method, url=raven_command.url, data=data,
files=raven_command.files,
cert=self._certificate, headers=raven_command.headers,
stream=raven_command.use_stream)
except Exception as e:
end_time = time.time() * 1000
if not should_retry:
raise
if not self.handle_server_down(chosen_node, node_index, raven_command, e):
raise exceptions.AllTopologyNodesDownException(
"Tried to send request to all configured nodes in the topology, "
"all of them seem to be down or not responding.", e)
chosen_node = self._node_selector.get_current_node()
continue
finally:
if not end_time:
end_time = time.time() * 1000
elapsed_time = end_time - start_time
chosen_node.response_time = elapsed_time
if response is None:
raise ValueError("response is invalid.")
if response.status_code == 404:
return raven_command.set_response(None)
if response.status_code == 403:
if self._certificate is not None:
cert = self._certificate
if isinstance(cert, tuple):
(cert, _) = cert
with open(cert, 'rb') as pem:
cert = crypto.load_certificate(crypto.FILETYPE_PEM, pem.read())
name = str(cert.get_subject().get_components()[0][1])
raise exceptions.AuthorizationException(
"Forbidden access to " + chosen_node.database + "@" + chosen_node.url + ", " +
("a certificate is required." if self._certificate is None
else name + " does not have permission to access it or is unknown.") +
response.request.method + " " + response.request.path_url)
if response.status_code == 410:
if should_retry:
self.update_topology(chosen_node, True)
# TODO update this after build make fastest node selector
if response.status_code == 408 or response.status_code == 502 or response.status_code == 503 or response.status_code == 504:
if len(raven_command.failed_nodes) == 1:
node = list(raven_command.failed_nodes.keys())[0]
database_missing = response.headers.get("Database-Missing", None)
if database_missing:
raise exceptions.DatabaseDoesNotExistException(
"Database " + database_missing + " does not exists")
raise exceptions.UnsuccessfulRequestException(node.url, raven_command.failed_nodes[node])
try:
e = response.json()["Message"]
except ValueError:
e = None
if self.handle_server_down(chosen_node, node_index, raven_command, e):
chosen_node = self._node_selector.get_current_node()
continue
if response.status_code == 409:
# TODO: Conflict resolution
# current implementation is temporary
try:
response = response.json()
if "Error" in response:
raise Exception(response["Message"], response["Type"])
except ValueError:
raise response.raise_for_status()
if "Refresh-Topology" in response.headers:
self.update_topology(ServerNode(chosen_node.url, self._database_name))
self._last_return_response = datetime.utcnow()
return raven_command.set_response(response)
def first_topology_update(self, initial_urls):
error_list = []
for url in initial_urls:
try:
self.update_topology(ServerNode(url, self._database_name))
self.update_topology_timer = Utils.start_a_timer(60 * 5, self.update_topology_callback, daemon=True)
self.topology_nodes = self._node_selector.topology.nodes
return
except exceptions.DatabaseDoesNotExistException:
# Will happen on all node in the cluster, so errors immediately
self._last_known_urls = initial_urls
raise
except Exception as e:
if len(initial_urls) == 0:
self._last_known_urls = initial_urls
raise exceptions.InvalidOperationException("Cannot get topology from server: " + url, e)
error_list.append((url, e))
# Failed to update topology trying update from cache
for url in initial_urls:
if self.try_load_from_cache(url):
self.topology_nodes = self._node_selector.topology.nodes
return
self._last_known_urls = initial_urls
self.raise_exceptions(error_list)
def raise_exceptions(self, error_list):
raise exceptions.AggregateException("Failed to retrieve database topology from all known nodes", error_list)
def try_load_from_cache(self, url):
server_hash = hashlib.md5(
"{0}{1}".format(url, self._database_name).encode(
'utf-8')).hexdigest()
topology_file_path = os.path.join(TOPOLOGY_FILES_DIR, server_hash + ".raven-topology")
try:
with open(topology_file_path, 'r') as topology_file:
json_file = json.load(topology_file)
self._node_selector = NodeSelector(
Topology.convert_json_topology_to_entity(json_file))
self.topology_etag = -2
self.update_topology_timer = Utils.start_a_timer(60 * 5, self.update_topology_callback, daemon=True)
return True
except (FileNotFoundError, json.JSONDecodeError) as e:
log.info(e)
return False
def update_topology(self, node, force_update=False):
if self._closed:
return
if self.update_topology_lock.acquire(False):
try:
if self._closed:
return False
command = GetTopologyCommand()
response = self.execute_with_node(node, command, should_retry=False)
hash_name = hashlib.md5(
"{0}{1}".format(node.url, node.database).encode(
'utf-8')).hexdigest()
topology_file = os.path.join(TOPOLOGY_FILES_DIR, hash_name + ".raven-topology")
try:
with open(topology_file, 'w') as outfile:
json.dump(response, outfile, ensure_ascii=False)
except (IOError, json.JSONDecodeError):
pass
topology = Topology.convert_json_topology_to_entity(response)
if self._node_selector is None:
self._node_selector = NodeSelector(topology)
elif self._node_selector.on_update_topology(topology, force_update):
self.cancel_all_failed_nodes_timers()
self.topology_etag = self._node_selector.topology.etag
finally:
self.update_topology_lock.release()
else:
return False
def handle_server_down(self, chosen_node, node_index, command, e):
command.failed_nodes.update({chosen_node: {"url": command.url, "error": str(e), "type": type(e).__name__}})
node_selector = self._node_selector
if node_selector is None:
return False
if chosen_node not in self._failed_nodes_timers:
node_status = NodeStatus(self, node_index, chosen_node)
with self.update_timer_lock:
if self._failed_nodes_timers.get(chosen_node, None) is None:
self._failed_nodes_timers.update({chosen_node: node_status})
node_status.start_timer()
node_selector.on_failed_request(node_index)
current_node = node_selector.get_current_node()
if command.is_failed_with_node(current_node):
return False
return True
def cancel_all_failed_nodes_timers(self):
failed_nodes_timers = self._failed_nodes_timers
self._failed_nodes_timers.clear()
for _, timer in failed_nodes_timers.items():
timer.cancel()
timer.join()
def check_node_status(self, node_status):
if self._node_selector is not None:
nodes = self._node_selector.topology.nodes
if node_status.node_index >= len(nodes):
return
server_node = nodes[node_status.node_index]
if server_node is not node_status.node:
self.perform_health_check(server_node, node_status)
def perform_health_check(self, node, node_status):
command = GetStatisticsCommand(debug_tag="failure=check")
try:
self.execute_with_node(node, command, should_retry=False)
except Exception as e:
log.info("{0} is still down".format(node.cluster_tag), e)
failed_node_timer = self._failed_nodes_timers.get(node_status.node, None)
if failed_node_timer is not None:
failed_node_timer.start_timer()
return
failed_node_timer = self._failed_nodes_timers.pop(node_status.node, None)
if failed_node_timer:
del failed_node_timer
self._node_selector.restore_node_index(node_status.node_index)
def update_topology_callback(self):
time = datetime.utcnow()
if time - self._last_return_response <= timedelta(minutes=5):
return
try:
self.update_topology(self._node_selector.get_current_node())
except Exception as e:
log.info("Couldn't Update Topology from _updateTopologyTimer task", e)
def close(self):
if self._closed:
return
self._closed = True
self.cancel_all_failed_nodes_timers()
if self.update_topology_timer:
self.update_topology_timer.cancel() | 0.538498 | 0.080105 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
from .comfunc import rerange_index
class MSMLoss(paddle.nn.Layer):
"""
MSMLoss Loss, based on triplet loss. USE P * K samples.
the batch size is fixed. Batch_size = P * K; but the K may vary between batches.
same label gather together
supported_metrics = [
'euclidean',
'sqeuclidean',
'cityblock',
]
only consider samples_each_class = 2
"""
def __init__(self, batch_size=120, samples_each_class=2, margin=0.1):
super(MSMLoss, self).__init__()
self.margin = margin
self.samples_each_class = samples_each_class
self.batch_size = batch_size
self.rerange_index = rerange_index(batch_size, samples_each_class)
def forward(self, input, target=None):
#normalization
features = input["features"]
features = self._nomalize(features)
samples_each_class = self.samples_each_class
rerange_index = paddle.to_tensor(self.rerange_index)
#calc sm
diffs = paddle.unsqueeze(
features, axis=1) - paddle.unsqueeze(
features, axis=0)
similary_matrix = paddle.sum(paddle.square(diffs), axis=-1)
#rerange
tmp = paddle.reshape(similary_matrix, shape=[-1, 1])
tmp = paddle.gather(tmp, index=rerange_index)
similary_matrix = paddle.reshape(tmp, shape=[-1, self.batch_size])
#split
ignore, pos, neg = paddle.split(
similary_matrix,
num_or_sections=[1, samples_each_class - 1, -1],
axis=1)
ignore.stop_gradient = True
hard_pos = paddle.max(pos)
hard_neg = paddle.min(neg)
loss = hard_pos + self.margin - hard_neg
loss = paddle.nn.ReLU()(loss)
return {"msmloss": loss}
def _nomalize(self, input):
input_norm = paddle.sqrt(
paddle.sum(paddle.square(input), axis=1, keepdim=True))
return paddle.divide(input, input_norm) | paddlex/ppcls/loss/msmloss.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
from .comfunc import rerange_index
class MSMLoss(paddle.nn.Layer):
"""
MSMLoss Loss, based on triplet loss. USE P * K samples.
the batch size is fixed. Batch_size = P * K; but the K may vary between batches.
same label gather together
supported_metrics = [
'euclidean',
'sqeuclidean',
'cityblock',
]
only consider samples_each_class = 2
"""
def __init__(self, batch_size=120, samples_each_class=2, margin=0.1):
super(MSMLoss, self).__init__()
self.margin = margin
self.samples_each_class = samples_each_class
self.batch_size = batch_size
self.rerange_index = rerange_index(batch_size, samples_each_class)
def forward(self, input, target=None):
#normalization
features = input["features"]
features = self._nomalize(features)
samples_each_class = self.samples_each_class
rerange_index = paddle.to_tensor(self.rerange_index)
#calc sm
diffs = paddle.unsqueeze(
features, axis=1) - paddle.unsqueeze(
features, axis=0)
similary_matrix = paddle.sum(paddle.square(diffs), axis=-1)
#rerange
tmp = paddle.reshape(similary_matrix, shape=[-1, 1])
tmp = paddle.gather(tmp, index=rerange_index)
similary_matrix = paddle.reshape(tmp, shape=[-1, self.batch_size])
#split
ignore, pos, neg = paddle.split(
similary_matrix,
num_or_sections=[1, samples_each_class - 1, -1],
axis=1)
ignore.stop_gradient = True
hard_pos = paddle.max(pos)
hard_neg = paddle.min(neg)
loss = hard_pos + self.margin - hard_neg
loss = paddle.nn.ReLU()(loss)
return {"msmloss": loss}
def _nomalize(self, input):
input_norm = paddle.sqrt(
paddle.sum(paddle.square(input), axis=1, keepdim=True))
return paddle.divide(input, input_norm) | 0.836688 | 0.237233 |
import pybamm
import unittest
import numpy as np
class TestQuickPlot(unittest.TestCase):
def test_simple_ode_model(self):
model = pybamm.lithium_ion.BaseModel(name="Simple ODE Model")
whole_cell = ["negative electrode", "separator", "positive electrode"]
# Create variables: domain is explicitly empty since these variables are only
# functions of time
a = pybamm.Variable("a", domain=[])
b = pybamm.Variable("b", domain=[])
c = pybamm.Variable("c", domain=[])
# Simple ODEs
model.rhs = {a: pybamm.Scalar(2), b: pybamm.Scalar(0), c: -c}
# Simple initial conditions
model.initial_conditions = {
a: pybamm.Scalar(0),
b: pybamm.Scalar(1),
c: pybamm.Scalar(1),
}
# no boundary conditions for an ODE model
# Broadcast some of the variables
model.variables = {
"a": a,
"b broadcasted": pybamm.FullBroadcast(b, whole_cell, "current collector"),
"c broadcasted": pybamm.FullBroadcast(
c, ["negative electrode", "separator"], "current collector"
),
"b broadcasted negative electrode": pybamm.PrimaryBroadcast(
b, "negative particle"
),
"c broadcasted positive electrode": pybamm.PrimaryBroadcast(
c, "positive particle"
),
}
model.timescale = pybamm.Scalar(1)
# ODEs only (don't use jacobian)
model.use_jacobian = False
# Process and solve
geometry = model.default_geometry
param = model.default_parameter_values
param.process_model(model)
param.process_geometry(geometry)
mesh = pybamm.Mesh(geometry, model.default_submesh_types, model.default_var_pts)
disc = pybamm.Discretisation(mesh, model.default_spatial_methods)
disc.process_model(model)
solver = model.default_solver
t_eval = np.linspace(0, 2, 100)
solution = solver.solve(model, t_eval)
quick_plot = pybamm.QuickPlot(
solution,
[
"a",
"b broadcasted",
"c broadcasted",
"b broadcasted negative electrode",
"c broadcasted positive electrode",
],
)
quick_plot.plot(0)
# update the axis
new_axis = [0, 0.5, 0, 1]
quick_plot.axis_limits.update({("a",): new_axis})
self.assertEqual(quick_plot.axis_limits[("a",)], new_axis)
# and now reset them
quick_plot.reset_axis()
self.assertNotEqual(quick_plot.axis_limits[("a",)], new_axis)
# check dynamic plot loads
quick_plot.dynamic_plot(testing=True)
quick_plot.slider_update(0.01)
# Test with different output variables
quick_plot = pybamm.QuickPlot(solution, ["b broadcasted"])
self.assertEqual(len(quick_plot.axis_limits), 1)
quick_plot.plot(0)
quick_plot = pybamm.QuickPlot(
solution,
[
["a", "a"],
["b broadcasted", "b broadcasted"],
"c broadcasted",
"b broadcasted negative electrode",
"c broadcasted positive electrode",
],
)
self.assertEqual(len(quick_plot.axis_limits), 5)
quick_plot.plot(0)
# update the axis
new_axis = [0, 0.5, 0, 1]
var_key = ("c broadcasted",)
quick_plot.axis_limits.update({var_key: new_axis})
self.assertEqual(quick_plot.axis_limits[var_key], new_axis)
# and now reset them
quick_plot.reset_axis()
self.assertNotEqual(quick_plot.axis_limits[var_key], new_axis)
# check dynamic plot loads
quick_plot.dynamic_plot(testing=True)
quick_plot.slider_update(0.01)
# Test longer name
model.variables["Variable with a very long name"] = model.variables["a"]
quick_plot = pybamm.QuickPlot(solution, ["Variable with a very long name"])
quick_plot.plot(0)
# Test different inputs
quick_plot = pybamm.QuickPlot(
[solution, solution],
["a"],
colors=["r", "g", "b"],
linestyles=["-", "--"],
figsize=(1, 2),
labels=["sol 1", "sol 2"],
)
self.assertEqual(quick_plot.colors, ["r", "g", "b"])
self.assertEqual(quick_plot.linestyles, ["-", "--"])
self.assertEqual(quick_plot.figsize, (1, 2))
self.assertEqual(quick_plot.labels, ["sol 1", "sol 2"])
# Test different time units
quick_plot = pybamm.QuickPlot(solution, ["a"])
self.assertEqual(quick_plot.time_scaling_factor, 1)
quick_plot = pybamm.QuickPlot(solution, ["a"], time_unit="seconds")
quick_plot.plot(0)
self.assertEqual(quick_plot.time_scaling_factor, 1)
np.testing.assert_array_almost_equal(
quick_plot.plots[("a",)][0][0].get_xdata(), t_eval
)
np.testing.assert_array_almost_equal(
quick_plot.plots[("a",)][0][0].get_ydata(), 2 * t_eval
)
quick_plot = pybamm.QuickPlot(solution, ["a"], time_unit="minutes")
quick_plot.plot(0)
self.assertEqual(quick_plot.time_scaling_factor, 60)
np.testing.assert_array_almost_equal(
quick_plot.plots[("a",)][0][0].get_xdata(), t_eval / 60
)
np.testing.assert_array_almost_equal(
quick_plot.plots[("a",)][0][0].get_ydata(), 2 * t_eval
)
quick_plot = pybamm.QuickPlot(solution, ["a"], time_unit="hours")
quick_plot.plot(0)
self.assertEqual(quick_plot.time_scaling_factor, 3600)
np.testing.assert_array_almost_equal(
quick_plot.plots[("a",)][0][0].get_xdata(), t_eval / 3600
)
np.testing.assert_array_almost_equal(
quick_plot.plots[("a",)][0][0].get_ydata(), 2 * t_eval
)
with self.assertRaisesRegex(ValueError, "time unit"):
pybamm.QuickPlot(solution, ["a"], time_unit="bad unit")
# long solution defaults to hours instead of seconds
solution_long = solver.solve(model, np.linspace(0, 1e5))
quick_plot = pybamm.QuickPlot(solution_long, ["a"])
self.assertEqual(quick_plot.time_scaling_factor, 3600)
# Test different spatial units
quick_plot = pybamm.QuickPlot(solution, ["a"])
self.assertEqual(quick_plot.spatial_unit, "$\mu m$")
quick_plot = pybamm.QuickPlot(solution, ["a"], spatial_unit="m")
self.assertEqual(quick_plot.spatial_unit, "m")
quick_plot = pybamm.QuickPlot(solution, ["a"], spatial_unit="mm")
self.assertEqual(quick_plot.spatial_unit, "mm")
quick_plot = pybamm.QuickPlot(solution, ["a"], spatial_unit="um")
self.assertEqual(quick_plot.spatial_unit, "$\mu m$")
with self.assertRaisesRegex(ValueError, "spatial unit"):
pybamm.QuickPlot(solution, ["a"], spatial_unit="bad unit")
# Test 2D variables
model.variables["2D variable"] = disc.process_symbol(
pybamm.FullBroadcast(
1, "negative particle", {"secondary": "negative electrode"}
)
)
quick_plot = pybamm.QuickPlot(solution, ["2D variable"])
quick_plot.plot(0)
quick_plot.dynamic_plot(testing=True)
quick_plot.slider_update(0.01)
with self.assertRaisesRegex(NotImplementedError, "Cannot plot 2D variables"):
pybamm.QuickPlot([solution, solution], ["2D variable"])
# Test different variable limits
quick_plot = pybamm.QuickPlot(
solution, ["a", ["c broadcasted", "c broadcasted"]], variable_limits="tight"
)
self.assertEqual(quick_plot.axis_limits[("a",)][2:], [None, None])
self.assertEqual(
quick_plot.axis_limits[("c broadcasted", "c broadcasted")][2:], [None, None]
)
quick_plot.plot(0)
quick_plot.slider_update(1)
quick_plot = pybamm.QuickPlot(
solution, ["2D variable"], variable_limits="tight"
)
self.assertEqual(quick_plot.variable_limits[("2D variable",)], (None, None))
quick_plot.plot(0)
quick_plot.slider_update(1)
quick_plot = pybamm.QuickPlot(
solution,
["a", ["c broadcasted", "c broadcasted"]],
variable_limits={"a": [1, 2], ("c broadcasted", "c broadcasted"): [3, 4]},
)
self.assertEqual(quick_plot.axis_limits[("a",)][2:], [1, 2])
self.assertEqual(
quick_plot.axis_limits[("c broadcasted", "c broadcasted")][2:], [3, 4]
)
quick_plot.plot(0)
quick_plot.slider_update(1)
quick_plot = pybamm.QuickPlot(
solution, ["a", "b broadcasted"], variable_limits={"a": "tight"}
)
self.assertEqual(quick_plot.axis_limits[("a",)][2:], [None, None])
self.assertNotEqual(
quick_plot.axis_limits[("b broadcasted",)][2:], [None, None]
)
quick_plot.plot(0)
quick_plot.slider_update(1)
with self.assertRaisesRegex(
TypeError, "variable_limits must be 'fixed', 'tight', or a dict"
):
pybamm.QuickPlot(
solution, ["a", "b broadcasted"], variable_limits="bad variable limits"
)
# Test errors
with self.assertRaisesRegex(ValueError, "Mismatching variable domains"):
pybamm.QuickPlot(solution, [["a", "b broadcasted"]])
with self.assertRaisesRegex(ValueError, "labels"):
pybamm.QuickPlot(
[solution, solution], ["a"], labels=["sol 1", "sol 2", "sol 3"]
)
# No variable can be NaN
model.variables["NaN variable"] = disc.process_symbol(pybamm.Scalar(np.nan))
with self.assertRaisesRegex(
ValueError, "All-NaN variable 'NaN variable' provided"
):
pybamm.QuickPlot(solution, ["NaN variable"])
pybamm.close_plots()
def test_spm_simulation(self):
# SPM
model = pybamm.lithium_ion.SPM()
sim = pybamm.Simulation(model)
t_eval = np.linspace(0, 10, 2)
sim.solve(t_eval)
# mixed simulation and solution input
# solution should be extracted from the simulation
quick_plot = pybamm.QuickPlot([sim, sim.solution])
quick_plot.plot(0)
pybamm.close_plots()
def test_loqs_spme(self):
t_eval = np.linspace(0, 10, 2)
for model in [pybamm.lithium_ion.SPMe(), pybamm.lead_acid.LOQS()]:
geometry = model.default_geometry
param = model.default_parameter_values
param.process_model(model)
param.process_geometry(geometry)
var = pybamm.standard_spatial_vars
var_pts = {var.x_n: 5, var.x_s: 5, var.x_p: 5, var.r_n: 5, var.r_p: 5}
mesh = pybamm.Mesh(geometry, model.default_submesh_types, var_pts)
disc = pybamm.Discretisation(mesh, model.default_spatial_methods)
disc.process_model(model)
solver = model.default_solver
solution = solver.solve(model, t_eval)
pybamm.QuickPlot(solution)
# check 1D (space) variables update properly for different time units
t = solution["Time [s]"].entries
c_e_var = solution["Electrolyte concentration [mol.m-3]"]
# 1D variables should be evaluated on edges
L_x = param.evaluate(pybamm.geometric_parameters.L_x)
c_e = c_e_var(t=t, x=mesh.combine_submeshes(*c_e_var.domain).edges * L_x)
for unit, scale in zip(["seconds", "minutes", "hours"], [1, 60, 3600]):
quick_plot = pybamm.QuickPlot(
solution, ["Electrolyte concentration [mol.m-3]"], time_unit=unit
)
quick_plot.plot(0)
qp_data = (
quick_plot.plots[("Electrolyte concentration [mol.m-3]",)][0][
0
].get_ydata(),
)[0]
np.testing.assert_array_almost_equal(qp_data, c_e[:, 0])
quick_plot.slider_update(t_eval[-1] / scale)
qp_data = (
quick_plot.plots[("Electrolyte concentration [mol.m-3]",)][0][
0
].get_ydata(),
)[0][:, 0]
np.testing.assert_array_almost_equal(qp_data, c_e[:, 1])
# test quick plot of particle for spme
if model.name == "Single Particle Model with electrolyte":
output_variables = [
"X-averaged negative particle concentration [mol.m-3]",
"X-averaged positive particle concentration [mol.m-3]",
"Negative particle concentration [mol.m-3]",
"Positive particle concentration [mol.m-3]",
]
pybamm.QuickPlot(solution, output_variables)
# check 2D (space) variables update properly for different time units
c_n = solution["Negative particle concentration [mol.m-3]"].entries
for unit, scale in zip(["seconds", "minutes", "hours"], [1, 60, 3600]):
quick_plot = pybamm.QuickPlot(
solution,
["Negative particle concentration [mol.m-3]"],
time_unit=unit,
)
quick_plot.plot(0)
qp_data = quick_plot.plots[
("Negative particle concentration [mol.m-3]",)
][0][1]
np.testing.assert_array_almost_equal(qp_data, c_n[:, :, 0])
quick_plot.slider_update(t_eval[-1] / scale)
qp_data = quick_plot.plots[
("Negative particle concentration [mol.m-3]",)
][0][1]
np.testing.assert_array_almost_equal(qp_data, c_n[:, :, 1])
pybamm.close_plots()
def test_plot_1plus1D_spme(self):
spm = pybamm.lithium_ion.SPMe(
{"current collector": "potential pair", "dimensionality": 1}
)
geometry = spm.default_geometry
param = spm.default_parameter_values
param.process_model(spm)
param.process_geometry(geometry)
var = pybamm.standard_spatial_vars
var_pts = {var.x_n: 5, var.x_s: 5, var.x_p: 5, var.r_n: 5, var.r_p: 5, var.z: 5}
mesh = pybamm.Mesh(geometry, spm.default_submesh_types, var_pts)
disc_spm = pybamm.Discretisation(mesh, spm.default_spatial_methods)
disc_spm.process_model(spm)
t_eval = np.linspace(0, 100, 10)
solution = spm.default_solver.solve(spm, t_eval)
# check 2D (x,z space) variables update properly for different time units
# Note: these should be the transpose of the entries in the processed variable
c_e = solution["Electrolyte concentration [mol.m-3]"].entries
for unit, scale in zip(["seconds", "minutes", "hours"], [1, 60, 3600]):
quick_plot = pybamm.QuickPlot(
solution, ["Electrolyte concentration [mol.m-3]"], time_unit=unit
)
quick_plot.plot(0)
qp_data = quick_plot.plots[("Electrolyte concentration [mol.m-3]",)][0][1]
np.testing.assert_array_almost_equal(qp_data.T, c_e[:, :, 0])
quick_plot.slider_update(t_eval[-1] / scale)
qp_data = quick_plot.plots[("Electrolyte concentration [mol.m-3]",)][0][1]
np.testing.assert_array_almost_equal(qp_data.T, c_e[:, :, -1])
pybamm.close_plots()
def test_plot_2plus1D_spm(self):
spm = pybamm.lithium_ion.SPM(
{"current collector": "potential pair", "dimensionality": 2}
)
geometry = spm.default_geometry
param = spm.default_parameter_values
param.process_model(spm)
param.process_geometry(geometry)
var = pybamm.standard_spatial_vars
var_pts = {
var.x_n: 5,
var.x_s: 5,
var.x_p: 5,
var.r_n: 5,
var.r_p: 5,
var.y: 5,
var.z: 5,
}
mesh = pybamm.Mesh(geometry, spm.default_submesh_types, var_pts)
disc_spm = pybamm.Discretisation(mesh, spm.default_spatial_methods)
disc_spm.process_model(spm)
t_eval = np.linspace(0, 100, 10)
solution = spm.default_solver.solve(spm, t_eval)
quick_plot = pybamm.QuickPlot(
solution,
[
"Negative current collector potential [V]",
"Positive current collector potential [V]",
"Terminal voltage [V]",
],
)
quick_plot.dynamic_plot(testing=True)
quick_plot.slider_update(1)
# check 2D (y,z space) variables update properly for different time units
phi_n = solution["Negative current collector potential [V]"].entries
for unit, scale in zip(["seconds", "minutes", "hours"], [1, 60, 3600]):
quick_plot = pybamm.QuickPlot(
solution, ["Negative current collector potential [V]"], time_unit=unit
)
quick_plot.plot(0)
qp_data = quick_plot.plots[("Negative current collector potential [V]",)][
0
][1]
np.testing.assert_array_almost_equal(qp_data, phi_n[:, :, 0])
quick_plot.slider_update(t_eval[-1] / scale)
qp_data = quick_plot.plots[("Negative current collector potential [V]",)][
0
][1]
np.testing.assert_array_almost_equal(qp_data, phi_n[:, :, -1])
with self.assertRaisesRegex(NotImplementedError, "Shape not recognized for"):
pybamm.QuickPlot(solution, ["Negative particle concentration [mol.m-3]"])
pybamm.close_plots()
def test_failure(self):
with self.assertRaisesRegex(TypeError, "solutions must be"):
pybamm.QuickPlot(1)
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main() | tests/unit/test_quick_plot.py | import pybamm
import unittest
import numpy as np
class TestQuickPlot(unittest.TestCase):
def test_simple_ode_model(self):
model = pybamm.lithium_ion.BaseModel(name="Simple ODE Model")
whole_cell = ["negative electrode", "separator", "positive electrode"]
# Create variables: domain is explicitly empty since these variables are only
# functions of time
a = pybamm.Variable("a", domain=[])
b = pybamm.Variable("b", domain=[])
c = pybamm.Variable("c", domain=[])
# Simple ODEs
model.rhs = {a: pybamm.Scalar(2), b: pybamm.Scalar(0), c: -c}
# Simple initial conditions
model.initial_conditions = {
a: pybamm.Scalar(0),
b: pybamm.Scalar(1),
c: pybamm.Scalar(1),
}
# no boundary conditions for an ODE model
# Broadcast some of the variables
model.variables = {
"a": a,
"b broadcasted": pybamm.FullBroadcast(b, whole_cell, "current collector"),
"c broadcasted": pybamm.FullBroadcast(
c, ["negative electrode", "separator"], "current collector"
),
"b broadcasted negative electrode": pybamm.PrimaryBroadcast(
b, "negative particle"
),
"c broadcasted positive electrode": pybamm.PrimaryBroadcast(
c, "positive particle"
),
}
model.timescale = pybamm.Scalar(1)
# ODEs only (don't use jacobian)
model.use_jacobian = False
# Process and solve
geometry = model.default_geometry
param = model.default_parameter_values
param.process_model(model)
param.process_geometry(geometry)
mesh = pybamm.Mesh(geometry, model.default_submesh_types, model.default_var_pts)
disc = pybamm.Discretisation(mesh, model.default_spatial_methods)
disc.process_model(model)
solver = model.default_solver
t_eval = np.linspace(0, 2, 100)
solution = solver.solve(model, t_eval)
quick_plot = pybamm.QuickPlot(
solution,
[
"a",
"b broadcasted",
"c broadcasted",
"b broadcasted negative electrode",
"c broadcasted positive electrode",
],
)
quick_plot.plot(0)
# update the axis
new_axis = [0, 0.5, 0, 1]
quick_plot.axis_limits.update({("a",): new_axis})
self.assertEqual(quick_plot.axis_limits[("a",)], new_axis)
# and now reset them
quick_plot.reset_axis()
self.assertNotEqual(quick_plot.axis_limits[("a",)], new_axis)
# check dynamic plot loads
quick_plot.dynamic_plot(testing=True)
quick_plot.slider_update(0.01)
# Test with different output variables
quick_plot = pybamm.QuickPlot(solution, ["b broadcasted"])
self.assertEqual(len(quick_plot.axis_limits), 1)
quick_plot.plot(0)
quick_plot = pybamm.QuickPlot(
solution,
[
["a", "a"],
["b broadcasted", "b broadcasted"],
"c broadcasted",
"b broadcasted negative electrode",
"c broadcasted positive electrode",
],
)
self.assertEqual(len(quick_plot.axis_limits), 5)
quick_plot.plot(0)
# update the axis
new_axis = [0, 0.5, 0, 1]
var_key = ("c broadcasted",)
quick_plot.axis_limits.update({var_key: new_axis})
self.assertEqual(quick_plot.axis_limits[var_key], new_axis)
# and now reset them
quick_plot.reset_axis()
self.assertNotEqual(quick_plot.axis_limits[var_key], new_axis)
# check dynamic plot loads
quick_plot.dynamic_plot(testing=True)
quick_plot.slider_update(0.01)
# Test longer name
model.variables["Variable with a very long name"] = model.variables["a"]
quick_plot = pybamm.QuickPlot(solution, ["Variable with a very long name"])
quick_plot.plot(0)
# Test different inputs
quick_plot = pybamm.QuickPlot(
[solution, solution],
["a"],
colors=["r", "g", "b"],
linestyles=["-", "--"],
figsize=(1, 2),
labels=["sol 1", "sol 2"],
)
self.assertEqual(quick_plot.colors, ["r", "g", "b"])
self.assertEqual(quick_plot.linestyles, ["-", "--"])
self.assertEqual(quick_plot.figsize, (1, 2))
self.assertEqual(quick_plot.labels, ["sol 1", "sol 2"])
# Test different time units
quick_plot = pybamm.QuickPlot(solution, ["a"])
self.assertEqual(quick_plot.time_scaling_factor, 1)
quick_plot = pybamm.QuickPlot(solution, ["a"], time_unit="seconds")
quick_plot.plot(0)
self.assertEqual(quick_plot.time_scaling_factor, 1)
np.testing.assert_array_almost_equal(
quick_plot.plots[("a",)][0][0].get_xdata(), t_eval
)
np.testing.assert_array_almost_equal(
quick_plot.plots[("a",)][0][0].get_ydata(), 2 * t_eval
)
quick_plot = pybamm.QuickPlot(solution, ["a"], time_unit="minutes")
quick_plot.plot(0)
self.assertEqual(quick_plot.time_scaling_factor, 60)
np.testing.assert_array_almost_equal(
quick_plot.plots[("a",)][0][0].get_xdata(), t_eval / 60
)
np.testing.assert_array_almost_equal(
quick_plot.plots[("a",)][0][0].get_ydata(), 2 * t_eval
)
quick_plot = pybamm.QuickPlot(solution, ["a"], time_unit="hours")
quick_plot.plot(0)
self.assertEqual(quick_plot.time_scaling_factor, 3600)
np.testing.assert_array_almost_equal(
quick_plot.plots[("a",)][0][0].get_xdata(), t_eval / 3600
)
np.testing.assert_array_almost_equal(
quick_plot.plots[("a",)][0][0].get_ydata(), 2 * t_eval
)
with self.assertRaisesRegex(ValueError, "time unit"):
pybamm.QuickPlot(solution, ["a"], time_unit="bad unit")
# long solution defaults to hours instead of seconds
solution_long = solver.solve(model, np.linspace(0, 1e5))
quick_plot = pybamm.QuickPlot(solution_long, ["a"])
self.assertEqual(quick_plot.time_scaling_factor, 3600)
# Test different spatial units
quick_plot = pybamm.QuickPlot(solution, ["a"])
self.assertEqual(quick_plot.spatial_unit, "$\mu m$")
quick_plot = pybamm.QuickPlot(solution, ["a"], spatial_unit="m")
self.assertEqual(quick_plot.spatial_unit, "m")
quick_plot = pybamm.QuickPlot(solution, ["a"], spatial_unit="mm")
self.assertEqual(quick_plot.spatial_unit, "mm")
quick_plot = pybamm.QuickPlot(solution, ["a"], spatial_unit="um")
self.assertEqual(quick_plot.spatial_unit, "$\mu m$")
with self.assertRaisesRegex(ValueError, "spatial unit"):
pybamm.QuickPlot(solution, ["a"], spatial_unit="bad unit")
# Test 2D variables
model.variables["2D variable"] = disc.process_symbol(
pybamm.FullBroadcast(
1, "negative particle", {"secondary": "negative electrode"}
)
)
quick_plot = pybamm.QuickPlot(solution, ["2D variable"])
quick_plot.plot(0)
quick_plot.dynamic_plot(testing=True)
quick_plot.slider_update(0.01)
with self.assertRaisesRegex(NotImplementedError, "Cannot plot 2D variables"):
pybamm.QuickPlot([solution, solution], ["2D variable"])
# Test different variable limits
quick_plot = pybamm.QuickPlot(
solution, ["a", ["c broadcasted", "c broadcasted"]], variable_limits="tight"
)
self.assertEqual(quick_plot.axis_limits[("a",)][2:], [None, None])
self.assertEqual(
quick_plot.axis_limits[("c broadcasted", "c broadcasted")][2:], [None, None]
)
quick_plot.plot(0)
quick_plot.slider_update(1)
quick_plot = pybamm.QuickPlot(
solution, ["2D variable"], variable_limits="tight"
)
self.assertEqual(quick_plot.variable_limits[("2D variable",)], (None, None))
quick_plot.plot(0)
quick_plot.slider_update(1)
quick_plot = pybamm.QuickPlot(
solution,
["a", ["c broadcasted", "c broadcasted"]],
variable_limits={"a": [1, 2], ("c broadcasted", "c broadcasted"): [3, 4]},
)
self.assertEqual(quick_plot.axis_limits[("a",)][2:], [1, 2])
self.assertEqual(
quick_plot.axis_limits[("c broadcasted", "c broadcasted")][2:], [3, 4]
)
quick_plot.plot(0)
quick_plot.slider_update(1)
quick_plot = pybamm.QuickPlot(
solution, ["a", "b broadcasted"], variable_limits={"a": "tight"}
)
self.assertEqual(quick_plot.axis_limits[("a",)][2:], [None, None])
self.assertNotEqual(
quick_plot.axis_limits[("b broadcasted",)][2:], [None, None]
)
quick_plot.plot(0)
quick_plot.slider_update(1)
with self.assertRaisesRegex(
TypeError, "variable_limits must be 'fixed', 'tight', or a dict"
):
pybamm.QuickPlot(
solution, ["a", "b broadcasted"], variable_limits="bad variable limits"
)
# Test errors
with self.assertRaisesRegex(ValueError, "Mismatching variable domains"):
pybamm.QuickPlot(solution, [["a", "b broadcasted"]])
with self.assertRaisesRegex(ValueError, "labels"):
pybamm.QuickPlot(
[solution, solution], ["a"], labels=["sol 1", "sol 2", "sol 3"]
)
# No variable can be NaN
model.variables["NaN variable"] = disc.process_symbol(pybamm.Scalar(np.nan))
with self.assertRaisesRegex(
ValueError, "All-NaN variable 'NaN variable' provided"
):
pybamm.QuickPlot(solution, ["NaN variable"])
pybamm.close_plots()
def test_spm_simulation(self):
# SPM
model = pybamm.lithium_ion.SPM()
sim = pybamm.Simulation(model)
t_eval = np.linspace(0, 10, 2)
sim.solve(t_eval)
# mixed simulation and solution input
# solution should be extracted from the simulation
quick_plot = pybamm.QuickPlot([sim, sim.solution])
quick_plot.plot(0)
pybamm.close_plots()
def test_loqs_spme(self):
t_eval = np.linspace(0, 10, 2)
for model in [pybamm.lithium_ion.SPMe(), pybamm.lead_acid.LOQS()]:
geometry = model.default_geometry
param = model.default_parameter_values
param.process_model(model)
param.process_geometry(geometry)
var = pybamm.standard_spatial_vars
var_pts = {var.x_n: 5, var.x_s: 5, var.x_p: 5, var.r_n: 5, var.r_p: 5}
mesh = pybamm.Mesh(geometry, model.default_submesh_types, var_pts)
disc = pybamm.Discretisation(mesh, model.default_spatial_methods)
disc.process_model(model)
solver = model.default_solver
solution = solver.solve(model, t_eval)
pybamm.QuickPlot(solution)
# check 1D (space) variables update properly for different time units
t = solution["Time [s]"].entries
c_e_var = solution["Electrolyte concentration [mol.m-3]"]
# 1D variables should be evaluated on edges
L_x = param.evaluate(pybamm.geometric_parameters.L_x)
c_e = c_e_var(t=t, x=mesh.combine_submeshes(*c_e_var.domain).edges * L_x)
for unit, scale in zip(["seconds", "minutes", "hours"], [1, 60, 3600]):
quick_plot = pybamm.QuickPlot(
solution, ["Electrolyte concentration [mol.m-3]"], time_unit=unit
)
quick_plot.plot(0)
qp_data = (
quick_plot.plots[("Electrolyte concentration [mol.m-3]",)][0][
0
].get_ydata(),
)[0]
np.testing.assert_array_almost_equal(qp_data, c_e[:, 0])
quick_plot.slider_update(t_eval[-1] / scale)
qp_data = (
quick_plot.plots[("Electrolyte concentration [mol.m-3]",)][0][
0
].get_ydata(),
)[0][:, 0]
np.testing.assert_array_almost_equal(qp_data, c_e[:, 1])
# test quick plot of particle for spme
if model.name == "Single Particle Model with electrolyte":
output_variables = [
"X-averaged negative particle concentration [mol.m-3]",
"X-averaged positive particle concentration [mol.m-3]",
"Negative particle concentration [mol.m-3]",
"Positive particle concentration [mol.m-3]",
]
pybamm.QuickPlot(solution, output_variables)
# check 2D (space) variables update properly for different time units
c_n = solution["Negative particle concentration [mol.m-3]"].entries
for unit, scale in zip(["seconds", "minutes", "hours"], [1, 60, 3600]):
quick_plot = pybamm.QuickPlot(
solution,
["Negative particle concentration [mol.m-3]"],
time_unit=unit,
)
quick_plot.plot(0)
qp_data = quick_plot.plots[
("Negative particle concentration [mol.m-3]",)
][0][1]
np.testing.assert_array_almost_equal(qp_data, c_n[:, :, 0])
quick_plot.slider_update(t_eval[-1] / scale)
qp_data = quick_plot.plots[
("Negative particle concentration [mol.m-3]",)
][0][1]
np.testing.assert_array_almost_equal(qp_data, c_n[:, :, 1])
pybamm.close_plots()
def test_plot_1plus1D_spme(self):
spm = pybamm.lithium_ion.SPMe(
{"current collector": "potential pair", "dimensionality": 1}
)
geometry = spm.default_geometry
param = spm.default_parameter_values
param.process_model(spm)
param.process_geometry(geometry)
var = pybamm.standard_spatial_vars
var_pts = {var.x_n: 5, var.x_s: 5, var.x_p: 5, var.r_n: 5, var.r_p: 5, var.z: 5}
mesh = pybamm.Mesh(geometry, spm.default_submesh_types, var_pts)
disc_spm = pybamm.Discretisation(mesh, spm.default_spatial_methods)
disc_spm.process_model(spm)
t_eval = np.linspace(0, 100, 10)
solution = spm.default_solver.solve(spm, t_eval)
# check 2D (x,z space) variables update properly for different time units
# Note: these should be the transpose of the entries in the processed variable
c_e = solution["Electrolyte concentration [mol.m-3]"].entries
for unit, scale in zip(["seconds", "minutes", "hours"], [1, 60, 3600]):
quick_plot = pybamm.QuickPlot(
solution, ["Electrolyte concentration [mol.m-3]"], time_unit=unit
)
quick_plot.plot(0)
qp_data = quick_plot.plots[("Electrolyte concentration [mol.m-3]",)][0][1]
np.testing.assert_array_almost_equal(qp_data.T, c_e[:, :, 0])
quick_plot.slider_update(t_eval[-1] / scale)
qp_data = quick_plot.plots[("Electrolyte concentration [mol.m-3]",)][0][1]
np.testing.assert_array_almost_equal(qp_data.T, c_e[:, :, -1])
pybamm.close_plots()
def test_plot_2plus1D_spm(self):
spm = pybamm.lithium_ion.SPM(
{"current collector": "potential pair", "dimensionality": 2}
)
geometry = spm.default_geometry
param = spm.default_parameter_values
param.process_model(spm)
param.process_geometry(geometry)
var = pybamm.standard_spatial_vars
var_pts = {
var.x_n: 5,
var.x_s: 5,
var.x_p: 5,
var.r_n: 5,
var.r_p: 5,
var.y: 5,
var.z: 5,
}
mesh = pybamm.Mesh(geometry, spm.default_submesh_types, var_pts)
disc_spm = pybamm.Discretisation(mesh, spm.default_spatial_methods)
disc_spm.process_model(spm)
t_eval = np.linspace(0, 100, 10)
solution = spm.default_solver.solve(spm, t_eval)
quick_plot = pybamm.QuickPlot(
solution,
[
"Negative current collector potential [V]",
"Positive current collector potential [V]",
"Terminal voltage [V]",
],
)
quick_plot.dynamic_plot(testing=True)
quick_plot.slider_update(1)
# check 2D (y,z space) variables update properly for different time units
phi_n = solution["Negative current collector potential [V]"].entries
for unit, scale in zip(["seconds", "minutes", "hours"], [1, 60, 3600]):
quick_plot = pybamm.QuickPlot(
solution, ["Negative current collector potential [V]"], time_unit=unit
)
quick_plot.plot(0)
qp_data = quick_plot.plots[("Negative current collector potential [V]",)][
0
][1]
np.testing.assert_array_almost_equal(qp_data, phi_n[:, :, 0])
quick_plot.slider_update(t_eval[-1] / scale)
qp_data = quick_plot.plots[("Negative current collector potential [V]",)][
0
][1]
np.testing.assert_array_almost_equal(qp_data, phi_n[:, :, -1])
with self.assertRaisesRegex(NotImplementedError, "Shape not recognized for"):
pybamm.QuickPlot(solution, ["Negative particle concentration [mol.m-3]"])
pybamm.close_plots()
def test_failure(self):
with self.assertRaisesRegex(TypeError, "solutions must be"):
pybamm.QuickPlot(1)
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main() | 0.789315 | 0.764056 |
from math import ceil, log
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, bias_init_with_prob
from mmcv.ops import CornerPool, batched_nms
from mmdet.core import multi_apply
from ..builder import HEADS, build_loss
from ..utils import gaussian_radius, gen_gaussian_target
from .base_dense_head import BaseDenseHead
class BiCornerPool(nn.Module):
    """Bidirectional Corner Pooling Module (TopLeft, BottomRight, etc.)

    Combines two directional corner pools (e.g. top + left) with a 1x1
    shortcut projection of the input, followed by a final 3x3 conv.

    Args:
        in_channels (int): Input channels of module.
        directions (list[str]): Directions of the two CornerPools.
        feat_channels (int): Feature channels of module.
        out_channels (int): Output channels of module.
        norm_cfg (dict): Dictionary to construct and config norm layer.
    """

    def __init__(self,
                 in_channels,
                 directions,
                 feat_channels=128,
                 out_channels=128,
                 norm_cfg=dict(type='BN', requires_grad=True)):
        super(BiCornerPool, self).__init__()
        # One 3x3 conv feeding each directional corner pool.
        self.direction1_conv = ConvModule(
            in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)
        self.direction2_conv = ConvModule(
            in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)
        # Fuses the summed pooled branches; no activation here because the
        # ReLU is applied after adding the shortcut branch.
        self.aftpool_conv = ConvModule(
            feat_channels,
            out_channels,
            3,
            padding=1,
            norm_cfg=norm_cfg,
            act_cfg=None)
        # 1x1 shortcut projection of the raw input.
        self.conv1 = ConvModule(
            in_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
        self.conv2 = ConvModule(
            in_channels, out_channels, 3, padding=1, norm_cfg=norm_cfg)
        self.direction1_pool = CornerPool(directions[0])
        self.direction2_pool = CornerPool(directions[1])
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Forward features from the upstream network.

        Args:
            x (tensor): Input feature of BiCornerPool.

        Returns:
            conv2 (tensor): Output feature of BiCornerPool.
        """
        # Directional branches: conv then corner pool along each direction.
        pooled_1 = self.direction1_pool(self.direction1_conv(x))
        pooled_2 = self.direction2_pool(self.direction2_conv(x))
        # Fuse pooled branches, add the shortcut, then activate.
        fused = self.aftpool_conv(pooled_1 + pooled_2)
        shortcut = self.conv1(x)
        activated = self.relu(fused + shortcut)
        return self.conv2(activated)
@HEADS.register_module()
class CornerHead(BaseDenseHead):
"""Head of CornerNet: Detecting Objects as Paired Keypoints.
Code is modified from the `official github repo
<https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/
kp.py#L73>`_ .
More details can be found in the `paper
<https://arxiv.org/abs/1808.01244>`_ .
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
num_feat_levels (int): Levels of feature from the previous module. 2
for HourglassNet-104 and 1 for HourglassNet-52. Because
HourglassNet-104 outputs the final feature and intermediate
supervision feature and HourglassNet-52 only outputs the final
feature. Default: 2.
corner_emb_channels (int): Channel of embedding vector. Default: 1.
train_cfg (dict | None): Training config. Useless in CornerHead,
but we keep this variable for SingleStageDetector. Default: None.
test_cfg (dict | None): Testing config of CornerHead. Default: None.
loss_heatmap (dict | None): Config of corner heatmap loss. Default:
GaussianFocalLoss.
loss_embedding (dict | None): Config of corner embedding loss. Default:
AssociativeEmbeddingLoss.
loss_offset (dict | None): Config of corner offset loss. Default:
SmoothL1Loss.
"""
def __init__(self,
num_classes,
in_channels,
num_feat_levels=2,
corner_emb_channels=1,
train_cfg=None,
test_cfg=None,
loss_heatmap=dict(
type='GaussianFocalLoss',
alpha=2.0,
gamma=4.0,
loss_weight=1),
loss_embedding=dict(
type='AssociativeEmbeddingLoss',
pull_weight=0.25,
push_weight=0.25),
loss_offset=dict(
type='SmoothL1Loss', beta=1.0, loss_weight=1)):
super(CornerHead, self).__init__()
self.num_classes = num_classes
self.in_channels = in_channels
self.corner_emb_channels = corner_emb_channels
self.with_corner_emb = self.corner_emb_channels > 0
self.corner_offset_channels = 2
self.num_feat_levels = num_feat_levels
self.loss_heatmap = build_loss(
loss_heatmap) if loss_heatmap is not None else None
self.loss_embedding = build_loss(
loss_embedding) if loss_embedding is not None else None
self.loss_offset = build_loss(
loss_offset) if loss_offset is not None else None
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self._init_layers()
def _make_layers(self, out_channels, in_channels=256, feat_channels=256):
"""Initialize conv sequential for CornerHead."""
return nn.Sequential(
ConvModule(in_channels, feat_channels, 3, padding=1),
ConvModule(
feat_channels, out_channels, 1, norm_cfg=None, act_cfg=None))
def _init_corner_kpt_layers(self):
"""Initialize corner keypoint layers.
Including corner heatmap branch and corner offset branch. Each branch
has two parts: prefix `tl_` for top-left and `br_` for bottom-right.
"""
self.tl_pool, self.br_pool = nn.ModuleList(), nn.ModuleList()
self.tl_heat, self.br_heat = nn.ModuleList(), nn.ModuleList()
self.tl_off, self.br_off = nn.ModuleList(), nn.ModuleList()
for _ in range(self.num_feat_levels):
self.tl_pool.append(
BiCornerPool(
self.in_channels, ['top', 'left'],
out_channels=self.in_channels))
self.br_pool.append(
BiCornerPool(
self.in_channels, ['bottom', 'right'],
out_channels=self.in_channels))
self.tl_heat.append(
self._make_layers(
out_channels=self.num_classes,
in_channels=self.in_channels))
self.br_heat.append(
self._make_layers(
out_channels=self.num_classes,
in_channels=self.in_channels))
self.tl_off.append(
self._make_layers(
out_channels=self.corner_offset_channels,
in_channels=self.in_channels))
self.br_off.append(
self._make_layers(
out_channels=self.corner_offset_channels,
in_channels=self.in_channels))
def _init_corner_emb_layers(self):
"""Initialize corner embedding layers.
Only include corner embedding branch with two parts: prefix `tl_` for
top-left and `br_` for bottom-right.
"""
self.tl_emb, self.br_emb = nn.ModuleList(), nn.ModuleList()
for _ in range(self.num_feat_levels):
self.tl_emb.append(
self._make_layers(
out_channels=self.corner_emb_channels,
in_channels=self.in_channels))
self.br_emb.append(
self._make_layers(
out_channels=self.corner_emb_channels,
in_channels=self.in_channels))
def _init_layers(self):
"""Initialize layers for CornerHead.
Including two parts: corner keypoint layers and corner embedding layers
"""
self._init_corner_kpt_layers()
if self.with_corner_emb:
self._init_corner_emb_layers()
def init_weights(self):
"""Initialize weights of the head."""
bias_init = bias_init_with_prob(0.1)
for i in range(self.num_feat_levels):
# The initialization of parameters are different between nn.Conv2d
# and ConvModule. Our experiments show that using the original
# initialization of nn.Conv2d increases the final mAP by about 0.2%
self.tl_heat[i][-1].conv.reset_parameters()
self.tl_heat[i][-1].conv.bias.data.fill_(bias_init)
self.br_heat[i][-1].conv.reset_parameters()
self.br_heat[i][-1].conv.bias.data.fill_(bias_init)
self.tl_off[i][-1].conv.reset_parameters()
self.br_off[i][-1].conv.reset_parameters()
if self.with_corner_emb:
self.tl_emb[i][-1].conv.reset_parameters()
self.br_emb[i][-1].conv.reset_parameters()
def forward(self, feats):
    """Forward features from the upstream network.

    Applies :meth:`forward_single` to every feature level.

    Args:
        feats (tuple[Tensor]): Features from the upstream network, each is
            a 4D-tensor.

    Returns:
        tuple: Per-branch lists over levels, in order:

            - tl_heats (list[Tensor]): Top-left corner heatmaps,
              ``num_classes`` channels each.
            - br_heats (list[Tensor]): Bottom-right corner heatmaps,
              ``num_classes`` channels each.
            - tl_embs (list[Tensor] | list[None]): Top-left embedding
              heatmaps (``corner_emb_channels`` channels) or ``None`` when
              embeddings are disabled.
            - br_embs (list[Tensor] | list[None]): Bottom-right embedding
              heatmaps or ``None``.
            - tl_offs (list[Tensor]): Top-left offset heatmaps,
              ``corner_offset_channels`` channels each.
            - br_offs (list[Tensor]): Bottom-right offset heatmaps,
              ``corner_offset_channels`` channels each.
    """
    return multi_apply(self.forward_single, feats,
                       list(range(self.num_feat_levels)))
def forward_single(self, x, lvl_ind, return_pool=False):
    """Forward the feature of a single level.

    Args:
        x (Tensor): Feature of a single level.
        lvl_ind (int): Level index of the current feature.
        return_pool (bool): Also return the corner-pool features.
            Default: False.

    Returns:
        tuple[Tensor]: ``(tl_heat, br_heat, tl_emb, br_emb, tl_off,
        br_off)``; ``tl_emb``/``br_emb`` are ``None`` when
        ``self.with_corner_emb`` is False. When ``return_pool`` is True,
        ``tl_pool`` and ``br_pool`` are appended at the end.
    """
    # Corner pooling first; every other branch reads the pooled features.
    tl_pool = self.tl_pool[lvl_ind](x)
    br_pool = self.br_pool[lvl_ind](x)
    tl_heat = self.tl_heat[lvl_ind](tl_pool)
    br_heat = self.br_heat[lvl_ind](br_pool)
    if self.with_corner_emb:
        tl_emb = self.tl_emb[lvl_ind](tl_pool)
        br_emb = self.br_emb[lvl_ind](br_pool)
    else:
        tl_emb = br_emb = None
    tl_off = self.tl_off[lvl_ind](tl_pool)
    br_off = self.br_off[lvl_ind](br_pool)

    outputs = [tl_heat, br_heat, tl_emb, br_emb, tl_off, br_off]
    if return_pool:
        outputs += [tl_pool, br_pool]
    return outputs
def get_targets(self,
                gt_bboxes,
                gt_labels,
                feat_shape,
                img_shape,
                with_corner_emb=False,
                with_guiding_shift=False,
                with_centripetal_shift=False):
    """Generate corner targets.

    Including corner heatmap, corner offset.

    Optional: corner embedding, corner guiding shift, centripetal shift.

    For CornerNet, we generate corner heatmap, corner offset and corner
    embedding from this function.

    For CentripetalNet, we generate corner heatmap, corner offset, guiding
    shift and centripetal shift from this function.

    Args:
        gt_bboxes (list[Tensor]): Ground truth bboxes of each image, each
            has shape (num_gt, 4).
        gt_labels (list[Tensor]): Ground truth labels of each box, each has
            shape (num_gt,).
        feat_shape (list[int]): Shape of output feature,
            [batch, channel, height, width].
        img_shape (list[int]): Shape of input image,
            [height, width, channel].
        with_corner_emb (bool): Generate corner embedding target or not.
            Default: False.
        with_guiding_shift (bool): Generate guiding shift target or not.
            Default: False.
        with_centripetal_shift (bool): Generate centripetal shift target or
            not. Default: False.

    Returns:
        dict: Ground truth of corner heatmap, corner offset, corner
        embedding, guiding shift and centripetal shift. Containing the
        following keys:

            - topleft_heatmap (Tensor): Ground truth top-left corner
              heatmap.
            - bottomright_heatmap (Tensor): Ground truth bottom-right
              corner heatmap.
            - topleft_offset (Tensor): Ground truth top-left corner offset.
            - bottomright_offset (Tensor): Ground truth bottom-right corner
              offset.
            - corner_embedding (list[list[list[int]]]): Ground truth corner
              embedding. Not must have.
            - topleft_guiding_shift (Tensor): Ground truth top-left corner
              guiding shift. Not must have.
            - bottomright_guiding_shift (Tensor): Ground truth bottom-right
              corner guiding shift. Not must have.
            - topleft_centripetal_shift (Tensor): Ground truth top-left
              corner centripetal shift. Not must have.
            - bottomright_centripetal_shift (Tensor): Ground truth
              bottom-right corner centripetal shift. Not must have.
    """
    batch_size, _, height, width = feat_shape
    img_h, img_w = img_shape[:2]

    # Scale factors from image coordinates to feature-map coordinates.
    width_ratio = float(width / img_w)
    height_ratio = float(height / img_h)

    # Target tensors are allocated on the same device/dtype as the GT
    # boxes via new_zeros.
    gt_tl_heatmap = gt_bboxes[-1].new_zeros(
        [batch_size, self.num_classes, height, width])
    gt_br_heatmap = gt_bboxes[-1].new_zeros(
        [batch_size, self.num_classes, height, width])
    gt_tl_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width])
    gt_br_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width])

    if with_corner_emb:
        match = []

    # Guiding shift is a kind of offset, from center to corner
    if with_guiding_shift:
        gt_tl_guiding_shift = gt_bboxes[-1].new_zeros(
            [batch_size, 2, height, width])
        gt_br_guiding_shift = gt_bboxes[-1].new_zeros(
            [batch_size, 2, height, width])
    # Centripetal shift is also a kind of offset, from center to corner
    # and normalized by log.
    if with_centripetal_shift:
        gt_tl_centripetal_shift = gt_bboxes[-1].new_zeros(
            [batch_size, 2, height, width])
        gt_br_centripetal_shift = gt_bboxes[-1].new_zeros(
            [batch_size, 2, height, width])

    for batch_id in range(batch_size):
        # Ground truth of corner embedding per image is a list of coord set
        corner_match = []
        for box_id in range(len(gt_labels[batch_id])):
            left, top, right, bottom = gt_bboxes[batch_id][box_id]
            center_x = (left + right) / 2.0
            center_y = (top + bottom) / 2.0
            label = gt_labels[batch_id][box_id]

            # Use coords in the feature level to generate ground truth
            scale_left = left * width_ratio
            scale_right = right * width_ratio
            scale_top = top * height_ratio
            scale_bottom = bottom * height_ratio
            scale_center_x = center_x * width_ratio
            scale_center_y = center_y * height_ratio

            # Int coords on feature map/ground truth tensor; clamped so
            # corners of boxes touching the image border stay in bounds.
            left_idx = int(min(scale_left, width - 1))
            right_idx = int(min(scale_right, width - 1))
            top_idx = int(min(scale_top, height - 1))
            bottom_idx = int(min(scale_bottom, height - 1))

            # Generate gaussian heatmap; the same radius is shared by the
            # top-left and bottom-right corner of a box.
            scale_box_width = ceil(scale_right - scale_left)
            scale_box_height = ceil(scale_bottom - scale_top)
            radius = gaussian_radius((scale_box_height, scale_box_width),
                                     min_overlap=0.3)
            radius = max(0, int(radius))
            gt_tl_heatmap[batch_id, label] = gen_gaussian_target(
                gt_tl_heatmap[batch_id, label], [left_idx, top_idx],
                radius)
            gt_br_heatmap[batch_id, label] = gen_gaussian_target(
                gt_br_heatmap[batch_id, label], [right_idx, bottom_idx],
                radius)

            # Generate corner offset: the sub-pixel remainder lost when
            # quantizing corner coords onto the feature grid.
            left_offset = scale_left - left_idx
            top_offset = scale_top - top_idx
            right_offset = scale_right - right_idx
            bottom_offset = scale_bottom - bottom_idx
            gt_tl_offset[batch_id, 0, top_idx, left_idx] = left_offset
            gt_tl_offset[batch_id, 1, top_idx, left_idx] = top_offset
            gt_br_offset[batch_id, 0, bottom_idx, right_idx] = right_offset
            gt_br_offset[batch_id, 1, bottom_idx,
                         right_idx] = bottom_offset

            # Generate corner embedding: record the paired corner coords.
            if with_corner_emb:
                corner_match.append([[top_idx, left_idx],
                                     [bottom_idx, right_idx]])
            # Generate guiding shift
            if with_guiding_shift:
                gt_tl_guiding_shift[batch_id, 0, top_idx,
                                    left_idx] = scale_center_x - left_idx
                gt_tl_guiding_shift[batch_id, 1, top_idx,
                                    left_idx] = scale_center_y - top_idx
                gt_br_guiding_shift[batch_id, 0, bottom_idx,
                                    right_idx] = right_idx - scale_center_x
                gt_br_guiding_shift[
                    batch_id, 1, bottom_idx,
                    right_idx] = bottom_idx - scale_center_y
            # Generate centripetal shift (log-normalized distance from
            # corner to box center).
            if with_centripetal_shift:
                gt_tl_centripetal_shift[batch_id, 0, top_idx,
                                        left_idx] = log(scale_center_x -
                                                        scale_left)
                gt_tl_centripetal_shift[batch_id, 1, top_idx,
                                        left_idx] = log(scale_center_y -
                                                        scale_top)
                gt_br_centripetal_shift[batch_id, 0, bottom_idx,
                                        right_idx] = log(scale_right -
                                                         scale_center_x)
                gt_br_centripetal_shift[batch_id, 1, bottom_idx,
                                        right_idx] = log(scale_bottom -
                                                         scale_center_y)

        if with_corner_emb:
            match.append(corner_match)

    target_result = dict(
        topleft_heatmap=gt_tl_heatmap,
        topleft_offset=gt_tl_offset,
        bottomright_heatmap=gt_br_heatmap,
        bottomright_offset=gt_br_offset)

    if with_corner_emb:
        target_result.update(corner_embedding=match)
    if with_guiding_shift:
        target_result.update(
            topleft_guiding_shift=gt_tl_guiding_shift,
            bottomright_guiding_shift=gt_br_guiding_shift)
    if with_centripetal_shift:
        target_result.update(
            topleft_centripetal_shift=gt_tl_centripetal_shift,
            bottomright_centripetal_shift=gt_br_centripetal_shift)

    return target_result
def loss(self,
         tl_heats,
         br_heats,
         tl_embs,
         br_embs,
         tl_offs,
         br_offs,
         gt_bboxes,
         gt_labels,
         img_metas,
         gt_bboxes_ignore=None):
    """Compute the losses of the head.

    Targets are generated once on the last level's feature shape and
    shared across all feature levels.

    Args:
        tl_heats (list[Tensor]): Top-left corner heatmaps for each level
            with shape (N, num_classes, H, W).
        br_heats (list[Tensor]): Bottom-right corner heatmaps for each
            level with shape (N, num_classes, H, W).
        tl_embs (list[Tensor]): Top-left corner embeddings for each level
            with shape (N, corner_emb_channels, H, W).
        br_embs (list[Tensor]): Bottom-right corner embeddings for each
            level with shape (N, corner_emb_channels, H, W).
        tl_offs (list[Tensor]): Top-left corner offsets for each level
            with shape (N, corner_offset_channels, H, W).
        br_offs (list[Tensor]): Bottom-right corner offsets for each level
            with shape (N, corner_offset_channels, H, W).
        gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
            shape (num_gts, 4) in [left, top, right, bottom] format.
        gt_labels (list[Tensor]): Class indices corresponding to each box.
        img_metas (list[dict]): Meta information of each image, e.g.,
            image size, scaling factor, etc.
        gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
            boxes can be ignored when computing the loss.

    Returns:
        dict[str, Tensor]: Per-level losses: ``det_loss`` and ``off_loss``
        always; ``pull_loss`` and ``push_loss`` (AssociativeEmbedding)
        only when corner embeddings are enabled.
    """
    targets = self.get_targets(
        gt_bboxes,
        gt_labels,
        tl_heats[-1].shape,
        img_metas[0]['pad_shape'],
        with_corner_emb=self.with_corner_emb)
    # Every level is supervised with the same target dict.
    mlvl_targets = [targets] * self.num_feat_levels
    det_losses, pull_losses, push_losses, off_losses = multi_apply(
        self.loss_single, tl_heats, br_heats, tl_embs, br_embs, tl_offs,
        br_offs, mlvl_targets)
    losses = dict(det_loss=det_losses, off_loss=off_losses)
    if self.with_corner_emb:
        losses['pull_loss'] = pull_losses
        losses['push_loss'] = push_losses
    return losses
def loss_single(self, tl_hmp, br_hmp, tl_emb, br_emb, tl_off, br_off,
                targets):
    """Compute losses for single level.

    Args:
        tl_hmp (Tensor): Top-left corner heatmap for current level with
            shape (N, num_classes, H, W).
        br_hmp (Tensor): Bottom-right corner heatmap for current level with
            shape (N, num_classes, H, W).
        tl_emb (Tensor): Top-left corner embedding for current level with
            shape (N, corner_emb_channels, H, W).
        br_emb (Tensor): Bottom-right corner embedding for current level
            with shape (N, corner_emb_channels, H, W).
        tl_off (Tensor): Top-left corner offset for current level with
            shape (N, corner_offset_channels, H, W).
        br_off (Tensor): Bottom-right corner offset for current level with
            shape (N, corner_offset_channels, H, W).
        targets (dict): Corner target generated by `get_targets`.

    Returns:
        tuple[torch.Tensor]: Losses of the head's differnet branches
        containing the following losses:

            - det_loss (Tensor): Corner keypoint loss.
            - pull_loss (Tensor): Part one of AssociativeEmbedding loss.
              ``None`` when corner embeddings are disabled.
            - push_loss (Tensor): Part two of AssociativeEmbedding loss.
              ``None`` when corner embeddings are disabled.
            - off_loss (Tensor): Corner offset loss.
    """
    gt_tl_hmp = targets['topleft_heatmap']
    gt_br_hmp = targets['bottomright_heatmap']
    gt_tl_off = targets['topleft_offset']
    gt_br_off = targets['bottomright_offset']
    gt_embedding = targets['corner_embedding']

    # Detection loss; averaged over the number of real corners (heatmap
    # positions that are exactly 1), clamped to at least 1.
    tl_det_loss = self.loss_heatmap(
        tl_hmp.sigmoid(),
        gt_tl_hmp,
        avg_factor=max(1,
                       gt_tl_hmp.eq(1).sum()))
    br_det_loss = self.loss_heatmap(
        br_hmp.sigmoid(),
        gt_br_hmp,
        avg_factor=max(1,
                       gt_br_hmp.eq(1).sum()))
    det_loss = (tl_det_loss + br_det_loss) / 2.0

    # AssociativeEmbedding loss
    if self.with_corner_emb and self.loss_embedding is not None:
        pull_loss, push_loss = self.loss_embedding(tl_emb, br_emb,
                                                   gt_embedding)
    else:
        pull_loss, push_loss = None, None

    # Offset loss
    # We only compute the offset loss at the real corner position.
    # The value of real corner would be 1 in heatmap ground truth.
    # The mask is computed in class agnostic mode and its shape is
    # batch * 1 * width * height.
    tl_off_mask = gt_tl_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as(
        gt_tl_hmp)
    br_off_mask = gt_br_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as(
        gt_br_hmp)
    tl_off_loss = self.loss_offset(
        tl_off,
        gt_tl_off,
        tl_off_mask,
        avg_factor=max(1, tl_off_mask.sum()))
    br_off_loss = self.loss_offset(
        br_off,
        gt_br_off,
        br_off_mask,
        avg_factor=max(1, br_off_mask.sum()))

    off_loss = (tl_off_loss + br_off_loss) / 2.0

    return det_loss, pull_loss, push_loss, off_loss
def get_bboxes(self,
               tl_heats,
               br_heats,
               tl_embs,
               br_embs,
               tl_offs,
               br_offs,
               img_metas,
               rescale=False,
               with_nms=True):
    """Transform network output for a batch into bbox predictions.

    Only the last feature level's predictions are decoded; each image in
    the batch is decoded independently.

    Args:
        tl_heats (list[Tensor]): Top-left corner heatmaps for each level
            with shape (N, num_classes, H, W).
        br_heats (list[Tensor]): Bottom-right corner heatmaps for each
            level with shape (N, num_classes, H, W).
        tl_embs (list[Tensor]): Top-left corner embeddings for each level
            with shape (N, corner_emb_channels, H, W).
        br_embs (list[Tensor]): Bottom-right corner embeddings for each
            level with shape (N, corner_emb_channels, H, W).
        tl_offs (list[Tensor]): Top-left corner offsets for each level
            with shape (N, corner_offset_channels, H, W).
        br_offs (list[Tensor]): Bottom-right corner offsets for each level
            with shape (N, corner_offset_channels, H, W).
        img_metas (list[dict]): Meta information of each image, e.g.,
            image size, scaling factor, etc.
        rescale (bool): If True, return boxes in original image space.
            Default: False.
        with_nms (bool): If True, do nms before return boxes.
            Default: True.
    """
    assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(img_metas)
    return [
        self._get_bboxes_single(
            tl_heats[-1][img_id:img_id + 1, :],
            br_heats[-1][img_id:img_id + 1, :],
            tl_offs[-1][img_id:img_id + 1, :],
            br_offs[-1][img_id:img_id + 1, :],
            img_metas[img_id],
            tl_emb=tl_embs[-1][img_id:img_id + 1, :],
            br_emb=br_embs[-1][img_id:img_id + 1, :],
            rescale=rescale,
            with_nms=with_nms) for img_id in range(len(img_metas))
    ]
def _get_bboxes_single(self,
                       tl_heat,
                       br_heat,
                       tl_off,
                       br_off,
                       img_meta,
                       tl_emb=None,
                       br_emb=None,
                       tl_centripetal_shift=None,
                       br_centripetal_shift=None,
                       rescale=False,
                       with_nms=True):
    """Transform outputs for a single batch item into bbox predictions.

    Args:
        tl_heat (Tensor): Top-left corner heatmap for current level with
            shape (N, num_classes, H, W).
        br_heat (Tensor): Bottom-right corner heatmap for current level
            with shape (N, num_classes, H, W).
        tl_off (Tensor): Top-left corner offset for current level with
            shape (N, corner_offset_channels, H, W).
        br_off (Tensor): Bottom-right corner offset for current level with
            shape (N, corner_offset_channels, H, W).
        img_meta (dict): Meta information of current image, e.g.,
            image size, scaling factor, etc.
        tl_emb (Tensor): Top-left corner embedding for current level with
            shape (N, corner_emb_channels, H, W).
        br_emb (Tensor): Bottom-right corner embedding for current level
            with shape (N, corner_emb_channels, H, W).
        tl_centripetal_shift: Top-left corner's centripetal shift for
            current level with shape (N, 2, H, W).
        br_centripetal_shift: Bottom-right corner's centripetal shift for
            current level with shape (N, 2, H, W).
        rescale (bool): If True, return boxes in original image space.
            Default: False.
        with_nms (bool): If True, do nms before return boxes.
            Default: True.

    Returns:
        tuple[Tensor, Tensor]: ``(detections, labels)`` where detections
        has shape (num_dets, 5) in [x1, y1, x2, y2, score] format.
    """
    if isinstance(img_meta, (list, tuple)):
        img_meta = img_meta[0]

    batch_bboxes, batch_scores, batch_clses = self.decode_heatmap(
        tl_heat=tl_heat.sigmoid(),
        br_heat=br_heat.sigmoid(),
        tl_off=tl_off,
        br_off=br_off,
        tl_emb=tl_emb,
        br_emb=br_emb,
        tl_centripetal_shift=tl_centripetal_shift,
        br_centripetal_shift=br_centripetal_shift,
        img_meta=img_meta,
        k=self.test_cfg.corner_topk,
        kernel=self.test_cfg.local_maximum_kernel,
        distance_threshold=self.test_cfg.distance_threshold)

    if rescale:
        # Map boxes back to the original (pre-resize) image space.
        batch_bboxes /= batch_bboxes.new_tensor(img_meta['scale_factor'])

    bboxes = batch_bboxes.view([-1, 4])
    scores = batch_scores.view([-1, 1])
    clses = batch_clses.view([-1, 1])

    # Sort all candidates by score, descending.
    idx = scores.argsort(dim=0, descending=True)
    bboxes = bboxes[idx].view([-1, 4])
    scores = scores[idx].view(-1)
    clses = clses[idx].view(-1)

    detections = torch.cat([bboxes, scores.unsqueeze(-1)], -1)
    # Drop candidates with score <= -0.1; presumably this filters the -1
    # sentinel scores assigned to rejected corner pairs during decoding.
    keepinds = (detections[:, -1] > -0.1)
    detections = detections[keepinds]
    labels = clses[keepinds]

    if with_nms:
        detections, labels = self._bboxes_nms(detections, labels,
                                              self.test_cfg)

    return detections, labels
def _bboxes_nms(self, bboxes, labels, cfg):
    """Run class-aware NMS and keep at most ``cfg.max_per_img`` boxes.

    ``bboxes`` is (num, 5) in [x1, y1, x2, y2, score] format; returns the
    filtered boxes and their labels, sorted by score descending.
    """
    if labels.numel() == 0:
        return bboxes, labels

    kept_bboxes, keep = batched_nms(bboxes[:, :4], bboxes[:, -1], labels,
                                    cfg.nms_cfg)
    kept_labels = labels[keep]
    if len(kept_bboxes) == 0:
        return kept_bboxes, kept_labels

    order = torch.argsort(kept_bboxes[:, -1], descending=True)
    order = order[:cfg.max_per_img]
    return kept_bboxes[order], kept_labels[order]
def _gather_feat(self, feat, ind, mask=None):
"""Gather feature according to index.
Args:
feat (Tensor): Target feature map.
ind (Tensor): Target coord index.
mask (Tensor | None): Mask of featuremap. Default: None.
Returns:
feat (Tensor): Gathered feature.
"""
dim = feat.size(2)
ind = ind.unsqueeze(2).repeat(1, 1, dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _local_maximum(self, heat, kernel=3):
"""Extract local maximum pixel with given kernal.
Args:
heat (Tensor): Target heatmap.
kernel (int): Kernel size of max pooling. Default: 3.
Returns:
heat (Tensor): A heatmap where local maximum pixels maintain its
own value and other positions are 0.
"""
pad = (kernel - 1) // 2
hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad)
keep = (hmax == heat).float()
return heat * keep
def _transpose_and_gather_feat(self, feat, ind):
    """Flatten a (B, C, H, W) map to (B, H*W, C) and gather by index.

    Args:
        feat (Tensor): Target feature map.
        ind (Tensor): Target coord index.

    Returns:
        feat (Tensor): Transposed and gathered feature.
    """
    batch, channels = feat.size(0), feat.size(1)
    feat = feat.permute(0, 2, 3, 1).contiguous()
    feat = feat.view(batch, -1, channels)
    return self._gather_feat(feat, ind)
def _topk(self, scores, k=20):
"""Get top k positions from heatmap.
Args:
scores (Tensor): Target heatmap with shape
[batch, num_classes, height, width].
k (int): Target number. Default: 20.
Returns:
tuple[torch.Tensor]: Scores, indexes, categories and coords of
topk keypoint. Containing following Tensors:
- topk_scores (Tensor): Max scores of each topk keypoint.
- topk_inds (Tensor): Indexes of each topk keypoint.
- topk_clses (Tensor): Categories of each topk keypoint.
- topk_ys (Tensor): Y-coord of each topk keypoint.
- topk_xs (Tensor): X-coord of each topk keypoint.
"""
batch, _, height, width = scores.size()
topk_scores, topk_inds = torch.topk(scores.view(batch, -1), k)
topk_clses = topk_inds // (height * width)
topk_inds = topk_inds % (height * width)
topk_ys = topk_inds // width
topk_xs = (topk_inds % width).int().float()
return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs
def decode_heatmap(self,
                   tl_heat,
                   br_heat,
                   tl_off,
                   br_off,
                   tl_emb=None,
                   br_emb=None,
                   tl_centripetal_shift=None,
                   br_centripetal_shift=None,
                   img_meta=None,
                   k=100,
                   kernel=3,
                   distance_threshold=0.5,
                   num_dets=1000):
    """Transform outputs for a single batch item into raw bbox predictions.

    Args:
        tl_heat (Tensor): Top-left corner heatmap for current level with
            shape (N, num_classes, H, W).
        br_heat (Tensor): Bottom-right corner heatmap for current level
            with shape (N, num_classes, H, W).
        tl_off (Tensor): Top-left corner offset for current level with
            shape (N, corner_offset_channels, H, W).
        br_off (Tensor): Bottom-right corner offset for current level with
            shape (N, corner_offset_channels, H, W).
        tl_emb (Tensor | None): Top-left corner embedding for current
            level with shape (N, corner_emb_channels, H, W).
        br_emb (Tensor | None): Bottom-right corner embedding for current
            level with shape (N, corner_emb_channels, H, W).
        tl_centripetal_shift (Tensor | None): Top-left centripetal shift
            for current level with shape (N, 2, H, W).
        br_centripetal_shift (Tensor | None): Bottom-right centripetal
            shift for current level with shape (N, 2, H, W).
        img_meta (dict): Meta information of current image, e.g.,
            image size, scaling factor, etc.
        k (int): Get top k corner keypoints from heatmap.
        kernel (int): Max pooling kernel for extract local maximum pixels.
        distance_threshold (float): Distance threshold. Top-left and
            bottom-right corner keypoints with feature distance less than
            the threshold will be regarded as keypoints from same object.
        num_dets (int): Num of raw boxes before doing nms.

    Returns:
        tuple[torch.Tensor]: Decoded output of CornerHead, containing the
        following Tensors:

            - bboxes (Tensor): Coords of each box.
            - scores (Tensor): Scores of each box.
            - clses (Tensor): Categories of each box.
    """
    with_embedding = tl_emb is not None and br_emb is not None
    with_centripetal_shift = (
        tl_centripetal_shift is not None
        and br_centripetal_shift is not None)
    # Exactly one corner-pairing mechanism (embedding OR centripetal
    # shift) must be active.
    assert with_embedding + with_centripetal_shift == 1
    batch, _, height, width = tl_heat.size()
    inp_h, inp_w, _ = img_meta['pad_shape']

    # perform nms on heatmaps
    tl_heat = self._local_maximum(tl_heat, kernel=kernel)
    br_heat = self._local_maximum(br_heat, kernel=kernel)

    tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = self._topk(tl_heat, k=k)
    br_scores, br_inds, br_clses, br_ys, br_xs = self._topk(br_heat, k=k)

    # We use repeat instead of expand here because expand is a
    # shallow-copy function. Thus it could cause unexpected testing result
    # sometimes. Using expand will decrease about 10% mAP during testing
    # compared to repeat.
    tl_ys = tl_ys.view(batch, k, 1).repeat(1, 1, k)
    tl_xs = tl_xs.view(batch, k, 1).repeat(1, 1, k)
    br_ys = br_ys.view(batch, 1, k).repeat(1, k, 1)
    br_xs = br_xs.view(batch, 1, k).repeat(1, k, 1)

    # Refine integer corner coords with the predicted sub-pixel offsets.
    tl_off = self._transpose_and_gather_feat(tl_off, tl_inds)
    tl_off = tl_off.view(batch, k, 1, 2)
    br_off = self._transpose_and_gather_feat(br_off, br_inds)
    br_off = br_off.view(batch, 1, k, 2)

    tl_xs = tl_xs + tl_off[..., 0]
    tl_ys = tl_ys + tl_off[..., 1]
    br_xs = br_xs + br_off[..., 0]
    br_ys = br_ys + br_off[..., 1]

    if with_centripetal_shift:
        # Shifts are predicted in log space (see get_targets), hence exp().
        tl_centripetal_shift = self._transpose_and_gather_feat(
            tl_centripetal_shift, tl_inds).view(batch, k, 1, 2).exp()
        br_centripetal_shift = self._transpose_and_gather_feat(
            br_centripetal_shift, br_inds).view(batch, 1, k, 2).exp()

        tl_ctxs = tl_xs + tl_centripetal_shift[..., 0]
        tl_ctys = tl_ys + tl_centripetal_shift[..., 1]
        br_ctxs = br_xs - br_centripetal_shift[..., 0]
        br_ctys = br_ys - br_centripetal_shift[..., 1]

    # all possible boxes based on top k corners (ignoring class)
    tl_xs *= (inp_w / width)
    tl_ys *= (inp_h / height)
    br_xs *= (inp_w / width)
    br_ys *= (inp_h / height)

    if with_centripetal_shift:
        tl_ctxs *= (inp_w / width)
        tl_ctys *= (inp_h / height)
        br_ctxs *= (inp_w / width)
        br_ctys *= (inp_h / height)

    # Undo the test-time border padding and clamp to non-negative coords.
    x_off = img_meta['border'][2]
    y_off = img_meta['border'][0]

    tl_xs -= x_off
    tl_ys -= y_off
    br_xs -= x_off
    br_ys -= y_off

    tl_xs *= tl_xs.gt(0.0).type_as(tl_xs)
    tl_ys *= tl_ys.gt(0.0).type_as(tl_ys)
    br_xs *= br_xs.gt(0.0).type_as(br_xs)
    br_ys *= br_ys.gt(0.0).type_as(br_ys)

    bboxes = torch.stack((tl_xs, tl_ys, br_xs, br_ys), dim=3)
    area_bboxes = ((br_xs - tl_xs) * (br_ys - tl_ys)).abs()

    if with_centripetal_shift:
        tl_ctxs -= x_off
        tl_ctys -= y_off
        br_ctxs -= x_off
        br_ctys -= y_off

        tl_ctxs *= tl_ctxs.gt(0.0).type_as(tl_ctxs)
        tl_ctys *= tl_ctys.gt(0.0).type_as(tl_ctys)
        br_ctxs *= br_ctxs.gt(0.0).type_as(br_ctxs)
        br_ctys *= br_ctys.gt(0.0).type_as(br_ctys)

        ct_bboxes = torch.stack((tl_ctxs, tl_ctys, br_ctxs, br_ctys),
                                dim=3)
        area_ct_bboxes = ((br_ctxs - tl_ctxs) * (br_ctys - tl_ctys)).abs()

        rcentral = torch.zeros_like(ct_bboxes)
        # magic nums from paper section 4.1
        mu = torch.ones_like(area_bboxes) / 2.4
        mu[area_bboxes > 3500] = 1 / 2.1  # large bbox have smaller mu

        bboxes_center_x = (bboxes[..., 0] + bboxes[..., 2]) / 2
        bboxes_center_y = (bboxes[..., 1] + bboxes[..., 3]) / 2
        rcentral[..., 0] = bboxes_center_x - mu * (bboxes[..., 2] -
                                                   bboxes[..., 0]) / 2
        rcentral[..., 1] = bboxes_center_y - mu * (bboxes[..., 3] -
                                                   bboxes[..., 1]) / 2
        rcentral[..., 2] = bboxes_center_x + mu * (bboxes[..., 2] -
                                                   bboxes[..., 0]) / 2
        rcentral[..., 3] = bboxes_center_y + mu * (bboxes[..., 3] -
                                                   bboxes[..., 1]) / 2
        area_rcentral = ((rcentral[..., 2] - rcentral[..., 0]) *
                         (rcentral[..., 3] - rcentral[..., 1])).abs()
        dists = area_ct_bboxes / area_rcentral

        # Reject a corner pair if its shifted (centripetal) corner falls
        # outside the central region of the candidate box.
        tl_ctx_inds = (ct_bboxes[..., 0] <= rcentral[..., 0]) | (
            ct_bboxes[..., 0] >= rcentral[..., 2])
        tl_cty_inds = (ct_bboxes[..., 1] <= rcentral[..., 1]) | (
            ct_bboxes[..., 1] >= rcentral[..., 3])
        br_ctx_inds = (ct_bboxes[..., 2] <= rcentral[..., 0]) | (
            ct_bboxes[..., 2] >= rcentral[..., 2])
        br_cty_inds = (ct_bboxes[..., 3] <= rcentral[..., 1]) | (
            ct_bboxes[..., 3] >= rcentral[..., 3])

    if with_embedding:
        tl_emb = self._transpose_and_gather_feat(tl_emb, tl_inds)
        tl_emb = tl_emb.view(batch, k, 1)
        br_emb = self._transpose_and_gather_feat(br_emb, br_inds)
        br_emb = br_emb.view(batch, 1, k)
        dists = torch.abs(tl_emb - br_emb)

    tl_scores = tl_scores.view(batch, k, 1).repeat(1, 1, k)
    br_scores = br_scores.view(batch, 1, k).repeat(1, k, 1)

    scores = (tl_scores + br_scores) / 2  # scores for all possible boxes

    # tl and br should have same class
    tl_clses = tl_clses.view(batch, k, 1).repeat(1, 1, k)
    br_clses = br_clses.view(batch, 1, k).repeat(1, k, 1)
    cls_inds = (tl_clses != br_clses)

    # reject boxes based on distances
    dist_inds = dists > distance_threshold

    # reject boxes based on widths and heights
    width_inds = (br_xs <= tl_xs)
    height_inds = (br_ys <= tl_ys)

    # Rejected combinations are marked with a -1 sentinel score; callers
    # filter them out with a > -0.1 threshold.
    scores[cls_inds] = -1
    scores[width_inds] = -1
    scores[height_inds] = -1
    scores[dist_inds] = -1
    if with_centripetal_shift:
        scores[tl_ctx_inds] = -1
        scores[tl_cty_inds] = -1
        scores[br_ctx_inds] = -1
        scores[br_cty_inds] = -1

    scores = scores.view(batch, -1)
    scores, inds = torch.topk(scores, num_dets)
    scores = scores.unsqueeze(2)

    bboxes = bboxes.view(batch, -1, 4)
    bboxes = self._gather_feat(bboxes, inds)

    clses = tl_clses.contiguous().view(batch, -1, 1)
    clses = self._gather_feat(clses, inds).float()

    return bboxes, scores, clses
from math import ceil, log

import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, bias_init_with_prob
from mmcv.ops import CornerPool, batched_nms

from mmdet.core import multi_apply
from ..builder import HEADS, build_loss
from ..utils import gaussian_radius, gen_gaussian_target
from .base_dense_head import BaseDenseHead
class BiCornerPool(nn.Module):
    """Bidirectional Corner Pooling Module (TopLeft, BottomRight, etc.)

    Combines two directional corner pools (e.g. 'top' + 'left') with a
    1x1 shortcut branch, in a residual-style arrangement.

    Args:
        in_channels (int): Input channels of module.
        directions (list[str]): Directions of the two CornerPools.
        feat_channels (int): Feature channels of module.
        out_channels (int): Output channels of module.
        norm_cfg (dict): Dictionary to construct and config norm layer.
    """

    def __init__(self,
                 in_channels,
                 directions,
                 feat_channels=128,
                 out_channels=128,
                 norm_cfg=dict(type='BN', requires_grad=True)):
        super(BiCornerPool, self).__init__()
        # Pre-pool 3x3 convs, one per pooling direction.
        self.direction1_conv = ConvModule(
            in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)
        self.direction2_conv = ConvModule(
            in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)
        # Post-pool conv has no activation; ReLU is applied after the
        # shortcut addition in forward().
        self.aftpool_conv = ConvModule(
            feat_channels,
            out_channels,
            3,
            padding=1,
            norm_cfg=norm_cfg,
            act_cfg=None)
        # 1x1 shortcut branch (no activation) plus the output conv.
        self.conv1 = ConvModule(
            in_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
        self.conv2 = ConvModule(
            in_channels, out_channels, 3, padding=1, norm_cfg=norm_cfg)
        self.direction1_pool = CornerPool(directions[0])
        self.direction2_pool = CornerPool(directions[1])
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Forward features from the upstream network.

        Args:
            x (tensor): Input feature of BiCornerPool.

        Returns:
            tensor: Output feature of BiCornerPool.
        """
        pooled1 = self.direction1_pool(self.direction1_conv(x))
        pooled2 = self.direction2_pool(self.direction2_conv(x))
        merged = self.aftpool_conv(pooled1 + pooled2)
        shortcut = self.conv1(x)
        return self.conv2(self.relu(merged + shortcut))
@HEADS.register_module()
class CornerHead(BaseDenseHead):
"""Head of CornerNet: Detecting Objects as Paired Keypoints.
Code is modified from the `official github repo
<https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/
kp.py#L73>`_ .
More details can be found in the `paper
<https://arxiv.org/abs/1808.01244>`_ .
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
num_feat_levels (int): Levels of feature from the previous module. 2
for HourglassNet-104 and 1 for HourglassNet-52. Because
HourglassNet-104 outputs the final feature and intermediate
supervision feature and HourglassNet-52 only outputs the final
feature. Default: 2.
corner_emb_channels (int): Channel of embedding vector. Default: 1.
train_cfg (dict | None): Training config. Useless in CornerHead,
but we keep this variable for SingleStageDetector. Default: None.
test_cfg (dict | None): Testing config of CornerHead. Default: None.
loss_heatmap (dict | None): Config of corner heatmap loss. Default:
GaussianFocalLoss.
loss_embedding (dict | None): Config of corner embedding loss. Default:
AssociativeEmbeddingLoss.
loss_offset (dict | None): Config of corner offset loss. Default:
SmoothL1Loss.
"""
def __init__(self,
             num_classes,
             in_channels,
             num_feat_levels=2,
             corner_emb_channels=1,
             train_cfg=None,
             test_cfg=None,
             loss_heatmap=dict(
                 type='GaussianFocalLoss',
                 alpha=2.0,
                 gamma=4.0,
                 loss_weight=1),
             loss_embedding=dict(
                 type='AssociativeEmbeddingLoss',
                 pull_weight=0.25,
                 push_weight=0.25),
             loss_offset=dict(
                 type='SmoothL1Loss', beta=1.0, loss_weight=1)):
    """Set up CornerHead state, losses and prediction layers."""
    super(CornerHead, self).__init__()
    self.num_classes = num_classes
    self.in_channels = in_channels
    self.corner_emb_channels = corner_emb_channels
    # The embedding branch is only built when the embedding dim > 0.
    self.with_corner_emb = self.corner_emb_channels > 0
    # A corner offset is an (x, y) pair.
    self.corner_offset_channels = 2
    self.num_feat_levels = num_feat_levels
    self.train_cfg = train_cfg
    self.test_cfg = test_cfg

    # A loss config of None disables the corresponding loss term.
    self.loss_heatmap = (
        None if loss_heatmap is None else build_loss(loss_heatmap))
    self.loss_embedding = (
        None if loss_embedding is None else build_loss(loss_embedding))
    self.loss_offset = (
        None if loss_offset is None else build_loss(loss_offset))

    self._init_layers()
def _make_layers(self, out_channels, in_channels=256, feat_channels=256):
    """Build a prediction head: 3x3 conv (norm + act) then 1x1 plain conv."""
    feat_conv = ConvModule(in_channels, feat_channels, 3, padding=1)
    # The final 1x1 conv has neither norm nor activation: it emits raw
    # prediction maps.
    pred_conv = ConvModule(
        feat_channels, out_channels, 1, norm_cfg=None, act_cfg=None)
    return nn.Sequential(feat_conv, pred_conv)
def _init_corner_kpt_layers(self):
    """Create the corner keypoint branches (heatmap + offset).

    Each branch has a top-left (`tl_`) and a bottom-right (`br_`) flavour,
    with one module per feature level. Modules are constructed in the same
    order as before so parameter initialization consumes the RNG
    identically.
    """
    tl_pools, br_pools = nn.ModuleList(), nn.ModuleList()
    tl_heats, br_heats = nn.ModuleList(), nn.ModuleList()
    tl_offs, br_offs = nn.ModuleList(), nn.ModuleList()

    for _ in range(self.num_feat_levels):
        tl_pools.append(
            BiCornerPool(
                self.in_channels, ['top', 'left'],
                out_channels=self.in_channels))
        br_pools.append(
            BiCornerPool(
                self.in_channels, ['bottom', 'right'],
                out_channels=self.in_channels))

        tl_heats.append(
            self._make_layers(
                out_channels=self.num_classes,
                in_channels=self.in_channels))
        br_heats.append(
            self._make_layers(
                out_channels=self.num_classes,
                in_channels=self.in_channels))

        tl_offs.append(
            self._make_layers(
                out_channels=self.corner_offset_channels,
                in_channels=self.in_channels))
        br_offs.append(
            self._make_layers(
                out_channels=self.corner_offset_channels,
                in_channels=self.in_channels))

    self.tl_pool, self.br_pool = tl_pools, br_pools
    self.tl_heat, self.br_heat = tl_heats, br_heats
    self.tl_off, self.br_off = tl_offs, br_offs
def _init_corner_emb_layers(self):
    """Create the corner embedding branches.

    Only the embedding convs, again split into top-left (`tl_`) and
    bottom-right (`br_`) with one module per feature level.
    """
    tl_embs, br_embs = nn.ModuleList(), nn.ModuleList()
    for _ in range(self.num_feat_levels):
        tl_embs.append(
            self._make_layers(
                out_channels=self.corner_emb_channels,
                in_channels=self.in_channels))
        br_embs.append(
            self._make_layers(
                out_channels=self.corner_emb_channels,
                in_channels=self.in_channels))
    self.tl_emb, self.br_emb = tl_embs, br_embs
def _init_layers(self):
    """Initialize layers for CornerHead.

    The keypoint branches (heatmap + offset) are always built; the
    embedding branches are built only when ``corner_emb_channels > 0``.
    """
    self._init_corner_kpt_layers()
    if self.with_corner_emb:
        self._init_corner_emb_layers()
def init_weights(self):
"""Initialize weights of the head."""
bias_init = bias_init_with_prob(0.1)
for i in range(self.num_feat_levels):
# The initialization of parameters are different between nn.Conv2d
# and ConvModule. Our experiments show that using the original
# initialization of nn.Conv2d increases the final mAP by about 0.2%
self.tl_heat[i][-1].conv.reset_parameters()
self.tl_heat[i][-1].conv.bias.data.fill_(bias_init)
self.br_heat[i][-1].conv.reset_parameters()
self.br_heat[i][-1].conv.bias.data.fill_(bias_init)
self.tl_off[i][-1].conv.reset_parameters()
self.br_off[i][-1].conv.reset_parameters()
if self.with_corner_emb:
self.tl_emb[i][-1].conv.reset_parameters()
self.br_emb[i][-1].conv.reset_parameters()
def forward(self, feats):
    """Forward every feature level through the corner branches.

    Args:
        feats (tuple[Tensor]): Multi-level features from the upstream
            network, each a 4D tensor.

    Returns:
        tuple: Per-branch lists over feature levels, in order
        (tl_heats, br_heats, tl_embs, br_embs, tl_offs, br_offs).
        Heatmaps have ``num_classes`` channels, offsets
        ``corner_offset_channels``; the embedding entries have
        ``corner_emb_channels`` channels or are ``None`` when the head
        has no embedding branch.
    """
    level_inds = list(range(self.num_feat_levels))
    return multi_apply(self.forward_single, feats, level_inds)
def forward_single(self, x, lvl_ind, return_pool=False):
    """Forward one feature level through every branch.

    Args:
        x (Tensor): Feature of a single level.
        lvl_ind (int): Index selecting this level's modules.
        return_pool (bool): Also append the two corner-pool features to
            the returned list. Default: False.

    Returns:
        list: ``[tl_heat, br_heat, tl_emb, br_emb, tl_off, br_off]``
        plus ``[tl_pool, br_pool]`` when ``return_pool`` is True. The
        embedding entries are ``None`` when ``self.with_corner_emb`` is
        False.
    """
    pooled_tl = self.tl_pool[lvl_ind](x)
    pooled_br = self.br_pool[lvl_ind](x)

    heat_tl = self.tl_heat[lvl_ind](pooled_tl)
    heat_br = self.br_heat[lvl_ind](pooled_br)

    if self.with_corner_emb:
        emb_tl = self.tl_emb[lvl_ind](pooled_tl)
        emb_br = self.br_emb[lvl_ind](pooled_br)
    else:
        emb_tl = emb_br = None

    off_tl = self.tl_off[lvl_ind](pooled_tl)
    off_br = self.br_off[lvl_ind](pooled_br)

    outputs = [heat_tl, heat_br, emb_tl, emb_br, off_tl, off_br]
    if return_pool:
        outputs += [pooled_tl, pooled_br]
    return outputs
def get_targets(self,
                gt_bboxes,
                gt_labels,
                feat_shape,
                img_shape,
                with_corner_emb=False,
                with_guiding_shift=False,
                with_centripetal_shift=False):
    """Generate corner targets.

    Including corner heatmap, corner offset.

    Optional: corner embedding, corner guiding shift, centripetal shift.

    For CornerNet, we generate corner heatmap, corner offset and corner
    embedding from this function.

    For CentripetalNet, we generate corner heatmap, corner offset, guiding
    shift and centripetal shift from this function.

    Args:
        gt_bboxes (list[Tensor]): Ground truth bboxes of each image, each
            has shape (num_gt, 4).
        gt_labels (list[Tensor]): Ground truth labels of each box, each has
            shape (num_gt,).
        feat_shape (list[int]): Shape of output feature,
            [batch, channel, height, width].
        img_shape (list[int]): Shape of input image,
            [height, width, channel].
        with_corner_emb (bool): Generate corner embedding target or not.
            Default: False.
        with_guiding_shift (bool): Generate guiding shift target or not.
            Default: False.
        with_centripetal_shift (bool): Generate centripetal shift target or
            not. Default: False.

    Returns:
        dict: Ground truth of corner heatmap, corner offset, corner
        embedding, guiding shift and centripetal shift. Containing the
        following keys:

            - topleft_heatmap (Tensor): Ground truth top-left corner
              heatmap.
            - bottomright_heatmap (Tensor): Ground truth bottom-right
              corner heatmap.
            - topleft_offset (Tensor): Ground truth top-left corner offset.
            - bottomright_offset (Tensor): Ground truth bottom-right corner
              offset.
            - corner_embedding (list[list[list[int]]]): Ground truth corner
              embedding. Not must have.
            - topleft_guiding_shift (Tensor): Ground truth top-left corner
              guiding shift. Not must have.
            - bottomright_guiding_shift (Tensor): Ground truth bottom-right
              corner guiding shift. Not must have.
            - topleft_centripetal_shift (Tensor): Ground truth top-left
              corner centripetal shift. Not must have.
            - bottomright_centripetal_shift (Tensor): Ground truth
              bottom-right corner centripetal shift. Not must have.
    """
    batch_size, _, height, width = feat_shape
    img_h, img_w = img_shape[:2]

    # Scale factors from input-image coordinates to feature-map coordinates.
    width_ratio = float(width / img_w)
    height_ratio = float(height / img_h)

    gt_tl_heatmap = gt_bboxes[-1].new_zeros(
        [batch_size, self.num_classes, height, width])
    gt_br_heatmap = gt_bboxes[-1].new_zeros(
        [batch_size, self.num_classes, height, width])
    gt_tl_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width])
    gt_br_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width])

    if with_corner_emb:
        match = []

    # Guiding shift is a kind of offset, from center to corner
    if with_guiding_shift:
        gt_tl_guiding_shift = gt_bboxes[-1].new_zeros(
            [batch_size, 2, height, width])
        gt_br_guiding_shift = gt_bboxes[-1].new_zeros(
            [batch_size, 2, height, width])
    # Centripetal shift is also a kind of offset, from center to corner
    # and normalized by log.
    if with_centripetal_shift:
        gt_tl_centripetal_shift = gt_bboxes[-1].new_zeros(
            [batch_size, 2, height, width])
        gt_br_centripetal_shift = gt_bboxes[-1].new_zeros(
            [batch_size, 2, height, width])

    for batch_id in range(batch_size):
        # Ground truth of corner embedding per image is a list of coord set
        corner_match = []
        for box_id in range(len(gt_labels[batch_id])):
            left, top, right, bottom = gt_bboxes[batch_id][box_id]
            center_x = (left + right) / 2.0
            center_y = (top + bottom) / 2.0
            label = gt_labels[batch_id][box_id]

            # Use coords in the feature level to generate ground truth
            scale_left = left * width_ratio
            scale_right = right * width_ratio
            scale_top = top * height_ratio
            scale_bottom = bottom * height_ratio
            scale_center_x = center_x * width_ratio
            scale_center_y = center_y * height_ratio

            # Int coords on feature map/ground truth tensor
            left_idx = int(min(scale_left, width - 1))
            right_idx = int(min(scale_right, width - 1))
            top_idx = int(min(scale_top, height - 1))
            bottom_idx = int(min(scale_bottom, height - 1))

            # Generate gaussian heatmap
            scale_box_width = ceil(scale_right - scale_left)
            scale_box_height = ceil(scale_bottom - scale_top)
            radius = gaussian_radius((scale_box_height, scale_box_width),
                                     min_overlap=0.3)
            radius = max(0, int(radius))
            gt_tl_heatmap[batch_id, label] = gen_gaussian_target(
                gt_tl_heatmap[batch_id, label], [left_idx, top_idx],
                radius)
            gt_br_heatmap[batch_id, label] = gen_gaussian_target(
                gt_br_heatmap[batch_id, label], [right_idx, bottom_idx],
                radius)

            # Generate corner offset: the sub-pixel remainder lost when
            # the scaled corner coordinate is truncated to an int index.
            left_offset = scale_left - left_idx
            top_offset = scale_top - top_idx
            right_offset = scale_right - right_idx
            bottom_offset = scale_bottom - bottom_idx
            gt_tl_offset[batch_id, 0, top_idx, left_idx] = left_offset
            gt_tl_offset[batch_id, 1, top_idx, left_idx] = top_offset
            gt_br_offset[batch_id, 0, bottom_idx, right_idx] = right_offset
            gt_br_offset[batch_id, 1, bottom_idx,
                         right_idx] = bottom_offset

            # Generate corner embedding
            if with_corner_emb:
                corner_match.append([[top_idx, left_idx],
                                     [bottom_idx, right_idx]])
            # Generate guiding shift
            if with_guiding_shift:
                gt_tl_guiding_shift[batch_id, 0, top_idx,
                                    left_idx] = scale_center_x - left_idx
                gt_tl_guiding_shift[batch_id, 1, top_idx,
                                    left_idx] = scale_center_y - top_idx
                gt_br_guiding_shift[batch_id, 0, bottom_idx,
                                    right_idx] = right_idx - scale_center_x
                gt_br_guiding_shift[
                    batch_id, 1, bottom_idx,
                    right_idx] = bottom_idx - scale_center_y
            # Generate centripetal shift
            # NOTE(review): log() raises ValueError if a corner coincides
            # with the box center (zero distance) — presumably boxes are
            # assumed non-degenerate; confirm upstream filtering.
            if with_centripetal_shift:
                gt_tl_centripetal_shift[batch_id, 0, top_idx,
                                        left_idx] = log(scale_center_x -
                                                        scale_left)
                gt_tl_centripetal_shift[batch_id, 1, top_idx,
                                        left_idx] = log(scale_center_y -
                                                        scale_top)
                gt_br_centripetal_shift[batch_id, 0, bottom_idx,
                                        right_idx] = log(scale_right -
                                                         scale_center_x)
                gt_br_centripetal_shift[batch_id, 1, bottom_idx,
                                        right_idx] = log(scale_bottom -
                                                         scale_center_y)

        if with_corner_emb:
            match.append(corner_match)

    target_result = dict(
        topleft_heatmap=gt_tl_heatmap,
        topleft_offset=gt_tl_offset,
        bottomright_heatmap=gt_br_heatmap,
        bottomright_offset=gt_br_offset)

    if with_corner_emb:
        target_result.update(corner_embedding=match)
    if with_guiding_shift:
        target_result.update(
            topleft_guiding_shift=gt_tl_guiding_shift,
            bottomright_guiding_shift=gt_br_guiding_shift)
    if with_centripetal_shift:
        target_result.update(
            topleft_centripetal_shift=gt_tl_centripetal_shift,
            bottomright_centripetal_shift=gt_br_centripetal_shift)

    return target_result
def loss(self,
         tl_heats,
         br_heats,
         tl_embs,
         br_embs,
         tl_offs,
         br_offs,
         gt_bboxes,
         gt_labels,
         img_metas,
         gt_bboxes_ignore=None):
    """Compute the head's losses over all feature levels.

    Args:
        tl_heats (list[Tensor]): Top-left heatmaps per level,
            (N, num_classes, H, W).
        br_heats (list[Tensor]): Bottom-right heatmaps per level,
            (N, num_classes, H, W).
        tl_embs (list[Tensor]): Top-left embeddings per level,
            (N, corner_emb_channels, H, W).
        br_embs (list[Tensor]): Bottom-right embeddings per level,
            (N, corner_emb_channels, H, W).
        tl_offs (list[Tensor]): Top-left offsets per level,
            (N, corner_offset_channels, H, W).
        br_offs (list[Tensor]): Bottom-right offsets per level,
            (N, corner_offset_channels, H, W).
        gt_bboxes (list[Tensor]): Per-image boxes, (num_gts, 4) in
            [left, top, right, bottom] format.
        gt_labels (list[Tensor]): Per-box class indices.
        img_metas (list[dict]): Per-image meta information.
        gt_bboxes_ignore (list[Tensor] | None): Accepted for API
            compatibility; not used by this head.

    Returns:
        dict[str, Tensor]: ``det_loss`` and ``off_loss`` lists over
        levels, plus ``pull_loss``/``push_loss`` when the embedding
        branch exists.
    """
    # Targets are built once (from the last level's shape) and shared by
    # every feature level.
    targets = self.get_targets(
        gt_bboxes,
        gt_labels,
        tl_heats[-1].shape,
        img_metas[0]['pad_shape'],
        with_corner_emb=self.with_corner_emb)
    per_level_targets = [targets] * self.num_feat_levels

    det_losses, pull_losses, push_losses, off_losses = multi_apply(
        self.loss_single, tl_heats, br_heats, tl_embs, br_embs, tl_offs,
        br_offs, per_level_targets)

    losses = dict(det_loss=det_losses, off_loss=off_losses)
    if self.with_corner_emb:
        losses['pull_loss'] = pull_losses
        losses['push_loss'] = push_losses
    return losses
def loss_single(self, tl_hmp, br_hmp, tl_emb, br_emb, tl_off, br_off,
                targets):
    """Compute losses for a single feature level.

    Args:
        tl_hmp (Tensor): Top-left corner heatmap for current level with
            shape (N, num_classes, H, W).
        br_hmp (Tensor): Bottom-right corner heatmap for current level with
            shape (N, num_classes, H, W).
        tl_emb (Tensor): Top-left corner embedding for current level with
            shape (N, corner_emb_channels, H, W).
        br_emb (Tensor): Bottom-right corner embedding for current level
            with shape (N, corner_emb_channels, H, W).
        tl_off (Tensor): Top-left corner offset for current level with
            shape (N, corner_offset_channels, H, W).
        br_off (Tensor): Bottom-right corner offset for current level with
            shape (N, corner_offset_channels, H, W).
        targets (dict): Corner target generated by `get_targets`.

    Returns:
        tuple[torch.Tensor]: Losses of the head's different branches:

            - det_loss (Tensor): Corner keypoint loss.
            - pull_loss (Tensor | None): Part one of AssociativeEmbedding
              loss; None when the embedding branch is disabled.
            - push_loss (Tensor | None): Part two of AssociativeEmbedding
              loss; None when the embedding branch is disabled.
            - off_loss (Tensor): Corner offset loss.
    """
    gt_tl_hmp = targets['topleft_heatmap']
    gt_br_hmp = targets['bottomright_heatmap']
    gt_tl_off = targets['topleft_offset']
    gt_br_off = targets['bottomright_offset']
    # BUGFIX: 'corner_embedding' only exists in `targets` when it was
    # built with with_corner_emb=True; use .get() so heads without an
    # embedding branch do not raise KeyError here.
    gt_embedding = targets.get('corner_embedding')

    # Detection loss on the heatmaps; avg_factor counts the positive
    # (value == 1) positions, clamped to 1 to avoid division by zero.
    tl_det_loss = self.loss_heatmap(
        tl_hmp.sigmoid(),
        gt_tl_hmp,
        avg_factor=max(1,
                       gt_tl_hmp.eq(1).sum()))
    br_det_loss = self.loss_heatmap(
        br_hmp.sigmoid(),
        gt_br_hmp,
        avg_factor=max(1,
                       gt_br_hmp.eq(1).sum()))
    det_loss = (tl_det_loss + br_det_loss) / 2.0

    # AssociativeEmbedding loss
    if self.with_corner_emb and self.loss_embedding is not None:
        pull_loss, push_loss = self.loss_embedding(tl_emb, br_emb,
                                                   gt_embedding)
    else:
        pull_loss, push_loss = None, None

    # Offset loss
    # We only compute the offset loss at the real corner position.
    # The value of real corner would be 1 in heatmap ground truth.
    # The mask is computed in class agnostic mode and its shape is
    # batch * 1 * width * height.
    tl_off_mask = gt_tl_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as(
        gt_tl_hmp)
    br_off_mask = gt_br_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as(
        gt_br_hmp)
    tl_off_loss = self.loss_offset(
        tl_off,
        gt_tl_off,
        tl_off_mask,
        avg_factor=max(1, tl_off_mask.sum()))
    br_off_loss = self.loss_offset(
        br_off,
        gt_br_off,
        br_off_mask,
        avg_factor=max(1, br_off_mask.sum()))
    off_loss = (tl_off_loss + br_off_loss) / 2.0

    return det_loss, pull_loss, push_loss, off_loss
def get_bboxes(self,
               tl_heats,
               br_heats,
               tl_embs,
               br_embs,
               tl_offs,
               br_offs,
               img_metas,
               rescale=False,
               with_nms=True):
    """Transform network output for a batch into bbox predictions.

    Only the last feature level's outputs are decoded.

    Args:
        tl_heats (list[Tensor]): Top-left heatmaps per level,
            (N, num_classes, H, W).
        br_heats (list[Tensor]): Bottom-right heatmaps per level,
            (N, num_classes, H, W).
        tl_embs (list[Tensor]): Top-left embeddings per level,
            (N, corner_emb_channels, H, W).
        br_embs (list[Tensor]): Bottom-right embeddings per level,
            (N, corner_emb_channels, H, W).
        tl_offs (list[Tensor]): Top-left offsets per level,
            (N, corner_offset_channels, H, W).
        br_offs (list[Tensor]): Bottom-right offsets per level,
            (N, corner_offset_channels, H, W).
        img_metas (list[dict]): Per-image meta information.
        rescale (bool): If True, return boxes in original image space.
            Default: False.
        with_nms (bool): If True, do nms before returning boxes.
            Default: True.

    Returns:
        list[tuple]: One (detections, labels) pair per image, as produced
        by ``_get_bboxes_single``.
    """
    assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(img_metas)

    results = []
    for img_id, img_meta in enumerate(img_metas):
        sel = slice(img_id, img_id + 1)
        results.append(
            self._get_bboxes_single(
                tl_heats[-1][sel, :],
                br_heats[-1][sel, :],
                tl_offs[-1][sel, :],
                br_offs[-1][sel, :],
                img_meta,
                tl_emb=tl_embs[-1][sel, :],
                br_emb=br_embs[-1][sel, :],
                rescale=rescale,
                with_nms=with_nms))
    return results
def _get_bboxes_single(self,
                       tl_heat,
                       br_heat,
                       tl_off,
                       br_off,
                       img_meta,
                       tl_emb=None,
                       br_emb=None,
                       tl_centripetal_shift=None,
                       br_centripetal_shift=None,
                       rescale=False,
                       with_nms=True):
    """Decode one image's corner outputs into (detections, labels).

    Args:
        tl_heat (Tensor): Top-left heatmap, (N, num_classes, H, W).
        br_heat (Tensor): Bottom-right heatmap, (N, num_classes, H, W).
        tl_off (Tensor): Top-left offset, (N, corner_offset_channels, H, W).
        br_off (Tensor): Bottom-right offset, same shape as ``tl_off``.
        img_meta (dict): Meta information of the current image.
        tl_emb (Tensor | None): Top-left embedding,
            (N, corner_emb_channels, H, W).
        br_emb (Tensor | None): Bottom-right embedding, same shape.
        tl_centripetal_shift (Tensor | None): Top-left centripetal shift,
            (N, 2, H, W).
        br_centripetal_shift (Tensor | None): Bottom-right centripetal
            shift, (N, 2, H, W).
        rescale (bool): Return boxes in original image space. Default: False.
        with_nms (bool): Run NMS before returning. Default: True.

    Returns:
        tuple[Tensor, Tensor]: ``detections`` of shape (n, 5) as
        [x1, y1, x2, y2, score], and the matching class ``labels``.
    """
    if isinstance(img_meta, (list, tuple)):
        img_meta = img_meta[0]

    batch_bboxes, batch_scores, batch_clses = self.decode_heatmap(
        tl_heat=tl_heat.sigmoid(),
        br_heat=br_heat.sigmoid(),
        tl_off=tl_off,
        br_off=br_off,
        tl_emb=tl_emb,
        br_emb=br_emb,
        tl_centripetal_shift=tl_centripetal_shift,
        br_centripetal_shift=br_centripetal_shift,
        img_meta=img_meta,
        k=self.test_cfg.corner_topk,
        kernel=self.test_cfg.local_maximum_kernel,
        distance_threshold=self.test_cfg.distance_threshold)

    if rescale:
        batch_bboxes /= batch_bboxes.new_tensor(img_meta['scale_factor'])

    flat_bboxes = batch_bboxes.view([-1, 4])
    flat_scores = batch_scores.view([-1, 1])
    flat_clses = batch_clses.view([-1, 1])

    # Sort all candidates by score, best first.
    order = flat_scores.argsort(dim=0, descending=True)
    flat_bboxes = flat_bboxes[order].view([-1, 4])
    flat_scores = flat_scores[order].view(-1)
    flat_clses = flat_clses[order].view(-1)

    detections = torch.cat([flat_bboxes, flat_scores.unsqueeze(-1)], -1)
    # Candidates rejected inside decode_heatmap carry a score of -1;
    # everything above -0.1 is a real candidate.
    keep = detections[:, -1] > -0.1
    detections = detections[keep]
    labels = flat_clses[keep]

    if with_nms:
        detections, labels = self._bboxes_nms(detections, labels,
                                              self.test_cfg)
    return detections, labels
def _bboxes_nms(self, bboxes, labels, cfg):
    """Apply class-aware NMS and keep at most ``cfg.max_per_img`` boxes."""
    # Nothing to suppress for an empty detection set.
    if labels.numel() == 0:
        return bboxes, labels

    kept_bboxes, keep = batched_nms(bboxes[:, :4], bboxes[:, -1], labels,
                                    cfg.nms_cfg)
    kept_labels = labels[keep]

    if len(kept_bboxes) > 0:
        # Re-sort survivors by score and truncate to the per-image cap.
        order = torch.argsort(kept_bboxes[:, -1], descending=True)
        order = order[:cfg.max_per_img]
        kept_bboxes = kept_bboxes[order]
        kept_labels = kept_labels[order]

    return kept_bboxes, kept_labels
def _gather_feat(self, feat, ind, mask=None):
"""Gather feature according to index.
Args:
feat (Tensor): Target feature map.
ind (Tensor): Target coord index.
mask (Tensor | None): Mask of featuremap. Default: None.
Returns:
feat (Tensor): Gathered feature.
"""
dim = feat.size(2)
ind = ind.unsqueeze(2).repeat(1, 1, dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _local_maximum(self, heat, kernel=3):
"""Extract local maximum pixel with given kernal.
Args:
heat (Tensor): Target heatmap.
kernel (int): Kernel size of max pooling. Default: 3.
Returns:
heat (Tensor): A heatmap where local maximum pixels maintain its
own value and other positions are 0.
"""
pad = (kernel - 1) // 2
hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad)
keep = (hmax == heat).float()
return heat * keep
def _transpose_and_gather_feat(self, feat, ind):
"""Transpose and gather feature according to index.
Args:
feat (Tensor): Target feature map.
ind (Tensor): Target coord index.
Returns:
feat (Tensor): Transposed and gathered feature.
"""
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3))
feat = self._gather_feat(feat, ind)
return feat
def _topk(self, scores, k=20):
"""Get top k positions from heatmap.
Args:
scores (Tensor): Target heatmap with shape
[batch, num_classes, height, width].
k (int): Target number. Default: 20.
Returns:
tuple[torch.Tensor]: Scores, indexes, categories and coords of
topk keypoint. Containing following Tensors:
- topk_scores (Tensor): Max scores of each topk keypoint.
- topk_inds (Tensor): Indexes of each topk keypoint.
- topk_clses (Tensor): Categories of each topk keypoint.
- topk_ys (Tensor): Y-coord of each topk keypoint.
- topk_xs (Tensor): X-coord of each topk keypoint.
"""
batch, _, height, width = scores.size()
topk_scores, topk_inds = torch.topk(scores.view(batch, -1), k)
topk_clses = topk_inds // (height * width)
topk_inds = topk_inds % (height * width)
topk_ys = topk_inds // width
topk_xs = (topk_inds % width).int().float()
return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs
def decode_heatmap(self,
                   tl_heat,
                   br_heat,
                   tl_off,
                   br_off,
                   tl_emb=None,
                   br_emb=None,
                   tl_centripetal_shift=None,
                   br_centripetal_shift=None,
                   img_meta=None,
                   k=100,
                   kernel=3,
                   distance_threshold=0.5,
                   num_dets=1000):
    """Transform outputs for a single batch item into raw bbox predictions.

    Args:
        tl_heat (Tensor): Top-left corner heatmap for current level with
            shape (N, num_classes, H, W).
        br_heat (Tensor): Bottom-right corner heatmap for current level
            with shape (N, num_classes, H, W).
        tl_off (Tensor): Top-left corner offset for current level with
            shape (N, corner_offset_channels, H, W).
        br_off (Tensor): Bottom-right corner offset for current level with
            shape (N, corner_offset_channels, H, W).
        tl_emb (Tensor | None): Top-left corner embedding for current
            level with shape (N, corner_emb_channels, H, W).
        br_emb (Tensor | None): Bottom-right corner embedding for current
            level with shape (N, corner_emb_channels, H, W).
        tl_centripetal_shift (Tensor | None): Top-left centripetal shift
            for current level with shape (N, 2, H, W).
        br_centripetal_shift (Tensor | None): Bottom-right centripetal
            shift for current level with shape (N, 2, H, W).
        img_meta (dict): Meta information of current image, e.g.,
            image size, scaling factor, etc.
        k (int): Get top k corner keypoints from heatmap.
        kernel (int): Max pooling kernel for extract local maximum pixels.
        distance_threshold (float): Distance threshold. Top-left and
            bottom-right corner keypoints with feature distance less than
            the threshold will be regarded as keypoints from same object.
        num_dets (int): Num of raw boxes before doing nms.

    Returns:
        tuple[torch.Tensor]: Decoded output of CornerHead, containing the
        following Tensors:

            - bboxes (Tensor): Coords of each box.
            - scores (Tensor): Scores of each box. Rejected candidates
              are marked with score -1.
            - clses (Tensor): Categories of each box.
    """
    # Exactly one pairing mechanism must be active: embeddings
    # (CornerNet-style) or centripetal shifts (CentripetalNet-style).
    with_embedding = tl_emb is not None and br_emb is not None
    with_centripetal_shift = (
        tl_centripetal_shift is not None
        and br_centripetal_shift is not None)
    assert with_embedding + with_centripetal_shift == 1
    batch, _, height, width = tl_heat.size()
    inp_h, inp_w, _ = img_meta['pad_shape']

    # perform nms on heatmaps
    tl_heat = self._local_maximum(tl_heat, kernel=kernel)
    br_heat = self._local_maximum(br_heat, kernel=kernel)

    tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = self._topk(tl_heat, k=k)
    br_scores, br_inds, br_clses, br_ys, br_xs = self._topk(br_heat, k=k)

    # Broadcast the k top-left corners against the k bottom-right corners
    # to enumerate all k*k candidate pairs.
    # We use repeat instead of expand here because expand is a
    # shallow-copy function. Thus it could cause unexpected testing result
    # sometimes. Using expand will decrease about 10% mAP during testing
    # compared to repeat.
    tl_ys = tl_ys.view(batch, k, 1).repeat(1, 1, k)
    tl_xs = tl_xs.view(batch, k, 1).repeat(1, 1, k)
    br_ys = br_ys.view(batch, 1, k).repeat(1, k, 1)
    br_xs = br_xs.view(batch, 1, k).repeat(1, k, 1)

    # Apply the predicted sub-pixel offsets at the selected positions.
    tl_off = self._transpose_and_gather_feat(tl_off, tl_inds)
    tl_off = tl_off.view(batch, k, 1, 2)
    br_off = self._transpose_and_gather_feat(br_off, br_inds)
    br_off = br_off.view(batch, 1, k, 2)

    tl_xs = tl_xs + tl_off[..., 0]
    tl_ys = tl_ys + tl_off[..., 1]
    br_xs = br_xs + br_off[..., 0]
    br_ys = br_ys + br_off[..., 1]

    if with_centripetal_shift:
        # Shifts were log-encoded in the targets, so decode with exp().
        tl_centripetal_shift = self._transpose_and_gather_feat(
            tl_centripetal_shift, tl_inds).view(batch, k, 1, 2).exp()
        br_centripetal_shift = self._transpose_and_gather_feat(
            br_centripetal_shift, br_inds).view(batch, 1, k, 2).exp()

        tl_ctxs = tl_xs + tl_centripetal_shift[..., 0]
        tl_ctys = tl_ys + tl_centripetal_shift[..., 1]
        br_ctxs = br_xs - br_centripetal_shift[..., 0]
        br_ctys = br_ys - br_centripetal_shift[..., 1]

    # all possible boxes based on top k corners (ignoring class)
    # Rescale from feature-map coords to (padded) input-image coords.
    tl_xs *= (inp_w / width)
    tl_ys *= (inp_h / height)
    br_xs *= (inp_w / width)
    br_ys *= (inp_h / height)

    if with_centripetal_shift:
        tl_ctxs *= (inp_w / width)
        tl_ctys *= (inp_h / height)
        br_ctxs *= (inp_w / width)
        br_ctys *= (inp_h / height)

    # Remove the crop/pad border, then clamp negatives to zero.
    x_off = img_meta['border'][2]
    y_off = img_meta['border'][0]

    tl_xs -= x_off
    tl_ys -= y_off
    br_xs -= x_off
    br_ys -= y_off

    tl_xs *= tl_xs.gt(0.0).type_as(tl_xs)
    tl_ys *= tl_ys.gt(0.0).type_as(tl_ys)
    br_xs *= br_xs.gt(0.0).type_as(br_xs)
    br_ys *= br_ys.gt(0.0).type_as(br_ys)

    bboxes = torch.stack((tl_xs, tl_ys, br_xs, br_ys), dim=3)
    area_bboxes = ((br_xs - tl_xs) * (br_ys - tl_ys)).abs()

    if with_centripetal_shift:
        tl_ctxs -= x_off
        tl_ctys -= y_off
        br_ctxs -= x_off
        br_ctys -= y_off

        tl_ctxs *= tl_ctxs.gt(0.0).type_as(tl_ctxs)
        tl_ctys *= tl_ctys.gt(0.0).type_as(tl_ctys)
        br_ctxs *= br_ctxs.gt(0.0).type_as(br_ctxs)
        br_ctys *= br_ctys.gt(0.0).type_as(br_ctys)

        ct_bboxes = torch.stack((tl_ctxs, tl_ctys, br_ctxs, br_ctys),
                                dim=3)
        area_ct_bboxes = ((br_ctxs - tl_ctxs) * (br_ctys - tl_ctys)).abs()

        rcentral = torch.zeros_like(ct_bboxes)
        # magic nums from paper section 4.1
        mu = torch.ones_like(area_bboxes) / 2.4
        mu[area_bboxes > 3500] = 1 / 2.1  # large bbox have smaller mu

        bboxes_center_x = (bboxes[..., 0] + bboxes[..., 2]) / 2
        bboxes_center_y = (bboxes[..., 1] + bboxes[..., 3]) / 2
        rcentral[..., 0] = bboxes_center_x - mu * (bboxes[..., 2] -
                                                   bboxes[..., 0]) / 2
        rcentral[..., 1] = bboxes_center_y - mu * (bboxes[..., 3] -
                                                   bboxes[..., 1]) / 2
        rcentral[..., 2] = bboxes_center_x + mu * (bboxes[..., 2] -
                                                   bboxes[..., 0]) / 2
        rcentral[..., 3] = bboxes_center_y + mu * (bboxes[..., 3] -
                                                   bboxes[..., 1]) / 2
        area_rcentral = ((rcentral[..., 2] - rcentral[..., 0]) *
                         (rcentral[..., 3] - rcentral[..., 1])).abs()
        dists = area_ct_bboxes / area_rcentral

        # A candidate is rejected below if its shifted "center box" corner
        # falls outside the central region rcentral.
        tl_ctx_inds = (ct_bboxes[..., 0] <= rcentral[..., 0]) | (
            ct_bboxes[..., 0] >= rcentral[..., 2])
        tl_cty_inds = (ct_bboxes[..., 1] <= rcentral[..., 1]) | (
            ct_bboxes[..., 1] >= rcentral[..., 3])
        br_ctx_inds = (ct_bboxes[..., 2] <= rcentral[..., 0]) | (
            ct_bboxes[..., 2] >= rcentral[..., 2])
        br_cty_inds = (ct_bboxes[..., 3] <= rcentral[..., 1]) | (
            ct_bboxes[..., 3] >= rcentral[..., 3])

    if with_embedding:
        # Corner pairing distance is the absolute embedding difference.
        tl_emb = self._transpose_and_gather_feat(tl_emb, tl_inds)
        tl_emb = tl_emb.view(batch, k, 1)
        br_emb = self._transpose_and_gather_feat(br_emb, br_inds)
        br_emb = br_emb.view(batch, 1, k)
        dists = torch.abs(tl_emb - br_emb)

    tl_scores = tl_scores.view(batch, k, 1).repeat(1, 1, k)
    br_scores = br_scores.view(batch, 1, k).repeat(1, k, 1)

    scores = (tl_scores + br_scores) / 2  # scores for all possible boxes

    # tl and br should have same class
    tl_clses = tl_clses.view(batch, k, 1).repeat(1, 1, k)
    br_clses = br_clses.view(batch, 1, k).repeat(1, k, 1)
    cls_inds = (tl_clses != br_clses)

    # reject boxes based on distances
    dist_inds = dists > distance_threshold

    # reject boxes based on widths and heights
    width_inds = (br_xs <= tl_xs)
    height_inds = (br_ys <= tl_ys)

    # Invalid candidates get score -1 so they sort last and are later
    # filtered by the > -0.1 threshold in _get_bboxes_single.
    scores[cls_inds] = -1
    scores[width_inds] = -1
    scores[height_inds] = -1
    scores[dist_inds] = -1

    if with_centripetal_shift:
        scores[tl_ctx_inds] = -1
        scores[tl_cty_inds] = -1
        scores[br_ctx_inds] = -1
        scores[br_cty_inds] = -1

    scores = scores.view(batch, -1)
    scores, inds = torch.topk(scores, num_dets)
    scores = scores.unsqueeze(2)

    bboxes = bboxes.view(batch, -1, 4)
    bboxes = self._gather_feat(bboxes, inds)

    clses = tl_clses.contiguous().view(batch, -1, 1)
    clses = self._gather_feat(clses, inds).float()

    return bboxes, scores, clses
import numpy as np
from alive_progress import alive_bar
from copy import copy
class neuroevolution:
    """Population-based neuroevolution over a list of layer objects.

    Holds ``PopulationSize`` independent copies of the template ``model``
    (a list of layer objects exposing ``modelinit``, ``forward``,
    ``weights``, ``biases`` and ``learningrate``) and mutates the
    population around the best-performing individual.
    """

    def __init__(self, inshape, outshape, PopulationSize, model):
        self.inshape, self.outshape, self.PopulationSize = inshape, outshape, PopulationSize
        # 2D object array indexed as model[individual, layer].
        self.model = np.zeros((PopulationSize, len(model)), dtype=object)
        for i in range(PopulationSize):
            inshape = self.inshape
            for j in range(len(model)):
                # Shallow-copy the template layer; modelinit then allocates
                # fresh per-copy parameters and returns the next in-shape.
                self.model[i, j] = copy(model[j])
                inshape = self.model[i, j].modelinit(inshape)

    def forwardmulti(self, input):
        """Run ``input`` through every individual.

        Returns:
            np.ndarray: Outputs of shape (PopulationSize, outshape).
        """
        out = np.zeros((self.PopulationSize, self.outshape))
        for i in range(self.PopulationSize):
            hidden = input
            for j in range(self.model.shape[1]):
                hidden = self.model[i, j].forward(hidden)
            out[i] = hidden
        return out

    def forwardsingle(self, input, Player):
        """Run ``input`` through a single individual's network."""
        for j in range(self.model.shape[1]):
            input = self.model[Player, j].forward(input)
        return input

    def mutate(self, FavorablePlayer):
        """Reseed the population from the favored individual.

        Individual 0 becomes an exact (elite) copy; every other individual
        gets an independently mutated copy where, per layer, each weight
        and bias is re-randomised to U(-1, 1) with probability
        ``learningrate``.
        """
        # BUGFIX: snapshot the winner's parameters with .copy(). The old
        # code assigned the favorable arrays by reference and then mutated
        # them in place (*=), corrupting the favorable model, the elite
        # copy at index 0, and every later individual's starting point.
        fav_weights = [layer.weights.copy() for layer in self.model[FavorablePlayer]]
        fav_biases = [layer.biases.copy() for layer in self.model[FavorablePlayer]]

        # Elite: individual 0 keeps an exact copy of the winner.
        for j in range(self.model.shape[1]):
            self.model[0, j].weights = fav_weights[j].copy()
            self.model[0, j].biases = fav_biases[j].copy()

        for i in range(1, self.PopulationSize):
            for j in range(self.model.shape[1]):
                layer = self.model[i, j]
                rate = layer.learningrate
                weights = fav_weights[j].copy()
                biases = fav_biases[j].copy()
                # Zero a learningrate-fraction of entries, then refill
                # every zero entry with fresh uniform noise. (As in the
                # original, entries that were already exactly zero are
                # also re-randomised.)
                weights *= np.random.choice([0, 1], weights.shape, p=[rate, 1 - rate])
                layer.weights = np.where(weights == 0, np.random.uniform(-1, 1, weights.shape), weights)
                biases *= np.random.choice([0, 1], biases.shape, p=[rate, 1 - rate])
                layer.biases = np.where(biases == 0, np.random.uniform(-1, 1, biases.shape), biases)
from alive_progress import alive_bar
from copy import copy
class neuroevolution:
    """Population-based neuroevolution over a list of layer objects.

    Holds ``PopulationSize`` independent copies of the template ``model``
    (a list of layer objects exposing ``modelinit``, ``forward``,
    ``weights``, ``biases`` and ``learningrate``) and mutates the
    population around the best-performing individual.
    """

    def __init__(self, inshape, outshape, PopulationSize, model):
        self.inshape, self.outshape, self.PopulationSize = inshape, outshape, PopulationSize
        # 2D object array indexed as model[individual, layer].
        self.model = np.zeros((PopulationSize, len(model)), dtype=object)
        for i in range(PopulationSize):
            inshape = self.inshape
            for j in range(len(model)):
                # Shallow-copy the template layer; modelinit then allocates
                # fresh per-copy parameters and returns the next in-shape.
                self.model[i, j] = copy(model[j])
                inshape = self.model[i, j].modelinit(inshape)

    def forwardmulti(self, input):
        """Run ``input`` through every individual.

        Returns:
            np.ndarray: Outputs of shape (PopulationSize, outshape).
        """
        out = np.zeros((self.PopulationSize, self.outshape))
        for i in range(self.PopulationSize):
            hidden = input
            for j in range(self.model.shape[1]):
                hidden = self.model[i, j].forward(hidden)
            out[i] = hidden
        return out

    def forwardsingle(self, input, Player):
        """Run ``input`` through a single individual's network."""
        for j in range(self.model.shape[1]):
            input = self.model[Player, j].forward(input)
        return input

    def mutate(self, FavorablePlayer):
        """Reseed the population from the favored individual.

        Individual 0 becomes an exact (elite) copy; every other individual
        gets an independently mutated copy where, per layer, each weight
        and bias is re-randomised to U(-1, 1) with probability
        ``learningrate``.
        """
        # BUGFIX: snapshot the winner's parameters with .copy(). The old
        # code assigned the favorable arrays by reference and then mutated
        # them in place (*=), corrupting the favorable model, the elite
        # copy at index 0, and every later individual's starting point.
        fav_weights = [layer.weights.copy() for layer in self.model[FavorablePlayer]]
        fav_biases = [layer.biases.copy() for layer in self.model[FavorablePlayer]]

        # Elite: individual 0 keeps an exact copy of the winner.
        for j in range(self.model.shape[1]):
            self.model[0, j].weights = fav_weights[j].copy()
            self.model[0, j].biases = fav_biases[j].copy()

        for i in range(1, self.PopulationSize):
            for j in range(self.model.shape[1]):
                layer = self.model[i, j]
                rate = layer.learningrate
                weights = fav_weights[j].copy()
                biases = fav_biases[j].copy()
                # Zero a learningrate-fraction of entries, then refill
                # every zero entry with fresh uniform noise. (As in the
                # original, entries that were already exactly zero are
                # also re-randomised.)
                weights *= np.random.choice([0, 1], weights.shape, p=[rate, 1 - rate])
                layer.weights = np.where(weights == 0, np.random.uniform(-1, 1, weights.shape), weights)
                biases *= np.random.choice([0, 1], biases.shape, p=[rate, 1 - rate])
                layer.biases = np.where(biases == 0, np.random.uniform(-1, 1, biases.shape), biases)
import youtube_dl
import asyncio
import discord
import aiohttp
import re
youtube_dl.utils.bug_reports_message = lambda: ''
ydl = youtube_dl.YoutubeDL({"format": "bestaudio/best", "restrictfilenames": True, "noplaylist": True, "nocheckcertificate": True, "ignoreerrors": True, "logtostderr": False, "quiet": True, "no_warnings": True, "source_address": "0.0.0.0"})
class EmptyQueue(Exception):
    """Raised when an operation needs queued songs but the queue is empty."""


class NotConnectedToVoice(Exception):
    """Raised when a player is requested while the bot has no voice connection."""


class NotPlaying(Exception):
    """Raised when an operation requires a currently playing song."""
async def ytbettersearch(query):
    """Scrape the YouTube results page and return the first video URL.

    Assumes the page contains a 'watch?v...' fragment terminated by a
    double quote.
    """
    results_url = f"https://www.youtube.com/results?search_query={query}"
    async with aiohttp.ClientSession() as session:
        async with session.get(results_url) as resp:
            html = await resp.text()
    pos = html.find('watch?v')
    path = ""
    # accumulate the video path up to (not including) the closing quote
    while html[pos] != '"':
        path += html[pos]
        pos += 1
    return f"https://www.youtube.com/{path}"
def _song_from_data(data):
    """Build a Song from a youtube_dl info dict (helper for get_video_data).

    Fix: the original also read 'like_count'/'dislike_count' into unused
    locals, which can raise KeyError on videos without those fields.
    """
    return Song(
        data["url"],
        "https://www.youtube.com/watch?v=" + data["id"],
        data["title"],
        data["description"],
        data["view_count"],
        data["duration"],
        data["thumbnail"],
        data["uploader"],
        data["uploader_url"],
        False,
    )


async def get_video_data(url, search, bettersearch, loop):
    """Resolve `url` (or a search query) to a Song off the event loop.

    - neither flag set: `url` is treated as a direct video URL;
    - bettersearch: scrape the first result from the YouTube results page;
    - search: let youtube_dl's default search resolve the query.

    Fix: the three near-identical extraction blocks are collapsed into
    ``_song_from_data``; empty search results no longer raise IndexError.
    """
    if not search and not bettersearch:
        data = await loop.run_in_executor(None, lambda: ydl.extract_info(url, download=False))
        return _song_from_data(data)
    if bettersearch:
        url = await ytbettersearch(url)
        data = await loop.run_in_executor(None, lambda: ydl.extract_info(url, download=False))
        return _song_from_data(data)
    # search=True: use youtube_dl's own "default_search" resolution
    ytdl = youtube_dl.YoutubeDL({"format": "bestaudio/best", "restrictfilenames": True, "noplaylist": True, "nocheckcertificate": True, "ignoreerrors": True, "logtostderr": False, "quiet": True, "no_warnings": True, "default_search": "auto", "source_address": "0.0.0.0"})
    data = await loop.run_in_executor(None, lambda: ytdl.extract_info(url, download=False))
    try:
        # search results come wrapped in an "entries" list
        data = data["entries"][0]
    except (KeyError, IndexError):
        pass
    del ytdl
    return _song_from_data(data)
def check_queue(ctx, opts, music, after, on_play, loop):
    """`after`-callback for voice playback: advance the guild queue.

    Pops the finished song unless it is looping, then (re)plays the head
    of the queue and schedules the `on_play` callback on the event loop.

    Fix: the looping and non-looping branches contained byte-identical
    play logic; it is now written once.
    """
    try:
        song = music.queue[ctx.guild.id][0]
    except IndexError:
        return
    if not song.is_looping:
        try:
            music.queue[ctx.guild.id].pop(0)
        except IndexError:
            return
        if len(music.queue[ctx.guild.id]) == 0:
            return  # nothing left to play
    # play the next (or the still-looping) head of the queue
    head = music.queue[ctx.guild.id][0]
    source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(head.source, **opts))
    ctx.voice_client.play(source, after=lambda error: after(ctx, opts, music, after, on_play, loop))
    if on_play:
        loop.create_task(on_play(ctx, head))
class Music(object):
    """Registry of per-guild song queues and active MusicPlayers."""

    def __init__(self):
        self.queue = {}    # guild id -> list of Song
        self.players = []  # active MusicPlayer instances

    def create_player(self, ctx, **kwargs):
        """Create and register a MusicPlayer for `ctx`.

        Raises NotConnectedToVoice when the bot has no voice client.
        """
        if not ctx.voice_client:
            raise NotConnectedToVoice("Cannot create the player because bot is not connected to voice")
        player = MusicPlayer(ctx, self, **kwargs)
        self.players.append(player)
        return player

    def get_player(self, **kwargs):
        """Return the player matching guild_id and/or channel_id, else None.

        Bug fix: the original returned None as soon as the *first* player
        failed to match (else-return inside the loop), so lookups depended
        on registration order.  Now every player is examined.
        """
        guild = kwargs.get("guild_id")
        channel = kwargs.get("channel_id")
        for player in self.players:
            if guild and channel and player.ctx.guild.id == guild and player.voice.channel.id == channel:
                return player
            if not guild and channel and player.voice.channel.id == channel:
                return player
            if not channel and guild and player.ctx.guild.id == guild:
                return player
        return None
class MusicPlayer(object):
    """Playback controller for one command context / guild.

    Operates on the shared ``music.queue[guild_id]`` list and exposes
    ``on_*`` registration helpers (usable as decorators) that fire on the
    matching playback events.
    """

    def __init__(self, ctx, music, **kwargs):
        self.ctx = ctx
        self.voice = ctx.voice_client
        self.loop = ctx.bot.loop
        self.music = music
        # make sure this guild has a queue
        if self.ctx.guild.id not in self.music.queue.keys():
            self.music.queue[self.ctx.guild.id] = []
        self.after_func = check_queue
        # optional user callbacks, set through the on_*() helpers below
        self.on_play_func = self.on_queue_func = self.on_skip_func = self.on_stop_func = self.on_pause_func = self.on_resume_func = self.on_loop_toggle_func = self.on_volume_change_func = self.on_remove_from_queue_func = None
        # choose ffmpeg options; "betterfix" additionally silences ffmpeg logging
        ffmpeg_error = kwargs.get("ffmpeg_error_betterfix", kwargs.get("ffmpeg_error_fix"))
        if ffmpeg_error and "ffmpeg_error_betterfix" in kwargs.keys():
            self.ffmpeg_opts = {"options": "-vn -loglevel quiet -hide_banner -nostats", "before_options": "-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 0 -nostdin"}
        elif ffmpeg_error:
            self.ffmpeg_opts = {"options": "-vn", "before_options": "-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 0 -nostdin"}
        else:
            self.ffmpeg_opts = {"options": "-vn", "before_options": "-nostdin"}

    def disable(self):
        """Unregister this player from its Music registry."""
        self.music.players.remove(self)

    # --- callback registration (each usable as a decorator) ---
    def on_queue(self, func):
        self.on_queue_func = func

    def on_play(self, func):
        self.on_play_func = func

    def on_skip(self, func):
        self.on_skip_func = func

    def on_stop(self, func):
        self.on_stop_func = func

    def on_pause(self, func):
        self.on_pause_func = func

    def on_resume(self, func):
        self.on_resume_func = func

    def on_loop_toggle(self, func):
        self.on_loop_toggle_func = func

    def on_volume_change(self, func):
        self.on_volume_change_func = func

    def on_remove_from_queue(self, func):
        self.on_remove_from_queue_func = func

    async def queue(self, url, search=False, bettersearch=False):
        """Resolve `url` to a Song and append it to this guild's queue."""
        song = await get_video_data(url, search, bettersearch, self.loop)
        self.music.queue[self.ctx.guild.id].append(song)
        if self.on_queue_func:
            await self.on_queue_func(self.ctx, song)
        return song

    async def play(self):
        """Start playing the head of the queue; returns the Song."""
        source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(self.music.queue[self.ctx.guild.id][0].source, **self.ffmpeg_opts))
        self.voice.play(source, after=lambda error: self.after_func(self.ctx, self.ffmpeg_opts, self.music, self.after_func, self.on_play_func, self.loop))
        song = self.music.queue[self.ctx.guild.id][0]
        if self.on_play_func:
            await self.on_play_func(self.ctx, song)
        return song

    async def skip(self, force=False):
        """Skip the current song.

        Returns (old, new) when another song follows, else just the old
        song.  Raises NotPlaying on an empty queue, EmptyQueue when there
        is no next song and force is False.
        """
        if len(self.music.queue[self.ctx.guild.id]) == 0:
            # fix: message used to say "Cannot loop ..." (copy-paste)
            raise NotPlaying("Cannot skip because nothing is being played")
        elif not len(self.music.queue[self.ctx.guild.id]) > 1 and not force:
            raise EmptyQueue("Cannot skip because queue is empty")
        else:
            old = self.music.queue[self.ctx.guild.id][0]
            # fix: "False if old.is_looping else False" was always False
            old.is_looping = False
            self.voice.stop()  # triggers after_func, which pops and advances
            try:
                new = self.music.queue[self.ctx.guild.id][0]
                if self.on_skip_func:
                    await self.on_skip_func(self.ctx, old, new)
                return (old, new)
            except IndexError:
                if self.on_skip_func:
                    await self.on_skip_func(self.ctx, old)
                return old

    async def stop(self):
        """Clear the queue, stop playback, and unregister the player."""
        try:
            self.music.queue[self.ctx.guild.id] = []
            self.voice.stop()
            self.music.players.remove(self)
        except (KeyError, ValueError, AttributeError):
            # fix: narrowed bare except; message used to say "Cannot loop ..."
            raise NotPlaying("Cannot stop because nothing is being played")
        if self.on_stop_func:
            await self.on_stop_func(self.ctx)

    async def pause(self):
        """Pause the current song and return it."""
        try:
            self.voice.pause()
            song = self.music.queue[self.ctx.guild.id][0]
        except (KeyError, IndexError, AttributeError):
            raise NotPlaying("Cannot pause because nothing is being played")
        if self.on_pause_func:
            await self.on_pause_func(self.ctx, song)
        return song

    async def resume(self):
        """Resume the paused song and return it."""
        try:
            self.voice.resume()
            song = self.music.queue[self.ctx.guild.id][0]
        except (KeyError, IndexError, AttributeError):
            raise NotPlaying("Cannot resume because nothing is being played")
        if self.on_resume_func:
            await self.on_resume_func(self.ctx, song)
        return song

    def current_queue(self):
        """Return this guild's queue list; raises EmptyQueue if none exists."""
        try:
            return self.music.queue[self.ctx.guild.id]
        except KeyError:
            raise EmptyQueue("Queue is empty")

    def now_playing(self):
        """Return the current song, or None when nothing is queued."""
        try:
            return self.music.queue[self.ctx.guild.id][0]
        except (KeyError, IndexError):
            return None

    async def toggle_song_loop(self):
        """Flip the current song's is_looping flag and return the song."""
        try:
            song = self.music.queue[self.ctx.guild.id][0]
        except (KeyError, IndexError):
            raise NotPlaying("Cannot loop because nothing is being played")
        song.is_looping = not song.is_looping
        if self.on_loop_toggle_func:
            await self.on_loop_toggle_func(self.ctx, song)
        return song

    async def change_volume(self, vol):
        """Set the volume of the current source and return (song, vol)."""
        self.voice.source.volume = vol
        try:
            song = self.music.queue[self.ctx.guild.id][0]
        except (KeyError, IndexError):
            # fix: message used to say "Cannot loop ..." (copy-paste)
            raise NotPlaying("Cannot change the volume because nothing is being played")
        if self.on_volume_change_func:
            await self.on_volume_change_func(self.ctx, song, vol)
        return (song, vol)

    async def remove_from_queue(self, index):
        """Remove and return the song at `index` (skips it if playing)."""
        if index == 0:
            try:
                song = self.music.queue[self.ctx.guild.id][0]
            except (KeyError, IndexError):
                # fix: message used to say "Cannot loop ..." (copy-paste)
                raise NotPlaying("Cannot remove because nothing is being played")
            await self.skip(force=True)
            return song
        song = self.music.queue[self.ctx.guild.id][index]
        self.music.queue[self.ctx.guild.id].pop(index)
        if self.on_remove_from_queue_func:
            await self.on_remove_from_queue_func(self.ctx, song)
        return song

    def delete(self):
        """Alias of disable(): unregister this player."""
        self.music.players.remove(self)
class Song(object):
    """Value object describing one queued or playing track."""

    def __init__(self, source, url, title, description, views, duration, thumbnail, channel, channel_url, loop):
        # stream source consumed by FFmpeg
        self.source = source
        self.url = url
        self.title = title
        self.name = title  # legacy alias for title
        self.description = description
        self.views = views
        self.duration = duration
        self.thumbnail = thumbnail
        self.channel = channel
        self.channel_url = channel_url
        # True while the song should replay instead of being popped
        self.is_looping = loop
import asyncio
import discord
import aiohttp
import re
youtube_dl.utils.bug_reports_message = lambda: ''
ydl = youtube_dl.YoutubeDL({"format": "bestaudio/best", "restrictfilenames": True, "noplaylist": True, "nocheckcertificate": True, "ignoreerrors": True, "logtostderr": False, "quiet": True, "no_warnings": True, "source_address": "0.0.0.0"})
class EmptyQueue(Exception):
    """Raised when an operation needs queued songs but the queue is empty."""


class NotConnectedToVoice(Exception):
    """Raised when a player is requested while the bot has no voice connection."""


class NotPlaying(Exception):
    """Raised when an operation requires a currently playing song."""
async def ytbettersearch(query):
    """Scrape the YouTube results page and return the first video URL.

    Assumes the page contains a 'watch?v...' fragment terminated by a
    double quote.
    """
    results_url = f"https://www.youtube.com/results?search_query={query}"
    async with aiohttp.ClientSession() as session:
        async with session.get(results_url) as resp:
            html = await resp.text()
    pos = html.find('watch?v')
    path = ""
    # accumulate the video path up to (not including) the closing quote
    while html[pos] != '"':
        path += html[pos]
        pos += 1
    return f"https://www.youtube.com/{path}"
def _song_from_data(data):
    """Build a Song from a youtube_dl info dict (helper for get_video_data).

    Fix: the original also read 'like_count'/'dislike_count' into unused
    locals, which can raise KeyError on videos without those fields.
    """
    return Song(
        data["url"],
        "https://www.youtube.com/watch?v=" + data["id"],
        data["title"],
        data["description"],
        data["view_count"],
        data["duration"],
        data["thumbnail"],
        data["uploader"],
        data["uploader_url"],
        False,
    )


async def get_video_data(url, search, bettersearch, loop):
    """Resolve `url` (or a search query) to a Song off the event loop.

    - neither flag set: `url` is treated as a direct video URL;
    - bettersearch: scrape the first result from the YouTube results page;
    - search: let youtube_dl's default search resolve the query.

    Fix: the three near-identical extraction blocks are collapsed into
    ``_song_from_data``; empty search results no longer raise IndexError.
    """
    if not search and not bettersearch:
        data = await loop.run_in_executor(None, lambda: ydl.extract_info(url, download=False))
        return _song_from_data(data)
    if bettersearch:
        url = await ytbettersearch(url)
        data = await loop.run_in_executor(None, lambda: ydl.extract_info(url, download=False))
        return _song_from_data(data)
    # search=True: use youtube_dl's own "default_search" resolution
    ytdl = youtube_dl.YoutubeDL({"format": "bestaudio/best", "restrictfilenames": True, "noplaylist": True, "nocheckcertificate": True, "ignoreerrors": True, "logtostderr": False, "quiet": True, "no_warnings": True, "default_search": "auto", "source_address": "0.0.0.0"})
    data = await loop.run_in_executor(None, lambda: ytdl.extract_info(url, download=False))
    try:
        # search results come wrapped in an "entries" list
        data = data["entries"][0]
    except (KeyError, IndexError):
        pass
    del ytdl
    return _song_from_data(data)
def check_queue(ctx, opts, music, after, on_play, loop):
    """`after`-callback for voice playback: advance the guild queue.

    Pops the finished song unless it is looping, then (re)plays the head
    of the queue and schedules the `on_play` callback on the event loop.

    Fix: the looping and non-looping branches contained byte-identical
    play logic; it is now written once.
    """
    try:
        song = music.queue[ctx.guild.id][0]
    except IndexError:
        return
    if not song.is_looping:
        try:
            music.queue[ctx.guild.id].pop(0)
        except IndexError:
            return
        if len(music.queue[ctx.guild.id]) == 0:
            return  # nothing left to play
    # play the next (or the still-looping) head of the queue
    head = music.queue[ctx.guild.id][0]
    source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(head.source, **opts))
    ctx.voice_client.play(source, after=lambda error: after(ctx, opts, music, after, on_play, loop))
    if on_play:
        loop.create_task(on_play(ctx, head))
class Music(object):
    """Registry of per-guild song queues and active MusicPlayers."""

    def __init__(self):
        self.queue = {}    # guild id -> list of Song
        self.players = []  # active MusicPlayer instances

    def create_player(self, ctx, **kwargs):
        """Create and register a MusicPlayer for `ctx`.

        Raises NotConnectedToVoice when the bot has no voice client.
        """
        if not ctx.voice_client:
            raise NotConnectedToVoice("Cannot create the player because bot is not connected to voice")
        player = MusicPlayer(ctx, self, **kwargs)
        self.players.append(player)
        return player

    def get_player(self, **kwargs):
        """Return the player matching guild_id and/or channel_id, else None.

        Bug fix: the original returned None as soon as the *first* player
        failed to match (else-return inside the loop), so lookups depended
        on registration order.  Now every player is examined.
        """
        guild = kwargs.get("guild_id")
        channel = kwargs.get("channel_id")
        for player in self.players:
            if guild and channel and player.ctx.guild.id == guild and player.voice.channel.id == channel:
                return player
            if not guild and channel and player.voice.channel.id == channel:
                return player
            if not channel and guild and player.ctx.guild.id == guild:
                return player
        return None
class MusicPlayer(object):
    """Playback controller for one command context / guild.

    Operates on the shared ``music.queue[guild_id]`` list and exposes
    ``on_*`` registration helpers (usable as decorators) that fire on the
    matching playback events.
    """

    def __init__(self, ctx, music, **kwargs):
        self.ctx = ctx
        self.voice = ctx.voice_client
        self.loop = ctx.bot.loop
        self.music = music
        # make sure this guild has a queue
        if self.ctx.guild.id not in self.music.queue.keys():
            self.music.queue[self.ctx.guild.id] = []
        self.after_func = check_queue
        # optional user callbacks, set through the on_*() helpers below
        self.on_play_func = self.on_queue_func = self.on_skip_func = self.on_stop_func = self.on_pause_func = self.on_resume_func = self.on_loop_toggle_func = self.on_volume_change_func = self.on_remove_from_queue_func = None
        # choose ffmpeg options; "betterfix" additionally silences ffmpeg logging
        ffmpeg_error = kwargs.get("ffmpeg_error_betterfix", kwargs.get("ffmpeg_error_fix"))
        if ffmpeg_error and "ffmpeg_error_betterfix" in kwargs.keys():
            self.ffmpeg_opts = {"options": "-vn -loglevel quiet -hide_banner -nostats", "before_options": "-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 0 -nostdin"}
        elif ffmpeg_error:
            self.ffmpeg_opts = {"options": "-vn", "before_options": "-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 0 -nostdin"}
        else:
            self.ffmpeg_opts = {"options": "-vn", "before_options": "-nostdin"}

    def disable(self):
        """Unregister this player from its Music registry."""
        self.music.players.remove(self)

    # --- callback registration (each usable as a decorator) ---
    def on_queue(self, func):
        self.on_queue_func = func

    def on_play(self, func):
        self.on_play_func = func

    def on_skip(self, func):
        self.on_skip_func = func

    def on_stop(self, func):
        self.on_stop_func = func

    def on_pause(self, func):
        self.on_pause_func = func

    def on_resume(self, func):
        self.on_resume_func = func

    def on_loop_toggle(self, func):
        self.on_loop_toggle_func = func

    def on_volume_change(self, func):
        self.on_volume_change_func = func

    def on_remove_from_queue(self, func):
        self.on_remove_from_queue_func = func

    async def queue(self, url, search=False, bettersearch=False):
        """Resolve `url` to a Song and append it to this guild's queue."""
        song = await get_video_data(url, search, bettersearch, self.loop)
        self.music.queue[self.ctx.guild.id].append(song)
        if self.on_queue_func:
            await self.on_queue_func(self.ctx, song)
        return song

    async def play(self):
        """Start playing the head of the queue; returns the Song."""
        source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(self.music.queue[self.ctx.guild.id][0].source, **self.ffmpeg_opts))
        self.voice.play(source, after=lambda error: self.after_func(self.ctx, self.ffmpeg_opts, self.music, self.after_func, self.on_play_func, self.loop))
        song = self.music.queue[self.ctx.guild.id][0]
        if self.on_play_func:
            await self.on_play_func(self.ctx, song)
        return song

    async def skip(self, force=False):
        """Skip the current song.

        Returns (old, new) when another song follows, else just the old
        song.  Raises NotPlaying on an empty queue, EmptyQueue when there
        is no next song and force is False.
        """
        if len(self.music.queue[self.ctx.guild.id]) == 0:
            # fix: message used to say "Cannot loop ..." (copy-paste)
            raise NotPlaying("Cannot skip because nothing is being played")
        elif not len(self.music.queue[self.ctx.guild.id]) > 1 and not force:
            raise EmptyQueue("Cannot skip because queue is empty")
        else:
            old = self.music.queue[self.ctx.guild.id][0]
            # fix: "False if old.is_looping else False" was always False
            old.is_looping = False
            self.voice.stop()  # triggers after_func, which pops and advances
            try:
                new = self.music.queue[self.ctx.guild.id][0]
                if self.on_skip_func:
                    await self.on_skip_func(self.ctx, old, new)
                return (old, new)
            except IndexError:
                if self.on_skip_func:
                    await self.on_skip_func(self.ctx, old)
                return old

    async def stop(self):
        """Clear the queue, stop playback, and unregister the player."""
        try:
            self.music.queue[self.ctx.guild.id] = []
            self.voice.stop()
            self.music.players.remove(self)
        except (KeyError, ValueError, AttributeError):
            # fix: narrowed bare except; message used to say "Cannot loop ..."
            raise NotPlaying("Cannot stop because nothing is being played")
        if self.on_stop_func:
            await self.on_stop_func(self.ctx)

    async def pause(self):
        """Pause the current song and return it."""
        try:
            self.voice.pause()
            song = self.music.queue[self.ctx.guild.id][0]
        except (KeyError, IndexError, AttributeError):
            raise NotPlaying("Cannot pause because nothing is being played")
        if self.on_pause_func:
            await self.on_pause_func(self.ctx, song)
        return song

    async def resume(self):
        """Resume the paused song and return it."""
        try:
            self.voice.resume()
            song = self.music.queue[self.ctx.guild.id][0]
        except (KeyError, IndexError, AttributeError):
            raise NotPlaying("Cannot resume because nothing is being played")
        if self.on_resume_func:
            await self.on_resume_func(self.ctx, song)
        return song

    def current_queue(self):
        """Return this guild's queue list; raises EmptyQueue if none exists."""
        try:
            return self.music.queue[self.ctx.guild.id]
        except KeyError:
            raise EmptyQueue("Queue is empty")

    def now_playing(self):
        """Return the current song, or None when nothing is queued."""
        try:
            return self.music.queue[self.ctx.guild.id][0]
        except (KeyError, IndexError):
            return None

    async def toggle_song_loop(self):
        """Flip the current song's is_looping flag and return the song."""
        try:
            song = self.music.queue[self.ctx.guild.id][0]
        except (KeyError, IndexError):
            raise NotPlaying("Cannot loop because nothing is being played")
        song.is_looping = not song.is_looping
        if self.on_loop_toggle_func:
            await self.on_loop_toggle_func(self.ctx, song)
        return song

    async def change_volume(self, vol):
        """Set the volume of the current source and return (song, vol)."""
        self.voice.source.volume = vol
        try:
            song = self.music.queue[self.ctx.guild.id][0]
        except (KeyError, IndexError):
            # fix: message used to say "Cannot loop ..." (copy-paste)
            raise NotPlaying("Cannot change the volume because nothing is being played")
        if self.on_volume_change_func:
            await self.on_volume_change_func(self.ctx, song, vol)
        return (song, vol)

    async def remove_from_queue(self, index):
        """Remove and return the song at `index` (skips it if playing)."""
        if index == 0:
            try:
                song = self.music.queue[self.ctx.guild.id][0]
            except (KeyError, IndexError):
                # fix: message used to say "Cannot loop ..." (copy-paste)
                raise NotPlaying("Cannot remove because nothing is being played")
            await self.skip(force=True)
            return song
        song = self.music.queue[self.ctx.guild.id][index]
        self.music.queue[self.ctx.guild.id].pop(index)
        if self.on_remove_from_queue_func:
            await self.on_remove_from_queue_func(self.ctx, song)
        return song

    def delete(self):
        """Alias of disable(): unregister this player."""
        self.music.players.remove(self)
class Song(object):
    """Value object describing one queued or playing track."""

    def __init__(self, source, url, title, description, views, duration, thumbnail, channel, channel_url, loop):
        # stream source consumed by FFmpeg
        self.source = source
        self.url = url
        self.title = title
        self.name = title  # legacy alias for title
        self.description = description
        self.views = views
        self.duration = duration
        self.thumbnail = thumbnail
        self.channel = channel
        self.channel_url = channel_url
        # True while the song should replay instead of being popped
        self.is_looping = loop
__docformat__ = """epytext"""
__authors__ = """<NAME>"""
__copyright__ = """Copyright (C) 2011-2015 <NAME>"""
import math
import logging
import wx
from .spControl import spControl
from .channelctrl import ChannelCtrl, WavePreferences
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------
SELECTION_BRUSH_COLOUR = wx.Colour(180,200,230,128)
SELECTION_PEN_COLOUR = wx.Colour(30,70,110)
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Class SelectArea
# ---------------------------------------------------------------------------
class SelectArea:
    """Mouse-selection state for a displayed speech file.

    Holds the two x-positions of a user selection in a WaveCtrl.  The
    bounds follow the user's gesture, so start may be greater than end.

    @author: <NAME>
    @contact: <EMAIL>
    @license: GPL
    """

    def __init__(self):
        """Create an empty, unfocused selection."""
        # whether the wave currently owns the selection focus
        self._isselected = False
        # selection anchor (mouse-down x) and tail (mouse-up / drag x)
        self._pos1 = 0
        self._pos2 = 0

    # -----------------------------------------------------------------------
    # Getters
    # -----------------------------------------------------------------------

    def IsSelected(self):
        """Return True when the area owns the selection focus."""
        return self._isselected

    def IsEmpty(self):
        """Return True when no selection has been made (both bounds at 0)."""
        return self._pos1 == 0 and self._pos2 == 0

    def GetStart(self):
        """Return the selection anchor x-position."""
        return self._pos1

    def GetEnd(self):
        """Return the selection tail x-position."""
        return self._pos2

    # -----------------------------------------------------------------------
    # Setters
    # -----------------------------------------------------------------------

    def Select(self, value):
        """Set the selection-focus flag."""
        self._isselected = value

    def SetArea(self, xstart, xend):
        """Set both selection bounds at once."""
        self._pos1 = xstart
        self._pos2 = xend

    def SetStart(self, xstart):
        """Set the selection anchor."""
        self._pos1 = xstart

    def SetEnd(self, xend):
        """Set the selection tail."""
        self._pos2 = xend

    def SetEmpty(self):
        """Clear the selection (both bounds back to 0)."""
        self._pos1 = 0
        self._pos2 = 0
# -----------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Class WaveCtrl
# ---------------------------------------------------------------------------
#TODO: Rename to AudioCtrl, thanks to Nicolas!
class WaveCtrl( spControl ):
"""
WaveCtrl implements an audio window that can be placed anywhere
to any wxPython widget.
@author: <NAME>
@contact: brigitte.bigi((AATT))lpl-aix.fr
@license: GPL
@summary: This class is used to display an audio speech file.
"""
def __init__(self, parent, id=-1,
pos=wx.DefaultPosition,
size=wx.DefaultSize,
audio=None):
"""
Constructor.
Non-wxPython related parameter:
- audio (sppasAudioPCM): the audio instance.
"""
spControl.__init__(self, parent, id, pos, size)
# Preferences
self._preferences = WavePreferences()
# Disable Pane (because each channel has its own pane)
self._infopanep = wx.ALIGN_CENTRE
# Members
# _selectarea holds the current mouse selection; _m_dragging is True
# only while a left-button drag is in progress (see OnMouseDragging).
self._selectarea = SelectArea()
self._m_dragging = False
# Wave
# builds one ChannelCtrl per audio channel (no-op when audio is None)
self.__set( audio )
# Handling mouse moving.
# NOTE(review): classic-wx style event binding; under wxPython Phoenix
# this would be self.Bind(wx.EVT_MOUSE_EVENTS, ...) -- confirm wx version.
wx.EVT_MOUSE_EVENTS(self, self.OnMouseEvents)
# End __init__
#------------------------------------------------------------------------
def __set(self, audio):
"""Attach `audio` and rebuild the per-channel controls.

Clears the channel list; when audio is None nothing else is done.
"""
self._audio = audio
self._channels = []
if audio is None:
return
# find the maximum amplitude value
# (half of the sample range, e.g. 32767 for 16-bit samples)
datamax = int(math.pow(2, (8*self._audio.get_sampwidth())) / 2) - 1
# estimate the exact height of each channel
(x,y) = (0,0)
(w,h) = self.GetSize()
cheight = self._getChannelHeight(h)
for i in range( self._audio.get_nchannels() ):
pos = wx.Point( x,y )
size = wx.Size( w,cheight )
c = ChannelCtrl(self, -1, pos, size, datamax)
self._channels.append( c )
# stack the channels vertically, one below the other
y = y + cheight
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# Members: Getters and Setters
#------------------------------------------------------------------------
def SetDataMax(self, value):
    """
    Set a new data max value on every channel, used for the vertical scroll.

    Bug fix: the original called ``self._channels.SetDataMax(value)`` on
    the channel *list* (an AttributeError at runtime) instead of on each
    channel control.

    @param value (int)
    """
    for channel in self._channels:
        channel.SetDataMax(value)
# End SetDataMax
#------------------------------------------------------------------------
# TODO: Rename as SetAudio
def SetWave(self, audio):
"""
Set a new Wave.
Rebuilds the per-channel controls for the new audio (see __set);
pass None to clear the display.
"""
self.__set(audio)
# End SetWave
#------------------------------------------------------------------------
def get_duration(self):
    """Return the audio duration in seconds (0.0 when no audio is loaded)."""
    if self._audio is None:
        return 0.0
    frames = float(self._audio.get_nframes())
    rate = float(self._audio.get_framerate())
    return frames / rate
# End get_duration
#------------------------------------------------------------------------
def GetBegin(self):
"""Override. A wave display always begins at time 0.0 (seconds)."""
return 0.0
# End GetBegin
#------------------------------------------------------------------------
def GetEnd(self):
"""Override. End time in seconds: the duration of the loaded audio."""
return self.get_duration()
# End GetEnd
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# Preferences
#------------------------------------------------------------------------
def SetAutoAdjust(self, value):
"""
Set auto-adjustment vertical scroll.
@param value (Boolean) is True to active, False to disable.
"""
# NOTE(review): the condition calls the *setter* SetAutomaticScroll, so
# the preference is written twice and the redraw only fires when the
# setter's return value differs from `value` -- a Get...() accessor was
# presumably intended; confirm against the WavePreferences API.
if value != self._preferences.SetAutomaticScroll( value ):
self._preferences.SetAutomaticScroll( value )
self.RequestRedraw()
# End SetAutoAdjust
#------------------------------------------------------------------------
def SetAutoColor(self, value):
"""
Activate/Disable the random foreground color, used to draw the amplitude lines.
@param value (Boolean) is True to active, False to disable.
"""
# NOTE(review): same setter-in-condition pattern as SetAutoAdjust; see above.
if value != self._preferences.SetRandomForeground( value ):
self._preferences.SetRandomForeground( value )
self.RequestRedraw()
# End SetAutoColor
#------------------------------------------------------------------------
def SetGradientBackground(self, value):
"""
Activate/Disable gradient background.
@param value (Boolean) is True to active, False to disable.
"""
# NOTE(review): same setter-in-condition pattern as SetAutoAdjust; see above.
if value != self._preferences.SetGradientBackground( value ):
self._preferences.SetGradientBackground( value )
self.RequestRedraw()
# End SetBackgroundGradient
#------------------------------------------------------------------------
def SetBackgroundGradientColour(self, color):
"""
Set the background gradient color (used if gradient-background is turned-on)
on every channel control.
"""
for c in self._channels:
c.SetBackgroundGradientColour( color )
# End SetBackgroundGradientColour
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# WaveCtrl look
#------------------------------------------------------------------------
def SetBackgroundColour(self, colour):
    """
    Override. Set the background colour here and on every channel.
    @param colour (wx.Colour)
    """
    spControl.SetBackgroundColour(self, colour)
    for channel in self._channels:
        channel.SetBackgroundColour(colour)
#-------------------------------------------------------------------------
def SetForegroundColour(self, colour):
    """
    Set the foreground colour here and on every channel.
    @param colour (wx.Colour)
    """
    spControl.SetForegroundColour(self, colour)
    for channel in self._channels:
        channel.SetForegroundColour(colour)
#-------------------------------------------------------------------------
def SetHandlesColour(self, colour):
    """
    Set the handles colour here and on every channel.
    @param colour (wx.Colour)
    """
    spControl.SetHandlesColour(self, colour)
    for channel in self._channels:
        channel.SetHandlesColour(colour)
#-------------------------------------------------------------------------
def SetTextColour(self, colour):
    """
    Set the text colour here and on every channel.
    @param colour (wx.Colour)
    """
    spControl.SetTextColour(self, colour)
    for channel in self._channels:
        channel.SetTextColour(colour)
#-------------------------------------------------------------------------
def SetFont(self, font):
    """
    Set the text font here and on every channel.
    @param font (wx.Font)
    """
    spControl.SetFont(self, font)
    for channel in self._channels:
        channel.SetFont(font)
#-------------------------------------------------------------------------
#------------------------------------------------------------------------
# WaveCtrl display
#------------------------------------------------------------------------
def SetPanePosition(self, value):
    """
    Override. Set the information-pane position on every channel, then redraw.
    @param value is one of wx.ALIGN_LEFT, wx.ALIGN_CENTRE or wx.ALIGN_RIGHT.
    """
    for channel in self._channels:
        channel.SetPanePosition(value)
    self.RequestRedraw()
#-------------------------------------------------------------------------
def SetPaneWidth(self, value):
    """
    Override. Set the information-pane width here and on every channel.
    @param value (int) is between 10 and 200.
    """
    spControl.SetPaneWidth(self, value)
    for channel in self._channels:
        channel.SetPaneWidth(value)
#-------------------------------------------------------------------------
# -----------------------------------------------------------------------
# Callbacks
# -----------------------------------------------------------------------
def OnMouseEvents(self, event):
"""Handles the wx.EVT_MOUSE_EVENTS event for WaveCtrl."""
if event.Moving():
wx.PostEvent(self.GetParent().GetEventHandler(), event)
elif event.LeftDown():
self.OnMouseLeftDown(event)
elif event.LeftUp():
spControl.OnMouseLeftUp(self,event)
self.OnMouseLeftUp(event)
elif event.Dragging():
# moving while a button is pressed
self.OnMouseDragging(event)
event.Skip()
# End OnMouseEvents
#-------------------------------------------------------------------------
def OnMouseLeftDown(self, event):
"""
Respond to mouse events.
"""
self._m_dragging = False
self.ResetMouseSelection()
if self._selectarea.IsSelected() is True:
# Left mouse button down, change cursor to
# something else to denote event capture
sx,sy = event.GetPosition()
self._selectarea.SetStart(sx)
self._selectarea.SetEnd(sx)
cur = wx.StockCursor(wx.CURSOR_SIZING)
self.SetCursor(cur)
# invalidate current canvas
self.RequestRedraw()
# cache current position
self._m_dragging = True
# End onMouseLeftDown
#-------------------------------------------------------------------------
def OnMouseLeftUp(self,event):
    """
    Override. Respond to a left-button release: finish the selection drag.
    """
    # Let the base control update its own state first.
    spControl.OnMouseLeftUp(self, event)
    if self._m_dragging is True:
        self._m_dragging = False
        sx,sy = event.GetPosition()
        self._selectarea.SetEnd(sx)
        self.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))
        # send such info to the parent!
        wx.PostEvent(self.GetParent().GetEventHandler(), wx.PyCommandEvent(wx.EVT_TOOL_RANGE.typeId, self.GetId()))
        self.RequestRedraw()
# End onMouseLeftUp
#-------------------------------------------------------------------------
def OnMouseDragging(self, event):
    """
    Respond to mouse motion while the left button is held down:
    extend the current selection to the pointer position.
    """
    if not self._m_dragging:
        return
    xpos, _ = event.GetPosition()
    self._selectarea.SetEnd(xpos)
    # Show changes
    self.RequestRedraw()
# End onMouseDragging
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# Selection area
#-------------------------------------------------------------------------
def GetCurrentMouseSelection(self):
    """
    Return the current selected area as a tuple (left-x, right-x).

    The user may have dragged leftwards or rightwards, so the stored
    start/end points are re-ordered before being returned.
    """
    if self._selectarea.IsEmpty() is True:
        return (0,0)
    first = self._selectarea.GetStart()
    last = self._selectarea.GetEnd()
    # Order the pair so the smaller x always comes first.
    return (min(first, last), max(first, last))
# GetCurrentMouseSelection
#-------------------------------------------------------------------------
def ResetMouseSelection(self):
    """Clear the stored mouse selection (both ends back to zero)."""
    self._selectarea.SetEmpty()
# ResetMouseSelection
#-------------------------------------------------------------------------
def SetMouseSelection(self, point1, point2):
    """
    Set the mouse selection to a specific position.

    Only the x part of each point is used by the selection area.

    @param point1: selection start (x value).
    @param point2: selection end (x value).
    """
    # BUG FIX: the original called GetStart(point1) -- a getter taking no
    # argument -- so the start point was never stored (TypeError at runtime).
    self._selectarea.SetStart(point1)
    self._selectarea.SetEnd(point2)
# SetMouseSelection
#-------------------------------------------------------------------------
#------------------------------------------------------------------------
# Drawing the amplitude
#------------------------------------------------------------------------
def DrawPane(self, dc, x, y, w, h):
    """Intentionally a no-op: each channel draws its own pane."""
    return
# End DrawPane
# -----------------------------------------------------------------------
def DrawContent(self, dc, x, y, w, h):
    """
    Draw each channel of the wav on the DC, in the range of the given time period.
    """
    logging.debug(' ******* audiocontrol.drawcontent. x=%d,y=%d,w=%d,h=%d'%(x,y,w,h))
    # Nothing to paint without audio, or when the displayed period starts
    # after the end of this audio file.
    if self._audio is None or self._mintime > self.GetEnd():
        return
    # Normal case
    self._drawChannels(dc, x, y, w, h)
# End DrawContent
# -----------------------------------------------------------------------
#------------------------------------------------------------------------
# Private...
#------------------------------------------------------------------------
def _getChannelHeight(self, h):
seph = 0 #self._sep.GetPenWidth()
septh = seph * (self._audio.get_nchannels()-1)
return int( (h-septh) / self._audio.get_nchannels() )
def _drawChannels(self, dc, x, y, w, h):
    """
    Draw the audio on the DC, in the range of the given time period.
    """
    # Nothing to draw without audio, or when the period starts after the audio ends.
    if self._audio is None or self._mintime > self.GetEnd():
        return
    # estimate the exact height of each channel
    cheight = self._getChannelHeight(h)
    # set current prefs and look to each channel
    for c in self._channels:
        c.SetPreferences( self._preferences )
    # draw the selected area, but only when wider than 2 pixels
    # (a plain click would otherwise paint a degenerate rectangle)
    (l,r) = self.GetCurrentMouseSelection()
    if (r-l)>2:
        self._drawMouseSelection(dc)
    # draw each channel amplitude
    # Position in the audio
    pos = int(self._mintime * self._audio.get_framerate())
    # Read samples
    duration = self._maxtime - self._mintime
    self._audio.seek( pos )
    nframes = int( duration * self._audio.get_framerate() )
    # NOTE(review): assumes read_samples() returns one sample buffer per
    # channel, indexed below as data[i] -- confirm against the audio API.
    data = self._audio.read_samples( nframes )
    # draw each channel
    for i,c in enumerate(self._channels):
        ww = w
        # the period is overlaping this channel: draw partly
        if self._mintime < self.GetEnd() and self._maxtime > self.GetEnd():
            ## reduce w (to cover until the end of the tier)
            missing = self._maxtime - self.GetEnd()
            real_w = w - c.GetPaneWidth()
            ww = w - int ((missing * float(real_w) ) / (self._maxtime-self._mintime))
        c.MoveWindow( wx.Point(x,y), wx.Size(ww,cheight) )
        c.SetData(data[i], self._mintime, self._maxtime)
        # stack channels vertically
        y = y + cheight
    # shrink/grow the control to the actual stacked height
    if y != h:
        self.SetSize(wx.Size(w,y))
# End _drawWave
#------------------------------------------------------------------------
def _drawMouseSelection(self, dc):
    """
    Draw the mouse selection area for this WaveCtrl.
    """
    dc.SetBrush(wx.Brush(SELECTION_BRUSH_COLOUR, wx.SOLID))
    dc.SetPen(wx.Pen(SELECTION_PEN_COLOUR, 1, wx.SOLID))
    (x,y) = self.GetDrawingPosition()
    (w,h) = self.GetSize()
    # selection comes back ordered: l <= r
    (l,r) = self.GetCurrentMouseSelection()
    # shift to the selection start; rectangle spans the full control height
    x = x+l
    w = r-l
    dc.DrawRectangle(x, y, w, h)
# End drawMouseSelection
# -----------------------------------------------------------------------
#---------------------------------------------------------------------------- | sppas/sppas/src/ui/wxgui/ui/wavectrl.py |
__docformat__ = """epytext"""
__authors__ = """<NAME>"""
__copyright__ = """Copyright (C) 2011-2015 <NAME>"""
import math
import logging
import wx
from .spControl import spControl
from .channelctrl import ChannelCtrl, WavePreferences
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------
# Colours used to paint the mouse-drag selection rectangle
# (semi-transparent fill, opaque outline).
SELECTION_BRUSH_COLOUR = wx.Colour(180,200,230,128)
SELECTION_PEN_COLOUR = wx.Colour(30,70,110)
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Class SelectArea
# ---------------------------------------------------------------------------
class SelectArea:
    """
    Hold information about a selected part of a displayed speech file.

    Start and end values mirror the user's gesture, which means the
    start may be greater than the end (a right-to-left drag).

    @author: <NAME>
    @contact: <EMAIL>
    @license: GPL
    """

    def __init__(self):
        """Create an empty, unfocused selection."""
        # True while the owning wave control has the selection focus.
        self._isselected = False
        # x coordinates of the mouse selection (start / end points).
        self._pos1 = 0
        self._pos2 = 0

    # -----------------------------------------------------------------------
    # Getters
    # -----------------------------------------------------------------------

    def IsSelected(self):
        """Return whether the owning control is focused."""
        return self._isselected

    def IsEmpty(self):
        """Return True when no area is selected (both ends at zero)."""
        return self._pos1 == 0 and self._pos2 == 0

    def GetStart(self):
        """Return the x value where the selection started."""
        return self._pos1

    def GetEnd(self):
        """Return the x value where the selection ended."""
        return self._pos2

    # -----------------------------------------------------------------------
    # Setters
    # -----------------------------------------------------------------------

    def Select(self, value):
        """Set the focused state."""
        self._isselected = value

    def SetArea(self, xstart, xend):
        """Fix both ends of the selection at once."""
        self._pos1 = xstart
        self._pos2 = xend

    def SetStart(self, xstart):
        """Fix the start point only."""
        self._pos1 = xstart

    def SetEnd(self, xend):
        """Fix the end point only."""
        self._pos2 = xend

    def SetEmpty(self):
        """Reset both ends to zero (empty selection)."""
        self._pos1 = 0
        self._pos2 = 0
# -----------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Class WaveCtrl
# ---------------------------------------------------------------------------
#TODO: Rename to AudioCtrl, thanks to Nicolas!
class WaveCtrl( spControl ):
    """
    WaveCtrl implements an audio window that can be placed anywhere
    to any wxPython widget.

    @author: <NAME>
    @contact: brigitte.bigi((AATT))lpl-aix.fr
    @license: GPL
    @summary: This class is used to display an audio speech file.
    """

    def __init__(self, parent, id=-1,
                 pos=wx.DefaultPosition,
                 size=wx.DefaultSize,
                 audio=None):
        """
        Constructor.

        Non-wxPython related parameter:
        - audio (sppasAudioPCM): the audio instance.
        """
        spControl.__init__(self, parent, id, pos, size)
        # Preferences
        self._preferences = WavePreferences()
        # Disable Pane (because each channel has its own pane)
        self._infopanep = wx.ALIGN_CENTRE
        # Members
        self._selectarea = SelectArea()
        self._m_dragging = False
        # Wave: builds self._audio and self._channels
        self.__set( audio )
        # Handling mouse moving.
        # NOTE(review): wx.EVT_MOUSE_EVENTS(self, handler) is the classic
        # wxPython 2.x binding style; self.Bind(...) is the modern form.
        wx.EVT_MOUSE_EVENTS(self, self.OnMouseEvents)
    # End __init__
#------------------------------------------------------------------------
def __set(self, audio):
    """
    Attach an audio instance and (re)build one ChannelCtrl per channel.
    """
    self._audio = audio
    self._channels = []
    if audio is None:
        return
    # find the maximum amplitude value:
    # 2^(8*sampwidth)/2 - 1, i.e. the largest signed sample value
    datamax = int(math.pow(2, (8*self._audio.get_sampwidth())) / 2) - 1
    # estimate the exact height of each channel
    (x,y) = (0,0)
    (w,h) = self.GetSize()
    cheight = self._getChannelHeight(h)
    # stack one channel control per audio channel, top to bottom
    for i in range( self._audio.get_nchannels() ):
        pos = wx.Point( x,y )
        size = wx.Size( w,cheight )
        c = ChannelCtrl(self, -1, pos, size, datamax)
        self._channels.append( c )
        y = y + cheight
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# Members: Getters and Setters
#------------------------------------------------------------------------
def SetDataMax(self, value):
    """
    Set a new data max value, used for the vertical scroll.

    @param value: (int) the new maximum amplitude value.
    """
    # BUG FIX: the original called SetDataMax on the channel *list*
    # (self._channels.SetDataMax(value)), which raises AttributeError.
    # Forward the value to each ChannelCtrl instead.
    for channel in self._channels:
        channel.SetDataMax(value)
# End SetDataMax
#------------------------------------------------------------------------
# TODO: Rename as SetAudio
def SetWave(self, audio):
    """
    Replace the displayed audio with a new one (rebuilds all channels).
    """
    self.__set(audio)
# End SetWave
#------------------------------------------------------------------------
def get_duration(self):
    """
    Return the duration of the audio in seconds, or 0.0 when no audio is set.
    """
    if self._audio is None:
        return 0.0
    nframes = self._audio.get_nframes()
    rate = self._audio.get_framerate()
    return float(nframes) / float(rate)
# End get_duration
#------------------------------------------------------------------------
def GetBegin(self):
    """Override. The audio time scale always starts at 0.0 seconds."""
    return 0.0
# End GetBegin
#------------------------------------------------------------------------
def GetEnd(self):
    """Override. The end of the time scale is the audio duration."""
    return self.get_duration()
# End GetEnd
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# Preferences
#------------------------------------------------------------------------
def SetAutoAdjust(self, value):
    """
    Set auto-adjustment of the vertical scroll.

    @param value (Boolean) is True to activate, False to disable.
    """
    # NOTE(review): the condition *calls the setter* and compares its return
    # value with `value`; if the setter returns None (the usual convention)
    # the body always runs and the setter is invoked twice. This probably
    # meant to compare against a getter -- confirm the WavePreferences API.
    if value != self._preferences.SetAutomaticScroll( value ):
        self._preferences.SetAutomaticScroll( value )
        self.RequestRedraw()
# End SetAutoAdjust
#------------------------------------------------------------------------
def SetAutoColor(self, value):
    """
    Activate/Disable the random foreground color, used to draw the amplitude lines.

    @param value (Boolean) is True to activate, False to disable.
    """
    # NOTE(review): same setter-in-condition pattern as SetAutoAdjust; the
    # setter is called inside the comparison and again in the body --
    # probably meant to compare against a getter. Confirm before changing.
    if value != self._preferences.SetRandomForeground( value ):
        self._preferences.SetRandomForeground( value )
        self.RequestRedraw()
# End SetAutoColor
#------------------------------------------------------------------------
def SetGradientBackground(self, value):
    """
    Activate/Disable gradient background.

    @param value (Boolean) is True to activate, False to disable.
    """
    # NOTE(review): same setter-in-condition pattern as SetAutoAdjust; the
    # setter is called inside the comparison and again in the body --
    # probably meant to compare against a getter. Confirm before changing.
    if value != self._preferences.SetGradientBackground( value ):
        self._preferences.SetGradientBackground( value )
        self.RequestRedraw()
# End SetBackgroundGradient
#------------------------------------------------------------------------
def SetBackgroundGradientColour(self, color):
    """
    Set the background gradient colour on every channel
    (used only when the gradient background is turned on).
    """
    for channel in self._channels:
        channel.SetBackgroundGradientColour(color)
# End SetBackgroundGradientColour
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# WaveCtrl look
#------------------------------------------------------------------------
def SetBackgroundColour(self, colour):
    """
    Override. Set the WaveCtrl background colour and propagate it
    to every channel.

    @param colour: (wx.Colour)
    """
    spControl.SetBackgroundColour(self, colour)
    for channel in self._channels:
        channel.SetBackgroundColour(colour)
# End SetBackgroundColour
#-------------------------------------------------------------------------
def SetForegroundColour(self, colour):
    """
    Set the WaveCtrl foreground colour and propagate it to every channel.

    @param colour: (wx.Colour)
    """
    spControl.SetForegroundColour(self, colour)
    for channel in self._channels:
        channel.SetForegroundColour(colour)
# End SetForegroundColour
#-------------------------------------------------------------------------
def SetHandlesColour(self, colour):
    """
    Set the WaveCtrl handles colour and propagate it to every channel.

    @param colour: (wx.Colour)
    """
    spControl.SetHandlesColour(self, colour)
    for channel in self._channels:
        channel.SetHandlesColour(colour)
# End SetHandlesColour
#-------------------------------------------------------------------------
def SetTextColour(self, colour):
    """
    Set the WaveCtrl text colour and propagate it to every channel.

    @param colour: (wx.Colour)
    """
    spControl.SetTextColour(self, colour)
    for channel in self._channels:
        channel.SetTextColour(colour)
# End SetTextColour
#-------------------------------------------------------------------------
def SetFont(self, font):
    """
    Set the WaveCtrl text font and propagate it to every channel.

    @param font: (wx.Font)
    """
    spControl.SetFont(self, font)
    for channel in self._channels:
        channel.SetFont(font)
# End SetFont
#-------------------------------------------------------------------------
#------------------------------------------------------------------------
# WaveCtrl display
#------------------------------------------------------------------------
def SetPanePosition(self, value):
    """
    Override. Propagate the information-pane position to every channel.

    @param value: one of wx.ALIGN_LEFT, wx.ALIGN_CENTRE or wx.ALIGN_RIGHT.
    """
    # Each channel owns its own information pane; forward the setting.
    for channel in self._channels:
        channel.SetPanePosition(value)
    self.RequestRedraw()
# End SetPanePosition
#-------------------------------------------------------------------------
def SetPaneWidth(self, value):
    """
    Override. Fix the width of the information pane.

    @param value: (int) width in pixels, expected between 10 and 200.
    """
    # Update the control itself, then mirror the width on every channel pane.
    spControl.SetPaneWidth(self, value)
    for channel in self._channels:
        channel.SetPaneWidth(value)
# End SetPaneWidth
#-------------------------------------------------------------------------
# -----------------------------------------------------------------------
# Callbacks
# -----------------------------------------------------------------------
def OnMouseEvents(self, event):
    """Handles the wx.EVT_MOUSE_EVENTS event for WaveCtrl."""
    if event.Moving():
        # Plain motion (no button held): forward to the parent window.
        wx.PostEvent(self.GetParent().GetEventHandler(), event)
    elif event.LeftDown():
        self.OnMouseLeftDown(event)
    elif event.LeftUp():
        # NOTE(review): spControl.OnMouseLeftUp is invoked here AND again
        # inside self.OnMouseLeftUp, so the base handler runs twice per
        # release -- confirm the double call is intentional.
        spControl.OnMouseLeftUp(self,event)
        self.OnMouseLeftUp(event)
    elif event.Dragging():
        # moving while a button is pressed
        self.OnMouseDragging(event)
    event.Skip()
# End OnMouseEvents
#-------------------------------------------------------------------------
def OnMouseLeftDown(self, event):
    """
    Respond to a left-button press: start a new mouse selection.
    """
    self._m_dragging = False
    self.ResetMouseSelection()
    # Only start a drag when this control currently holds the selection focus.
    if self._selectarea.IsSelected() is True:
        # Left mouse button down, change cursor to
        # something else to denote event capture
        sx,sy = event.GetPosition()
        # Collapse the selection to the click point (start == end).
        self._selectarea.SetStart(sx)
        self._selectarea.SetEnd(sx)
        cur = wx.StockCursor(wx.CURSOR_SIZING)
        self.SetCursor(cur)
        # invalidate current canvas
        self.RequestRedraw()
        # cache current position
        self._m_dragging = True
# End onMouseLeftDown
#-------------------------------------------------------------------------
def OnMouseLeftUp(self,event):
    """
    Override. Respond to a left-button release: finish the selection drag.
    """
    # Let the base control update its own state first.
    spControl.OnMouseLeftUp(self, event)
    if self._m_dragging is True:
        self._m_dragging = False
        sx,sy = event.GetPosition()
        self._selectarea.SetEnd(sx)
        self.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))
        # send such info to the parent!
        wx.PostEvent(self.GetParent().GetEventHandler(), wx.PyCommandEvent(wx.EVT_TOOL_RANGE.typeId, self.GetId()))
        self.RequestRedraw()
# End onMouseLeftUp
#-------------------------------------------------------------------------
def OnMouseDragging(self, event):
    """
    Respond to mouse motion while the left button is held down:
    extend the current selection to the pointer position.
    """
    if not self._m_dragging:
        return
    xpos, _ = event.GetPosition()
    self._selectarea.SetEnd(xpos)
    # Show changes
    self.RequestRedraw()
# End onMouseDragging
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# Selection area
#-------------------------------------------------------------------------
def GetCurrentMouseSelection(self):
    """
    Return the current selected area as a tuple (left-x, right-x).

    The user may have dragged leftwards or rightwards, so the stored
    start/end points are re-ordered before being returned.
    """
    if self._selectarea.IsEmpty() is True:
        return (0,0)
    first = self._selectarea.GetStart()
    last = self._selectarea.GetEnd()
    # Order the pair so the smaller x always comes first.
    return (min(first, last), max(first, last))
# GetCurrentMouseSelection
#-------------------------------------------------------------------------
def ResetMouseSelection(self):
    """Clear the stored mouse selection (both ends back to zero)."""
    self._selectarea.SetEmpty()
# ResetMouseSelection
#-------------------------------------------------------------------------
def SetMouseSelection(self, point1, point2):
    """
    Set the mouse selection to a specific position.

    Only the x part of each point is used by the selection area.

    @param point1: selection start (x value).
    @param point2: selection end (x value).
    """
    # BUG FIX: the original called GetStart(point1) -- a getter taking no
    # argument -- so the start point was never stored (TypeError at runtime).
    self._selectarea.SetStart(point1)
    self._selectarea.SetEnd(point2)
# SetMouseSelection
#-------------------------------------------------------------------------
#------------------------------------------------------------------------
# Drawing the amplitude
#------------------------------------------------------------------------
def DrawPane(self, dc, x, y, w, h):
    """Intentionally a no-op: each channel draws its own pane."""
    return
# End DrawPane
# -----------------------------------------------------------------------
def DrawContent(self, dc, x, y, w, h):
    """
    Draw each channel of the wav on the DC, in the range of the given time period.
    """
    logging.debug(' ******* audiocontrol.drawcontent. x=%d,y=%d,w=%d,h=%d'%(x,y,w,h))
    # Nothing to paint without audio, or when the displayed period starts
    # after the end of this audio file.
    if self._audio is None or self._mintime > self.GetEnd():
        return
    # Normal case
    self._drawChannels(dc, x, y, w, h)
# End DrawContent
# -----------------------------------------------------------------------
#------------------------------------------------------------------------
# Private...
#------------------------------------------------------------------------
def _getChannelHeight(self, h):
seph = 0 #self._sep.GetPenWidth()
septh = seph * (self._audio.get_nchannels()-1)
return int( (h-septh) / self._audio.get_nchannels() )
def _drawChannels(self, dc, x, y, w, h):
    """
    Draw the audio on the DC, in the range of the given time period.
    """
    # Nothing to draw without audio, or when the period starts after the audio ends.
    if self._audio is None or self._mintime > self.GetEnd():
        return
    # estimate the exact height of each channel
    cheight = self._getChannelHeight(h)
    # set current prefs and look to each channel
    for c in self._channels:
        c.SetPreferences( self._preferences )
    # draw the selected area, but only when wider than 2 pixels
    # (a plain click would otherwise paint a degenerate rectangle)
    (l,r) = self.GetCurrentMouseSelection()
    if (r-l)>2:
        self._drawMouseSelection(dc)
    # draw each channel amplitude
    # Position in the audio
    pos = int(self._mintime * self._audio.get_framerate())
    # Read samples
    duration = self._maxtime - self._mintime
    self._audio.seek( pos )
    nframes = int( duration * self._audio.get_framerate() )
    # NOTE(review): assumes read_samples() returns one sample buffer per
    # channel, indexed below as data[i] -- confirm against the audio API.
    data = self._audio.read_samples( nframes )
    # draw each channel
    for i,c in enumerate(self._channels):
        ww = w
        # the period is overlaping this channel: draw partly
        if self._mintime < self.GetEnd() and self._maxtime > self.GetEnd():
            ## reduce w (to cover until the end of the tier)
            missing = self._maxtime - self.GetEnd()
            real_w = w - c.GetPaneWidth()
            ww = w - int ((missing * float(real_w) ) / (self._maxtime-self._mintime))
        c.MoveWindow( wx.Point(x,y), wx.Size(ww,cheight) )
        c.SetData(data[i], self._mintime, self._maxtime)
        # stack channels vertically
        y = y + cheight
    # shrink/grow the control to the actual stacked height
    if y != h:
        self.SetSize(wx.Size(w,y))
# End _drawWave
#------------------------------------------------------------------------
def _drawMouseSelection(self, dc):
    """
    Draw the mouse selection area for this WaveCtrl.
    """
    dc.SetBrush(wx.Brush(SELECTION_BRUSH_COLOUR, wx.SOLID))
    dc.SetPen(wx.Pen(SELECTION_PEN_COLOUR, 1, wx.SOLID))
    (x,y) = self.GetDrawingPosition()
    (w,h) = self.GetSize()
    # selection comes back ordered: l <= r
    (l,r) = self.GetCurrentMouseSelection()
    # shift to the selection start; rectangle spans the full control height
    x = x+l
    w = r-l
    dc.DrawRectangle(x, y, w, h)
# End drawMouseSelection
# -----------------------------------------------------------------------
#---------------------------------------------------------------------------- | 0.52342 | 0.196209 |
import inspect
import sys
import traceback
from collections import namedtuple
from queue import Queue
from threading import Lock, Thread
import stopit
from pypeln import utils as pypeln_utils
from . import utils
class Stage(pypeln_utils.BaseStage):
    """
    Synchronous pipeline stage.

    Wraps a user function ``f`` together with optional ``on_start`` /
    ``on_done`` hooks, the upstream stages it depends on, and an
    optional per-element timeout.
    """

    def __init__(self, f, on_start, on_done, dependencies, timeout):
        self.f = f
        self.on_start = on_start
        self.on_done = on_done
        self.timeout = timeout
        self.dependencies = dependencies
        # Pre-compute the accepted keyword-argument names of each callback
        # so run() can filter kwargs down to what each callable declares.
        self.f_args = pypeln_utils.function_args(self.f) if self.f else set()
        self.on_start_args = (
            pypeln_utils.function_args(self.on_start) if self.on_start else set()
        )
        self.on_done_args = (
            pypeln_utils.function_args(self.on_done) if self.on_done else set()
        )

    def iter_dependencies(self):
        """Yield elements from all upstream stages, round-robin, until all are exhausted."""
        iterators = [
            iter(dependency.to_iterable(maxsize=0, return_index=True))
            for dependency in self.dependencies
        ]
        while len(iterators) > 0:
            # Iterate over a snapshot (tuple) because exhausted iterators
            # are removed from the live list during the loop.
            for iterator in tuple(iterators):
                try:
                    yield next(iterator)
                except StopIteration:
                    iterators.remove(iterator)

    # BUG FIX: the original annotated this generator function as `-> None`,
    # which is incorrect -- it yields the stage's output elements.
    def process(self, **kwargs):
        """Apply this stage to every upstream element, honouring the timeout."""
        for x in self.iter_dependencies():
            with (
                stopit.ThreadingTimeout(self.timeout)
                if self.timeout
                else utils.NoOpContext()
            ):
                yield from self.apply(x, **kwargs)

    def run(self):
        """Drive the stage lifecycle: on_start -> process -> on_done."""
        worker_info = pypeln_utils.WorkerInfo(index=0)
        if self.on_start is not None:
            on_start_kwargs = dict(worker_info=worker_info)
            # Pass only the keyword arguments that on_start declares.
            kwargs = self.on_start(
                **{
                    key: value
                    for key, value in on_start_kwargs.items()
                    if key in self.on_start_args
                }
            )
        else:
            kwargs = {}
        if kwargs is None:
            kwargs = {}
        kwargs.setdefault("worker_info", worker_info)
        yield from self.process(
            **{key: value for key, value in kwargs.items() if key in self.f_args}
        )
        if self.on_done is not None:
            kwargs.setdefault(
                "stage_status", utils.StageStatus(),
            )
            # on_done also only receives the kwargs it declares.
            self.on_done(
                **{
                    key: value
                    for key, value in kwargs.items()
                    if key in self.on_done_args
                }
            )

    def __iter__(self):
        return self.to_iterable(maxsize=0, return_index=False)

    def to_iterable(self, maxsize, return_index):
        """Yield elements; unwrap to raw values unless return_index is set."""
        for elem in self.run():
            if return_index:
                yield elem
            else:
                yield elem.value
import sys
import traceback
from collections import namedtuple
from queue import Queue
from threading import Lock, Thread
import stopit
from pypeln import utils as pypeln_utils
from . import utils
class Stage(pypeln_utils.BaseStage):
    """
    Synchronous pipeline stage.

    Wraps a user function ``f`` together with optional ``on_start`` /
    ``on_done`` hooks, the upstream stages it depends on, and an
    optional per-element timeout.
    """

    def __init__(self, f, on_start, on_done, dependencies, timeout):
        self.f = f
        self.on_start = on_start
        self.on_done = on_done
        self.timeout = timeout
        self.dependencies = dependencies
        # Pre-compute the accepted keyword-argument names of each callback
        # so run() can filter kwargs down to what each callable declares.
        self.f_args = pypeln_utils.function_args(self.f) if self.f else set()
        self.on_start_args = (
            pypeln_utils.function_args(self.on_start) if self.on_start else set()
        )
        self.on_done_args = (
            pypeln_utils.function_args(self.on_done) if self.on_done else set()
        )

    def iter_dependencies(self):
        """Yield elements from all upstream stages, round-robin, until all are exhausted."""
        iterators = [
            iter(dependency.to_iterable(maxsize=0, return_index=True))
            for dependency in self.dependencies
        ]
        while len(iterators) > 0:
            # Iterate over a snapshot (tuple) because exhausted iterators
            # are removed from the live list during the loop.
            for iterator in tuple(iterators):
                try:
                    yield next(iterator)
                except StopIteration:
                    iterators.remove(iterator)

    # BUG FIX: the original annotated this generator function as `-> None`,
    # which is incorrect -- it yields the stage's output elements.
    def process(self, **kwargs):
        """Apply this stage to every upstream element, honouring the timeout."""
        for x in self.iter_dependencies():
            with (
                stopit.ThreadingTimeout(self.timeout)
                if self.timeout
                else utils.NoOpContext()
            ):
                yield from self.apply(x, **kwargs)

    def run(self):
        """Drive the stage lifecycle: on_start -> process -> on_done."""
        worker_info = pypeln_utils.WorkerInfo(index=0)
        if self.on_start is not None:
            on_start_kwargs = dict(worker_info=worker_info)
            # Pass only the keyword arguments that on_start declares.
            kwargs = self.on_start(
                **{
                    key: value
                    for key, value in on_start_kwargs.items()
                    if key in self.on_start_args
                }
            )
        else:
            kwargs = {}
        if kwargs is None:
            kwargs = {}
        kwargs.setdefault("worker_info", worker_info)
        yield from self.process(
            **{key: value for key, value in kwargs.items() if key in self.f_args}
        )
        if self.on_done is not None:
            kwargs.setdefault(
                "stage_status", utils.StageStatus(),
            )
            # on_done also only receives the kwargs it declares.
            self.on_done(
                **{
                    key: value
                    for key, value in kwargs.items()
                    if key in self.on_done_args
                }
            )

    def __iter__(self):
        return self.to_iterable(maxsize=0, return_index=False)

    def to_iterable(self, maxsize, return_index):
        """Yield elements; unwrap to raw values unless return_index is set."""
        for elem in self.run():
            if return_index:
                yield elem
            else:
                yield elem.value
from time import sleep
class Text:
    """Console texts of EasytoMan, printed with a typewriter animation."""

    def animated_text(self, text):
        """Print *text* one character at a time (50 ms per character)."""
        for ch in text:
            print(ch, end="", flush=True)
            sleep(0.050)
        print()

    def logintext(self):
        """Print the welcome/login menu."""
        text = """
Welcome to EasytoMan , abbr. of Easy to Manipulate, this tool helps in creating & hiding security(encrypted) passwords.
Please choose what you want to do , and pass next step.
1- Login as [S]uper[U]ser
2- Help
3- Exit
If you want more description about EasytoMan , choose "help"
"""
        self.animated_text(text)

    def firsttext(self):
        """Print the first-run message (SuperUser password must be set)."""
        text = """
Welcome to EasytoMan , abbr. of Easy to Manipulate, this tool helps in creating & hiding security(encrypted) passwords.
Apparently, the first time in this program.You must be change new password to Super User.
If you want more description about EasytoMan , choose "help"
"""
        self.animated_text(text)

    def helptext(self):
        """Print the full help text."""
        text = """
EasytoMan , created in 2016, September by ExpectoTR.This tool's main mission is create more powerful and hacked to almost impossible passwords and safety hiding it.
Briefly, this tool takes a word and a key from you to which platform(twitter, mega, reddit, wifi etc.) want you to do encryption.The word and key must be MEMORABLE.
This tool encrypts according to a specific algorithm with key and word.Then save the encrypted cipher in "hashes.txt"
|> What is [S]uper[U]ser ?
SuperUser is most authorized user in tool, like root in Linux systems.
If you choose SuperUser (can choose 1 instead "su"), it will ask you SuperUser password.When enter the correct password, you log in as root user.So, this tool not be asked you key and word.
Just give platform name and it gives your platform password.Also, SuperUser can change the authority on the platform passwords.(access,change etc.)
|> Why use Easytoman ?
Normal passwords occurs just one text(including alphanumeric etc.).. For example = ExpectoTR2016
When the malicious people are want your account, it will be easy to break your password.
But if you encrypt with Easytoman, you will enter two values(word and key) and Easytoman encrypt this values with special encryption algorithm.
So, break your password not although impossible, more than enough difficult...
"""
        self.animated_text(text)

    def process(self):
        """Ask the user what to do; return 1 to get a password, 0 to create one."""
        while True:
            text = """
Choose what you want to do ?
1- Get password of Platform
2- Create new Platform
"""
            self.animated_text(text)
            # BUG FIX: the original passed animated_text's return value (None)
            # to input(), so the user literally saw "None" as the prompt.
            # Animate the prompt first, then read the answer.
            self.animated_text("Choose one (1 or 2) ")
            option = input()
            if option == "1":
                return 1
            elif option == "2":
                return 0
            else:
                print("Wrong Value! ")
from time import sleep
class Text:
    """Console texts of EasytoMan, printed with a typewriter animation."""

    def animated_text(self, text):
        """Print *text* one character at a time (50 ms per character)."""
        for ch in text:
            print(ch, end="", flush=True)
            sleep(0.050)
        print()

    def logintext(self):
        """Print the welcome/login menu."""
        text = """
Welcome to EasytoMan , abbr. of Easy to Manipulate, this tool helps in creating & hiding security(encrypted) passwords.
Please choose what you want to do , and pass next step.
1- Login as [S]uper[U]ser
2- Help
3- Exit
If you want more description about EasytoMan , choose "help"
"""
        self.animated_text(text)

    def firsttext(self):
        """Print the first-run message (SuperUser password must be set)."""
        text = """
Welcome to EasytoMan , abbr. of Easy to Manipulate, this tool helps in creating & hiding security(encrypted) passwords.
Apparently, the first time in this program.You must be change new password to Super User.
If you want more description about EasytoMan , choose "help"
"""
        self.animated_text(text)

    def helptext(self):
        """Print the full help text."""
        text = """
EasytoMan , created in 2016, September by ExpectoTR.This tool's main mission is create more powerful and hacked to almost impossible passwords and safety hiding it.
Briefly, this tool takes a word and a key from you to which platform(twitter, mega, reddit, wifi etc.) want you to do encryption.The word and key must be MEMORABLE.
This tool encrypts according to a specific algorithm with key and word.Then save the encrypted cipher in "hashes.txt"
|> What is [S]uper[U]ser ?
SuperUser is most authorized user in tool, like root in Linux systems.
If you choose SuperUser (can choose 1 instead "su"), it will ask you SuperUser password.When enter the correct password, you log in as root user.So, this tool not be asked you key and word.
Just give platform name and it gives your platform password.Also, SuperUser can change the authority on the platform passwords.(access,change etc.)
|> Why use Easytoman ?
Normal passwords occurs just one text(including alphanumeric etc.).. For example = ExpectoTR2016
When the malicious people are want your account, it will be easy to break your password.
But if you encrypt with Easytoman, you will enter two values(word and key) and Easytoman encrypt this values with special encryption algorithm.
So, break your password not although impossible, more than enough difficult...
"""
        self.animated_text(text)

    def process(self):
        """Ask the user what to do; return 1 to get a password, 0 to create one."""
        while True:
            text = """
Choose what you want to do ?
1- Get password of Platform
2- Create new Platform
"""
            self.animated_text(text)
            # BUG FIX: the original passed animated_text's return value (None)
            # to input(), so the user literally saw "None" as the prompt.
            # Animate the prompt first, then read the answer.
            self.animated_text("Choose one (1 or 2) ")
            option = input()
            if option == "1":
                return 1
            elif option == "2":
                return 0
            else:
                print("Wrong Value! ")
from cStringIO import StringIO
import json
import unittest
from zipfile import ZipFile
from compiled_file_system import CompiledFileSystem
from content_provider import ContentProvider
from file_system import FileNotFoundError
from object_store_creator import ObjectStoreCreator
from path_canonicalizer import PathCanonicalizer
from test_file_system import TestFileSystem
from third_party.handlebar import Handlebar
# Redirect rules served from redirects.json: local target and absolute URL.
_REDIRECTS_JSON = json.dumps({
  'oldfile.html': 'storage.html',
  'index.html': 'https://developers.google.com/chrome',
})

# Pairs of (markdown source, expected rendered HTML fragment).
_MARKDOWN_CONTENT = (
  ('# Header 1 #', u'<h1 id="header-1">Header 1</h1>'),
  ('1. Foo\n', u'<ol>\n<li>Foo</li>\n</ol>'),
  ('\n',
   '<p><img alt="alt text" src="/path/img.jpg" title="Title" /></p>'),
  ('* Unordered item 1', u'<ul>\n<li>Unordered item 1</li>\n</ul>')
)

# Test file system data which exercises many different mimetypes.
_TEST_DATA = {
  'dir': {
    'a.txt': 'a.txt content',
    'b.txt': 'b.txt content',
    'c': {
      'd.txt': 'd.txt content',
    },
  },
  'dir2': {
    'dir3': {
      'a.txt': 'a.txt content',
      'b.txt': 'b.txt content',
      'c': {
        'd.txt': 'd.txt content',
      },
    },
  },
  'dir4': {
    'index.html': 'index.html content 1'
  },
  'dir5': {
    'index.html': 'index.html content 2'
  },
  'dir6': {
    'notindex.html': 'notindex.html content'
  },
  'dir7': {
    'index.md': '\n'.join(text[0] for text in _MARKDOWN_CONTENT)
  },
  'dir.txt': 'dir.txt content',
  'dir5.html': 'dir5.html content',
  'img.png': 'img.png content',
  'index.html': 'index.html content',
  'read.txt': 'read.txt content',
  'redirects.json': _REDIRECTS_JSON,
  'noextension': 'noextension content',
  'run.js': 'run.js content',
  'site.css': 'site.css content',
  'storage.html': 'storage.html content',
  'markdown.md': '\n'.join(text[0] for text in _MARKDOWN_CONTENT)
}
class ContentProviderUnittest(unittest.TestCase):
def setUp(self):
    # Fresh ContentProvider over the in-memory test file system per test.
    self._content_provider = self._CreateContentProvider()
def _CreateContentProvider(self, supports_zip=False):
object_store_creator = ObjectStoreCreator.ForTest()
test_file_system = TestFileSystem(_TEST_DATA)
return ContentProvider(
'foo',
CompiledFileSystem.Factory(object_store_creator),
test_file_system,
object_store_creator,
default_extensions=('.html', '.md'),
# TODO(kalman): Test supports_templates=False.
supports_templates=True,
supports_zip=supports_zip)
def _assertContent(self, content, content_type, content_and_type):
# Assert type so that str is differentiated from unicode.
self.assertEqual(type(content), type(content_and_type.content))
self.assertEqual(content, content_and_type.content)
self.assertEqual(content_type, content_and_type.content_type)
def _assertTemplateContent(self, content, path):
content_and_type = self._content_provider.GetContentAndType(path).Get()
self.assertEqual(Handlebar, type(content_and_type.content))
content_and_type.content = content_and_type.content.source
self._assertContent(content, 'text/html', content_and_type)
def _assertMarkdownContent(self, content, path):
content_and_type = self._content_provider.GetContentAndType(path).Get()
content_and_type.content = content_and_type.content.source
self._assertContent(content, 'text/html', content_and_type)
def testPlainText(self):
self._assertContent(
u'a.txt content', 'text/plain',
self._content_provider.GetContentAndType('dir/a.txt').Get())
self._assertContent(
u'd.txt content', 'text/plain',
self._content_provider.GetContentAndType('dir/c/d.txt').Get())
self._assertContent(
u'read.txt content', 'text/plain',
self._content_provider.GetContentAndType('read.txt').Get())
self._assertContent(
unicode(_REDIRECTS_JSON, 'utf-8'), 'application/json',
self._content_provider.GetContentAndType('redirects.json').Get())
self._assertContent(
u'run.js content', 'application/javascript',
self._content_provider.GetContentAndType('run.js').Get())
self._assertContent(
u'site.css content', 'text/css',
self._content_provider.GetContentAndType('site.css').Get())
def testTemplate(self):
self._assertTemplateContent(u'storage.html content', 'storage.html')
def testImage(self):
self._assertContent(
'img.png content', 'image/png',
self._content_provider.GetContentAndType('img.png').Get())
def testZipTopLevel(self):
zip_content_provider = self._CreateContentProvider(supports_zip=True)
content_and_type = zip_content_provider.GetContentAndType('dir.zip').Get()
zipfile = ZipFile(StringIO(content_and_type.content))
content_and_type.content = zipfile.namelist()
self._assertContent(
['dir/a.txt', 'dir/b.txt', 'dir/c/d.txt'], 'application/zip',
content_and_type)
def testZip2ndLevel(self):
zip_content_provider = self._CreateContentProvider(supports_zip=True)
content_and_type = zip_content_provider.GetContentAndType(
'dir2/dir3.zip').Get()
zipfile = ZipFile(StringIO(content_and_type.content))
content_and_type.content = zipfile.namelist()
self._assertContent(
['dir3/a.txt', 'dir3/b.txt', 'dir3/c/d.txt'], 'application/zip',
content_and_type)
def testCanonicalZipPaths(self):
# Without supports_zip the path is canonicalized as a file.
self.assertEqual(
'dir.txt',
self._content_provider.GetCanonicalPath('dir.zip'))
self.assertEqual(
'dir.txt',
self._content_provider.GetCanonicalPath('diR.zip'))
# With supports_zip the path is canonicalized as the zip file which
# corresponds to the canonical directory.
zip_content_provider = self._CreateContentProvider(supports_zip=True)
self.assertEqual(
'dir.zip',
zip_content_provider.GetCanonicalPath('dir.zip'))
self.assertEqual(
'dir.zip',
zip_content_provider.GetCanonicalPath('diR.zip'))
def testMarkdown(self):
self._assertMarkdownContent(
'\n'.join(text[1] for text in _MARKDOWN_CONTENT),
'markdown')
def testNotFound(self):
self.assertRaises(
FileNotFoundError,
self._content_provider.GetContentAndType('oops').Get)
def testIndexRedirect(self):
self._assertTemplateContent(u'index.html content', '')
self._assertTemplateContent(u'index.html content 1', 'dir4')
self._assertTemplateContent(u'dir5.html content', 'dir5')
self._assertMarkdownContent(
'\n'.join(text[1] for text in _MARKDOWN_CONTENT),
'dir7')
self._assertContent(
'noextension content', 'text/plain',
self._content_provider.GetContentAndType('noextension').Get())
self.assertRaises(
FileNotFoundError,
self._content_provider.GetContentAndType('dir6').Get)
if __name__ == '__main__':
unittest.main() | chrome/common/extensions/docs/server2/content_provider_test.py |
from cStringIO import StringIO
import json
import unittest
from zipfile import ZipFile
from compiled_file_system import CompiledFileSystem
from content_provider import ContentProvider
from file_system import FileNotFoundError
from object_store_creator import ObjectStoreCreator
from path_canonicalizer import PathCanonicalizer
from test_file_system import TestFileSystem
from third_party.handlebar import Handlebar
# Redirect rules served at redirects.json: old path -> new path or full URL.
_REDIRECTS_JSON = json.dumps({
  'oldfile.html': 'storage.html',
  'index.html': 'https://developers.google.com/chrome',
})

# (markdown source, expected rendered HTML) pairs used by the markdown tests.
# NOTE(review): the source half of the third pair is only '\n' yet the
# expectation is an <img> tag -- the image markdown line was presumably lost
# in transit; confirm against upstream history.
_MARKDOWN_CONTENT = (
  ('# Header 1 #', u'<h1 id="header-1">Header 1</h1>'),
  ('1. Foo\n', u'<ol>\n<li>Foo</li>\n</ol>'),
  ('\n',
   '<p><img alt="alt text" src="/path/img.jpg" title="Title" /></p>'),
  ('* Unordered item 1', u'<ul>\n<li>Unordered item 1</li>\n</ul>')
)

# Test file system data which exercises many different mimetypes.
_TEST_DATA = {
  'dir': {
    'a.txt': 'a.txt content',
    'b.txt': 'b.txt content',
    'c': {
      'd.txt': 'd.txt content',
    },
  },
  'dir2': {
    'dir3': {
      'a.txt': 'a.txt content',
      'b.txt': 'b.txt content',
      'c': {
        'd.txt': 'd.txt content',
      },
    },
  },
  'dir4': {
    'index.html': 'index.html content 1'
  },
  'dir5': {
    'index.html': 'index.html content 2'
  },
  'dir6': {
    'notindex.html': 'notindex.html content'
  },
  'dir7': {
    'index.md': '\n'.join(text[0] for text in _MARKDOWN_CONTENT)
  },
  'dir.txt': 'dir.txt content',
  'dir5.html': 'dir5.html content',
  'img.png': 'img.png content',
  'index.html': 'index.html content',
  'read.txt': 'read.txt content',
  'redirects.json': _REDIRECTS_JSON,
  'noextension': 'noextension content',
  'run.js': 'run.js content',
  'site.css': 'site.css content',
  'storage.html': 'storage.html content',
  'markdown.md': '\n'.join(text[0] for text in _MARKDOWN_CONTENT)
}
class ContentProviderUnittest(unittest.TestCase):
  """Unit tests for ContentProvider over an in-memory TestFileSystem.

  Exercises mimetype selection for plain files, Handlebar template and
  markdown rendering, zip generation for directories, canonical path
  resolution, and index-file redirects.
  """

  def setUp(self):
    self._content_provider = self._CreateContentProvider()

  def _CreateContentProvider(self, supports_zip=False):
    """Builds a ContentProvider backed by a TestFileSystem over _TEST_DATA."""
    object_store_creator = ObjectStoreCreator.ForTest()
    test_file_system = TestFileSystem(_TEST_DATA)
    return ContentProvider(
        'foo',
        CompiledFileSystem.Factory(object_store_creator),
        test_file_system,
        object_store_creator,
        default_extensions=('.html', '.md'),
        # TODO(kalman): Test supports_templates=False.
        supports_templates=True,
        supports_zip=supports_zip)

  def _assertContent(self, content, content_type, content_and_type):
    """Asserts both the value and the mimetype of a ContentAndType."""
    # Assert type so that str is differentiated from unicode.
    self.assertEqual(type(content), type(content_and_type.content))
    self.assertEqual(content, content_and_type.content)
    self.assertEqual(content_type, content_and_type.content_type)

  def _assertTemplateContent(self, content, path):
    """Asserts that |path| is served as a Handlebar template whose source is
    |content|."""
    content_and_type = self._content_provider.GetContentAndType(path).Get()
    self.assertEqual(Handlebar, type(content_and_type.content))
    content_and_type.content = content_and_type.content.source
    self._assertContent(content, 'text/html', content_and_type)

  def _assertMarkdownContent(self, content, path):
    """Asserts that the markdown at |path| renders to the HTML |content|."""
    content_and_type = self._content_provider.GetContentAndType(path).Get()
    content_and_type.content = content_and_type.content.source
    self._assertContent(content, 'text/html', content_and_type)

  def testPlainText(self):
    """Text-like files are served with their specific text mimetypes."""
    self._assertContent(
        u'a.txt content', 'text/plain',
        self._content_provider.GetContentAndType('dir/a.txt').Get())
    self._assertContent(
        u'd.txt content', 'text/plain',
        self._content_provider.GetContentAndType('dir/c/d.txt').Get())
    self._assertContent(
        u'read.txt content', 'text/plain',
        self._content_provider.GetContentAndType('read.txt').Get())
    self._assertContent(
        unicode(_REDIRECTS_JSON, 'utf-8'), 'application/json',
        self._content_provider.GetContentAndType('redirects.json').Get())
    self._assertContent(
        u'run.js content', 'application/javascript',
        self._content_provider.GetContentAndType('run.js').Get())
    self._assertContent(
        u'site.css content', 'text/css',
        self._content_provider.GetContentAndType('site.css').Get())

  def testTemplate(self):
    """.html files are parsed into Handlebar templates."""
    self._assertTemplateContent(u'storage.html content', 'storage.html')

  def testImage(self):
    """Images are served as raw bytes (str, not unicode) with image/png."""
    self._assertContent(
        'img.png content', 'image/png',
        self._content_provider.GetContentAndType('img.png').Get())

  def testZipTopLevel(self):
    """A top-level directory is zipped with paths rooted at its name."""
    zip_content_provider = self._CreateContentProvider(supports_zip=True)
    content_and_type = zip_content_provider.GetContentAndType('dir.zip').Get()
    zipfile = ZipFile(StringIO(content_and_type.content))
    content_and_type.content = zipfile.namelist()
    self._assertContent(
        ['dir/a.txt', 'dir/b.txt', 'dir/c/d.txt'], 'application/zip',
        content_and_type)

  def testZip2ndLevel(self):
    """A nested directory is zipped rooted at the innermost directory name."""
    zip_content_provider = self._CreateContentProvider(supports_zip=True)
    content_and_type = zip_content_provider.GetContentAndType(
        'dir2/dir3.zip').Get()
    zipfile = ZipFile(StringIO(content_and_type.content))
    content_and_type.content = zipfile.namelist()
    self._assertContent(
        ['dir3/a.txt', 'dir3/b.txt', 'dir3/c/d.txt'], 'application/zip',
        content_and_type)

  def testCanonicalZipPaths(self):
    """.zip paths canonicalize as files or zips depending on supports_zip."""
    # Without supports_zip the path is canonicalized as a file.
    self.assertEqual(
        'dir.txt',
        self._content_provider.GetCanonicalPath('dir.zip'))
    self.assertEqual(
        'dir.txt',
        self._content_provider.GetCanonicalPath('diR.zip'))
    # With supports_zip the path is canonicalized as the zip file which
    # corresponds to the canonical directory.
    zip_content_provider = self._CreateContentProvider(supports_zip=True)
    self.assertEqual(
        'dir.zip',
        zip_content_provider.GetCanonicalPath('dir.zip'))
    self.assertEqual(
        'dir.zip',
        zip_content_provider.GetCanonicalPath('diR.zip'))

  def testMarkdown(self):
    """An extensionless path resolves to its .md file and renders to HTML."""
    self._assertMarkdownContent(
        '\n'.join(text[1] for text in _MARKDOWN_CONTENT),
        'markdown')

  def testNotFound(self):
    """Unknown paths raise FileNotFoundError."""
    self.assertRaises(
        FileNotFoundError,
        self._content_provider.GetContentAndType('oops').Get)

  def testIndexRedirect(self):
    """Directory paths fall through to index.html/index.md or a same-named
    sibling file; directories without an index are not found."""
    self._assertTemplateContent(u'index.html content', '')
    self._assertTemplateContent(u'index.html content 1', 'dir4')
    # dir5 has an index.html, but the sibling file dir5.html wins.
    self._assertTemplateContent(u'dir5.html content', 'dir5')
    self._assertMarkdownContent(
        '\n'.join(text[1] for text in _MARKDOWN_CONTENT),
        'dir7')
    self._assertContent(
        'noextension content', 'text/plain',
        self._content_provider.GetContentAndType('noextension').Get())
    self.assertRaises(
        FileNotFoundError,
        self._content_provider.GetContentAndType('dir6').Get)
if __name__ == '__main__':
unittest.main() | 0.308503 | 0.231202 |
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from typing import TYPE_CHECKING, Any, Awaitable
## Bokeh imports
if TYPE_CHECKING:
from ..document.events import DocumentPatchedEvent
from ..protocol import Protocol, messages as msg
from ..protocol.message import Message
from .contexts import ApplicationContext
from .session import ServerSession
from .views.ws import WSHandler
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'ServerConnection',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class ServerConnection:
    ''' Wraps a websocket connection to a client.

    .. autoclasstoc::

    '''

    _session: ServerSession | None

    def __init__(self, protocol: Protocol, socket: WSHandler,
            application_context: ApplicationContext, session: ServerSession) -> None:
        self._protocol = protocol
        self._socket = socket
        self._application_context = application_context
        self._ping_count = 0
        self._session = session
        # Register for change notifications from the session immediately.
        self._session.subscribe(self)

    @property
    def protocol(self) -> Protocol:
        ''' The protocol used to construct messages for this connection. '''
        return self._protocol

    @property
    def session(self) -> ServerSession:
        ''' The ServerSession this connection is attached to (must not have
        been detached). '''
        assert self._session is not None
        return self._session

    @property
    def application_context(self) -> ApplicationContext:
        ''' The ApplicationContext of the app this connection serves. '''
        return self._application_context

    def detach_session(self) -> None:
        ''' Allow the session to be discarded and don't get change
        notifications from it anymore. '''
        if self._session is None:
            return
        self._session.unsubscribe(self)
        self._session = None

    def ok(self, message: Message[Any]) -> msg.ok:
        ''' Build an OK reply to *message*. '''
        reply_to = message.header['msgid']
        return self.protocol.create('OK', reply_to)

    def error(self, message: Message[Any], text: str) -> msg.error:
        ''' Build an ERROR reply to *message* carrying *text*. '''
        reply_to = message.header['msgid']
        return self.protocol.create('ERROR', reply_to, text)

    def send_patch_document(self, event: DocumentPatchedEvent) -> Awaitable[None]:
        ''' Sends a PATCH-DOC message, returning a Future that's completed when it's written out. '''
        patch = self.protocol.create('PATCH-DOC', [event])
        # yes, *return* the awaitable, it will be awaited when pending writes are processed
        return self._socket.send_message(patch)

    def send_ping(self) -> None:
        ''' Ping the websocket with a monotonically increasing counter payload. '''
        payload = str(self._ping_count).encode("utf-8")
        self._socket.ping(payload)
        self._ping_count += 1
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#----------------------------------------------------------------------------- | bokeh/server/connection.py | #-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from typing import TYPE_CHECKING, Any, Awaitable
## Bokeh imports
if TYPE_CHECKING:
from ..document.events import DocumentPatchedEvent
from ..protocol import Protocol, messages as msg
from ..protocol.message import Message
from .contexts import ApplicationContext
from .session import ServerSession
from .views.ws import WSHandler
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'ServerConnection',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class ServerConnection:
    ''' Wraps a websocket connection to a client.

    .. autoclasstoc::

    '''

    _session: ServerSession | None

    def __init__(self, protocol: Protocol, socket: WSHandler,
            application_context: ApplicationContext, session: ServerSession) -> None:
        self._protocol = protocol
        self._socket = socket
        self._application_context = application_context
        self._ping_count = 0
        self._session = session
        # Register for change notifications from the session immediately.
        self._session.subscribe(self)

    @property
    def protocol(self) -> Protocol:
        ''' The protocol used to construct messages for this connection. '''
        return self._protocol

    @property
    def session(self) -> ServerSession:
        ''' The ServerSession this connection is attached to (must not have
        been detached). '''
        assert self._session is not None
        return self._session

    @property
    def application_context(self) -> ApplicationContext:
        ''' The ApplicationContext of the app this connection serves. '''
        return self._application_context

    def detach_session(self) -> None:
        ''' Allow the session to be discarded and don't get change
        notifications from it anymore. '''
        if self._session is None:
            return
        self._session.unsubscribe(self)
        self._session = None

    def ok(self, message: Message[Any]) -> msg.ok:
        ''' Build an OK reply to *message*. '''
        reply_to = message.header['msgid']
        return self.protocol.create('OK', reply_to)

    def error(self, message: Message[Any], text: str) -> msg.error:
        ''' Build an ERROR reply to *message* carrying *text*. '''
        reply_to = message.header['msgid']
        return self.protocol.create('ERROR', reply_to, text)

    def send_patch_document(self, event: DocumentPatchedEvent) -> Awaitable[None]:
        ''' Sends a PATCH-DOC message, returning a Future that's completed when it's written out. '''
        patch = self.protocol.create('PATCH-DOC', [event])
        # yes, *return* the awaitable, it will be awaited when pending writes are processed
        return self._socket.send_message(patch)

    def send_ping(self) -> None:
        ''' Ping the websocket with a monotonically increasing counter payload. '''
        payload = str(self._ping_count).encode("utf-8")
        self._socket.ping(payload)
        self._ping_count += 1
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#----------------------------------------------------------------------------- | 0.644449 | 0.0809 |
from typing import Optional, List, Dict
from torch_geometric.typing import Adj, OptTensor
import torch
from torch import Tensor
from torch.nn import ModuleList, Sequential, Linear, ReLU
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.inits import reset
from torch_geometric.utils import degree
from comparison_methods.PNA.aggregators import AGGREGATORS
from comparison_methods.PNA.scalers import SCALERS
# Implemented with the help of <NAME>, author of PyTorch Geometric
# For an example see https://github.com/rusty1s/pytorch_geometric/blob/master/examples/pna.py
class PNAConv(MessagePassing):
    r"""The Principal Neighbourhood Aggregation graph convolution operator
    from the `"Principal Neighbourhood Aggregation for Graph Nets"
    <https://arxiv.org/abs/2004.05718>`_ paper

    .. math::
        \bigoplus = \underbrace{\begin{bmatrix}I \\ S(D, \alpha=1) \\
        S(D, \alpha=-1) \end{bmatrix} }_{\text{scalers}}
        \otimes \underbrace{\begin{bmatrix} \mu \\ \sigma \\ \max \\ \min
        \end{bmatrix}}_{\text{aggregators}},

    in:

    .. math::
        X_i^{(t+1)} = U \left( X_i^{(t)}, \underset{(j,i) \in E}{\bigoplus}
        M \left( X_i^{(t)}, X_j^{(t)} \right) \right)

    where :math:`M` and :math:`U` denote the MLP referred to with pretrans
    and posttrans respectively.

    Args:
        in_channels (int): Size of each input sample.
        out_channels (int): Size of each output sample.
        aggregators (list of str): Set of aggregation function identifiers,
            namely :obj:`"sum"`, :obj:`"mean"`, :obj:`"min"`, :obj:`"max"`,
            :obj:`"var"` and :obj:`"std"`.
        scalers: (list of str): Set of scaling function identifiers, namely
            :obj:`"identity"`, :obj:`"amplification"`,
            :obj:`"attenuation"`, :obj:`"linear"` and
            :obj:`"inverse_linear"`.
        deg (Tensor): Histogram of in-degrees of nodes in the training set,
            used by scalers to normalize.
        edge_dim (int, optional): Edge feature dimensionality (in case
            there are any). (default :obj:`None`)
        towers (int, optional): Number of towers (default: :obj:`1`).
        pre_layers (int, optional): Number of transformation layers before
            aggregation (default: :obj:`1`).
        post_layers (int, optional): Number of transformation layers after
            aggregation (default: :obj:`1`).
        divide_input (bool, optional): Whether the input features should
            be split between towers or not (default: :obj:`False`).
        **kwargs (optional): Additional arguments of
            :class:`torch_geometric.nn.conv.MessagePassing`.
    """
    def __init__(self, in_channels: int, out_channels: int,
                 aggregators: List[str], scalers: List[str], deg: Tensor,
                 edge_dim: Optional[int] = None, towers: int = 1,
                 pre_layers: int = 1, post_layers: int = 1,
                 divide_input: bool = False, **kwargs):
        # aggr=None: aggregation is implemented manually in self.aggregate.
        super(PNAConv, self).__init__(aggr=None, node_dim=0, **kwargs)

        if divide_input:
            assert in_channels % towers == 0
        assert out_channels % towers == 0

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.aggregators = [AGGREGATORS[aggr] for aggr in aggregators]
        self.scalers = [SCALERS[scale] for scale in scalers]
        self.edge_dim = edge_dim
        self.towers = towers
        self.divide_input = divide_input

        # Per-tower feature widths.
        self.F_in = in_channels // towers if divide_input else in_channels
        self.F_out = self.out_channels // towers

        # Mean degree statistics used by the degree scalers.
        # NOTE(review): the docstring says `deg` is a histogram, but these
        # statistics treat it as a flat tensor of per-node degrees -- confirm
        # what callers actually pass.
        deg = deg.to(torch.float)
        self.avg_deg: Dict[str, float] = {
            'lin': deg.mean().item(),
            'log': (deg + 1).log().mean().item(),
            'exp': deg.exp().mean().item(),
        }

        if self.edge_dim is not None:
            self.edge_encoder = Linear(edge_dim, self.F_in)

        # One pretrans MLP and one posttrans MLP per tower.
        self.pre_nns = ModuleList()
        self.post_nns = ModuleList()
        for _ in range(towers):
            # pretrans input is [x_i, x_j(, encoded edge_attr)] concatenated.
            # NOTE(review): `3 if edge_dim else 2` uses truthiness, so
            # edge_dim=0 behaves like None here but not in the encoder check
            # above -- presumably edge_dim=0 is never used; confirm.
            modules = [Linear((3 if edge_dim else 2) * self.F_in, self.F_in)]
            for _ in range(pre_layers - 1):
                modules += [ReLU()]
                modules += [Linear(self.F_in, self.F_in)]
            self.pre_nns.append(Sequential(*modules))

            # posttrans input is all scaled aggregations plus the root feature.
            in_channels = (len(aggregators) * len(scalers) + 1) * self.F_in
            modules = [Linear(in_channels, self.F_out)]
            for _ in range(post_layers - 1):
                modules += [ReLU()]
                modules += [Linear(self.F_out, self.F_out)]
            self.post_nns.append(Sequential(*modules))

        # Final mixing layer across the concatenated tower outputs.
        self.lin = Linear(out_channels, out_channels)

        self.reset_parameters()

    def reset_parameters(self):
        """Reinitialize all learnable sub-module parameters."""
        if self.edge_dim is not None:
            self.edge_encoder.reset_parameters()
        for nn in self.pre_nns:
            reset(nn)
        for nn in self.post_nns:
            reset(nn)
        self.lin.reset_parameters()

    def forward(self, x: Tensor, edge_index: Adj,
                edge_attr: OptTensor = None) -> Tensor:
        """Run one PNA convolution over ``x`` and return node embeddings of
        size ``out_channels``."""
        # Shape x to (num_nodes, towers, F_in): split features across towers,
        # or replicate them into every tower.
        if self.divide_input:
            x = x.view(-1, self.towers, self.F_in)
        else:
            x = x.view(-1, 1, self.F_in).repeat(1, self.towers, 1)

        # propagate_type: (x: Tensor, edge_attr: OptTensor)
        out = self.propagate(edge_index, x=x, edge_attr=edge_attr, size=None)

        # posttrans: append the root feature, run one MLP per tower, then
        # concatenate the tower outputs.
        out = torch.cat([x, out], dim=-1)
        outs = [nn(out[:, i]) for i, nn in enumerate(self.post_nns)]
        out = torch.cat(outs, dim=1)

        return self.lin(out)

    def message(self, x_i: Tensor, x_j: Tensor,
                edge_attr: OptTensor) -> Tensor:
        """pretrans: transform each (target, source[, edge]) pair per tower."""
        h: Tensor = x_i  # Dummy.
        if edge_attr is not None:
            edge_attr = self.edge_encoder(edge_attr)
            edge_attr = edge_attr.view(-1, 1, self.F_in)
            edge_attr = edge_attr.repeat(1, self.towers, 1)
            h = torch.cat([x_i, x_j, edge_attr], dim=-1)
        else:
            h = torch.cat([x_i, x_j], dim=-1)

        hs = [nn(h[:, i]) for i, nn in enumerate(self.pre_nns)]
        return torch.stack(hs, dim=1)

    def aggregate(self, inputs: Tensor, index: Tensor,
                  dim_size: Optional[int] = None) -> Tensor:
        """Apply every aggregator, then rescale each result with every
        degree scaler."""
        outs = [aggr(inputs, index, dim_size) for aggr in self.aggregators]
        out = torch.cat(outs, dim=-1)

        deg = degree(index, dim_size, dtype=inputs.dtype).view(-1, 1, 1)
        outs = [scaler(out, deg, self.avg_deg) for scaler in self.scalers]
        return torch.cat(outs, dim=-1)

    def __repr__(self):
        # Bug fix: the previous implementation referenced the nonexistent
        # attribute ``self.dim`` (AttributeError on repr) and was followed by
        # an unreachable ``raise NotImplementedError`` after the return.
        return (f'{self.__class__.__name__}({self.in_channels}, '
                f'{self.out_channels}, towers={self.towers}, '
                f'edge_dim={self.edge_dim})')
class PNAConvSimple(MessagePassing):
    r"""The Principal Neighbourhood Aggregation graph convolution operator
    from the `"Principal Neighbourhood Aggregation for Graph Nets"
    <https://arxiv.org/abs/2004.05718>`_ paper

    .. math::
        \bigoplus = \underbrace{\begin{bmatrix}I \\ S(D, \alpha=1) \\
        S(D, \alpha=-1) \end{bmatrix} }_{\text{scalers}}
        \otimes \underbrace{\begin{bmatrix} \mu \\ \sigma \\ \max \\ \min
        \end{bmatrix}}_{\text{aggregators}},

    in:

    .. math::
        X_i^{(t+1)} = U \left( \underset{(j,i) \in E}{\bigoplus}
        M \left(X_j^{(t)} \right) \right)

    where :math:`U` denote the MLP referred to with posttrans.

    Args:
        in_channels (int): Size of each input sample.
        out_channels (int): Size of each output sample.
        aggregators (list of str): Set of aggregation function identifiers,
            namely :obj:`"sum"`, :obj:`"mean"`, :obj:`"min"`, :obj:`"max"`,
            :obj:`"var"` and :obj:`"std"`.
        scalers: (list of str): Set of scaling function identifiers, namely
            :obj:`"identity"`, :obj:`"amplification"`,
            :obj:`"attenuation"`, :obj:`"linear"` and
            :obj:`"inverse_linear"`.
        deg (Tensor): Histogram of in-degrees of nodes in the training set,
            used by scalers to normalize.
        post_layers (int, optional): Number of transformation layers after
            aggregation (default: :obj:`1`).
        **kwargs (optional): Additional arguments of
            :class:`torch_geometric.nn.conv.MessagePassing`.
    """
    def __init__(self, in_channels: int, out_channels: int,
                 aggregators: List[str], scalers: List[str], deg: Tensor,
                 post_layers: int = 1, **kwargs):
        # aggr=None: aggregation is implemented manually in self.aggregate.
        super(PNAConvSimple, self).__init__(aggr=None, node_dim=0, **kwargs)

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.aggregators = [AGGREGATORS[aggr] for aggr in aggregators]
        self.scalers = [SCALERS[scale] for scale in scalers]
        self.F_in = in_channels
        self.F_out = self.out_channels

        # Mean degree statistics used by the degree scalers.
        # NOTE(review): the docstring says `deg` is a histogram, but these
        # statistics treat it as a flat tensor of per-node degrees -- confirm
        # what callers actually pass.
        deg = deg.to(torch.float)
        self.avg_deg: Dict[str, float] = {
            'lin': deg.mean().item(),
            'log': (deg + 1).log().mean().item(),
            'exp': deg.exp().mean().item(),
        }

        # posttrans MLP over the concatenated scaled aggregations.
        in_channels = (len(aggregators) * len(scalers)) * self.F_in
        modules = [Linear(in_channels, self.F_out)]
        for _ in range(post_layers - 1):
            modules += [ReLU()]
            modules += [Linear(self.F_out, self.F_out)]
        self.post_nn = Sequential(*modules)

        self.reset_parameters()

    def reset_parameters(self):
        """Reinitialize the posttrans MLP parameters."""
        reset(self.post_nn)

    def forward(self, x: Tensor, edge_index: Adj, edge_attr: OptTensor = None) -> Tensor:
        """Aggregate neighbour features and apply the posttrans MLP.

        ``edge_attr`` is accepted for interface compatibility but unused.
        """
        # propagate_type: (x: Tensor)
        out = self.propagate(edge_index, x=x, size=None)
        return self.post_nn(out)

    def message(self, x_j: Tensor) -> Tensor:
        # Identity message: neighbour features are aggregated untransformed.
        return x_j

    def aggregate(self, inputs: Tensor, index: Tensor,
                  dim_size: Optional[int] = None) -> Tensor:
        """Apply every aggregator, then rescale each result with every
        degree scaler."""
        outs = [aggr(inputs, index, dim_size) for aggr in self.aggregators]
        out = torch.cat(outs, dim=-1)

        deg = degree(index, dim_size, dtype=inputs.dtype).view(-1, 1)
        outs = [scaler(out, deg, self.avg_deg) for scaler in self.scalers]
        return torch.cat(outs, dim=-1)

    def __repr__(self):
        # Bug fix: the repr string previously lacked its closing parenthesis;
        # the unreachable ``raise NotImplementedError`` after the return is
        # also dropped.
        return (f'{self.__class__.__name__}({self.in_channels}, '
                f'{self.out_channels})')
raise NotImplementedError | comparison_methods/PNA/pna.py | from typing import Optional, List, Dict
from torch_geometric.typing import Adj, OptTensor
import torch
from torch import Tensor
from torch.nn import ModuleList, Sequential, Linear, ReLU
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.inits import reset
from torch_geometric.utils import degree
from comparison_methods.PNA.aggregators import AGGREGATORS
from comparison_methods.PNA.scalers import SCALERS
# Implemented with the help of <NAME>, author of PyTorch Geometric
# For an example see https://github.com/rusty1s/pytorch_geometric/blob/master/examples/pna.py
class PNAConv(MessagePassing):
    r"""The Principal Neighbourhood Aggregation graph convolution operator
    from the `"Principal Neighbourhood Aggregation for Graph Nets"
    <https://arxiv.org/abs/2004.05718>`_ paper

    .. math::
        \bigoplus = \underbrace{\begin{bmatrix}I \\ S(D, \alpha=1) \\
        S(D, \alpha=-1) \end{bmatrix} }_{\text{scalers}}
        \otimes \underbrace{\begin{bmatrix} \mu \\ \sigma \\ \max \\ \min
        \end{bmatrix}}_{\text{aggregators}},

    in:

    .. math::
        X_i^{(t+1)} = U \left( X_i^{(t)}, \underset{(j,i) \in E}{\bigoplus}
        M \left( X_i^{(t)}, X_j^{(t)} \right) \right)

    where :math:`M` and :math:`U` denote the MLP referred to with pretrans
    and posttrans respectively.

    Args:
        in_channels (int): Size of each input sample.
        out_channels (int): Size of each output sample.
        aggregators (list of str): Set of aggregation function identifiers,
            namely :obj:`"sum"`, :obj:`"mean"`, :obj:`"min"`, :obj:`"max"`,
            :obj:`"var"` and :obj:`"std"`.
        scalers: (list of str): Set of scaling function identifiers, namely
            :obj:`"identity"`, :obj:`"amplification"`,
            :obj:`"attenuation"`, :obj:`"linear"` and
            :obj:`"inverse_linear"`.
        deg (Tensor): Histogram of in-degrees of nodes in the training set,
            used by scalers to normalize.
        edge_dim (int, optional): Edge feature dimensionality (in case
            there are any). (default :obj:`None`)
        towers (int, optional): Number of towers (default: :obj:`1`).
        pre_layers (int, optional): Number of transformation layers before
            aggregation (default: :obj:`1`).
        post_layers (int, optional): Number of transformation layers after
            aggregation (default: :obj:`1`).
        divide_input (bool, optional): Whether the input features should
            be split between towers or not (default: :obj:`False`).
        **kwargs (optional): Additional arguments of
            :class:`torch_geometric.nn.conv.MessagePassing`.
    """
    def __init__(self, in_channels: int, out_channels: int,
                 aggregators: List[str], scalers: List[str], deg: Tensor,
                 edge_dim: Optional[int] = None, towers: int = 1,
                 pre_layers: int = 1, post_layers: int = 1,
                 divide_input: bool = False, **kwargs):
        # aggr=None: aggregation is implemented manually in self.aggregate.
        super(PNAConv, self).__init__(aggr=None, node_dim=0, **kwargs)

        if divide_input:
            assert in_channels % towers == 0
        assert out_channels % towers == 0

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.aggregators = [AGGREGATORS[aggr] for aggr in aggregators]
        self.scalers = [SCALERS[scale] for scale in scalers]
        self.edge_dim = edge_dim
        self.towers = towers
        self.divide_input = divide_input

        # Per-tower feature widths.
        self.F_in = in_channels // towers if divide_input else in_channels
        self.F_out = self.out_channels // towers

        # Mean degree statistics used by the degree scalers.
        # NOTE(review): the docstring says `deg` is a histogram, but these
        # statistics treat it as a flat tensor of per-node degrees -- confirm
        # what callers actually pass.
        deg = deg.to(torch.float)
        self.avg_deg: Dict[str, float] = {
            'lin': deg.mean().item(),
            'log': (deg + 1).log().mean().item(),
            'exp': deg.exp().mean().item(),
        }

        if self.edge_dim is not None:
            self.edge_encoder = Linear(edge_dim, self.F_in)

        # One pretrans MLP and one posttrans MLP per tower.
        self.pre_nns = ModuleList()
        self.post_nns = ModuleList()
        for _ in range(towers):
            # pretrans input is [x_i, x_j(, encoded edge_attr)] concatenated.
            # NOTE(review): `3 if edge_dim else 2` uses truthiness, so
            # edge_dim=0 behaves like None here but not in the encoder check
            # above -- presumably edge_dim=0 is never used; confirm.
            modules = [Linear((3 if edge_dim else 2) * self.F_in, self.F_in)]
            for _ in range(pre_layers - 1):
                modules += [ReLU()]
                modules += [Linear(self.F_in, self.F_in)]
            self.pre_nns.append(Sequential(*modules))

            # posttrans input is all scaled aggregations plus the root feature.
            in_channels = (len(aggregators) * len(scalers) + 1) * self.F_in
            modules = [Linear(in_channels, self.F_out)]
            for _ in range(post_layers - 1):
                modules += [ReLU()]
                modules += [Linear(self.F_out, self.F_out)]
            self.post_nns.append(Sequential(*modules))

        # Final mixing layer across the concatenated tower outputs.
        self.lin = Linear(out_channels, out_channels)

        self.reset_parameters()

    def reset_parameters(self):
        """Reinitialize all learnable sub-module parameters."""
        if self.edge_dim is not None:
            self.edge_encoder.reset_parameters()
        for nn in self.pre_nns:
            reset(nn)
        for nn in self.post_nns:
            reset(nn)
        self.lin.reset_parameters()

    def forward(self, x: Tensor, edge_index: Adj,
                edge_attr: OptTensor = None) -> Tensor:
        """Run one PNA convolution over ``x`` and return node embeddings of
        size ``out_channels``."""
        # Shape x to (num_nodes, towers, F_in): split features across towers,
        # or replicate them into every tower.
        if self.divide_input:
            x = x.view(-1, self.towers, self.F_in)
        else:
            x = x.view(-1, 1, self.F_in).repeat(1, self.towers, 1)

        # propagate_type: (x: Tensor, edge_attr: OptTensor)
        out = self.propagate(edge_index, x=x, edge_attr=edge_attr, size=None)

        # posttrans: append the root feature, run one MLP per tower, then
        # concatenate the tower outputs.
        out = torch.cat([x, out], dim=-1)
        outs = [nn(out[:, i]) for i, nn in enumerate(self.post_nns)]
        out = torch.cat(outs, dim=1)

        return self.lin(out)

    def message(self, x_i: Tensor, x_j: Tensor,
                edge_attr: OptTensor) -> Tensor:
        """pretrans: transform each (target, source[, edge]) pair per tower."""
        h: Tensor = x_i  # Dummy.
        if edge_attr is not None:
            edge_attr = self.edge_encoder(edge_attr)
            edge_attr = edge_attr.view(-1, 1, self.F_in)
            edge_attr = edge_attr.repeat(1, self.towers, 1)
            h = torch.cat([x_i, x_j, edge_attr], dim=-1)
        else:
            h = torch.cat([x_i, x_j], dim=-1)

        hs = [nn(h[:, i]) for i, nn in enumerate(self.pre_nns)]
        return torch.stack(hs, dim=1)

    def aggregate(self, inputs: Tensor, index: Tensor,
                  dim_size: Optional[int] = None) -> Tensor:
        """Apply every aggregator, then rescale each result with every
        degree scaler."""
        outs = [aggr(inputs, index, dim_size) for aggr in self.aggregators]
        out = torch.cat(outs, dim=-1)

        deg = degree(index, dim_size, dtype=inputs.dtype).view(-1, 1, 1)
        outs = [scaler(out, deg, self.avg_deg) for scaler in self.scalers]
        return torch.cat(outs, dim=-1)

    def __repr__(self):
        # Bug fix: the previous implementation referenced the nonexistent
        # attribute ``self.dim`` (AttributeError on repr) and was followed by
        # an unreachable ``raise NotImplementedError`` after the return.
        return (f'{self.__class__.__name__}({self.in_channels}, '
                f'{self.out_channels}, towers={self.towers}, '
                f'edge_dim={self.edge_dim})')
class PNAConvSimple(MessagePassing):
r"""The Principal Neighbourhood Aggregation graph convolution operator
from the `"Principal Neighbourhood Aggregation for Graph Nets"
<https://arxiv.org/abs/2004.05718>`_ paper
.. math::
\bigoplus = \underbrace{\begin{bmatrix}I \\ S(D, \alpha=1) \\
S(D, \alpha=-1) \end{bmatrix} }_{\text{scalers}}
\otimes \underbrace{\begin{bmatrix} \mu \\ \sigma \\ \max \\ \min
\end{bmatrix}}_{\text{aggregators}},
in:
.. math::
X_i^{(t+1)} = U \left( \underset{(j,i) \in E}{\bigoplus}
M \left(X_j^{(t)} \right) \right)
where :math:`U` denote the MLP referred to with posttrans.
Args:
in_channels (int): Size of each input sample.
out_channels (int): Size of each output sample.
aggregators (list of str): Set of aggregation function identifiers,
namely :obj:`"sum"`, :obj:`"mean"`, :obj:`"min"`, :obj:`"max"`,
:obj:`"var"` and :obj:`"std"`.
scalers: (list of str): Set of scaling function identifiers, namely
:obj:`"identity"`, :obj:`"amplification"`,
:obj:`"attenuation"`, :obj:`"linear"` and
:obj:`"inverse_linear"`.
deg (Tensor): Histogram of in-degrees of nodes in the training set,
used by scalers to normalize.
post_layers (int, optional): Number of transformation layers after
aggregation (default: :obj:`1`).
**kwargs (optional): Additional arguments of
:class:`torch_geometric.nn.conv.MessagePassing`.
"""
def __init__(self, in_channels: int, out_channels: int,
aggregators: List[str], scalers: List[str], deg: Tensor,
post_layers: int = 1, **kwargs):
super(PNAConvSimple, self).__init__(aggr=None, node_dim=0, **kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
self.aggregators = [AGGREGATORS[aggr] for aggr in aggregators]
self.scalers = [SCALERS[scale] for scale in scalers]
self.F_in = in_channels
self.F_out = self.out_channels
deg = deg.to(torch.float)
self.avg_deg: Dict[str, float] = {
'lin': deg.mean().item(),
'log': (deg + 1).log().mean().item(),
'exp': deg.exp().mean().item(),
}
in_channels = (len(aggregators) * len(scalers)) * self.F_in
modules = [Linear(in_channels, self.F_out)]
for _ in range(post_layers - 1):
modules += [ReLU()]
modules += [Linear(self.F_out, self.F_out)]
self.post_nn = Sequential(*modules)
self.reset_parameters()
def reset_parameters(self):
reset(self.post_nn)
def forward(self, x: Tensor, edge_index: Adj, edge_attr: OptTensor = None) -> Tensor:
# propagate_type: (x: Tensor)
out = self.propagate(edge_index, x=x, size=None)
return self.post_nn(out)
def message(self, x_j: Tensor) -> Tensor:
return x_j
def aggregate(self, inputs: Tensor, index: Tensor,
dim_size: Optional[int] = None) -> Tensor:
outs = [aggr(inputs, index, dim_size) for aggr in self.aggregators]
out = torch.cat(outs, dim=-1)
deg = degree(index, dim_size, dtype=inputs.dtype).view(-1, 1)
outs = [scaler(out, deg, self.avg_deg) for scaler in self.scalers]
return torch.cat(outs, dim=-1)
def __repr__(self):
return (f'{self.__class__.__name__}({self.in_channels}, '
f'{self.out_channels}')
raise NotImplementedError | 0.964035 | 0.671329 |
from view.modes import NormalMode, EditMode
import curses
import logging
log = logging.getLogger("wfcli")
class LateralCursor:
def __init__(self):
self._index = float("-Inf")
self.allowed_offset = 0
def align_cursor(self, current_node):
if self._index != self.in_line(current_node):
self._index = self.in_line(current_node)
return True
return False
def in_line(self, node_pair):
current_node = node_pair[0]
linelength = max(
len(current_node.name) - 1 + self.allowed_offset,
0,
)
current = max(0, self._index)
res = min(linelength, current)
return res
def nav_left(self, current_node):
found_x = self.in_line(current_node)
if found_x > 0:
self._index = found_x - 1
else:
self._index = 0
def nav_right(self, current_node):
found_x = self.in_line(current_node)
log.info("Nav_right found_x: {}".format(found_x))
max_allowed = len(current_node[0].name) - 1 + self.allowed_offset
log.info("Nav_right max: {}".format(max_allowed))
if found_x < max_allowed:
self._index = found_x + 1
else:
self._index = max_allowed
def dollar_sign(self):
self._index = float("Inf")
def zero(self):
self._index = float("-Inf")
class View:
# SETUP METHODS
def __init__(self):
self.lc = LateralCursor()
self.indent_size = 2
self.inset = 1
self.downset = 1
self.mode_map = {
"normal": NormalMode(),
"edit": EditMode(),
}
self.change_mode("normal")
@staticmethod
def init_colors():
curses.init_pair(1, curses.COLOR_CYAN, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_CYAN)
curses.init_pair(3, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
def __enter__(self):
self.sc = curses.initscr()
curses.start_color()
self.init_colors()
curses.noecho()
curses.curs_set(False)
curses.cbreak()
self.sc.timeout(10)
self.open = True
self.keygen = self.get_keypress_wait()
return self
def __exit__(self, *args):
curses.echo()
curses.nocbreak()
curses.endwin()
def send_command(self):
while self.open:
yield self.mode.get_command(self.keygen)
# MODE METHODS
def change_mode(self, mode):
if mode in self.mode_map:
self.mode = self.mode_map[mode]
self.lc.allowed_offset = self.mode.eol_offset
else:
raise ValueError("There isn't a {} mode".format(mode))
# CURSOR METHODS
def align_cursor(self, current_node):
return self.lc.align_cursor(current_node)
def cursor_x(self, current_node):
return self.lc.in_line(current_node)
def nav_left(self, current_node):
self.lc.nav_left(current_node)
def nav_right(self, current_node):
self.lc.nav_right(current_node)
# KEYPRESS METHODS
def get_keypress_no_wait(self):
return self.sc.getch()
def get_keypress_wait(self):
while True:
keypress = self.sc.getch()
if keypress < 0:
continue
# 27 is a special case, because it could mean I pressed
# the escape key, or it could mean it's an escape code
if keypress == 27:
a = self.get_keypress_no_wait()
if a == -1:
yield 27
else:
b = self.get_keypress_no_wait()
if b == -1:
yield 27
yield a
else:
yield (27, a, b)
else:
yield keypress
# PRINTING METHODS
def generate_lines(self, text, text_width):
if text == "":
return [""]
res = []
lead_index = 0
while lead_index < len(text):
res.append(text[lead_index:lead_index + text_width])
lead_index += text_width
return res
def render_content(self, content, curs_y):
additional_lines = 0
rows, cols = self.sc.getmaxyx()
for height, node_tuple in enumerate(content):
node, depth = node_tuple
indent_width = self.indent_size * depth + 3
text_width = cols - indent_width - 2
lines = self.generate_lines(node.name, text_width)
attribute = self.mode.selection_attr if height == curs_y else curses.A_NORMAL
if height + self.downset + additional_lines + len(lines) >= rows - 1:
break # stop us from going past the end of the screen!
new_additional_lines = -1
# Actual text
for line in lines:
new_additional_lines += 1
# indent space
self.sc.addstr(height + self.downset + additional_lines + new_additional_lines,
self.inset,
indent_width * " ",
attribute)
# indicator
if new_additional_lines == 0:
self.sc.addstr(height + self.downset + additional_lines,
self.inset + indent_width - 2,
self.mode.indicators[node.state],
attribute)
# real content
self.sc.addstr(height + self.downset + additional_lines + new_additional_lines,
self.inset + indent_width,
line,
attribute)
self.sc.clrtoeol()
# Cursor block
if height == curs_y:
simple_position = self.cursor_x(node_tuple)
extra_downset = simple_position // text_width
extra_inset = simple_position % text_width
cursor_x = self.inset + indent_width + extra_inset
cursor_y = self.downset + height + additional_lines + extra_downset
self.sc.chgat(cursor_y, cursor_x, 1, self.mode.cursor_attr)
additional_lines += new_additional_lines
# CLEAR EVERYTHING BELOW
y_to_delete_from = height + additional_lines + 2
if y_to_delete_from < rows:
self.sc.move(y_to_delete_from, 0)
self.sc.clrtobot()
# MAKE COLORED BORDER
self.sc.attrset(self.mode.border_attr)
self.sc.border()
self.sc.attrset(0)
# DRAW TO SCREEN
self.sc.refresh() | view/view.py | from view.modes import NormalMode, EditMode
import curses
import logging
log = logging.getLogger("wfcli")
class LateralCursor:
def __init__(self):
self._index = float("-Inf")
self.allowed_offset = 0
def align_cursor(self, current_node):
if self._index != self.in_line(current_node):
self._index = self.in_line(current_node)
return True
return False
def in_line(self, node_pair):
current_node = node_pair[0]
linelength = max(
len(current_node.name) - 1 + self.allowed_offset,
0,
)
current = max(0, self._index)
res = min(linelength, current)
return res
def nav_left(self, current_node):
found_x = self.in_line(current_node)
if found_x > 0:
self._index = found_x - 1
else:
self._index = 0
def nav_right(self, current_node):
found_x = self.in_line(current_node)
log.info("Nav_right found_x: {}".format(found_x))
max_allowed = len(current_node[0].name) - 1 + self.allowed_offset
log.info("Nav_right max: {}".format(max_allowed))
if found_x < max_allowed:
self._index = found_x + 1
else:
self._index = max_allowed
def dollar_sign(self):
self._index = float("Inf")
def zero(self):
self._index = float("-Inf")
class View:
# SETUP METHODS
def __init__(self):
self.lc = LateralCursor()
self.indent_size = 2
self.inset = 1
self.downset = 1
self.mode_map = {
"normal": NormalMode(),
"edit": EditMode(),
}
self.change_mode("normal")
@staticmethod
def init_colors():
curses.init_pair(1, curses.COLOR_CYAN, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_CYAN)
curses.init_pair(3, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
def __enter__(self):
self.sc = curses.initscr()
curses.start_color()
self.init_colors()
curses.noecho()
curses.curs_set(False)
curses.cbreak()
self.sc.timeout(10)
self.open = True
self.keygen = self.get_keypress_wait()
return self
def __exit__(self, *args):
curses.echo()
curses.nocbreak()
curses.endwin()
def send_command(self):
while self.open:
yield self.mode.get_command(self.keygen)
# MODE METHODS
def change_mode(self, mode):
if mode in self.mode_map:
self.mode = self.mode_map[mode]
self.lc.allowed_offset = self.mode.eol_offset
else:
raise ValueError("There isn't a {} mode".format(mode))
# CURSOR METHODS
def align_cursor(self, current_node):
return self.lc.align_cursor(current_node)
def cursor_x(self, current_node):
return self.lc.in_line(current_node)
def nav_left(self, current_node):
self.lc.nav_left(current_node)
def nav_right(self, current_node):
self.lc.nav_right(current_node)
# KEYPRESS METHODS
def get_keypress_no_wait(self):
return self.sc.getch()
def get_keypress_wait(self):
while True:
keypress = self.sc.getch()
if keypress < 0:
continue
# 27 is a special case, because it could mean I pressed
# the escape key, or it could mean it's an escape code
if keypress == 27:
a = self.get_keypress_no_wait()
if a == -1:
yield 27
else:
b = self.get_keypress_no_wait()
if b == -1:
yield 27
yield a
else:
yield (27, a, b)
else:
yield keypress
# PRINTING METHODS
def generate_lines(self, text, text_width):
if text == "":
return [""]
res = []
lead_index = 0
while lead_index < len(text):
res.append(text[lead_index:lead_index + text_width])
lead_index += text_width
return res
def render_content(self, content, curs_y):
additional_lines = 0
rows, cols = self.sc.getmaxyx()
for height, node_tuple in enumerate(content):
node, depth = node_tuple
indent_width = self.indent_size * depth + 3
text_width = cols - indent_width - 2
lines = self.generate_lines(node.name, text_width)
attribute = self.mode.selection_attr if height == curs_y else curses.A_NORMAL
if height + self.downset + additional_lines + len(lines) >= rows - 1:
break # stop us from going past the end of the screen!
new_additional_lines = -1
# Actual text
for line in lines:
new_additional_lines += 1
# indent space
self.sc.addstr(height + self.downset + additional_lines + new_additional_lines,
self.inset,
indent_width * " ",
attribute)
# indicator
if new_additional_lines == 0:
self.sc.addstr(height + self.downset + additional_lines,
self.inset + indent_width - 2,
self.mode.indicators[node.state],
attribute)
# real content
self.sc.addstr(height + self.downset + additional_lines + new_additional_lines,
self.inset + indent_width,
line,
attribute)
self.sc.clrtoeol()
# Cursor block
if height == curs_y:
simple_position = self.cursor_x(node_tuple)
extra_downset = simple_position // text_width
extra_inset = simple_position % text_width
cursor_x = self.inset + indent_width + extra_inset
cursor_y = self.downset + height + additional_lines + extra_downset
self.sc.chgat(cursor_y, cursor_x, 1, self.mode.cursor_attr)
additional_lines += new_additional_lines
# CLEAR EVERYTHING BELOW
y_to_delete_from = height + additional_lines + 2
if y_to_delete_from < rows:
self.sc.move(y_to_delete_from, 0)
self.sc.clrtobot()
# MAKE COLORED BORDER
self.sc.attrset(self.mode.border_attr)
self.sc.border()
self.sc.attrset(0)
# DRAW TO SCREEN
self.sc.refresh() | 0.646349 | 0.157105 |
import numpy as np
import pytest
import aesara
import aesara.sandbox.rng_mrg
from aesara import gpuarray
from aesara import tensor as aet
from aesara.gpuarray.basic_ops import GpuFromHost, HostFromGpu
from aesara.gpuarray.elemwise import GpuElemwise
from aesara.scan.basic import scan
from aesara.scan.checkpoints import scan_checkpoints
from aesara.scan.op import Scan
from aesara.tensor.math import dot
from aesara.tensor.math import sum as aet_sum
from aesara.tensor.type import fscalar, ftensor3, fvector, iscalar, vector
from tests import unittest_tools as utt
from tests.gpuarray.config import mode_with_gpu, test_ctx_name
# Skip this whole module when the pygpu backend is not importable.
# BUG FIX: the module name was misspelled "pygpy.gpuarray", which would make
# importorskip always skip (or fail) regardless of whether pygpu is installed.
pygpu_gpuarray = pytest.importorskip("pygpu.gpuarray")
GpuArrayException = pygpu_gpuarray.GpuArrayException
# These tests inspect the optimized graph, so FAST_COMPILE (which skips
# optimizations) is replaced by FAST_RUN; otherwise the configured default
# mode is used.
if aesara.config.mode == "FAST_COMPILE":
    mode_with_opt = aesara.compile.mode.get_mode("FAST_RUN")
else:
    mode_with_opt = aesara.compile.mode.get_default_mode()
# mode_nodebug: same idea, but also replacing DebugMode with FAST_RUN for
# the tests that cannot run under DebugMode.
if aesara.config.mode in ("DEBUG_MODE", "DebugMode"):
    mode_nodebug = aesara.compile.mode.get_mode("FAST_RUN")
else:
    mode_nodebug = mode_with_opt
class TestScan:
    """Scan-on-GPU tests written directly against the gpuarray ops.

    These mirror the backend-agnostic tests in ``ScanGpuTests`` below,
    but reference ``GpuFromHost``/``HostFromGpu``/``GpuElemwise`` from
    the gpuarray backend directly instead of going through a
    ``self.gpu_backend`` attribute.
    """

    def test_one_sequence_one_output_weights_gpu1(self):
        """Recurrence ``x_t = u_t*W_in + x_{t-1}*W`` with an explicit GPU
        transfer on the scan output (exercises the first optimizer case)."""

        def f_rnn(u_t, x_tm1, W_in, W):
            return u_t * W_in + x_tm1 * W

        u = fvector("u")
        x0 = fscalar("x0")
        W_in = fscalar("win")
        W = fscalar("w")
        # Excluding InputToGpuOptimizer forces the first case of the
        # scan-to-GPU optimization to be used.
        mode = mode_with_gpu.excluding("InputToGpuOptimizer")
        output, updates = scan(
            f_rnn,
            u,
            x0,
            [W_in, W],
            n_steps=None,
            truncate_gradient=-1,
            go_backwards=False,
            mode=mode,
        )
        output = GpuFromHost(test_ctx_name)(output)
        f2 = aesara.function(
            [u, x0, W_in, W],
            output,
            updates=updates,
            allow_input_downcast=True,
            mode=mode,
        )
        rng = np.random.default_rng(utt.fetch_seed())
        v_u = rng.uniform(size=(4,), low=-5.0, high=5.0)
        v_x0 = rng.uniform()
        W = rng.uniform()
        W_in = rng.uniform()
        v_u = np.asarray(v_u, dtype="float32")
        v_x0 = np.asarray(v_x0, dtype="float32")
        W = np.asarray(W, dtype="float32")
        W_in = np.asarray(W_in, dtype="float32")
        # compute the output in numpy
        v_out = np.zeros((4,))
        v_out[0] = v_u[0] * W_in + v_x0 * W
        for step in range(1, 4):
            v_out[step] = v_u[step] * W_in + v_out[step - 1] * W
        aesara_values = f2(v_u, v_x0, W_in, W)
        utt.assert_allclose(aesara_values, v_out)
        # TO DEL
        topo = f2.maker.fgraph.toposort()
        # BUG FIX: ``scan`` is the function imported from
        # ``aesara.scan.basic`` and has no ``op`` attribute; the op class
        # to test against is ``Scan`` from ``aesara.scan.op`` (as the
        # sibling ``ScanGpuTests`` class already does).
        scan_node = [node for node in topo if isinstance(node.op, Scan)]
        assert len(scan_node) == 1
        scan_node = scan_node[0]
        topo = f2.maker.fgraph.toposort()
        assert sum([isinstance(node.op, HostFromGpu) for node in topo]) == 0
        assert sum([isinstance(node.op, GpuFromHost) for node in topo]) == 4
        scan_node = [node for node in topo if isinstance(node.op, Scan)]
        assert len(scan_node) == 1
        scan_node = scan_node[0]
        scan_node_topo = scan_node.op.fn.maker.fgraph.toposort()
        # check that there is no gpu transfer in the inner loop.
        assert any([isinstance(node.op, GpuElemwise) for node in scan_node_topo])
        assert not any([isinstance(node.op, HostFromGpu) for node in scan_node_topo])
        assert not any([isinstance(node.op, GpuFromHost) for node in scan_node_topo])

    # This second version test the second case in the optimizer to the gpu.
    def test_one_sequence_one_output_weights_gpu2(self):
        def f_rnn(u_t, x_tm1, W_in, W):
            return u_t * W_in + x_tm1 * W

        u = fvector("u")
        x0 = fscalar("x0")
        W_in = fscalar("win")
        W = fscalar("w")
        output, updates = scan(
            f_rnn,
            u,
            x0,
            [W_in, W],
            n_steps=None,
            truncate_gradient=-1,
            go_backwards=False,
            mode=mode_with_gpu,
        )
        f2 = aesara.function(
            [u, x0, W_in, W],
            output,
            updates=updates,
            allow_input_downcast=True,
            mode=mode_with_gpu,
        )
        # get random initial values
        rng = np.random.default_rng(utt.fetch_seed())
        v_u = rng.uniform(size=(4,), low=-5.0, high=5.0)
        v_x0 = rng.uniform()
        W = rng.uniform()
        W_in = rng.uniform()
        # compute the output in numpy
        v_out = np.zeros((4,))
        v_out[0] = v_u[0] * W_in + v_x0 * W
        for step in range(1, 4):
            v_out[step] = v_u[step] * W_in + v_out[step - 1] * W
        aesara_values = f2(v_u, v_x0, W_in, W)
        utt.assert_allclose(aesara_values, v_out)
        topo = f2.maker.fgraph.toposort()
        assert sum([isinstance(node.op, HostFromGpu) for node in topo]) == 1
        assert sum([isinstance(node.op, GpuFromHost) for node in topo]) == 4
        # BUG FIX: use the ``Scan`` op class, not ``scan.op.Scan``.
        scan_node = [node for node in topo if isinstance(node.op, Scan)]
        assert len(scan_node) == 1
        scan_node = scan_node[0]
        scan_node_topo = scan_node.op.fn.maker.fgraph.toposort()
        # check that there is no gpu transfer in the inner loop.
        assert any([isinstance(node.op, GpuElemwise) for node in scan_node_topo])
        assert not any([isinstance(node.op, HostFromGpu) for node in scan_node_topo])
        assert not any([isinstance(node.op, GpuFromHost) for node in scan_node_topo])

    # This third test checks that scan can deal with a mixture of dtypes as
    # outputs when is running on GPU
    def test_gpu3_mixture_dtype_outputs(self):
        def f_rnn(u_t, x_tm1, W_in, W):
            return (u_t * W_in + x_tm1 * W, aet.cast(u_t + x_tm1, "int64"))

        u = fvector("u")
        x0 = fscalar("x0")
        W_in = fscalar("win")
        W = fscalar("w")
        output, updates = scan(
            f_rnn,
            u,
            [x0, None],
            [W_in, W],
            n_steps=None,
            truncate_gradient=-1,
            go_backwards=False,
            mode=mode_with_gpu,
        )
        f2 = aesara.function(
            [u, x0, W_in, W],
            output,
            updates=updates,
            allow_input_downcast=True,
            mode=mode_with_gpu,
        )
        # get random initial values
        rng = np.random.default_rng(utt.fetch_seed())
        v_u = rng.uniform(size=(4,), low=-5.0, high=5.0)
        v_x0 = rng.uniform()
        W = rng.uniform()
        W_in = rng.uniform()
        # compute the output in numpy
        v_out1 = np.zeros((4,))
        v_out2 = np.zeros((4,), dtype="int64")
        v_out1[0] = v_u[0] * W_in + v_x0 * W
        v_out2[0] = v_u[0] + v_x0
        for step in range(1, 4):
            v_out1[step] = v_u[step] * W_in + v_out1[step - 1] * W
            v_out2[step] = np.int64(v_u[step] + v_out1[step - 1])
        aesara_out1, aesara_out2 = f2(v_u, v_x0, W_in, W)
        utt.assert_allclose(aesara_out1, v_out1)
        utt.assert_allclose(aesara_out2, v_out2)
        topo = f2.maker.fgraph.toposort()
        # BUG FIX: use the ``Scan`` op class, not ``scan.op.Scan``.
        scan_node = [node for node in topo if isinstance(node.op, Scan)]
        assert len(scan_node) == 1
        scan_node = scan_node[0]
        # NOTE(review): ``op.gpua`` is assumed to flag a gpuarray scan op,
        # mirroring ``node.op.info.get("gpua", False)`` used by
        # ``TestScanGpuarray.is_scan_on_gpu`` below — confirm.
        assert scan_node.op.gpua
        scan_node_topo = scan_node.op.fn.maker.fgraph.toposort()
        # check that there is no gpu transfer in the inner loop.
        assert not any([isinstance(node.op, HostFromGpu) for node in scan_node_topo])
        assert not any([isinstance(node.op, GpuFromHost) for node in scan_node_topo])

    def test_gpu4_gibbs_chain(self):
        """10-step Gibbs-style sampling chain on shared binary data;
        mostly a does-the-graph-compile test."""
        rng = np.random.default_rng(utt.fetch_seed())
        v_vsample = np.array(
            rng.binomial(
                1,
                0.5,
                size=(3, 20),
            ),
            dtype="float32",
        )
        vsample = aesara.shared(v_vsample)
        trng = aesara.sandbox.rng_mrg.MRG_RandomStream(utt.fetch_seed())

        def f(vsample_tm1):
            return (
                trng.binomial(vsample_tm1.shape, n=1, p=0.3, dtype="float32")
                * vsample_tm1
            )

        aesara_vsamples, updates = scan(
            f,
            [],
            vsample,
            [],
            n_steps=10,
            truncate_gradient=-1,
            go_backwards=False,
            mode=mode_with_gpu,
        )
        my_f = aesara.function(
            [],
            aesara_vsamples[-1],
            updates=updates,
            allow_input_downcast=True,
            mode=mode_with_gpu,
        )
        # I leave this to tested by debugmode, this test was anyway
        # more of does the graph compile kind of test
        my_f()
class ScanGpuTests:
    """
    This class defines a number of tests for Scan on GPU as well as a few
    helper functions for these tests. The GPU tests defined in this class are
    independent of the GPU backend used. Because of this, a class inheriting
    from ScanGpuTests should define the following attributes and methods to
    make the tests run on a specific backend :
    - self.gpu_backend : Reference to the backend module
    - self.mode_with_opt : Compilation mode to force usage of the gpu backend
    - self.is_scan_on_gpu(node) : Method to determine is a scan node has been
                                  moved to run on a gpu under the specific
                                  backend. Returns a boolean.
    """

    def test_one_sequence_one_output_weights_gpu1(self):
        """Recurrence ``x_t = u_t*W_in + x_{t-1}*W`` with an explicit GPU
        transfer requested on the scan output; checks the compiled outer
        and inner graphs for the expected transfer ops."""

        def f_rnn(u_t, x_tm1, W_in, W):
            return u_t * W_in + x_tm1 * W

        u = fvector("u")
        x0 = fscalar("x0")
        W_in = fscalar("win")
        W = fscalar("w")
        # The following line is needed to have the first case being used
        # Otherwise, it is the second that is tested.
        mode = self.mode_with_gpu.excluding("InputToGpuOptimizer")
        output, updates = scan(
            f_rnn,
            u,
            x0,
            [W_in, W],
            n_steps=None,
            truncate_gradient=-1,
            go_backwards=False,
            mode=mode,
        )
        output = self.gpu_backend.gpu_from_host(output)
        f2 = aesara.function(
            [u, x0, W_in, W],
            output,
            updates=updates,
            allow_input_downcast=True,
            mode=self.mode_with_gpu,
        )
        # get random initial values
        rng = np.random.default_rng(utt.fetch_seed())
        v_u = rng.uniform(size=(4,), low=-5.0, high=5.0)
        v_x0 = rng.uniform()
        W = rng.uniform()
        W_in = rng.uniform()
        v_u = np.asarray(v_u, dtype="float32")
        v_x0 = np.asarray(v_x0, dtype="float32")
        W = np.asarray(W, dtype="float32")
        W_in = np.asarray(W_in, dtype="float32")
        # compute the output in numpy
        v_out = np.zeros((4,))
        v_out[0] = v_u[0] * W_in + v_x0 * W
        for step in range(1, 4):
            v_out[step] = v_u[step] * W_in + v_out[step - 1] * W
        aesara_values = f2(v_u, v_x0, W_in, W)
        utt.assert_allclose(aesara_values, v_out)
        # TO DEL
        topo = f2.maker.fgraph.toposort()
        scan_node = [node for node in topo if isinstance(node.op, Scan)]
        assert len(scan_node) == 1
        scan_node = scan_node[0]
        topo = f2.maker.fgraph.toposort()
        # Outer graph: no host transfers, exactly 4 host->gpu transfers
        # (one per input).
        assert (
            sum([isinstance(node.op, self.gpu_backend.HostFromGpu) for node in topo])
            == 0
        )
        assert (
            sum([isinstance(node.op, self.gpu_backend.GpuFromHost) for node in topo])
            == 4
        )
        scan_node = [node for node in topo if isinstance(node.op, Scan)]
        assert len(scan_node) == 1
        scan_node = scan_node[0]
        scan_node_topo = scan_node.op.fn.maker.fgraph.toposort()
        # check that there is no gpu transfer in the inner loop.
        assert any(
            [
                isinstance(node.op, self.gpu_backend.GpuElemwise)
                for node in scan_node_topo
            ]
        )
        assert not any(
            [
                isinstance(node.op, self.gpu_backend.HostFromGpu)
                for node in scan_node_topo
            ]
        )
        assert not any(
            [
                isinstance(node.op, self.gpu_backend.GpuFromHost)
                for node in scan_node_topo
            ]
        )

    # This second version test the second case in the optimizer to the gpu.
    def test_one_sequence_one_output_weights_gpu2(self):
        """Same recurrence as gpu1, but letting the optimizer move the
        scan to GPU on its own (no explicit transfer on the output)."""

        def f_rnn(u_t, x_tm1, W_in, W):
            return u_t * W_in + x_tm1 * W

        u = fvector("u")
        x0 = fscalar("x0")
        W_in = fscalar("win")
        W = fscalar("w")
        output, updates = scan(
            f_rnn,
            u,
            x0,
            [W_in, W],
            n_steps=None,
            truncate_gradient=-1,
            go_backwards=False,
            mode=self.mode_with_gpu,
        )
        f2 = aesara.function(
            [u, x0, W_in, W],
            output,
            updates=updates,
            allow_input_downcast=True,
            mode=self.mode_with_gpu,
        )
        # get random initial values
        rng = np.random.default_rng(utt.fetch_seed())
        v_u = rng.uniform(size=(4,), low=-5.0, high=5.0)
        v_x0 = rng.uniform()
        W = rng.uniform()
        W_in = rng.uniform()
        # compute the output in numpy
        v_out = np.zeros((4,))
        v_out[0] = v_u[0] * W_in + v_x0 * W
        for step in range(1, 4):
            v_out[step] = v_u[step] * W_in + v_out[step - 1] * W
        aesara_values = f2(v_u, v_x0, W_in, W)
        utt.assert_allclose(aesara_values, v_out)
        topo = f2.maker.fgraph.toposort()
        # Outer graph: one gpu->host transfer for the returned output, and
        # 4 host->gpu transfers (one per input).
        assert (
            sum([isinstance(node.op, self.gpu_backend.HostFromGpu) for node in topo])
            == 1
        )
        assert (
            sum([isinstance(node.op, self.gpu_backend.GpuFromHost) for node in topo])
            == 4
        )
        scan_node = [node for node in topo if isinstance(node.op, Scan)]
        assert len(scan_node) == 1
        scan_node = scan_node[0]
        scan_node_topo = scan_node.op.fn.maker.fgraph.toposort()
        # check that there is no gpu transfer in the inner loop.
        assert any(
            [
                isinstance(node.op, self.gpu_backend.GpuElemwise)
                for node in scan_node_topo
            ]
        )
        assert not any(
            [
                isinstance(node.op, self.gpu_backend.HostFromGpu)
                for node in scan_node_topo
            ]
        )
        assert not any(
            [
                isinstance(node.op, self.gpu_backend.GpuFromHost)
                for node in scan_node_topo
            ]
        )

    # This third test checks that scan can deal with a mixture of dtypes as
    # outputs when is running on GPU
    def test_gpu3_mixture_dtype_outputs(self):
        def f_rnn(u_t, x_tm1, W_in, W):
            # Two outputs per step: a float32 recurrence and an int64 cast.
            return (u_t * W_in + x_tm1 * W, aet.cast(u_t + x_tm1, "int64"))

        u = fvector("u")
        x0 = fscalar("x0")
        W_in = fscalar("win")
        W = fscalar("w")
        output, updates = scan(
            f_rnn,
            u,
            [x0, None],
            [W_in, W],
            n_steps=None,
            truncate_gradient=-1,
            go_backwards=False,
            mode=self.mode_with_gpu,
        )
        f2 = aesara.function(
            [u, x0, W_in, W],
            output,
            updates=updates,
            allow_input_downcast=True,
            mode=self.mode_with_gpu,
        )
        # get random initial values
        rng = np.random.default_rng(utt.fetch_seed())
        v_u = rng.uniform(size=(4,), low=-5.0, high=5.0)
        v_x0 = rng.uniform()
        W = rng.uniform()
        W_in = rng.uniform()
        # compute the output in numpy
        v_out1 = np.zeros((4,))
        v_out2 = np.zeros((4,), dtype="int64")
        v_out1[0] = v_u[0] * W_in + v_x0 * W
        v_out2[0] = v_u[0] + v_x0
        for step in range(1, 4):
            v_out1[step] = v_u[step] * W_in + v_out1[step - 1] * W
            v_out2[step] = np.int64(v_u[step] + v_out1[step - 1])
        aesara_out1, aesara_out2 = f2(v_u, v_x0, W_in, W)
        utt.assert_allclose(aesara_out1, v_out1)
        utt.assert_allclose(aesara_out2, v_out2)
        topo = f2.maker.fgraph.toposort()
        scan_node = [node for node in topo if isinstance(node.op, Scan)]
        assert len(scan_node) == 1
        scan_node = scan_node[0]
        # Backend-specific hook: confirms the scan was moved to the GPU.
        assert self.is_scan_on_gpu(scan_node)

    def test_gibbs_chain(self):
        """10-step Gibbs-style sampling chain over shared binary data;
        mostly a does-the-graph-compile test (see comment at the end)."""
        rng = np.random.default_rng(utt.fetch_seed())
        v_vsample = np.array(
            rng.binomial(
                1,
                0.5,
                size=(3, 20),
            ),
            dtype="float32",
        )
        vsample = aesara.shared(v_vsample)
        trng = aesara.sandbox.rng_mrg.MRG_RandomStream(utt.fetch_seed())

        def f(vsample_tm1):
            return (
                trng.binomial(vsample_tm1.shape, n=1, p=0.3, dtype="float32")
                * vsample_tm1
            )

        aesara_vsamples, updates = scan(
            f,
            [],
            vsample,
            [],
            n_steps=10,
            truncate_gradient=-1,
            go_backwards=False,
            mode=self.mode_with_gpu,
        )
        my_f = aesara.function(
            [],
            aesara_vsamples[-1],
            updates=updates,
            allow_input_downcast=True,
            mode=self.mode_with_gpu,
        )
        # I leave this to tested by debugmode, this test was anyway more of
        # doest the graph compile kind of test
        my_f()

    def test_gpu_memory_usage(self):
        # This test validates that the memory usage of the defined aesara
        # function is reasonable when executed on the GPU. It checks for
        # a bug in which one of scan's optimization was not applied which
        # made the scan node compute large and unnecessary outputs which
        # brought memory usage on the GPU to ~12G.
        # Dimensionality of input and output data (not one-hot coded)
        n_in = 100
        n_out = 100
        # Number of neurons in hidden layer
        n_hid = 4000
        # Number of minibatches
        mb_size = 2
        # Time steps in minibatch
        mb_length = 200
        # Define input variables
        xin = ftensor3(name="xin")
        yout = ftensor3(name="yout")
        # Initialize the network parameters
        U = aesara.shared(np.zeros((n_in, n_hid), dtype="float32"), name="W_xin_to_l1")
        V = aesara.shared(np.zeros((n_hid, n_hid), dtype="float32"), name="W_l1_to_l1")
        W = aesara.shared(np.zeros((n_hid, n_out), dtype="float32"), name="W_l1_to_l2")
        nparams = [U, V, W]
        # Build the forward pass
        l1_base = dot(xin, U)

        def scan_l(baseline, last_step):
            return baseline + dot(last_step, V)

        zero_output = aet.alloc(np.asarray(0.0, dtype="float32"), mb_size, n_hid)
        l1_out, _ = scan(
            scan_l,
            sequences=[l1_base],
            outputs_info=[zero_output],
            mode=self.mode_with_gpu_nodebug,
        )
        l2_out = dot(l1_out, W)
        # Compute the cost and take the gradient wrt params
        cost = aet_sum((l2_out - yout) ** 2)
        grads = aesara.grad(cost, nparams)
        updates = list(zip(nparams, (n - g for n, g in zip(nparams, grads))))
        # Compile the aesara function
        feval_backprop = aesara.function(
            [xin, yout], cost, updates=updates, mode=self.mode_with_gpu_nodebug
        )
        # Validate that the PushOutScanOutput optimization has been applied
        # by checking the number of outputs of the grad Scan node in the
        # compiled function.
        nodes = feval_backprop.maker.fgraph.toposort()
        scan_nodes = [n for n in nodes if isinstance(n.op, Scan)]
        # The grad scan is always the 2nd one according to toposort. If the
        # optimization has been applied, it has 2 outputs, otherwise 3.
        grad_scan_node = scan_nodes[1]
        assert len(grad_scan_node.outputs) == 2, len(grad_scan_node.outputs)
        # Call the aesara function to ensure the absence of a memory error
        feval_backprop(
            np.zeros((mb_length, mb_size, n_in), dtype="float32"),
            np.zeros((mb_length, mb_size, n_out), dtype="float32"),
        )

    def test_memory_reuse_gpudimshuffle(self):
        # Test the memory pre-allocation feature in scan when one output is
        # the result of a GpuDimshuffle (because an optimization in
        # GpuDimshuffle can cause issues with the memory pre-allocation
        # where it falsely thinks that a pre-allocated memory region has
        # been used when it hasn't).
        def inner_fn(seq1, recurrent_out):
            temp = seq1 + recurrent_out.sum()
            output1 = temp.dimshuffle(1, 0)
            output2 = temp.sum() + recurrent_out
            return output1, output2

        input1 = ftensor3()
        init = ftensor3()
        outputs_info = [None, init]
        out, _ = scan(
            inner_fn,
            sequences=[input1],
            outputs_info=outputs_info,
            mode=self.mode_with_gpu,
        )
        out1 = out[0].flatten()
        out2 = out[1].flatten()
        fct = aesara.function([input1, init], [out1, out2], mode=self.mode_with_gpu)
        output = fct(
            np.ones((2, 1, 1), dtype="float32"), np.ones((1, 1, 1), dtype="float32")
        )
        # Hand-computed expected values for the 2-step scan above.
        expected_output = (
            np.array([2, 4], dtype="float32"),
            np.array([3, 7], dtype="float32"),
        )
        utt.assert_allclose(output, expected_output)
class TestScanGpuarray(ScanGpuTests):
    """Run the backend-agnostic scan GPU tests defined in ``ScanGpuTests``
    using the gpuarray backend."""

    def setup_method(self):
        backend = gpuarray
        # The backend module does not expose a ``gpu_from_host`` helper of
        # its own, so patch one in. This is unfortunate, but required.
        backend.gpu_from_host = lambda v: gpuarray.GpuFromHost(None)(v)
        self.gpu_backend = backend
        self.mode_with_gpu = mode_with_opt.including("gpuarray", "scan")
        self.mode_with_gpu_nodebug = mode_nodebug.including("gpuarray", "scan")
        # Skip the test if pygpu is not available
        if not backend.pygpu_activated:
            pytest.skip("Optional package pygpu disabled")

    def is_scan_on_gpu(self, node):
        # A scan op moved to the gpuarray backend carries a "gpua" flag in
        # its info dict.
        return node.op.info.get("gpua", False)
class TestScanCheckpoint:
    """Compares a plain ``scan`` against ``scan_checkpoints`` (which saves
    only every N-th intermediate state, trading recomputation for memory)
    on the same elementwise-product recurrence."""

    def setup_method(self):
        self.k = iscalar("k")
        self.A = vector("A")
        # Plain scan: keeps every intermediate result for the gradient.
        result, _ = scan(
            fn=lambda prior_result, A: prior_result * A,
            outputs_info=aet.ones_like(self.A),
            non_sequences=self.A,
            n_steps=self.k,
        )
        # Checkpointed scan: only stores every 100th state.
        result_check, _ = scan_checkpoints(
            fn=lambda prior_result, A: prior_result * A,
            outputs_info=aet.ones_like(self.A),
            non_sequences=self.A,
            n_steps=self.k,
            save_every_N=100,
        )
        self.result = result[-1]
        self.result_check = result_check[-1]
        # Gradients are what consume the saved intermediate states.
        self.grad_A = aesara.grad(self.result.sum(), self.A)
        self.grad_A_check = aesara.grad(self.result_check.sum(), self.A)

    def test_memory(self):
        """The checkpointed scan's gradient should fit in GPU memory where
        the plain scan's gradient raises ``GpuArrayException``."""
        from tests.gpuarray.config import mode_with_gpu  # noqa

        f = aesara.function(
            inputs=[self.A, self.k], outputs=self.grad_A, mode=mode_with_gpu
        )
        f_check = aesara.function(
            inputs=[self.A, self.k], outputs=self.grad_A_check, mode=mode_with_gpu
        )
        free_gmem = aesara.gpuarray.type._context_reg[None].free_gmem
        # Size the input relative to free GPU memory so the naive scan's
        # saved states exceed it while the checkpointed one stays within.
        data = np.ones(free_gmem // 3000, dtype=np.float32)
        # Check that it works with the checkpoints
        size = 1000
        if isinstance(mode_with_gpu, aesara.compile.debugmode.DebugMode):
            size = 100
        f_check(data, size)
        # Check that the basic scan fails in that case
        # Skip that check in DebugMode, as it can fail in different ways
        if not isinstance(mode_with_gpu, aesara.compile.debugmode.DebugMode):
            with pytest.raises(GpuArrayException):
f(data, 1000) | tests/gpuarray/test_scan.py | import numpy as np
import pytest
import aesara
import aesara.sandbox.rng_mrg
from aesara import gpuarray
from aesara import tensor as aet
from aesara.gpuarray.basic_ops import GpuFromHost, HostFromGpu
from aesara.gpuarray.elemwise import GpuElemwise
from aesara.scan.basic import scan
from aesara.scan.checkpoints import scan_checkpoints
from aesara.scan.op import Scan
from aesara.tensor.math import dot
from aesara.tensor.math import sum as aet_sum
from aesara.tensor.type import fscalar, ftensor3, fvector, iscalar, vector
from tests import unittest_tools as utt
from tests.gpuarray.config import mode_with_gpu, test_ctx_name
pygpu_gpuarray = pytest.importorskip("pygpy.gpuarray")
GpuArrayException = pygpu_gpuarray.GpuArrayException
if aesara.config.mode == "FAST_COMPILE":
mode_with_opt = aesara.compile.mode.get_mode("FAST_RUN")
else:
mode_with_opt = aesara.compile.mode.get_default_mode()
if aesara.config.mode in ("DEBUG_MODE", "DebugMode"):
mode_nodebug = aesara.compile.mode.get_mode("FAST_RUN")
else:
mode_nodebug = mode_with_opt
class TestScan:
def test_one_sequence_one_output_weights_gpu1(self):
def f_rnn(u_t, x_tm1, W_in, W):
return u_t * W_in + x_tm1 * W
u = fvector("u")
x0 = fscalar("x0")
W_in = fscalar("win")
W = fscalar("w")
mode = mode_with_gpu.excluding("InputToGpuOptimizer")
output, updates = scan(
f_rnn,
u,
x0,
[W_in, W],
n_steps=None,
truncate_gradient=-1,
go_backwards=False,
mode=mode,
)
output = GpuFromHost(test_ctx_name)(output)
f2 = aesara.function(
[u, x0, W_in, W],
output,
updates=updates,
allow_input_downcast=True,
mode=mode,
)
rng = np.random.default_rng(utt.fetch_seed())
v_u = rng.uniform(size=(4,), low=-5.0, high=5.0)
v_x0 = rng.uniform()
W = rng.uniform()
W_in = rng.uniform()
v_u = np.asarray(v_u, dtype="float32")
v_x0 = np.asarray(v_x0, dtype="float32")
W = np.asarray(W, dtype="float32")
W_in = np.asarray(W_in, dtype="float32")
# compute the output in numpy
v_out = np.zeros((4,))
v_out[0] = v_u[0] * W_in + v_x0 * W
for step in range(1, 4):
v_out[step] = v_u[step] * W_in + v_out[step - 1] * W
aesara_values = f2(v_u, v_x0, W_in, W)
utt.assert_allclose(aesara_values, v_out)
# TO DEL
topo = f2.maker.fgraph.toposort()
scan_node = [node for node in topo if isinstance(node.op, scan.op.Scan)]
assert len(scan_node) == 1
scan_node = scan_node[0]
topo = f2.maker.fgraph.toposort()
assert sum([isinstance(node.op, HostFromGpu) for node in topo]) == 0
assert sum([isinstance(node.op, GpuFromHost) for node in topo]) == 4
scan_node = [node for node in topo if isinstance(node.op, scan.op.Scan)]
assert len(scan_node) == 1
scan_node = scan_node[0]
scan_node_topo = scan_node.op.fn.maker.fgraph.toposort()
# check that there is no gpu transfer in the inner loop.
assert any([isinstance(node.op, GpuElemwise) for node in scan_node_topo])
assert not any([isinstance(node.op, HostFromGpu) for node in scan_node_topo])
assert not any([isinstance(node.op, GpuFromHost) for node in scan_node_topo])
# This second version test the second case in the optimizer to the gpu.
def test_one_sequence_one_output_weights_gpu2(self):
def f_rnn(u_t, x_tm1, W_in, W):
return u_t * W_in + x_tm1 * W
u = fvector("u")
x0 = fscalar("x0")
W_in = fscalar("win")
W = fscalar("w")
output, updates = scan(
f_rnn,
u,
x0,
[W_in, W],
n_steps=None,
truncate_gradient=-1,
go_backwards=False,
mode=mode_with_gpu,
)
f2 = aesara.function(
[u, x0, W_in, W],
output,
updates=updates,
allow_input_downcast=True,
mode=mode_with_gpu,
)
# get random initial values
rng = np.random.default_rng(utt.fetch_seed())
v_u = rng.uniform(size=(4,), low=-5.0, high=5.0)
v_x0 = rng.uniform()
W = rng.uniform()
W_in = rng.uniform()
# compute the output in numpy
v_out = np.zeros((4,))
v_out[0] = v_u[0] * W_in + v_x0 * W
for step in range(1, 4):
v_out[step] = v_u[step] * W_in + v_out[step - 1] * W
aesara_values = f2(v_u, v_x0, W_in, W)
utt.assert_allclose(aesara_values, v_out)
topo = f2.maker.fgraph.toposort()
assert sum([isinstance(node.op, HostFromGpu) for node in topo]) == 1
assert sum([isinstance(node.op, GpuFromHost) for node in topo]) == 4
scan_node = [node for node in topo if isinstance(node.op, scan.op.Scan)]
assert len(scan_node) == 1
scan_node = scan_node[0]
scan_node_topo = scan_node.op.fn.maker.fgraph.toposort()
# check that there is no gpu transfer in the inner loop.
assert any([isinstance(node.op, GpuElemwise) for node in scan_node_topo])
assert not any([isinstance(node.op, HostFromGpu) for node in scan_node_topo])
assert not any([isinstance(node.op, GpuFromHost) for node in scan_node_topo])
# This third test checks that scan can deal with a mixture of dtypes as
# outputs when is running on GPU
def test_gpu3_mixture_dtype_outputs(self):
def f_rnn(u_t, x_tm1, W_in, W):
return (u_t * W_in + x_tm1 * W, aet.cast(u_t + x_tm1, "int64"))
u = fvector("u")
x0 = fscalar("x0")
W_in = fscalar("win")
W = fscalar("w")
output, updates = scan(
f_rnn,
u,
[x0, None],
[W_in, W],
n_steps=None,
truncate_gradient=-1,
go_backwards=False,
mode=mode_with_gpu,
)
f2 = aesara.function(
[u, x0, W_in, W],
output,
updates=updates,
allow_input_downcast=True,
mode=mode_with_gpu,
)
# get random initial values
rng = np.random.default_rng(utt.fetch_seed())
v_u = rng.uniform(size=(4,), low=-5.0, high=5.0)
v_x0 = rng.uniform()
W = rng.uniform()
W_in = rng.uniform()
# compute the output in numpy
v_out1 = np.zeros((4,))
v_out2 = np.zeros((4,), dtype="int64")
v_out1[0] = v_u[0] * W_in + v_x0 * W
v_out2[0] = v_u[0] + v_x0
for step in range(1, 4):
v_out1[step] = v_u[step] * W_in + v_out1[step - 1] * W
v_out2[step] = np.int64(v_u[step] + v_out1[step - 1])
aesara_out1, aesara_out2 = f2(v_u, v_x0, W_in, W)
utt.assert_allclose(aesara_out1, v_out1)
utt.assert_allclose(aesara_out2, v_out2)
topo = f2.maker.fgraph.toposort()
scan_node = [node for node in topo if isinstance(node.op, scan.op.Scan)]
assert len(scan_node) == 1
scan_node = scan_node[0]
assert scan_node.op.gpua
scan_node_topo = scan_node.op.fn.maker.fgraph.toposort()
# check that there is no gpu transfer in the inner loop.
assert not any([isinstance(node.op, HostFromGpu) for node in scan_node_topo])
assert not any([isinstance(node.op, GpuFromHost) for node in scan_node_topo])
def test_gpu4_gibbs_chain(self):
rng = np.random.default_rng(utt.fetch_seed())
v_vsample = np.array(
rng.binomial(
1,
0.5,
size=(3, 20),
),
dtype="float32",
)
vsample = aesara.shared(v_vsample)
trng = aesara.sandbox.rng_mrg.MRG_RandomStream(utt.fetch_seed())
def f(vsample_tm1):
return (
trng.binomial(vsample_tm1.shape, n=1, p=0.3, dtype="float32")
* vsample_tm1
)
aesara_vsamples, updates = scan(
f,
[],
vsample,
[],
n_steps=10,
truncate_gradient=-1,
go_backwards=False,
mode=mode_with_gpu,
)
my_f = aesara.function(
[],
aesara_vsamples[-1],
updates=updates,
allow_input_downcast=True,
mode=mode_with_gpu,
)
# I leave this to tested by debugmode, this test was anyway
# more of does the graph compile kind of test
my_f()
class ScanGpuTests:
"""
This class defines a number of tests for Scan on GPU as well as a few
helper functions for these tests. The GPU tests defined in this class are
independent of the GPU backend used. Because of this, a class inheriting
from ScanGpuTests should define the following attributes and methods to
make the tests run on a specific backend :
- self.gpu_backend : Reference to the backend module
- self.mode_with_opt : Compilation mode to force usage of the gpu backend
- self.is_scan_on_gpu(node) : Method to determine is a scan node has been
moved to run on a gpu under the specific
backend. Returns a boolean.
"""
def test_one_sequence_one_output_weights_gpu1(self):
def f_rnn(u_t, x_tm1, W_in, W):
return u_t * W_in + x_tm1 * W
u = fvector("u")
x0 = fscalar("x0")
W_in = fscalar("win")
W = fscalar("w")
# The following line is needed to have the first case being used
# Otherwise, it is the second that is tested.
mode = self.mode_with_gpu.excluding("InputToGpuOptimizer")
output, updates = scan(
f_rnn,
u,
x0,
[W_in, W],
n_steps=None,
truncate_gradient=-1,
go_backwards=False,
mode=mode,
)
output = self.gpu_backend.gpu_from_host(output)
f2 = aesara.function(
[u, x0, W_in, W],
output,
updates=updates,
allow_input_downcast=True,
mode=self.mode_with_gpu,
)
# get random initial values
rng = np.random.default_rng(utt.fetch_seed())
v_u = rng.uniform(size=(4,), low=-5.0, high=5.0)
v_x0 = rng.uniform()
W = rng.uniform()
W_in = rng.uniform()
v_u = np.asarray(v_u, dtype="float32")
v_x0 = np.asarray(v_x0, dtype="float32")
W = np.asarray(W, dtype="float32")
W_in = np.asarray(W_in, dtype="float32")
# compute the output in numpy
v_out = np.zeros((4,))
v_out[0] = v_u[0] * W_in + v_x0 * W
for step in range(1, 4):
v_out[step] = v_u[step] * W_in + v_out[step - 1] * W
aesara_values = f2(v_u, v_x0, W_in, W)
utt.assert_allclose(aesara_values, v_out)
# TO DEL
topo = f2.maker.fgraph.toposort()
scan_node = [node for node in topo if isinstance(node.op, Scan)]
assert len(scan_node) == 1
scan_node = scan_node[0]
topo = f2.maker.fgraph.toposort()
assert (
sum([isinstance(node.op, self.gpu_backend.HostFromGpu) for node in topo])
== 0
)
assert (
sum([isinstance(node.op, self.gpu_backend.GpuFromHost) for node in topo])
== 4
)
scan_node = [node for node in topo if isinstance(node.op, Scan)]
assert len(scan_node) == 1
scan_node = scan_node[0]
scan_node_topo = scan_node.op.fn.maker.fgraph.toposort()
# check that there is no gpu transfer in the inner loop.
assert any(
[
isinstance(node.op, self.gpu_backend.GpuElemwise)
for node in scan_node_topo
]
)
assert not any(
[
isinstance(node.op, self.gpu_backend.HostFromGpu)
for node in scan_node_topo
]
)
assert not any(
[
isinstance(node.op, self.gpu_backend.GpuFromHost)
for node in scan_node_topo
]
)
# This second version test the second case in the optimizer to the gpu.
def test_one_sequence_one_output_weights_gpu2(self):
def f_rnn(u_t, x_tm1, W_in, W):
return u_t * W_in + x_tm1 * W
u = fvector("u")
x0 = fscalar("x0")
W_in = fscalar("win")
W = fscalar("w")
output, updates = scan(
f_rnn,
u,
x0,
[W_in, W],
n_steps=None,
truncate_gradient=-1,
go_backwards=False,
mode=self.mode_with_gpu,
)
f2 = aesara.function(
[u, x0, W_in, W],
output,
updates=updates,
allow_input_downcast=True,
mode=self.mode_with_gpu,
)
# get random initial values
rng = np.random.default_rng(utt.fetch_seed())
v_u = rng.uniform(size=(4,), low=-5.0, high=5.0)
v_x0 = rng.uniform()
W = rng.uniform()
W_in = rng.uniform()
# compute the output in numpy
v_out = np.zeros((4,))
v_out[0] = v_u[0] * W_in + v_x0 * W
for step in range(1, 4):
v_out[step] = v_u[step] * W_in + v_out[step - 1] * W
aesara_values = f2(v_u, v_x0, W_in, W)
utt.assert_allclose(aesara_values, v_out)
topo = f2.maker.fgraph.toposort()
assert (
sum([isinstance(node.op, self.gpu_backend.HostFromGpu) for node in topo])
== 1
)
assert (
sum([isinstance(node.op, self.gpu_backend.GpuFromHost) for node in topo])
== 4
)
scan_node = [node for node in topo if isinstance(node.op, Scan)]
assert len(scan_node) == 1
scan_node = scan_node[0]
scan_node_topo = scan_node.op.fn.maker.fgraph.toposort()
# check that there is no gpu transfer in the inner loop.
assert any(
[
isinstance(node.op, self.gpu_backend.GpuElemwise)
for node in scan_node_topo
]
)
assert not any(
[
isinstance(node.op, self.gpu_backend.HostFromGpu)
for node in scan_node_topo
]
)
assert not any(
[
isinstance(node.op, self.gpu_backend.GpuFromHost)
for node in scan_node_topo
]
)
# This third test checks that scan can deal with a mixture of dtypes as
# outputs when is running on GPU
def test_gpu3_mixture_dtype_outputs(self):
def f_rnn(u_t, x_tm1, W_in, W):
return (u_t * W_in + x_tm1 * W, aet.cast(u_t + x_tm1, "int64"))
u = fvector("u")
x0 = fscalar("x0")
W_in = fscalar("win")
W = fscalar("w")
output, updates = scan(
f_rnn,
u,
[x0, None],
[W_in, W],
n_steps=None,
truncate_gradient=-1,
go_backwards=False,
mode=self.mode_with_gpu,
)
f2 = aesara.function(
[u, x0, W_in, W],
output,
updates=updates,
allow_input_downcast=True,
mode=self.mode_with_gpu,
)
# get random initial values
rng = np.random.default_rng(utt.fetch_seed())
v_u = rng.uniform(size=(4,), low=-5.0, high=5.0)
v_x0 = rng.uniform()
W = rng.uniform()
W_in = rng.uniform()
# compute the output in numpy
v_out1 = np.zeros((4,))
v_out2 = np.zeros((4,), dtype="int64")
v_out1[0] = v_u[0] * W_in + v_x0 * W
v_out2[0] = v_u[0] + v_x0
for step in range(1, 4):
v_out1[step] = v_u[step] * W_in + v_out1[step - 1] * W
v_out2[step] = np.int64(v_u[step] + v_out1[step - 1])
aesara_out1, aesara_out2 = f2(v_u, v_x0, W_in, W)
utt.assert_allclose(aesara_out1, v_out1)
utt.assert_allclose(aesara_out2, v_out2)
topo = f2.maker.fgraph.toposort()
scan_node = [node for node in topo if isinstance(node.op, Scan)]
assert len(scan_node) == 1
scan_node = scan_node[0]
assert self.is_scan_on_gpu(scan_node)
def test_gibbs_chain(self):
rng = np.random.default_rng(utt.fetch_seed())
v_vsample = np.array(
rng.binomial(
1,
0.5,
size=(3, 20),
),
dtype="float32",
)
vsample = aesara.shared(v_vsample)
trng = aesara.sandbox.rng_mrg.MRG_RandomStream(utt.fetch_seed())
def f(vsample_tm1):
return (
trng.binomial(vsample_tm1.shape, n=1, p=0.3, dtype="float32")
* vsample_tm1
)
aesara_vsamples, updates = scan(
f,
[],
vsample,
[],
n_steps=10,
truncate_gradient=-1,
go_backwards=False,
mode=self.mode_with_gpu,
)
my_f = aesara.function(
[],
aesara_vsamples[-1],
updates=updates,
allow_input_downcast=True,
mode=self.mode_with_gpu,
)
# I leave this to tested by debugmode, this test was anyway more of
# doest the graph compile kind of test
my_f()
def test_gpu_memory_usage(self):
# This test validates that the memory usage of the defined aesara
# function is reasonable when executed on the GPU. It checks for
# a bug in which one of scan's optimization was not applied which
# made the scan node compute large and unnecessary outputs which
# brought memory usage on the GPU to ~12G.
# Dimensionality of input and output data (not one-hot coded)
n_in = 100
n_out = 100
# Number of neurons in hidden layer
n_hid = 4000
# Number of minibatches
mb_size = 2
# Time steps in minibatch
mb_length = 200
# Define input variables
xin = ftensor3(name="xin")
yout = ftensor3(name="yout")
# Initialize the network parameters
U = aesara.shared(np.zeros((n_in, n_hid), dtype="float32"), name="W_xin_to_l1")
V = aesara.shared(np.zeros((n_hid, n_hid), dtype="float32"), name="W_l1_to_l1")
W = aesara.shared(np.zeros((n_hid, n_out), dtype="float32"), name="W_l1_to_l2")
nparams = [U, V, W]
# Build the forward pass
l1_base = dot(xin, U)
def scan_l(baseline, last_step):
return baseline + dot(last_step, V)
zero_output = aet.alloc(np.asarray(0.0, dtype="float32"), mb_size, n_hid)
l1_out, _ = scan(
scan_l,
sequences=[l1_base],
outputs_info=[zero_output],
mode=self.mode_with_gpu_nodebug,
)
l2_out = dot(l1_out, W)
# Compute the cost and take the gradient wrt params
cost = aet_sum((l2_out - yout) ** 2)
grads = aesara.grad(cost, nparams)
updates = list(zip(nparams, (n - g for n, g in zip(nparams, grads))))
# Compile the aesara function
feval_backprop = aesara.function(
[xin, yout], cost, updates=updates, mode=self.mode_with_gpu_nodebug
)
# Validate that the PushOutScanOutput optimization has been applied
# by checking the number of outputs of the grad Scan node in the
# compiled function.
nodes = feval_backprop.maker.fgraph.toposort()
scan_nodes = [n for n in nodes if isinstance(n.op, Scan)]
# The grad scan is always the 2nd one according to toposort. If the
# optimization has been applied, it has 2 outputs, otherwise 3.
grad_scan_node = scan_nodes[1]
assert len(grad_scan_node.outputs) == 2, len(grad_scan_node.outputs)
# Call the aesara function to ensure the absence of a memory error
feval_backprop(
np.zeros((mb_length, mb_size, n_in), dtype="float32"),
np.zeros((mb_length, mb_size, n_out), dtype="float32"),
)
def test_memory_reuse_gpudimshuffle(self):
# Test the memory pre-allocation feature in scan when one output is
# the result of a GpuDimshuffle (because an optimization in
# GpuDimshuffle can cause issues with the memory pre-allocation
# where it falsely thinks that a pre-allocated memory region has
# been used when it hasn't).
def inner_fn(seq1, recurrent_out):
temp = seq1 + recurrent_out.sum()
output1 = temp.dimshuffle(1, 0)
output2 = temp.sum() + recurrent_out
return output1, output2
input1 = ftensor3()
init = ftensor3()
outputs_info = [None, init]
out, _ = scan(
inner_fn,
sequences=[input1],
outputs_info=outputs_info,
mode=self.mode_with_gpu,
)
out1 = out[0].flatten()
out2 = out[1].flatten()
fct = aesara.function([input1, init], [out1, out2], mode=self.mode_with_gpu)
output = fct(
np.ones((2, 1, 1), dtype="float32"), np.ones((1, 1, 1), dtype="float32")
)
expected_output = (
np.array([2, 4], dtype="float32"),
np.array([3, 7], dtype="float32"),
)
utt.assert_allclose(output, expected_output)
class TestScanGpuarray(ScanGpuTests):
"""
This class takes the gpu tests for scan that are defined in
class ScanGpuTests and runs them using the gpuarray backend.
"""
def setup_method(self):
self.gpu_backend = gpuarray
# This is unfortunate, but required
def gpu_from_host(v):
return gpuarray.GpuFromHost(None)(v)
self.gpu_backend.gpu_from_host = gpu_from_host
self.mode_with_gpu = mode_with_opt.including("gpuarray", "scan")
self.mode_with_gpu_nodebug = mode_nodebug.including("gpuarray", "scan")
# Skip the test if pygpu is not available
if not self.gpu_backend.pygpu_activated:
pytest.skip("Optional package pygpu disabled")
def is_scan_on_gpu(self, node):
return node.op.info.get("gpua", False)
class TestScanCheckpoint:
def setup_method(self):
self.k = iscalar("k")
self.A = vector("A")
result, _ = scan(
fn=lambda prior_result, A: prior_result * A,
outputs_info=aet.ones_like(self.A),
non_sequences=self.A,
n_steps=self.k,
)
result_check, _ = scan_checkpoints(
fn=lambda prior_result, A: prior_result * A,
outputs_info=aet.ones_like(self.A),
non_sequences=self.A,
n_steps=self.k,
save_every_N=100,
)
self.result = result[-1]
self.result_check = result_check[-1]
self.grad_A = aesara.grad(self.result.sum(), self.A)
self.grad_A_check = aesara.grad(self.result_check.sum(), self.A)
def test_memory(self):
from tests.gpuarray.config import mode_with_gpu # noqa
f = aesara.function(
inputs=[self.A, self.k], outputs=self.grad_A, mode=mode_with_gpu
)
f_check = aesara.function(
inputs=[self.A, self.k], outputs=self.grad_A_check, mode=mode_with_gpu
)
free_gmem = aesara.gpuarray.type._context_reg[None].free_gmem
data = np.ones(free_gmem // 3000, dtype=np.float32)
# Check that it works with the checkpoints
size = 1000
if isinstance(mode_with_gpu, aesara.compile.debugmode.DebugMode):
size = 100
f_check(data, size)
# Check that the basic scan fails in that case
# Skip that check in DebugMode, as it can fail in different ways
if not isinstance(mode_with_gpu, aesara.compile.debugmode.DebugMode):
with pytest.raises(GpuArrayException):
f(data, 1000) | 0.703651 | 0.542742 |
from tests.util import yasha_cli
from pathlib import Path
import pytest
def test_string(with_tmp_path):
Path('template.j2').write_text("{{ var is string }}, {{ var }}")
yasha_cli('--var=foo template.j2')
assert Path('template').read_text() == 'True, foo'
yasha_cli("--var='foo' template.j2")
assert Path('template').read_text() == 'True, foo'
def test_boolean(with_tmp_path):
Path('template.j2').write_text("{{ var is sameas false }}, {{ var }}")
yasha_cli('--var=False template.j2')
assert Path('template').read_text() == 'True, False'
def test_number(with_tmp_path):
Path('template.j2').write_text("{{ var is number }}, {{ var + 1 }}")
yasha_cli('--var=1 template.j2')
assert Path('template').read_text() == 'True, 2'
def test_list(with_tmp_path):
Path('template.j2').write_text("{{ var is sequence }}, {{ var | join }}")
yasha_cli("--var=['foo','bar','baz'] template.j2")
assert Path('template').read_text() == 'True, foobarbaz'
def test_tuple(with_tmp_path):
Path('template.j2').write_text("{{ var is sequence }}, {{ var | join }}")
yasha_cli("--var=('foo','bar','baz') template.j2")
assert Path('template').read_text() == 'True, foobarbaz'
def test_comma_separated_list(with_tmp_path):
Path('template.j2').write_text("{{ var is sequence }}, {{ var | join }}")
yasha_cli("--var=foo,bar,baz template.j2")
assert Path('template').read_text() == 'True, foobarbaz'
def test_dictionary(with_tmp_path):
Path('template.j2').write_text("{{ var is mapping }}, {% for k in 'abc' %}{{ var[k] }}{% endfor %}")
yasha_cli("--var={'a':1,'b':2,'c':3} template.j2")
assert Path('template').read_text() == 'True, 123'
def test_commas_in_quoted_string(with_tmp_path):
""" gh-57 """
Path('template.j2').write_text("{{ var is string }}, {{ var }}")
yasha_cli("""--var='"foo,bar,baz"' template.j2""")
assert Path('template').read_text() == 'True, foo,bar,baz'
def test_quoted_comma_in_comma_separated_list(with_tmp_path):
""" gh-57 """
Path('template.j2').write_text('{{ lst is sequence }}, {{ lst | join(".") }}')
yasha_cli("""--lst='"foo,bar",baz' template.j2""")
assert Path('template').read_text() == 'True, foo,bar.baz' | tests/test_template_variables.py | from tests.util import yasha_cli
from pathlib import Path
import pytest
def test_string(with_tmp_path):
Path('template.j2').write_text("{{ var is string }}, {{ var }}")
yasha_cli('--var=foo template.j2')
assert Path('template').read_text() == 'True, foo'
yasha_cli("--var='foo' template.j2")
assert Path('template').read_text() == 'True, foo'
def test_boolean(with_tmp_path):
Path('template.j2').write_text("{{ var is sameas false }}, {{ var }}")
yasha_cli('--var=False template.j2')
assert Path('template').read_text() == 'True, False'
def test_number(with_tmp_path):
Path('template.j2').write_text("{{ var is number }}, {{ var + 1 }}")
yasha_cli('--var=1 template.j2')
assert Path('template').read_text() == 'True, 2'
def test_list(with_tmp_path):
Path('template.j2').write_text("{{ var is sequence }}, {{ var | join }}")
yasha_cli("--var=['foo','bar','baz'] template.j2")
assert Path('template').read_text() == 'True, foobarbaz'
def test_tuple(with_tmp_path):
Path('template.j2').write_text("{{ var is sequence }}, {{ var | join }}")
yasha_cli("--var=('foo','bar','baz') template.j2")
assert Path('template').read_text() == 'True, foobarbaz'
def test_comma_separated_list(with_tmp_path):
Path('template.j2').write_text("{{ var is sequence }}, {{ var | join }}")
yasha_cli("--var=foo,bar,baz template.j2")
assert Path('template').read_text() == 'True, foobarbaz'
def test_dictionary(with_tmp_path):
Path('template.j2').write_text("{{ var is mapping }}, {% for k in 'abc' %}{{ var[k] }}{% endfor %}")
yasha_cli("--var={'a':1,'b':2,'c':3} template.j2")
assert Path('template').read_text() == 'True, 123'
def test_commas_in_quoted_string(with_tmp_path):
""" gh-57 """
Path('template.j2').write_text("{{ var is string }}, {{ var }}")
yasha_cli("""--var='"foo,bar,baz"' template.j2""")
assert Path('template').read_text() == 'True, foo,bar,baz'
def test_quoted_comma_in_comma_separated_list(with_tmp_path):
""" gh-57 """
Path('template.j2').write_text('{{ lst is sequence }}, {{ lst | join(".") }}')
yasha_cli("""--lst='"foo,bar",baz' template.j2""")
assert Path('template').read_text() == 'True, foo,bar.baz' | 0.482673 | 0.36659 |
import pathlib
from lingpy import tokens2class, prosodic_string
from lingpy.align.sca import get_consensus
from lingpy import basictypes as bt
def lingrex_path(*comps):
return str(pathlib.Path(__file__).parent.joinpath(*comps))
def add_structure(
wordlist, model="cv", segments="tokens", structure="structure", ref="cogid", gap="-"
):
"""Add structure to a wordlist to make sure correspondence patterns can be
inferred"""
if model not in ["cv", "c", "CcV", "ps", "nogap"]:
raise ValueError("[i] you need to select a valid model")
D = {}
if model == "cv":
for idx, tks in wordlist.iter_rows(segments):
D[idx] = " ".join(tokens2class(tks, "cv")).lower()
if model == "c":
for idx, tks in wordlist.iter_rows(segments):
D[idx] = (
" ".join(tokens2class(tks, "cv"))
.lower()
.replace("v", "c")
.replace("t", "c")
)
if model == "nogap":
assert hasattr(wordlist, "msa")
for cogid, msa in wordlist.msa[ref].items():
cons = [
"c" if c != gap else gap
for c in get_consensus(msa["alignment"], gaps=True)
]
for idx, alm in zip(msa["ID"], msa["alignment"]):
struc = []
for a, b in zip(cons, alm):
if b != "-":
struc += [a]
D[idx] = " ".join(struc)
for idx, tks in wordlist.iter_rows(segments):
if idx not in D:
D[idx] = " ".join(["c" if c != "+" else c for c in tks])
if model == "CcV":
for idx, tks in wordlist.iter_rows(segments):
D[idx] = " ".join(
list(prosodic_string(tks, _output="CcV").replace("_", "+"))
)
if model == "ps":
for idx, tks in wordlist.iter_rows(segments):
D[idx] = " ".join(list(prosodic_string(tks)))
if hasattr(wordlist, "_mode") and wordlist._mode == "fuzzy":
struc_ = bt.lists
else:
struc_ = bt.strings
wordlist.add_entries(structure, D, lambda x: struc_(x)) | src/lingrex/util.py | import pathlib
from lingpy import tokens2class, prosodic_string
from lingpy.align.sca import get_consensus
from lingpy import basictypes as bt
def lingrex_path(*comps):
return str(pathlib.Path(__file__).parent.joinpath(*comps))
def add_structure(
wordlist, model="cv", segments="tokens", structure="structure", ref="cogid", gap="-"
):
"""Add structure to a wordlist to make sure correspondence patterns can be
inferred"""
if model not in ["cv", "c", "CcV", "ps", "nogap"]:
raise ValueError("[i] you need to select a valid model")
D = {}
if model == "cv":
for idx, tks in wordlist.iter_rows(segments):
D[idx] = " ".join(tokens2class(tks, "cv")).lower()
if model == "c":
for idx, tks in wordlist.iter_rows(segments):
D[idx] = (
" ".join(tokens2class(tks, "cv"))
.lower()
.replace("v", "c")
.replace("t", "c")
)
if model == "nogap":
assert hasattr(wordlist, "msa")
for cogid, msa in wordlist.msa[ref].items():
cons = [
"c" if c != gap else gap
for c in get_consensus(msa["alignment"], gaps=True)
]
for idx, alm in zip(msa["ID"], msa["alignment"]):
struc = []
for a, b in zip(cons, alm):
if b != "-":
struc += [a]
D[idx] = " ".join(struc)
for idx, tks in wordlist.iter_rows(segments):
if idx not in D:
D[idx] = " ".join(["c" if c != "+" else c for c in tks])
if model == "CcV":
for idx, tks in wordlist.iter_rows(segments):
D[idx] = " ".join(
list(prosodic_string(tks, _output="CcV").replace("_", "+"))
)
if model == "ps":
for idx, tks in wordlist.iter_rows(segments):
D[idx] = " ".join(list(prosodic_string(tks)))
if hasattr(wordlist, "_mode") and wordlist._mode == "fuzzy":
struc_ = bt.lists
else:
struc_ = bt.strings
wordlist.add_entries(structure, D, lambda x: struc_(x)) | 0.293303 | 0.394376 |
"""Linear Gaussian State Space Model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.distributions.linear_gaussian_ssm import _augment_sample_shape
from tensorflow_probability.python.distributions.linear_gaussian_ssm import build_kalman_cov_step
from tensorflow_probability.python.distributions.linear_gaussian_ssm import build_kalman_filter_step
from tensorflow_probability.python.distributions.linear_gaussian_ssm import build_kalman_mean_step
from tensorflow_probability.python.distributions.linear_gaussian_ssm import kalman_transition
from tensorflow_probability.python.distributions.linear_gaussian_ssm import KalmanFilterState
from tensorflow_probability.python.distributions.linear_gaussian_ssm import linear_gaussian_update
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
tfl = tf.linalg
tfd = tfp.distributions
@test_util.run_all_in_graph_and_eager_modes
class IIDNormalTest(test.TestCase):
def setUp(self):
pass
def _build_iid_normal_model(self,
num_timesteps,
latent_size,
observation_size,
transition_variance,
obs_variance):
"""Build a model whose outputs are IID normal by construction."""
# Use orthogonal matrices to project a (potentially
# high-dimensional) latent space of IID normal variables into a
# low-dimensional observation that is still IID normal.
random_orthogonal_matrix = lambda: np.linalg.qr(
np.random.randn(latent_size, latent_size))[0][:observation_size, :]
obs_matrix = tf.convert_to_tensor(random_orthogonal_matrix(),
dtype=tf.float32)
model = tfd.LinearGaussianStateSpaceModel(
num_timesteps=num_timesteps,
transition_matrix=tf.zeros((latent_size, latent_size)),
transition_noise=tfd.MultivariateNormalDiag(
scale_diag=tf.sqrt(transition_variance)*tf.ones((latent_size))),
observation_matrix=obs_matrix,
observation_noise=tfd.MultivariateNormalDiag(
scale_diag=tf.sqrt(obs_variance)*tf.ones((observation_size))),
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=tf.sqrt(transition_variance)*tf.ones((latent_size))),
validate_args=True)
return model
def test_iid_normal_sample(self):
num_timesteps = 10
latent_size = 3
observation_size = 2
num_samples = 10000
for transition_variance_val in [.3, 100.]:
for obs_variance_val in [.6, 40.]:
iid_latents = self._build_iid_normal_model(
num_timesteps=num_timesteps,
latent_size=latent_size,
observation_size=observation_size,
transition_variance=transition_variance_val,
obs_variance=obs_variance_val)
x = iid_latents.sample(num_samples)
x_val = self.evaluate(x)
result_shape = [num_timesteps, observation_size]
marginal_variance = transition_variance_val + obs_variance_val
stderr_mean = np.sqrt(num_samples * marginal_variance)
stderr_variance = marginal_variance * np.sqrt(2./(num_samples-1))
self.assertAllClose(np.mean(x_val, axis=0),
np.zeros(result_shape),
atol=5*stderr_mean)
self.assertAllClose(np.var(x_val, axis=0),
np.ones(result_shape) * marginal_variance,
rtol=5*stderr_variance)
def test_iid_normal_logprob(self):
# In the case where the latent states are iid normal (achieved by
# setting the transition matrix to zero, so there's no dependence
# between timesteps), and observations are also independent
# (achieved by using an orthogonal matrix as the observation model),
# we can verify log_prob as a simple iid Gaussian log density.
delta = 1e-4
for transition_variance_val in [1., 1e-8]:
for obs_variance_val in [1., 1e-8]:
iid_latents = self._build_iid_normal_model(
num_timesteps=10,
latent_size=4,
observation_size=2,
transition_variance=transition_variance_val,
obs_variance=obs_variance_val)
x = iid_latents.sample([5, 3])
lp_kalman = iid_latents.log_prob(x)
marginal_variance = transition_variance_val + obs_variance_val
lp_iid = tf.reduce_sum(
tfd.Normal(0., tf.sqrt(marginal_variance)).log_prob(x),
axis=(-2, -1))
lp_kalman_val, lp_iid_val = self.evaluate((lp_kalman, lp_iid))
self.assertAllClose(lp_kalman_val,
lp_iid_val,
rtol=delta, atol=0.)
@test_util.run_all_in_graph_and_eager_modes
class BatchTest(test.TestCase):
  """Test that methods broadcast batch dimensions for each parameter."""
  def setUp(self):
    pass
  def _build_random_model(self,
                          num_timesteps,
                          latent_size,
                          observation_size,
                          prior_batch_shape=None,
                          transition_matrix_batch_shape=None,
                          transition_noise_batch_shape=None,
                          observation_matrix_batch_shape=None,
                          observation_noise_batch_shape=None):
    """Builds a LGSSM with random normal ops of specified shape.

    Each `*_batch_shape` argument gives the batch shape for that model
    component; `None` means a scalar (empty) batch. Returns a
    `tfd.LinearGaussianStateSpaceModel` with `validate_args=True`.
    """
    # Normalize all `None` batch shapes to the empty (scalar) batch shape.
    prior_batch_shape = (
        [] if prior_batch_shape is None else prior_batch_shape)
    transition_matrix_batch_shape = ([] if transition_matrix_batch_shape is None
                                     else transition_matrix_batch_shape)
    transition_noise_batch_shape = ([] if transition_noise_batch_shape is None
                                    else transition_noise_batch_shape)
    observation_matrix_batch_shape = ([]
                                      if observation_matrix_batch_shape is None
                                      else observation_matrix_batch_shape)
    observation_noise_batch_shape = ([] if observation_noise_batch_shape is None
                                     else observation_noise_batch_shape)
    # softplus keeps the randomly drawn scale diagonals strictly positive.
    return tfd.LinearGaussianStateSpaceModel(
        num_timesteps=num_timesteps,
        transition_matrix=tf.random_normal(
            transition_matrix_batch_shape + [latent_size, latent_size]),
        transition_noise=tfd.MultivariateNormalDiag(
            scale_diag=tf.nn.softplus(tf.random_normal(
                transition_noise_batch_shape + [latent_size]))),
        observation_matrix=tf.random_normal(
            observation_matrix_batch_shape + [observation_size, latent_size]),
        observation_noise=tfd.MultivariateNormalDiag(
            scale_diag=tf.nn.softplus(tf.random_normal(
                observation_noise_batch_shape + [observation_size]))),
        initial_state_prior=tfd.MultivariateNormalDiag(
            scale_diag=tf.nn.softplus(tf.random_normal(
                prior_batch_shape + [latent_size]))),
        validate_args=True)
  def _sanity_check_shapes(self, model,
                           batch_shape,
                           event_shape,
                           sample_shape=(2, 1)):
    """Asserts sample/log_prob/moment shapes all include `batch_shape`."""
    # Lists can't be default arguments, but we'll want sample_shape to
    # be a list so we can concatenate with other shapes passed as
    # lists.
    sample_shape = list(sample_shape)
    self.assertEqual(model.event_shape.as_list(), event_shape)
    self.assertEqual(model.batch_shape.as_list(), batch_shape)
    y = model.sample(sample_shape)
    self.assertEqual(y.shape.as_list(),
                     sample_shape + batch_shape + event_shape)
    lp = model.log_prob(y)
    self.assertEqual(lp.shape.as_list(), sample_shape + batch_shape)
    # Try an argument with no batch shape to ensure we broadcast
    # correctly.
    unbatched_y = tf.random_normal(event_shape)
    lp = model.log_prob(unbatched_y)
    self.assertEqual(lp.shape.as_list(), batch_shape)
    self.assertEqual(model.mean().shape.as_list(),
                     batch_shape + event_shape)
    self.assertEqual(model.variance().shape.as_list(),
                     batch_shape + event_shape)
  def test_constant_batch_shape(self):
    """Simple case where all components have the same batch shape."""
    num_timesteps = 5
    latent_size = 3
    observation_size = 2
    batch_shape = [3, 4]
    event_shape = [num_timesteps, observation_size]
    model = self._build_random_model(num_timesteps,
                                     latent_size,
                                     observation_size,
                                     prior_batch_shape=batch_shape,
                                     transition_matrix_batch_shape=batch_shape,
                                     transition_noise_batch_shape=batch_shape,
                                     observation_matrix_batch_shape=batch_shape,
                                     observation_noise_batch_shape=batch_shape)
    # check that we get the basic shapes right
    self.assertEqual(model.latent_size, latent_size)
    self.assertEqual(model.observation_size, observation_size)
    self._sanity_check_shapes(model, batch_shape, event_shape)
  def test_broadcast_batch_shape(self):
    """Broadcasting when only one component has batch shape."""
    num_timesteps = 5
    latent_size = 3
    observation_size = 2
    batch_shape = [3, 4]
    event_shape = [num_timesteps, observation_size]
    # Test batching only over the prior
    model = self._build_random_model(num_timesteps,
                                     latent_size,
                                     observation_size,
                                     prior_batch_shape=batch_shape)
    self._sanity_check_shapes(model, batch_shape, event_shape)
    # Test batching only over the transition op
    model = self._build_random_model(num_timesteps,
                                     latent_size,
                                     observation_size,
                                     transition_matrix_batch_shape=batch_shape)
    self._sanity_check_shapes(model, batch_shape, event_shape)
    # Test batching only over the transition noise
    model = self._build_random_model(num_timesteps,
                                     latent_size,
                                     observation_size,
                                     transition_noise_batch_shape=batch_shape)
    self._sanity_check_shapes(model, batch_shape, event_shape)
    # Test batching only over the observation op
    model = self._build_random_model(num_timesteps,
                                     latent_size,
                                     observation_size,
                                     observation_matrix_batch_shape=batch_shape)
    self._sanity_check_shapes(model, batch_shape, event_shape)
    # Test batching only over the observation noise
    model = self._build_random_model(num_timesteps,
                                     latent_size,
                                     observation_size,
                                     observation_noise_batch_shape=batch_shape)
    self._sanity_check_shapes(model, batch_shape, event_shape)
  def test_batch_shape_error(self):
    # TODO(review): unimplemented stub -- build a dist where components have
    # incompatible batch shapes. this should cause a problem somehow.
    pass
class _KalmanStepsTest(object):
  """Shared test cases for the per-timestep Kalman filter helpers.

  Subclasses supply `build_tensor`, so identical assertions run against both
  statically- and dynamically-shaped inputs.
  """
  def setUp(self):
    # Define a simple model with 2D latents and 1D observations.
    self.transition_matrix = np.asarray([[1., .5], [-.2, .3]], dtype=np.float32)
    self.get_transition_matrix_for_timestep = (
        lambda t: tfl.LinearOperatorFullMatrix(self.transition_matrix))
    self.bias = np.asarray([-4.3, .9], dtype=np.float32)
    self.get_transition_noise_for_timestep = (
        lambda t: tfd.MultivariateNormalDiag(self.bias, [1., 1.]))
    self.get_observation_matrix_for_timestep = (
        lambda t: tfl.LinearOperatorFullMatrix([[1., 1.]]))
    self.observation_bias = np.asarray([-.9], dtype=np.float32)
    self.get_observation_noise_for_timestep = (
        lambda t: tfd.MultivariateNormalDiag(self.observation_bias, [1.]))
  def testKalmanFilterStep(self):
    # One full filter step (predict + update) from a known prior state.
    prev_mean = np.asarray([[-2], [.4]], dtype=np.float32)
    prev_cov = np.asarray([[.5, .1], [.2, .6]], dtype=np.float32)
    x_observed = np.asarray([[4.]], dtype=np.float32)
    prev_mean_tensor = self.build_tensor(prev_mean)
    prev_cov_tensor = self.build_tensor(prev_cov)
    x_observed_tensor = self.build_tensor(x_observed)
    filter_step = build_kalman_filter_step(
        self.get_transition_matrix_for_timestep,
        self.get_transition_noise_for_timestep,
        self.get_observation_matrix_for_timestep,
        self.get_observation_noise_for_timestep)
    initial_filter_state = KalmanFilterState(
        filtered_mean=None,
        filtered_cov=None,
        predicted_mean=prev_mean_tensor,
        predicted_cov=prev_cov_tensor,
        observation_mean=None,
        observation_cov=None,
        log_marginal_likelihood=self.build_tensor(0.),
        timestep=self.build_tensor(0))
    filter_state = self.evaluate(
        filter_step(initial_filter_state,
                    x_observed_tensor))
    # Computed by running a believed-correct version of
    # the code.
    expected_filtered_mean = [[-0.104167], [2.295833]]
    expected_filtered_cov = [[0.325, -0.075], [-0.033333, 0.366667]]
    expected_predicted_mean = [[-3.25625], [1.609583]]
    expected_predicted_cov = [[1.3625, -0.0125], [-0.029167, 1.0525]]
    expected_observation_mean = [[-2.5]]
    expected_observation_cov = [[2.4]]
    expected_log_marginal_likelihood = -10.1587553024292
    self.assertAllClose(filter_state.filtered_mean,
                        expected_filtered_mean)
    self.assertAllClose(filter_state.filtered_cov,
                        expected_filtered_cov)
    self.assertAllClose(filter_state.predicted_mean,
                        expected_predicted_mean)
    self.assertAllClose(filter_state.predicted_cov,
                        expected_predicted_cov)
    self.assertAllClose(filter_state.observation_mean,
                        expected_observation_mean)
    self.assertAllClose(filter_state.observation_cov,
                        expected_observation_cov)
    self.assertAllClose(filter_state.log_marginal_likelihood,
                        expected_log_marginal_likelihood)
    # The step counter should have advanced by one.
    self.assertAllClose(filter_state.timestep, 1)
  def testKalmanTransition(self):
    # Predict step only: push mean/cov through the transition model.
    prev_mean = np.asarray([[-2], [.4]], dtype=np.float32)
    prev_cov = np.asarray([[.5, -.2], [-.2, .9]], dtype=np.float32)
    prev_mean_tensor = self.build_tensor(prev_mean)
    prev_cov_tensor = self.build_tensor(prev_cov)
    predicted_mean, predicted_cov = kalman_transition(
        prev_mean_tensor, prev_cov_tensor,
        self.get_transition_matrix_for_timestep(0),
        self.get_transition_noise_for_timestep(0))
    # Mean: F @ m + b; covariance: F @ P @ F^T + Q (Q is identity here).
    self.assertAllClose(self.evaluate(predicted_mean),
                        np.dot(self.transition_matrix,
                               prev_mean) + self.bias[:, np.newaxis])
    self.assertAllClose(self.evaluate(predicted_cov),
                        np.dot(self.transition_matrix,
                               np.dot(prev_cov,
                                      self.transition_matrix.T)) + np.eye(2))
  def testLinearGaussianObservation(self):
    # Update step only: posterior from a single observation.
    prev_mean = np.asarray([[-2], [.4]], dtype=np.float32)
    prev_cov = np.asarray([[.5, -.2], [-.2, .9]], dtype=np.float32)
    x_observed = np.asarray([[4.]], dtype=np.float32)
    prev_mean_tensor = self.build_tensor(prev_mean)
    prev_cov_tensor = self.build_tensor(prev_cov)
    x_observed_tensor = self.build_tensor(x_observed)
    observation_matrix = self.get_observation_matrix_for_timestep(0)
    observation_noise = self.get_observation_noise_for_timestep(0)
    (posterior_mean,
     posterior_cov,
     predictive_dist) = linear_gaussian_update(
         prev_mean_tensor, prev_cov_tensor,
         observation_matrix, observation_noise,
         x_observed_tensor)
    # Golden values from a believed-correct implementation.
    expected_posterior_mean = [[-1.025], [2.675]]
    expected_posterior_cov = [[0.455, -0.305], [-0.305, 0.655]]
    expected_predicted_mean = [-2.5]
    expected_predicted_cov = [[2.]]
    self.assertAllClose(self.evaluate(posterior_mean),
                        expected_posterior_mean)
    self.assertAllClose(self.evaluate(posterior_cov),
                        expected_posterior_cov)
    self.assertAllClose(self.evaluate(predictive_dist.mean()),
                        expected_predicted_mean)
    self.assertAllClose(self.evaluate(predictive_dist.covariance()),
                        expected_predicted_cov)
  def testMeanStep(self):
    prev_mean = np.asarray([[-2], [.4]], dtype=np.float32)
    prev_mean_tensor = self.build_tensor(prev_mean)
    mean_step = build_kalman_mean_step(
        self.get_transition_matrix_for_timestep,
        self.get_transition_noise_for_timestep,
        self.get_observation_matrix_for_timestep,
        self.get_observation_noise_for_timestep)
    new_mean, obs_mean = mean_step((prev_mean_tensor, None), t=0)
    self.assertAllClose(self.evaluate(new_mean),
                        np.dot(self.transition_matrix,
                               prev_mean) + self.bias[:, np.newaxis])
    # Observation matrix is [[1., 1.]], so the observed mean is the sum of
    # the latent means plus the observation bias.
    self.assertAllClose(self.evaluate(obs_mean),
                        np.sum(self.evaluate(new_mean)) +
                        self.observation_bias[:, np.newaxis])
  def testCovStep(self):
    prev_cov = np.asarray([[.5, -.2], [-.2, .9]], dtype=np.float32)
    prev_cov_tensor = self.build_tensor(prev_cov)
    cov_step = build_kalman_cov_step(
        self.get_transition_matrix_for_timestep,
        self.get_transition_noise_for_timestep,
        self.get_observation_matrix_for_timestep,
        self.get_observation_noise_for_timestep)
    new_cov, obs_cov = cov_step((prev_cov_tensor, None), t=0)
    self.assertAllClose(self.evaluate(new_cov),
                        np.dot(self.transition_matrix,
                               np.dot(prev_cov,
                                      self.transition_matrix.T)) + np.eye(2))
    # With H = [[1., 1.]], the observed variance is the sum of all latent
    # covariance entries plus the unit observation-noise variance.
    self.assertAllClose(self.evaluate(obs_cov),
                        [[np.sum(self.evaluate(new_cov)) + 1.]])
@test_util.run_all_in_graph_and_eager_modes
class KalmanStepsTestStatic(test.TestCase, _KalmanStepsTest):
  """Runs the shared Kalman-step tests with statically-shaped tensors."""

  def setUp(self):
    # Reuse the fixture defined on the shared mixin.
    return _KalmanStepsTest.setUp(self)

  def build_tensor(self, tensor):
    """Returns `tensor` as a tf.Tensor with a fully-known static shape."""
    static_tensor = tf.convert_to_tensor(tensor)
    return static_tensor
@test_util.run_all_in_graph_and_eager_modes
class KalmanStepsTestDynamic(test.TestCase, _KalmanStepsTest):
  """Runs the shared Kalman-step tests with dynamically-shaped tensors."""

  def setUp(self):
    # Reuse the fixture defined on the shared mixin.
    return _KalmanStepsTest.setUp(self)

  def build_tensor(self, tensor):
    """Routes `tensor` through a placeholder so its static shape is unknown."""
    as_tensor = tf.convert_to_tensor(tensor)
    return tf.placeholder_with_default(input=as_tensor, shape=None)
class _AugmentSampleShapeTest(object):
  """Shared cases for `_augment_sample_shape`; subclasses pick static/dynamic."""
  def testAugmentsShape(self):
    # [2, 3] is a trailing sub-shape of [5, 4, 2, 3], so the required
    # augmentation is the leading [5, 4].
    full_batch_shape, dist = self.build_inputs([5, 4, 2, 3], [2, 3])
    sample_shape = _augment_sample_shape(dist, full_batch_shape,
                                         validate_args=True)
    self.assertAllEqual(self.maybe_evaluate(sample_shape), [5, 4])
  def testSameShape(self):
    # Identical shapes require no augmentation.
    full_batch_shape, dist = self.build_inputs([5, 4, 2, 3], [5, 4, 2, 3])
    sample_shape = _augment_sample_shape(dist, full_batch_shape,
                                         validate_args=True)
    self.assertAllEqual(self.maybe_evaluate(sample_shape), [])
  # We omit the eager-mode decorator for error handling checks,
  # because eager mode throws dynamic errors statically which confuses
  # the test harness.
  def testNotPrefixThrowsError(self):
    # [1, 3] would need broadcasting against [2, 3], which is rejected.
    full_batch_shape, dist = self.build_inputs([5, 4, 2, 3], [1, 3])
    with self.assertRaisesError("Broadcasting is not supported"):
      self.maybe_evaluate(
          _augment_sample_shape(dist, full_batch_shape,
                                validate_args=True))
  def testTooManyDimsThrowsError(self):
    # The partial shape has more dimensions than the full shape.
    full_batch_shape, dist = self.build_inputs([5, 4, 2, 3], [6, 5, 4, 2, 3])
    with self.assertRaisesError(
        "(Broadcasting is not supported|Cannot broadcast)"):
      self.maybe_evaluate(
          _augment_sample_shape(dist, full_batch_shape,
                                validate_args=True))
@test_util.run_all_in_graph_and_eager_modes
class AugmentSampleShapeTestStatic(test.TestCase, _AugmentSampleShapeTest):
  """Exercises `_augment_sample_shape` with statically-known shapes."""

  def assertRaisesError(self, msg):
    # Static shape mismatches surface as ordinary Python exceptions.
    return self.assertRaisesRegexp(Exception, msg)

  def build_inputs(self, full_batch_shape, partial_batch_shape):
    """Returns a static int32 shape array and a dist batched to `partial_batch_shape`."""
    loc = tf.random_normal(partial_batch_shape)
    dist = tfd.Normal(loc, 1.)
    return np.asarray(full_batch_shape, dtype=np.int32), dist

  def maybe_evaluate(self, x):
    # Everything is static here, so no evaluation is needed.
    return x
@test_util.run_all_in_graph_and_eager_modes
class AugmentSampleShapeTestDynamic(test.TestCase, _AugmentSampleShapeTest):
  """Exercises `_augment_sample_shape` with runtime-fed (dynamic) shapes."""

  def assertRaisesError(self, msg):
    # Dynamic shape mismatches are raised while the op executes.
    return self.assertRaisesOpError(msg)

  def _hide_shape(self, shape):
    # Route the value through a placeholder so its static shape is unknown.
    return tf.placeholder_with_default(
        input=np.asarray(shape, dtype=np.int32),
        shape=None)

  def build_inputs(self, full_batch_shape, partial_batch_shape):
    """Returns a dynamically-shaped shape tensor and a matching distribution."""
    dynamic_full = self._hide_shape(full_batch_shape)
    dynamic_partial = self._hide_shape(partial_batch_shape)
    dist = tfd.Normal(tf.random_normal(dynamic_partial), 1.)
    return dynamic_full, dist

  def maybe_evaluate(self, x):
    return self.evaluate(x)
if __name__ == "__main__":
test.main() | tensorflow_probability/python/distributions/linear_gaussian_ssm_test.py | """Linear Gaussian State Space Model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.distributions.linear_gaussian_ssm import _augment_sample_shape
from tensorflow_probability.python.distributions.linear_gaussian_ssm import build_kalman_cov_step
from tensorflow_probability.python.distributions.linear_gaussian_ssm import build_kalman_filter_step
from tensorflow_probability.python.distributions.linear_gaussian_ssm import build_kalman_mean_step
from tensorflow_probability.python.distributions.linear_gaussian_ssm import kalman_transition
from tensorflow_probability.python.distributions.linear_gaussian_ssm import KalmanFilterState
from tensorflow_probability.python.distributions.linear_gaussian_ssm import linear_gaussian_update
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
tfl = tf.linalg
tfd = tfp.distributions
@test_util.run_all_in_graph_and_eager_modes
class IIDNormalTest(test.TestCase):
  """Tests sampling and log_prob against an analytically IID-normal LGSSM."""

  def setUp(self):
    pass

  def _build_iid_normal_model(self,
                              num_timesteps,
                              latent_size,
                              observation_size,
                              transition_variance,
                              obs_variance):
    """Build a model whose outputs are IID normal by construction.

    Args:
      num_timesteps: Python `int`, number of timesteps.
      latent_size: Python `int`, dimension of the latent state.
      observation_size: Python `int`, dimension of each observation; assumed
        not to exceed `latent_size` so the projection has orthonormal rows.
      transition_variance: scalar variance of the transition noise (also
        used for the initial-state prior).
      obs_variance: scalar variance of the observation noise.

    Returns:
      A `tfd.LinearGaussianStateSpaceModel` whose marginals are all
      `Normal(0., sqrt(transition_variance + obs_variance))`.
    """
    # Use orthogonal matrices to project a (potentially
    # high-dimensional) latent space of IID normal variables into a
    # low-dimensional observation that is still IID normal.
    random_orthogonal_matrix = lambda: np.linalg.qr(
        np.random.randn(latent_size, latent_size))[0][:observation_size, :]
    obs_matrix = tf.convert_to_tensor(random_orthogonal_matrix(),
                                      dtype=tf.float32)
    # A zero transition matrix makes latents independent across timesteps.
    model = tfd.LinearGaussianStateSpaceModel(
        num_timesteps=num_timesteps,
        transition_matrix=tf.zeros((latent_size, latent_size)),
        transition_noise=tfd.MultivariateNormalDiag(
            scale_diag=tf.sqrt(transition_variance)*tf.ones((latent_size))),
        observation_matrix=obs_matrix,
        observation_noise=tfd.MultivariateNormalDiag(
            scale_diag=tf.sqrt(obs_variance)*tf.ones((observation_size))),
        initial_state_prior=tfd.MultivariateNormalDiag(
            scale_diag=tf.sqrt(transition_variance)*tf.ones((latent_size))),
        validate_args=True)
    return model

  def test_iid_normal_sample(self):
    """Checks sample moments against the analytic IID marginals."""
    num_timesteps = 10
    latent_size = 3
    observation_size = 2
    num_samples = 10000
    for transition_variance_val in [.3, 100.]:
      for obs_variance_val in [.6, 40.]:
        iid_latents = self._build_iid_normal_model(
            num_timesteps=num_timesteps,
            latent_size=latent_size,
            observation_size=observation_size,
            transition_variance=transition_variance_val,
            obs_variance=obs_variance_val)
        x = iid_latents.sample(num_samples)
        x_val = self.evaluate(x)
        result_shape = [num_timesteps, observation_size]
        marginal_variance = transition_variance_val + obs_variance_val
        # BUG FIX: the standard error of the mean of `num_samples` draws is
        # sqrt(variance / num_samples), not sqrt(num_samples * variance); the
        # old tolerance was ~num_samples times too large, which made the mean
        # assertion vacuous.
        stderr_mean = np.sqrt(marginal_variance / num_samples)
        # BUG FIX: `rtol` is already relative to the expected value, so the
        # relative standard error of the sample variance is
        # sqrt(2 / (num_samples - 1)) without an extra `marginal_variance`
        # factor (the old value also made the check vacuous for variance > 1).
        stderr_variance = np.sqrt(2. / (num_samples - 1))
        self.assertAllClose(np.mean(x_val, axis=0),
                            np.zeros(result_shape),
                            atol=5*stderr_mean)
        self.assertAllClose(np.var(x_val, axis=0),
                            np.ones(result_shape) * marginal_variance,
                            rtol=5*stderr_variance)

  def test_iid_normal_logprob(self):
    """Compares Kalman-filter log_prob to the closed-form IID density."""
    # In the case where the latent states are iid normal (achieved by
    # setting the transition matrix to zero, so there's no dependence
    # between timesteps), and observations are also independent
    # (achieved by using an orthogonal matrix as the observation model),
    # we can verify log_prob as a simple iid Gaussian log density.
    delta = 1e-4
    for transition_variance_val in [1., 1e-8]:
      for obs_variance_val in [1., 1e-8]:
        iid_latents = self._build_iid_normal_model(
            num_timesteps=10,
            latent_size=4,
            observation_size=2,
            transition_variance=transition_variance_val,
            obs_variance=obs_variance_val)
        x = iid_latents.sample([5, 3])
        lp_kalman = iid_latents.log_prob(x)
        marginal_variance = transition_variance_val + obs_variance_val
        lp_iid = tf.reduce_sum(
            tfd.Normal(0., tf.sqrt(marginal_variance)).log_prob(x),
            axis=(-2, -1))
        lp_kalman_val, lp_iid_val = self.evaluate((lp_kalman, lp_iid))
        self.assertAllClose(lp_kalman_val,
                            lp_iid_val,
                            rtol=delta, atol=0.)
@test_util.run_all_in_graph_and_eager_modes
class BatchTest(test.TestCase):
  """Test that methods broadcast batch dimensions for each parameter."""
  def setUp(self):
    pass
  def _build_random_model(self,
                          num_timesteps,
                          latent_size,
                          observation_size,
                          prior_batch_shape=None,
                          transition_matrix_batch_shape=None,
                          transition_noise_batch_shape=None,
                          observation_matrix_batch_shape=None,
                          observation_noise_batch_shape=None):
    """Builds a LGSSM with random normal ops of specified shape.

    Each `*_batch_shape` argument gives the batch shape for that model
    component; `None` means a scalar (empty) batch. Returns a
    `tfd.LinearGaussianStateSpaceModel` with `validate_args=True`.
    """
    # Normalize all `None` batch shapes to the empty (scalar) batch shape.
    prior_batch_shape = (
        [] if prior_batch_shape is None else prior_batch_shape)
    transition_matrix_batch_shape = ([] if transition_matrix_batch_shape is None
                                     else transition_matrix_batch_shape)
    transition_noise_batch_shape = ([] if transition_noise_batch_shape is None
                                    else transition_noise_batch_shape)
    observation_matrix_batch_shape = ([]
                                      if observation_matrix_batch_shape is None
                                      else observation_matrix_batch_shape)
    observation_noise_batch_shape = ([] if observation_noise_batch_shape is None
                                     else observation_noise_batch_shape)
    # softplus keeps the randomly drawn scale diagonals strictly positive.
    return tfd.LinearGaussianStateSpaceModel(
        num_timesteps=num_timesteps,
        transition_matrix=tf.random_normal(
            transition_matrix_batch_shape + [latent_size, latent_size]),
        transition_noise=tfd.MultivariateNormalDiag(
            scale_diag=tf.nn.softplus(tf.random_normal(
                transition_noise_batch_shape + [latent_size]))),
        observation_matrix=tf.random_normal(
            observation_matrix_batch_shape + [observation_size, latent_size]),
        observation_noise=tfd.MultivariateNormalDiag(
            scale_diag=tf.nn.softplus(tf.random_normal(
                observation_noise_batch_shape + [observation_size]))),
        initial_state_prior=tfd.MultivariateNormalDiag(
            scale_diag=tf.nn.softplus(tf.random_normal(
                prior_batch_shape + [latent_size]))),
        validate_args=True)
  def _sanity_check_shapes(self, model,
                           batch_shape,
                           event_shape,
                           sample_shape=(2, 1)):
    """Asserts sample/log_prob/moment shapes all include `batch_shape`."""
    # Lists can't be default arguments, but we'll want sample_shape to
    # be a list so we can concatenate with other shapes passed as
    # lists.
    sample_shape = list(sample_shape)
    self.assertEqual(model.event_shape.as_list(), event_shape)
    self.assertEqual(model.batch_shape.as_list(), batch_shape)
    y = model.sample(sample_shape)
    self.assertEqual(y.shape.as_list(),
                     sample_shape + batch_shape + event_shape)
    lp = model.log_prob(y)
    self.assertEqual(lp.shape.as_list(), sample_shape + batch_shape)
    # Try an argument with no batch shape to ensure we broadcast
    # correctly.
    unbatched_y = tf.random_normal(event_shape)
    lp = model.log_prob(unbatched_y)
    self.assertEqual(lp.shape.as_list(), batch_shape)
    self.assertEqual(model.mean().shape.as_list(),
                     batch_shape + event_shape)
    self.assertEqual(model.variance().shape.as_list(),
                     batch_shape + event_shape)
  def test_constant_batch_shape(self):
    """Simple case where all components have the same batch shape."""
    num_timesteps = 5
    latent_size = 3
    observation_size = 2
    batch_shape = [3, 4]
    event_shape = [num_timesteps, observation_size]
    model = self._build_random_model(num_timesteps,
                                     latent_size,
                                     observation_size,
                                     prior_batch_shape=batch_shape,
                                     transition_matrix_batch_shape=batch_shape,
                                     transition_noise_batch_shape=batch_shape,
                                     observation_matrix_batch_shape=batch_shape,
                                     observation_noise_batch_shape=batch_shape)
    # check that we get the basic shapes right
    self.assertEqual(model.latent_size, latent_size)
    self.assertEqual(model.observation_size, observation_size)
    self._sanity_check_shapes(model, batch_shape, event_shape)
  def test_broadcast_batch_shape(self):
    """Broadcasting when only one component has batch shape."""
    num_timesteps = 5
    latent_size = 3
    observation_size = 2
    batch_shape = [3, 4]
    event_shape = [num_timesteps, observation_size]
    # Test batching only over the prior
    model = self._build_random_model(num_timesteps,
                                     latent_size,
                                     observation_size,
                                     prior_batch_shape=batch_shape)
    self._sanity_check_shapes(model, batch_shape, event_shape)
    # Test batching only over the transition op
    model = self._build_random_model(num_timesteps,
                                     latent_size,
                                     observation_size,
                                     transition_matrix_batch_shape=batch_shape)
    self._sanity_check_shapes(model, batch_shape, event_shape)
    # Test batching only over the transition noise
    model = self._build_random_model(num_timesteps,
                                     latent_size,
                                     observation_size,
                                     transition_noise_batch_shape=batch_shape)
    self._sanity_check_shapes(model, batch_shape, event_shape)
    # Test batching only over the observation op
    model = self._build_random_model(num_timesteps,
                                     latent_size,
                                     observation_size,
                                     observation_matrix_batch_shape=batch_shape)
    self._sanity_check_shapes(model, batch_shape, event_shape)
    # Test batching only over the observation noise
    model = self._build_random_model(num_timesteps,
                                     latent_size,
                                     observation_size,
                                     observation_noise_batch_shape=batch_shape)
    self._sanity_check_shapes(model, batch_shape, event_shape)
  def test_batch_shape_error(self):
    # TODO(review): unimplemented stub -- build a dist where components have
    # incompatible batch shapes. this should cause a problem somehow.
    pass
class _KalmanStepsTest(object):
  """Shared test cases for the per-timestep Kalman filter helpers.

  Subclasses supply `build_tensor`, so identical assertions run against both
  statically- and dynamically-shaped inputs.
  """
  def setUp(self):
    # Define a simple model with 2D latents and 1D observations.
    self.transition_matrix = np.asarray([[1., .5], [-.2, .3]], dtype=np.float32)
    self.get_transition_matrix_for_timestep = (
        lambda t: tfl.LinearOperatorFullMatrix(self.transition_matrix))
    self.bias = np.asarray([-4.3, .9], dtype=np.float32)
    self.get_transition_noise_for_timestep = (
        lambda t: tfd.MultivariateNormalDiag(self.bias, [1., 1.]))
    self.get_observation_matrix_for_timestep = (
        lambda t: tfl.LinearOperatorFullMatrix([[1., 1.]]))
    self.observation_bias = np.asarray([-.9], dtype=np.float32)
    self.get_observation_noise_for_timestep = (
        lambda t: tfd.MultivariateNormalDiag(self.observation_bias, [1.]))
  def testKalmanFilterStep(self):
    # One full filter step (predict + update) from a known prior state.
    prev_mean = np.asarray([[-2], [.4]], dtype=np.float32)
    prev_cov = np.asarray([[.5, .1], [.2, .6]], dtype=np.float32)
    x_observed = np.asarray([[4.]], dtype=np.float32)
    prev_mean_tensor = self.build_tensor(prev_mean)
    prev_cov_tensor = self.build_tensor(prev_cov)
    x_observed_tensor = self.build_tensor(x_observed)
    filter_step = build_kalman_filter_step(
        self.get_transition_matrix_for_timestep,
        self.get_transition_noise_for_timestep,
        self.get_observation_matrix_for_timestep,
        self.get_observation_noise_for_timestep)
    initial_filter_state = KalmanFilterState(
        filtered_mean=None,
        filtered_cov=None,
        predicted_mean=prev_mean_tensor,
        predicted_cov=prev_cov_tensor,
        observation_mean=None,
        observation_cov=None,
        log_marginal_likelihood=self.build_tensor(0.),
        timestep=self.build_tensor(0))
    filter_state = self.evaluate(
        filter_step(initial_filter_state,
                    x_observed_tensor))
    # Computed by running a believed-correct version of
    # the code.
    expected_filtered_mean = [[-0.104167], [2.295833]]
    expected_filtered_cov = [[0.325, -0.075], [-0.033333, 0.366667]]
    expected_predicted_mean = [[-3.25625], [1.609583]]
    expected_predicted_cov = [[1.3625, -0.0125], [-0.029167, 1.0525]]
    expected_observation_mean = [[-2.5]]
    expected_observation_cov = [[2.4]]
    expected_log_marginal_likelihood = -10.1587553024292
    self.assertAllClose(filter_state.filtered_mean,
                        expected_filtered_mean)
    self.assertAllClose(filter_state.filtered_cov,
                        expected_filtered_cov)
    self.assertAllClose(filter_state.predicted_mean,
                        expected_predicted_mean)
    self.assertAllClose(filter_state.predicted_cov,
                        expected_predicted_cov)
    self.assertAllClose(filter_state.observation_mean,
                        expected_observation_mean)
    self.assertAllClose(filter_state.observation_cov,
                        expected_observation_cov)
    self.assertAllClose(filter_state.log_marginal_likelihood,
                        expected_log_marginal_likelihood)
    # The step counter should have advanced by one.
    self.assertAllClose(filter_state.timestep, 1)
  def testKalmanTransition(self):
    # Predict step only: push mean/cov through the transition model.
    prev_mean = np.asarray([[-2], [.4]], dtype=np.float32)
    prev_cov = np.asarray([[.5, -.2], [-.2, .9]], dtype=np.float32)
    prev_mean_tensor = self.build_tensor(prev_mean)
    prev_cov_tensor = self.build_tensor(prev_cov)
    predicted_mean, predicted_cov = kalman_transition(
        prev_mean_tensor, prev_cov_tensor,
        self.get_transition_matrix_for_timestep(0),
        self.get_transition_noise_for_timestep(0))
    # Mean: F @ m + b; covariance: F @ P @ F^T + Q (Q is identity here).
    self.assertAllClose(self.evaluate(predicted_mean),
                        np.dot(self.transition_matrix,
                               prev_mean) + self.bias[:, np.newaxis])
    self.assertAllClose(self.evaluate(predicted_cov),
                        np.dot(self.transition_matrix,
                               np.dot(prev_cov,
                                      self.transition_matrix.T)) + np.eye(2))
  def testLinearGaussianObservation(self):
    # Update step only: posterior from a single observation.
    prev_mean = np.asarray([[-2], [.4]], dtype=np.float32)
    prev_cov = np.asarray([[.5, -.2], [-.2, .9]], dtype=np.float32)
    x_observed = np.asarray([[4.]], dtype=np.float32)
    prev_mean_tensor = self.build_tensor(prev_mean)
    prev_cov_tensor = self.build_tensor(prev_cov)
    x_observed_tensor = self.build_tensor(x_observed)
    observation_matrix = self.get_observation_matrix_for_timestep(0)
    observation_noise = self.get_observation_noise_for_timestep(0)
    (posterior_mean,
     posterior_cov,
     predictive_dist) = linear_gaussian_update(
         prev_mean_tensor, prev_cov_tensor,
         observation_matrix, observation_noise,
         x_observed_tensor)
    # Golden values from a believed-correct implementation.
    expected_posterior_mean = [[-1.025], [2.675]]
    expected_posterior_cov = [[0.455, -0.305], [-0.305, 0.655]]
    expected_predicted_mean = [-2.5]
    expected_predicted_cov = [[2.]]
    self.assertAllClose(self.evaluate(posterior_mean),
                        expected_posterior_mean)
    self.assertAllClose(self.evaluate(posterior_cov),
                        expected_posterior_cov)
    self.assertAllClose(self.evaluate(predictive_dist.mean()),
                        expected_predicted_mean)
    self.assertAllClose(self.evaluate(predictive_dist.covariance()),
                        expected_predicted_cov)
  def testMeanStep(self):
    prev_mean = np.asarray([[-2], [.4]], dtype=np.float32)
    prev_mean_tensor = self.build_tensor(prev_mean)
    mean_step = build_kalman_mean_step(
        self.get_transition_matrix_for_timestep,
        self.get_transition_noise_for_timestep,
        self.get_observation_matrix_for_timestep,
        self.get_observation_noise_for_timestep)
    new_mean, obs_mean = mean_step((prev_mean_tensor, None), t=0)
    self.assertAllClose(self.evaluate(new_mean),
                        np.dot(self.transition_matrix,
                               prev_mean) + self.bias[:, np.newaxis])
    # Observation matrix is [[1., 1.]], so the observed mean is the sum of
    # the latent means plus the observation bias.
    self.assertAllClose(self.evaluate(obs_mean),
                        np.sum(self.evaluate(new_mean)) +
                        self.observation_bias[:, np.newaxis])
  def testCovStep(self):
    prev_cov = np.asarray([[.5, -.2], [-.2, .9]], dtype=np.float32)
    prev_cov_tensor = self.build_tensor(prev_cov)
    cov_step = build_kalman_cov_step(
        self.get_transition_matrix_for_timestep,
        self.get_transition_noise_for_timestep,
        self.get_observation_matrix_for_timestep,
        self.get_observation_noise_for_timestep)
    new_cov, obs_cov = cov_step((prev_cov_tensor, None), t=0)
    self.assertAllClose(self.evaluate(new_cov),
                        np.dot(self.transition_matrix,
                               np.dot(prev_cov,
                                      self.transition_matrix.T)) + np.eye(2))
    # With H = [[1., 1.]], the observed variance is the sum of all latent
    # covariance entries plus the unit observation-noise variance.
    self.assertAllClose(self.evaluate(obs_cov),
                        [[np.sum(self.evaluate(new_cov)) + 1.]])
@test_util.run_all_in_graph_and_eager_modes
class KalmanStepsTestStatic(test.TestCase, _KalmanStepsTest):
  """Runs the shared Kalman-step tests with statically-shaped tensors."""

  def setUp(self):
    # Reuse the fixture defined on the shared mixin.
    return _KalmanStepsTest.setUp(self)

  def build_tensor(self, tensor):
    """Returns `tensor` as a tf.Tensor with a fully-known static shape."""
    static_tensor = tf.convert_to_tensor(tensor)
    return static_tensor
@test_util.run_all_in_graph_and_eager_modes
class KalmanStepsTestDynamic(test.TestCase, _KalmanStepsTest):
  """Runs the shared Kalman-step tests with dynamically-shaped tensors."""

  def setUp(self):
    # Reuse the fixture defined on the shared mixin.
    return _KalmanStepsTest.setUp(self)

  def build_tensor(self, tensor):
    """Routes `tensor` through a placeholder so its static shape is unknown."""
    as_tensor = tf.convert_to_tensor(tensor)
    return tf.placeholder_with_default(input=as_tensor, shape=None)
class _AugmentSampleShapeTest(object):
  """Shared cases for `_augment_sample_shape`; subclasses pick static/dynamic."""
  def testAugmentsShape(self):
    # [2, 3] is a trailing sub-shape of [5, 4, 2, 3], so the required
    # augmentation is the leading [5, 4].
    full_batch_shape, dist = self.build_inputs([5, 4, 2, 3], [2, 3])
    sample_shape = _augment_sample_shape(dist, full_batch_shape,
                                         validate_args=True)
    self.assertAllEqual(self.maybe_evaluate(sample_shape), [5, 4])
  def testSameShape(self):
    # Identical shapes require no augmentation.
    full_batch_shape, dist = self.build_inputs([5, 4, 2, 3], [5, 4, 2, 3])
    sample_shape = _augment_sample_shape(dist, full_batch_shape,
                                         validate_args=True)
    self.assertAllEqual(self.maybe_evaluate(sample_shape), [])
  # We omit the eager-mode decorator for error handling checks,
  # because eager mode throws dynamic errors statically which confuses
  # the test harness.
  def testNotPrefixThrowsError(self):
    # [1, 3] would need broadcasting against [2, 3], which is rejected.
    full_batch_shape, dist = self.build_inputs([5, 4, 2, 3], [1, 3])
    with self.assertRaisesError("Broadcasting is not supported"):
      self.maybe_evaluate(
          _augment_sample_shape(dist, full_batch_shape,
                                validate_args=True))
  def testTooManyDimsThrowsError(self):
    # The partial shape has more dimensions than the full shape.
    full_batch_shape, dist = self.build_inputs([5, 4, 2, 3], [6, 5, 4, 2, 3])
    with self.assertRaisesError(
        "(Broadcasting is not supported|Cannot broadcast)"):
      self.maybe_evaluate(
          _augment_sample_shape(dist, full_batch_shape,
                                validate_args=True))
@test_util.run_all_in_graph_and_eager_modes
class AugmentSampleShapeTestStatic(test.TestCase, _AugmentSampleShapeTest):
  """Exercises `_augment_sample_shape` with statically-known shapes."""

  def assertRaisesError(self, msg):
    # Static shape mismatches surface as ordinary Python exceptions.
    return self.assertRaisesRegexp(Exception, msg)

  def build_inputs(self, full_batch_shape, partial_batch_shape):
    """Returns a static int32 shape array and a dist batched to `partial_batch_shape`."""
    loc = tf.random_normal(partial_batch_shape)
    dist = tfd.Normal(loc, 1.)
    return np.asarray(full_batch_shape, dtype=np.int32), dist

  def maybe_evaluate(self, x):
    # Everything is static here, so no evaluation is needed.
    return x
@test_util.run_all_in_graph_and_eager_modes
class AugmentSampleShapeTestDynamic(test.TestCase, _AugmentSampleShapeTest):
  """Exercises `_augment_sample_shape` with runtime-fed (dynamic) shapes."""

  def assertRaisesError(self, msg):
    # Dynamic shape mismatches are raised while the op executes.
    return self.assertRaisesOpError(msg)

  def _hide_shape(self, shape):
    # Route the value through a placeholder so its static shape is unknown.
    return tf.placeholder_with_default(
        input=np.asarray(shape, dtype=np.int32),
        shape=None)

  def build_inputs(self, full_batch_shape, partial_batch_shape):
    """Returns a dynamically-shaped shape tensor and a matching distribution."""
    dynamic_full = self._hide_shape(full_batch_shape)
    dynamic_partial = self._hide_shape(partial_batch_shape)
    dist = tfd.Normal(tf.random_normal(dynamic_partial), 1.)
    return dynamic_full, dist

  def maybe_evaluate(self, x):
    return self.evaluate(x)
if __name__ == "__main__":
test.main() | 0.926058 | 0.571348 |
import math
from typing import Tuple
import numpy as np
from examples.PolicyGradient.TestRigs.Interface.RewardFunction1D import RewardFunction1D
"""
This Reward Function has two local maxima and one global maxima. This is modelled as 2.5 cycles
of a sinusoidal curve with the 2nd (central) peek weighed as give it a larger magnitude than the
peeks either side. The function is symmetrical around the turning point of the central peek.
So the RL Agent should be able to find and maximise to move the agent to the central peek even
when starting at the state space extremity and having to pass through the local maxima.
If x is a value in radians and state space is in range (0 to m)then the reward function is:
sin(x)* EXP(1-ABS((x-(m/2))/(m/2)))
"""
class LocalMaximaRewardFunction1D(RewardFunction1D):
    # Discrete state-space bounds and the per-action traversal step.
    __state_min = int(0)
    __state_max = int(60)
    __state_step = int(1)
    # The central state, where the global maximum of the reward sits.
    __center_state = int(__state_max / 2.0)
    # Equivalent continuous domain in radians: 2.5 full sine cycles.
    __x_min = float(0)
    __x_max = float(2.5 * (2 * math.pi))
    __x_step = float(15.0 * (math.pi / 180.0))  # 15 degree steps as radians
    def __init__(self):
        """
        Start in a default reset state.
        """
        self.state = None
        # Total number of discrete steps spanning the state space.
        self.num_steps = int((self.__state_max - self.__state_min) / self.__state_step)
        # Episode terminates on reaching the central state.
        # NOTE(review): reset() may draw this exact state, which would start
        # an episode already terminal - confirm this is intended.
        self.done_state = int(self.num_steps / 2)
        self.reset()
        return
    def reset(self) -> np.array:
        """
        Reset state to a random step between state space min and max
        :return: The state after reset was performed.
        """
        self.state = self.__state_step * np.random.randint(self.num_steps)
        return np.array([self.state])
    def reward(self,
               state: float) -> float:
        """
        Compute the reward for the given state;
        If x is a value in radians and state space is in range (0 to m)then the reward function is:
        sin(x)* EXP(1-ABS((x-(m/2))/(m/2)))
        :param state:
        :return: Reward for given state
        """
        # Map the discrete state index onto the radian domain.
        x = state * self.__x_step
        return math.sin(x) * math.exp(1 - math.fabs((x - (self.__x_max / 2.0)) / (self.__x_max / 2.0)))
    def step(self,
             actn: int) -> Tuple[np.array, float, bool]:
        """
        Take the specified action
        :param actn: the action to take
        :return: The new state, the reward for the state transition and bool, which is true if episode ended
        """
        # Action 0 moves up the state space (+step); action 1 moves down (-step).
        if actn == 0:
            self.state += self.__state_step
        elif actn == 1:
            self.state -= self.__state_step
        else:
            raise RuntimeError("Action can only be value 0 or 1 so [" + str(actn) + "] is illegal")
        # Done when the agent leaves the state space or reaches the central
        # (terminal) state.
        dn = (self.state < self.__state_min or self.state > self.__state_max or self.state == self.done_state)
        return np.array([self.state]), self.reward(self.state), dn
    @classmethod
    def state_space_dimension(cls) -> int:
        """
        The dimensions of the state space
        :return: Always 1 as this is for 1D reward functions.
        """
        return super(LocalMaximaRewardFunction1D, cls).state_space_dimension()
    def state_shape(self) -> Tuple[int, int]:
        """
        What are the dimensions (Shape) of the state space
        :return: Tuple describing the shape
        """
        return super(LocalMaximaRewardFunction1D, self).state_shape()
    @classmethod
    def num_actions(cls) -> int:
        """
        The number of actions
        :return: Always 2 as this is a 1D state space so only 2 directions of state space traversal.
        """
        return super(LocalMaximaRewardFunction1D, cls).num_actions()
    def state_min(self):
        """
        What is the minimum value of 1D state space
        :return: Minimum value of 1D state space
        """
        return self.__state_min
    def state_max(self):
        """
        What is the maximum value of 1D state space
        :return: Maximum value of 1D state space
        """
        return self.__state_max
    def state_step(self):
        """
        What is the discrete step increment used to traverse state space (by actions)
        :return: The discrete step increment used to traverse state space (by actions)
        """
        return self.__state_step | examples/PolicyGradient/TestRigs/RewardFunctions/LocalMaximaRewardFunction1D.py | import math
from typing import Tuple
import numpy as np
from examples.PolicyGradient.TestRigs.Interface.RewardFunction1D import RewardFunction1D
"""
This Reward Function has two local maxima and one global maxima. This is modelled as 2.5 cycles
of a sinusoidal curve with the 2nd (central) peek weighed as give it a larger magnitude than the
peeks either side. The function is symmetrical around the turning point of the central peek.
So the RL Agent should be able to find and maximise to move the agent to the central peek even
when starting at the state space extremity and having to pass through the local maxima.
If x is a value in radians and state space is in range (0 to m)then the reward function is:
sin(x)* EXP(1-ABS((x-(m/2))/(m/2)))
"""
class LocalMaximaRewardFunction1D(RewardFunction1D):
__state_min = int(0)
__state_max = int(60)
__state_step = int(1)
__center_state = int(__state_max / 2.0)
__x_min = float(0)
__x_max = float(2.5 * (2 * math.pi))
__x_step = float(15.0 * (math.pi / 180.0)) # 15 degree steps as radians
def __init__(self):
"""
Start in a default reset state.
"""
self.state = None
self.num_steps = int((self.__state_max - self.__state_min) / self.__state_step)
self.done_state = int(self.num_steps / 2)
self.reset()
return
def reset(self) -> np.array:
"""
Reset state to a random step between state space min and max
:return: The state after reset was performed.
"""
self.state = self.__state_step * np.random.randint(self.num_steps)
return np.array([self.state])
def reward(self,
state: float) -> float:
"""
Compute the reward for the given state;
If x is a value in radians and state space is in range (0 to m)then the reward function is:
sin(x)* EXP(1-ABS((x-(m/2))/(m/2)))
:param state:
:return: Reward for given state
"""
x = state * self.__x_step
return math.sin(x) * math.exp(1 - math.fabs((x - (self.__x_max / 2.0)) / (self.__x_max / 2.0)))
def step(self,
actn: int) -> Tuple[np.array, float, bool]:
"""
Take the specified action
:param actn: the action to take
:return: The new state, the reward for the state transition and bool, which is true if episode ended
"""
if actn == 0:
self.state += self.__state_step
elif actn == 1:
self.state -= self.__state_step
else:
raise RuntimeError("Action can only be value 0 or 1 so [" + str(actn) + "] is illegal")
dn = (self.state < self.__state_min or self.state > self.__state_max or self.state == self.done_state)
return np.array([self.state]), self.reward(self.state), dn
@classmethod
def state_space_dimension(cls) -> int:
"""
The dimensions of the state space
:return: Always 1 as this is for 1D reward functions.
"""
return super(LocalMaximaRewardFunction1D, cls).state_space_dimension()
def state_shape(self) -> Tuple[int, int]:
"""
What are the dimensions (Shape) of the state space
:return: Tuple describing the shape
"""
return super(LocalMaximaRewardFunction1D, self).state_shape()
@classmethod
def num_actions(cls) -> int:
"""
The number of actions
:return: Always 2 as this is a 1D state space so only 2 directions of state space traversal.
"""
return super(LocalMaximaRewardFunction1D, cls).num_actions()
def state_min(self):
"""
What is the minimum value of 1D state space
:return: Minimum value of 1D state space
"""
return self.__state_min
def state_max(self):
"""
What is the maximum value of 1D state space
:return: Maximum value of 1D state space
"""
return self.__state_max
def state_step(self):
"""
What is the discrete step increment used to traverse state space (by actions)
:return: The discrete step increment used to traverse state space (by actions)
"""
return self.__state_step | 0.920191 | 0.791942 |
from numpy import all, abs
from overloads import hstack
from FDmisc import FuncDesignerException
def d(arg, v, **kw):#, *args, **kw):
N = len(v)
# if len(args) == 1:
# derivativeSide = args[0]
# assert derivativeSide in ('left', 'right', 'both')
# else:
# derivativeSide = 'both'
stencil = kw.get('stencil', 3)
if stencil not in (2, 3):
raise FuncDesignerException('for d1 only stencil = 2 and 3 are implemented')
timestep = v[1]-v[0]
if not all(abs(v[1:] - v [:-1] - timestep) < 1e-10):
raise FuncDesignerException('unimplemented for non-uniform step yet')
if stencil == 2:
r1 = -3*arg[0] + 4*arg[1] - arg[2]
r2 = (arg[2:N] - arg[0:N-2]) / 2.0
r3 = 3*arg[N-1] - 4*arg[N-2] + arg[N-3]
return hstack((r1, r2, r3)) / timestep
elif stencil == 3:
r1 = -22 * arg[0] + 36 * arg[1] - 18 * arg[2] + 4 * arg[3]
r2 = -22 * arg[1] + 36 * arg[2] - 18 * arg[3] + 4 * arg[4] # TODO: mb rework it?
r3 = arg[0:N-4] -8*arg[1:N-3] + 8*arg[3:N-1] - arg[4:N]
r4 = 22 * arg[N-5] - 36 * arg[N-4] + 18 * arg[N-3] - 4 * arg[N-2] # TODO: mb rework it?
r5 = 22 * arg[N-4] - 36 * arg[N-3] + 18 * arg[N-2] - 4 * arg[N-1]
return hstack((r1, r2, r3, r4, r5)) / (12*timestep)
# if derivativeSide == 'both':
# r = hstack((r1, r2, r3))
# elif derivativeSide == 'left':
# r = hstack((r1, r2))
# else: # derivativeSide == 'right'
# r = hstack((r2, r3))
return r
def d2(arg, v, **kw):#, *args, **kw):
N = len(v)
timestep = v[1]-v[0]
if not all(abs(v[1:] - v [:-1] - timestep) < 1e-10):
raise FuncDesignerException('unimplemented for non-uniform step yet')
stencil = kw.get('stencil', 1)
if stencil not in (1, ):
raise FuncDesignerException('for d2 only stencil = 1 is implemented')
if stencil == 1:
r1 = arg[0] - 2*arg[1] + arg[2]
r2 = arg[0:N-2] - 2 * arg[1:N-1] + arg[2:N]
r3 = arg[N-1] - 2*arg[N-2] + arg[N-3]
return hstack((r1, r2, r3)) / timestep**2 | lib/python2.7/site-packages/FuncDesigner/stencils.py | from numpy import all, abs
from overloads import hstack
from FDmisc import FuncDesignerException
def d(arg, v, **kw):#, *args, **kw):
N = len(v)
# if len(args) == 1:
# derivativeSide = args[0]
# assert derivativeSide in ('left', 'right', 'both')
# else:
# derivativeSide = 'both'
stencil = kw.get('stencil', 3)
if stencil not in (2, 3):
raise FuncDesignerException('for d1 only stencil = 2 and 3 are implemented')
timestep = v[1]-v[0]
if not all(abs(v[1:] - v [:-1] - timestep) < 1e-10):
raise FuncDesignerException('unimplemented for non-uniform step yet')
if stencil == 2:
r1 = -3*arg[0] + 4*arg[1] - arg[2]
r2 = (arg[2:N] - arg[0:N-2]) / 2.0
r3 = 3*arg[N-1] - 4*arg[N-2] + arg[N-3]
return hstack((r1, r2, r3)) / timestep
elif stencil == 3:
r1 = -22 * arg[0] + 36 * arg[1] - 18 * arg[2] + 4 * arg[3]
r2 = -22 * arg[1] + 36 * arg[2] - 18 * arg[3] + 4 * arg[4] # TODO: mb rework it?
r3 = arg[0:N-4] -8*arg[1:N-3] + 8*arg[3:N-1] - arg[4:N]
r4 = 22 * arg[N-5] - 36 * arg[N-4] + 18 * arg[N-3] - 4 * arg[N-2] # TODO: mb rework it?
r5 = 22 * arg[N-4] - 36 * arg[N-3] + 18 * arg[N-2] - 4 * arg[N-1]
return hstack((r1, r2, r3, r4, r5)) / (12*timestep)
# if derivativeSide == 'both':
# r = hstack((r1, r2, r3))
# elif derivativeSide == 'left':
# r = hstack((r1, r2))
# else: # derivativeSide == 'right'
# r = hstack((r2, r3))
return r
def d2(arg, v, **kw):#, *args, **kw):
N = len(v)
timestep = v[1]-v[0]
if not all(abs(v[1:] - v [:-1] - timestep) < 1e-10):
raise FuncDesignerException('unimplemented for non-uniform step yet')
stencil = kw.get('stencil', 1)
if stencil not in (1, ):
raise FuncDesignerException('for d2 only stencil = 1 is implemented')
if stencil == 1:
r1 = arg[0] - 2*arg[1] + arg[2]
r2 = arg[0:N-2] - 2 * arg[1:N-1] + arg[2:N]
r3 = arg[N-1] - 2*arg[N-2] + arg[N-3]
return hstack((r1, r2, r3)) / timestep**2 | 0.186206 | 0.332744 |
__version__ = "3.7.4.post11"
from typing import Tuple
from . import hdrs as hdrs
from .client import (
BaseConnector as BaseConnector,
ClientConnectionError as ClientConnectionError,
ClientConnectorCertificateError as ClientConnectorCertificateError,
ClientConnectorError as ClientConnectorError,
ClientConnectorSSLError as ClientConnectorSSLError,
ClientError as ClientError,
ClientHttpProxyError as ClientHttpProxyError,
ClientOSError as ClientOSError,
ClientPayloadError as ClientPayloadError,
ClientProxyConnectionError as ClientProxyConnectionError,
ClientRequest as ClientRequest,
ClientResponse as ClientResponse,
ClientResponseError as ClientResponseError,
ClientSession as ClientSession,
ClientSSLError as ClientSSLError,
ClientTimeout as ClientTimeout,
ClientWebSocketResponse as ClientWebSocketResponse,
ContentTypeError as ContentTypeError,
Fingerprint as Fingerprint,
InvalidURL as InvalidURL,
NamedPipeConnector as NamedPipeConnector,
RequestInfo as RequestInfo,
ServerConnectionError as ServerConnectionError,
ServerDisconnectedError as ServerDisconnectedError,
ServerFingerprintMismatch as ServerFingerprintMismatch,
ServerTimeoutError as ServerTimeoutError,
TCPConnector as TCPConnector,
TooManyRedirects as TooManyRedirects,
UnixConnector as UnixConnector,
WSServerHandshakeError as WSServerHandshakeError,
request as request,
)
from .cookiejar import CookieJar as CookieJar, DummyCookieJar as DummyCookieJar
from .formdata import FormData as FormData
from .helpers import BasicAuth as BasicAuth, ChainMapProxy as ChainMapProxy
from .http import (
HttpVersion as HttpVersion,
HttpVersion10 as HttpVersion10,
HttpVersion11 as HttpVersion11,
WebSocketError as WebSocketError,
WSCloseCode as WSCloseCode,
WSMessage as WSMessage,
WSMsgType as WSMsgType,
)
from .multipart import (
BadContentDispositionHeader as BadContentDispositionHeader,
BadContentDispositionParam as BadContentDispositionParam,
BodyPartReader as BodyPartReader,
MultipartReader as MultipartReader,
MultipartWriter as MultipartWriter,
content_disposition_filename as content_disposition_filename,
parse_content_disposition as parse_content_disposition,
)
from .payload import (
PAYLOAD_REGISTRY as PAYLOAD_REGISTRY,
AsyncIterablePayload as AsyncIterablePayload,
BufferedReaderPayload as BufferedReaderPayload,
BytesIOPayload as BytesIOPayload,
BytesPayload as BytesPayload,
IOBasePayload as IOBasePayload,
JsonPayload as JsonPayload,
Payload as Payload,
StringIOPayload as StringIOPayload,
StringPayload as StringPayload,
TextIOPayload as TextIOPayload,
get_payload as get_payload,
payload_type as payload_type,
)
from .payload_streamer import streamer as streamer
from .resolver import (
AsyncResolver as AsyncResolver,
DefaultResolver as DefaultResolver,
ThreadedResolver as ThreadedResolver,
)
from .signals import Signal as Signal
from .streams import (
EMPTY_PAYLOAD as EMPTY_PAYLOAD,
DataQueue as DataQueue,
EofStream as EofStream,
FlowControlDataQueue as FlowControlDataQueue,
StreamReader as StreamReader,
)
from .tracing import (
TraceConfig as TraceConfig,
TraceConnectionCreateEndParams as TraceConnectionCreateEndParams,
TraceConnectionCreateStartParams as TraceConnectionCreateStartParams,
TraceConnectionQueuedEndParams as TraceConnectionQueuedEndParams,
TraceConnectionQueuedStartParams as TraceConnectionQueuedStartParams,
TraceConnectionReuseconnParams as TraceConnectionReuseconnParams,
TraceDnsCacheHitParams as TraceDnsCacheHitParams,
TraceDnsCacheMissParams as TraceDnsCacheMissParams,
TraceDnsResolveHostEndParams as TraceDnsResolveHostEndParams,
TraceDnsResolveHostStartParams as TraceDnsResolveHostStartParams,
TraceRequestChunkSentParams as TraceRequestChunkSentParams,
TraceRequestEndParams as TraceRequestEndParams,
TraceRequestExceptionParams as TraceRequestExceptionParams,
TraceRequestRedirectParams as TraceRequestRedirectParams,
TraceRequestStartParams as TraceRequestStartParams,
TraceResponseChunkReceivedParams as TraceResponseChunkReceivedParams,
)
__all__: Tuple[str, ...] = (
"hdrs",
# client
"BaseConnector",
"ClientConnectionError",
"ClientConnectorCertificateError",
"ClientConnectorError",
"ClientConnectorSSLError",
"ClientError",
"ClientHttpProxyError",
"ClientOSError",
"ClientPayloadError",
"ClientProxyConnectionError",
"ClientResponse",
"ClientRequest",
"ClientResponseError",
"ClientSSLError",
"ClientSession",
"ClientTimeout",
"ClientWebSocketResponse",
"ContentTypeError",
"Fingerprint",
"InvalidURL",
"RequestInfo",
"ServerConnectionError",
"ServerDisconnectedError",
"ServerFingerprintMismatch",
"ServerTimeoutError",
"TCPConnector",
"TooManyRedirects",
"UnixConnector",
"NamedPipeConnector",
"WSServerHandshakeError",
"request",
# cookiejar
"CookieJar",
"DummyCookieJar",
# formdata
"FormData",
# helpers
"BasicAuth",
"ChainMapProxy",
# http
"HttpVersion",
"HttpVersion10",
"HttpVersion11",
"WSMsgType",
"WSCloseCode",
"WSMessage",
"WebSocketError",
# multipart
"BadContentDispositionHeader",
"BadContentDispositionParam",
"BodyPartReader",
"MultipartReader",
"MultipartWriter",
"content_disposition_filename",
"parse_content_disposition",
# payload
"AsyncIterablePayload",
"BufferedReaderPayload",
"BytesIOPayload",
"BytesPayload",
"IOBasePayload",
"JsonPayload",
"PAYLOAD_REGISTRY",
"Payload",
"StringIOPayload",
"StringPayload",
"TextIOPayload",
"get_payload",
"payload_type",
# payload_streamer
"streamer",
# resolver
"AsyncResolver",
"DefaultResolver",
"ThreadedResolver",
# signals
"Signal",
"DataQueue",
"EMPTY_PAYLOAD",
"EofStream",
"FlowControlDataQueue",
"StreamReader",
# tracing
"TraceConfig",
"TraceConnectionCreateEndParams",
"TraceConnectionCreateStartParams",
"TraceConnectionQueuedEndParams",
"TraceConnectionQueuedStartParams",
"TraceConnectionReuseconnParams",
"TraceDnsCacheHitParams",
"TraceDnsCacheMissParams",
"TraceDnsResolveHostEndParams",
"TraceDnsResolveHostStartParams",
"TraceRequestChunkSentParams",
"TraceRequestEndParams",
"TraceRequestExceptionParams",
"TraceRequestRedirectParams",
"TraceRequestStartParams",
"TraceResponseChunkReceivedParams",
)
try:
from .worker import GunicornUVLoopWebWorker, GunicornWebWorker
__all__ += ("GunicornWebWorker", "GunicornUVLoopWebWorker")
except ImportError: # pragma: no cover
pass | aiohttp/__init__.py | __version__ = "3.7.4.post11"
from typing import Tuple
from . import hdrs as hdrs
from .client import (
BaseConnector as BaseConnector,
ClientConnectionError as ClientConnectionError,
ClientConnectorCertificateError as ClientConnectorCertificateError,
ClientConnectorError as ClientConnectorError,
ClientConnectorSSLError as ClientConnectorSSLError,
ClientError as ClientError,
ClientHttpProxyError as ClientHttpProxyError,
ClientOSError as ClientOSError,
ClientPayloadError as ClientPayloadError,
ClientProxyConnectionError as ClientProxyConnectionError,
ClientRequest as ClientRequest,
ClientResponse as ClientResponse,
ClientResponseError as ClientResponseError,
ClientSession as ClientSession,
ClientSSLError as ClientSSLError,
ClientTimeout as ClientTimeout,
ClientWebSocketResponse as ClientWebSocketResponse,
ContentTypeError as ContentTypeError,
Fingerprint as Fingerprint,
InvalidURL as InvalidURL,
NamedPipeConnector as NamedPipeConnector,
RequestInfo as RequestInfo,
ServerConnectionError as ServerConnectionError,
ServerDisconnectedError as ServerDisconnectedError,
ServerFingerprintMismatch as ServerFingerprintMismatch,
ServerTimeoutError as ServerTimeoutError,
TCPConnector as TCPConnector,
TooManyRedirects as TooManyRedirects,
UnixConnector as UnixConnector,
WSServerHandshakeError as WSServerHandshakeError,
request as request,
)
from .cookiejar import CookieJar as CookieJar, DummyCookieJar as DummyCookieJar
from .formdata import FormData as FormData
from .helpers import BasicAuth as BasicAuth, ChainMapProxy as ChainMapProxy
from .http import (
HttpVersion as HttpVersion,
HttpVersion10 as HttpVersion10,
HttpVersion11 as HttpVersion11,
WebSocketError as WebSocketError,
WSCloseCode as WSCloseCode,
WSMessage as WSMessage,
WSMsgType as WSMsgType,
)
from .multipart import (
BadContentDispositionHeader as BadContentDispositionHeader,
BadContentDispositionParam as BadContentDispositionParam,
BodyPartReader as BodyPartReader,
MultipartReader as MultipartReader,
MultipartWriter as MultipartWriter,
content_disposition_filename as content_disposition_filename,
parse_content_disposition as parse_content_disposition,
)
from .payload import (
PAYLOAD_REGISTRY as PAYLOAD_REGISTRY,
AsyncIterablePayload as AsyncIterablePayload,
BufferedReaderPayload as BufferedReaderPayload,
BytesIOPayload as BytesIOPayload,
BytesPayload as BytesPayload,
IOBasePayload as IOBasePayload,
JsonPayload as JsonPayload,
Payload as Payload,
StringIOPayload as StringIOPayload,
StringPayload as StringPayload,
TextIOPayload as TextIOPayload,
get_payload as get_payload,
payload_type as payload_type,
)
from .payload_streamer import streamer as streamer
from .resolver import (
AsyncResolver as AsyncResolver,
DefaultResolver as DefaultResolver,
ThreadedResolver as ThreadedResolver,
)
from .signals import Signal as Signal
from .streams import (
EMPTY_PAYLOAD as EMPTY_PAYLOAD,
DataQueue as DataQueue,
EofStream as EofStream,
FlowControlDataQueue as FlowControlDataQueue,
StreamReader as StreamReader,
)
from .tracing import (
TraceConfig as TraceConfig,
TraceConnectionCreateEndParams as TraceConnectionCreateEndParams,
TraceConnectionCreateStartParams as TraceConnectionCreateStartParams,
TraceConnectionQueuedEndParams as TraceConnectionQueuedEndParams,
TraceConnectionQueuedStartParams as TraceConnectionQueuedStartParams,
TraceConnectionReuseconnParams as TraceConnectionReuseconnParams,
TraceDnsCacheHitParams as TraceDnsCacheHitParams,
TraceDnsCacheMissParams as TraceDnsCacheMissParams,
TraceDnsResolveHostEndParams as TraceDnsResolveHostEndParams,
TraceDnsResolveHostStartParams as TraceDnsResolveHostStartParams,
TraceRequestChunkSentParams as TraceRequestChunkSentParams,
TraceRequestEndParams as TraceRequestEndParams,
TraceRequestExceptionParams as TraceRequestExceptionParams,
TraceRequestRedirectParams as TraceRequestRedirectParams,
TraceRequestStartParams as TraceRequestStartParams,
TraceResponseChunkReceivedParams as TraceResponseChunkReceivedParams,
)
__all__: Tuple[str, ...] = (
"hdrs",
# client
"BaseConnector",
"ClientConnectionError",
"ClientConnectorCertificateError",
"ClientConnectorError",
"ClientConnectorSSLError",
"ClientError",
"ClientHttpProxyError",
"ClientOSError",
"ClientPayloadError",
"ClientProxyConnectionError",
"ClientResponse",
"ClientRequest",
"ClientResponseError",
"ClientSSLError",
"ClientSession",
"ClientTimeout",
"ClientWebSocketResponse",
"ContentTypeError",
"Fingerprint",
"InvalidURL",
"RequestInfo",
"ServerConnectionError",
"ServerDisconnectedError",
"ServerFingerprintMismatch",
"ServerTimeoutError",
"TCPConnector",
"TooManyRedirects",
"UnixConnector",
"NamedPipeConnector",
"WSServerHandshakeError",
"request",
# cookiejar
"CookieJar",
"DummyCookieJar",
# formdata
"FormData",
# helpers
"BasicAuth",
"ChainMapProxy",
# http
"HttpVersion",
"HttpVersion10",
"HttpVersion11",
"WSMsgType",
"WSCloseCode",
"WSMessage",
"WebSocketError",
# multipart
"BadContentDispositionHeader",
"BadContentDispositionParam",
"BodyPartReader",
"MultipartReader",
"MultipartWriter",
"content_disposition_filename",
"parse_content_disposition",
# payload
"AsyncIterablePayload",
"BufferedReaderPayload",
"BytesIOPayload",
"BytesPayload",
"IOBasePayload",
"JsonPayload",
"PAYLOAD_REGISTRY",
"Payload",
"StringIOPayload",
"StringPayload",
"TextIOPayload",
"get_payload",
"payload_type",
# payload_streamer
"streamer",
# resolver
"AsyncResolver",
"DefaultResolver",
"ThreadedResolver",
# signals
"Signal",
"DataQueue",
"EMPTY_PAYLOAD",
"EofStream",
"FlowControlDataQueue",
"StreamReader",
# tracing
"TraceConfig",
"TraceConnectionCreateEndParams",
"TraceConnectionCreateStartParams",
"TraceConnectionQueuedEndParams",
"TraceConnectionQueuedStartParams",
"TraceConnectionReuseconnParams",
"TraceDnsCacheHitParams",
"TraceDnsCacheMissParams",
"TraceDnsResolveHostEndParams",
"TraceDnsResolveHostStartParams",
"TraceRequestChunkSentParams",
"TraceRequestEndParams",
"TraceRequestExceptionParams",
"TraceRequestRedirectParams",
"TraceRequestStartParams",
"TraceResponseChunkReceivedParams",
)
try:
from .worker import GunicornUVLoopWebWorker, GunicornWebWorker
__all__ += ("GunicornWebWorker", "GunicornUVLoopWebWorker")
except ImportError: # pragma: no cover
pass | 0.507324 | 0.052887 |
import argparse, gzip, sys
sys.path.insert(0,'.')
from collections import defaultdict
import numpy as np
from Bio.SeqIO.FastaIO import SimpleFastaParser
from Bio.Seq import Seq
from Bio import AlignIO, SeqIO
from get_distance_to_focal_set import sequence_to_int_array
from augur.utils import read_metadata
from datetime import datetime
tmrca = datetime(2019, 12, 1).toordinal()
def expected_divergence(date, rate_per_day = 25/365):
try:
return (datetime.strptime(date, '%Y-%m-%d').toordinal() - tmrca)*rate_per_day
except:
return np.nan
def analyze_divergence(sequences, metadata, reference, mask_5p=0, mask_3p=0):
int_ref = sequence_to_int_array(reference, fill_gaps=False)
diagnostics = defaultdict(dict)
fill_value = 110
gap_value = 45
ws = 50
known_true_clusters = [(28880,28883)]
known_true_cluster_array = np.ones_like(int_ref, dtype=int)
for b,e in known_true_clusters:
known_true_cluster_array[b:e]=0
cluster_cut_off = 10
with open(sequences) as fasta:
for h,s in SimpleFastaParser(fasta):
left_gaps = len(s) - len(s.lstrip('-'))
right_gaps = len(s) - len(s.rstrip('-'))
s = sequence_to_int_array(s, fill_value=fill_value, fill_gaps=False)
# mask from both ends to avoid exclusion for problems at sites that will be masked anyway
if mask_5p:
s[:mask_5p] = fill_value
if mask_3p:
s[-mask_3p:] = fill_value
# fill terminal gaps -- those will be filled anyway
if left_gaps:
s[:left_gaps] = fill_value
if right_gaps:
s[-right_gaps:] = fill_value
# determine non-gap non-N mismatches
snps = (int_ref!=s) & (s!=fill_value) & (s!=gap_value)
# determine N positions
filled = s==fill_value
# determine gap positions (cast to int to detect start and ends)
gaps = np.array(s==gap_value, dtype=int)
gap_start = np.where(np.diff(gaps)==1)[0]
gap_end = np.where(np.diff(gaps)==-1)[0]
# determined mutation clusters by convolution with an array of ones => running window average
clusters = np.array(np.convolve(snps*known_true_cluster_array, np.ones(ws), mode='same')>=cluster_cut_off, dtype=int)
# determine start and end of clusters. extend by half window size on both ends.
cluster_start = [0] if clusters[0] else []
cluster_start.extend([max(0, x-ws//2) for x in np.where(np.diff(clusters)==1)[0]])
cluster_end = [min(int_ref.shape[0], x+ws//2) for x in np.where(np.diff(clusters)==-1)[0]]
if clusters[-1]:
cluster_end.append(int_ref.shape[0])
diagnostics[h] = {'snps':list(np.where(snps)[0]), 'gaps': list(zip(gap_start, gap_end)), 'gap_sum':np.sum(gaps),
'no_data':np.sum(filled) - mask_3p - mask_5p,
'clusters': [(b,e,np.sum(snps[b:e])) for b,e in zip(cluster_start, cluster_end)]}
return diagnostics
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="check sequences for anomalies",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--alignment", type=str, required=True, help="FASTA file of alignment")
parser.add_argument("--reference", type = str, required=True, help="reference sequence")
parser.add_argument("--metadata", type = str, required=True, help="metadata")
parser.add_argument("--mask-from-beginning", type = int, default=0, help="number of bases to mask from start")
parser.add_argument("--mask-from-end", type = int, default=0, help="number of bases to mask from end")
parser.add_argument("--output-diagnostics", type=str, required=True, help="Output of stats for every sequence")
parser.add_argument("--output-flagged", type=str, required=True, help="Output of sequences flagged for exclusion with specific reasons")
parser.add_argument("--output-exclusion-list", type=str, required=True, help="Output to-be-reviewed addition to exclude.txt")
args = parser.parse_args()
# load entire alignment and the alignment of focal sequences (upper case -- probably not necessary)
ref = SeqIO.read(args.reference, 'genbank').seq
metadata, _ = read_metadata(args.metadata)
diagnostics = analyze_divergence(args.alignment, metadata, ref,
mask_5p=args.mask_from_beginning,
mask_3p=args.mask_from_end)
snp_cutoff = 25
no_data_cutoff = 3000
flagged_sequences = []
# output diagnostics for each sequence, ordered by divergence
with open(args.output_diagnostics, 'w') as diag:
diag.write('\t'.join(['strain', 'divergence', 'excess divergence', '#Ns', '#gaps', 'clusters', 'gaps', 'all_snps', 'gap_list'])+'\n')
for s, d in sorted(diagnostics.items(), key=lambda x:len(x[1]['snps']), reverse=True):
expected_div = expected_divergence(metadata[s]['date']) if s in metadata else np.nan
diag.write('\t'.join(map(str,[s, len(d['snps']), round(len(d['snps']) - expected_div,2),
d['no_data'], d['gap_sum'],
','.join([f'{b}-{e}' for b,e,n in d['clusters']]),
','.join([f'{b}-{e}' for b,e in d['gaps']]),
','.join(map(str, d['snps'])),
",".join([",".join([str(x) for x in range(b,e)]) for b,e in d["gaps"]])]))+'\n')
msg = ""
reasons = []
if not np.isnan(expected_div) and np.abs(len(d['snps']) - expected_div) > snp_cutoff:
msg += f"too high divergence {np.abs(len(d['snps']) - expected_div):1.2f}>{snp_cutoff};"
reasons.append('divergence')
if len(d['clusters']):
msg += f"{len(d['clusters'])} SNP clusters with {','.join([str(x[2]) for x in d['clusters']])} SNPs each;"
reasons.append('clustered mutations')
if d['no_data']>no_data_cutoff:
msg += f"too many Ns ({d['no_data']}>{no_data_cutoff})"
reasons.append('too many ambigous sites')
if msg:
flagged_sequences.append([s, msg, tuple(reasons), metadata.get(s,{})])
# write out file with sequences flagged for exclusion sorted by date
to_exclude_by_reason = defaultdict(list)
with open(args.output_flagged, 'w') as flag:
flag.write(f'strain\tcollection_date\tsubmission_date\tflagging_reason\n')
for s, msg, reasons, meta in sorted(flagged_sequences, key=lambda x:x[3].get('date_submitted', 'XX'), reverse=True):
flag.write(f"{s}\t{metadata[s]['date'] if s in metadata else 'XXXX-XX-XX'}\t{metadata[s].get('date_submitted', 'XXXX-XX-XX') if s in metadata else 'XXXX-XX-XX'}\t{msg}\n")
to_exclude_by_reason[reasons].append(s)
# write out file with sequences flagged for exclusion sorted by date
with open(args.output_exclusion_list, 'w') as excl:
for reason in to_exclude_by_reason:
excl.write(f'\n# {"&".join(reason)}\n')
excl.write('\n'.join(to_exclude_by_reason[reason])+'\n') | scripts/diagnostic.py | import argparse, gzip, sys
sys.path.insert(0,'.')
from collections import defaultdict
import numpy as np
from Bio.SeqIO.FastaIO import SimpleFastaParser
from Bio.Seq import Seq
from Bio import AlignIO, SeqIO
from get_distance_to_focal_set import sequence_to_int_array
from augur.utils import read_metadata
from datetime import datetime
tmrca = datetime(2019, 12, 1).toordinal()
def expected_divergence(date, rate_per_day = 25/365):
try:
return (datetime.strptime(date, '%Y-%m-%d').toordinal() - tmrca)*rate_per_day
except:
return np.nan
def analyze_divergence(sequences, metadata, reference, mask_5p=0, mask_3p=0):
int_ref = sequence_to_int_array(reference, fill_gaps=False)
diagnostics = defaultdict(dict)
fill_value = 110
gap_value = 45
ws = 50
known_true_clusters = [(28880,28883)]
known_true_cluster_array = np.ones_like(int_ref, dtype=int)
for b,e in known_true_clusters:
known_true_cluster_array[b:e]=0
cluster_cut_off = 10
with open(sequences) as fasta:
for h,s in SimpleFastaParser(fasta):
left_gaps = len(s) - len(s.lstrip('-'))
right_gaps = len(s) - len(s.rstrip('-'))
s = sequence_to_int_array(s, fill_value=fill_value, fill_gaps=False)
# mask from both ends to avoid exclusion for problems at sites that will be masked anyway
if mask_5p:
s[:mask_5p] = fill_value
if mask_3p:
s[-mask_3p:] = fill_value
# fill terminal gaps -- those will be filled anyway
if left_gaps:
s[:left_gaps] = fill_value
if right_gaps:
s[-right_gaps:] = fill_value
# determine non-gap non-N mismatches
snps = (int_ref!=s) & (s!=fill_value) & (s!=gap_value)
# determine N positions
filled = s==fill_value
# determine gap positions (cast to int to detect start and ends)
gaps = np.array(s==gap_value, dtype=int)
gap_start = np.where(np.diff(gaps)==1)[0]
gap_end = np.where(np.diff(gaps)==-1)[0]
# determined mutation clusters by convolution with an array of ones => running window average
clusters = np.array(np.convolve(snps*known_true_cluster_array, np.ones(ws), mode='same')>=cluster_cut_off, dtype=int)
# determine start and end of clusters. extend by half window size on both ends.
cluster_start = [0] if clusters[0] else []
cluster_start.extend([max(0, x-ws//2) for x in np.where(np.diff(clusters)==1)[0]])
cluster_end = [min(int_ref.shape[0], x+ws//2) for x in np.where(np.diff(clusters)==-1)[0]]
if clusters[-1]:
cluster_end.append(int_ref.shape[0])
diagnostics[h] = {'snps':list(np.where(snps)[0]), 'gaps': list(zip(gap_start, gap_end)), 'gap_sum':np.sum(gaps),
'no_data':np.sum(filled) - mask_3p - mask_5p,
'clusters': [(b,e,np.sum(snps[b:e])) for b,e in zip(cluster_start, cluster_end)]}
return diagnostics
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="check sequences for anomalies",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--alignment", type=str, required=True, help="FASTA file of alignment")
parser.add_argument("--reference", type = str, required=True, help="reference sequence")
parser.add_argument("--metadata", type = str, required=True, help="metadata")
parser.add_argument("--mask-from-beginning", type = int, default=0, help="number of bases to mask from start")
parser.add_argument("--mask-from-end", type = int, default=0, help="number of bases to mask from end")
parser.add_argument("--output-diagnostics", type=str, required=True, help="Output of stats for every sequence")
parser.add_argument("--output-flagged", type=str, required=True, help="Output of sequences flagged for exclusion with specific reasons")
parser.add_argument("--output-exclusion-list", type=str, required=True, help="Output to-be-reviewed addition to exclude.txt")
args = parser.parse_args()
# load entire alignment and the alignment of focal sequences (upper case -- probably not necessary)
ref = SeqIO.read(args.reference, 'genbank').seq
metadata, _ = read_metadata(args.metadata)
diagnostics = analyze_divergence(args.alignment, metadata, ref,
mask_5p=args.mask_from_beginning,
mask_3p=args.mask_from_end)
snp_cutoff = 25
no_data_cutoff = 3000
flagged_sequences = []
# output diagnostics for each sequence, ordered by divergence
with open(args.output_diagnostics, 'w') as diag:
diag.write('\t'.join(['strain', 'divergence', 'excess divergence', '#Ns', '#gaps', 'clusters', 'gaps', 'all_snps', 'gap_list'])+'\n')
for s, d in sorted(diagnostics.items(), key=lambda x:len(x[1]['snps']), reverse=True):
expected_div = expected_divergence(metadata[s]['date']) if s in metadata else np.nan
diag.write('\t'.join(map(str,[s, len(d['snps']), round(len(d['snps']) - expected_div,2),
d['no_data'], d['gap_sum'],
','.join([f'{b}-{e}' for b,e,n in d['clusters']]),
','.join([f'{b}-{e}' for b,e in d['gaps']]),
','.join(map(str, d['snps'])),
",".join([",".join([str(x) for x in range(b,e)]) for b,e in d["gaps"]])]))+'\n')
msg = ""
reasons = []
if not np.isnan(expected_div) and np.abs(len(d['snps']) - expected_div) > snp_cutoff:
msg += f"too high divergence {np.abs(len(d['snps']) - expected_div):1.2f}>{snp_cutoff};"
reasons.append('divergence')
if len(d['clusters']):
msg += f"{len(d['clusters'])} SNP clusters with {','.join([str(x[2]) for x in d['clusters']])} SNPs each;"
reasons.append('clustered mutations')
if d['no_data']>no_data_cutoff:
msg += f"too many Ns ({d['no_data']}>{no_data_cutoff})"
reasons.append('too many ambigous sites')
if msg:
flagged_sequences.append([s, msg, tuple(reasons), metadata.get(s,{})])
# write out file with sequences flagged for exclusion sorted by date
to_exclude_by_reason = defaultdict(list)
with open(args.output_flagged, 'w') as flag:
flag.write(f'strain\tcollection_date\tsubmission_date\tflagging_reason\n')
for s, msg, reasons, meta in sorted(flagged_sequences, key=lambda x:x[3].get('date_submitted', 'XX'), reverse=True):
flag.write(f"{s}\t{metadata[s]['date'] if s in metadata else 'XXXX-XX-XX'}\t{metadata[s].get('date_submitted', 'XXXX-XX-XX') if s in metadata else 'XXXX-XX-XX'}\t{msg}\n")
to_exclude_by_reason[reasons].append(s)
# write out file with sequences flagged for exclusion sorted by date
with open(args.output_exclusion_list, 'w') as excl:
for reason in to_exclude_by_reason:
excl.write(f'\n# {"&".join(reason)}\n')
excl.write('\n'.join(to_exclude_by_reason[reason])+'\n') | 0.318167 | 0.346749 |
import os
import json
import uuid
from collections import deque
from typing import Dict, Union, Tuple, Optional, Callable
from base58 import b58decode, b58encode
from plenum.client.client import Client as PlenumClient
from plenum.common.error import fault
from plenum.common.txn_util import get_type
from stp_core.common.log import getlogger
from plenum.common.startable import Status
from plenum.common.constants import REPLY, NAME, VERSION, REQACK, REQNACK, \
TXN_ID, TARGET_NYM, NONCE, STEWARD, OP_FIELD_NAME, REJECT, TYPE
from plenum.common.types import f
from plenum.common.util import libnacl
from plenum.server.router import Router
from stp_core.network.auth_mode import AuthMode
from stp_zmq.simple_zstack import SimpleZStack
from indy_common.constants import TXN_TYPE, ATTRIB, DATA, GET_NYM, ROLE, \
NYM, GET_TXNS, LAST_TXN, TXNS, SCHEMA, CLAIM_DEF, SKEY, DISCLO, \
GET_ATTR, TRUST_ANCHOR, GET_CLAIM_DEF, GET_SCHEMA
from indy_client.persistence.client_req_rep_store_file import ClientReqRepStoreFile
from indy_client.persistence.client_txn_log import ClientTxnLog
from indy_common.config_util import getConfig
from stp_core.types import HA
from indy_common.state import domain
from indy_client.agent.jsonpickle_util import setUpJsonpickle
from indy_client.client.wallet.migration import migrate_indy_wallet_raw
from indy_common.plugin_helper import writeAnonCredPlugin
from plenum.client.wallet import WALLET_RAW_MIGRATORS
logger = getlogger()
class Client(PlenumClient):
anoncredsAreSetUp = False
def __init__(self,
name: str=None,
nodeReg: Dict[str, HA]=None,
ha: Union[HA, Tuple[str, int]]=None,
peerHA: Union[HA, Tuple[str, int]]=None,
basedirpath: str=None,
config=None,
sighex: str=None):
self.config = config or getConfig()
self.setupAnoncreds()
basedirpath = basedirpath or os.path.join(self.config.CLI_NETWORK_DIR, self.config.NETWORK_NAME)
super().__init__(name,
nodeReg,
ha,
basedirpath,
config=config,
sighex=sighex)
self.autoDiscloseAttributes = False
self.requestedPendingTxns = False
self.hasAnonCreds = bool(peerHA)
if self.hasAnonCreds:
self.peerHA = peerHA if isinstance(peerHA, HA) else HA(*peerHA)
stackargs = dict(name=self.stackName,
ha=peerHA,
main=True,
auth_mode=AuthMode.ALLOW_ANY.value)
self.peerMsgRoutes = []
self.peerMsgRouter = Router(*self.peerMsgRoutes)
self.peerStack = self.peerStackClass(
stackargs, msgHandler=self.handlePeerMessage)
self.peerStack.sign = self.sign
self.peerInbox = deque()
# To let client send this transactions to just one node
self._read_only_requests = {GET_NYM,
GET_ATTR,
GET_CLAIM_DEF,
GET_SCHEMA}
@property
def peerStackClass(self):
return SimpleZStack
def setupAnoncreds(self):
if self.anoncredsAreSetUp is False:
writeAnonCredPlugin(os.path.expanduser(self.config.CLI_BASE_DIR))
# This is to setup anoncreds wallet related custom jsonpickle handlers to
# serialize/deserialize it properly
setUpJsonpickle()
WALLET_RAW_MIGRATORS.append(migrate_indy_wallet_raw)
self.anoncredsAreSetUp = True
def handlePeerMessage(self, msg):
"""
Use the peerMsgRouter to pass the messages to the correct
function that handles them
:param msg: the P2P client message.
"""
return self.peerMsgRouter.handle(msg)
def getReqRepStore(self):
return ClientReqRepStoreFile(self.ledger_dir)
def getTxnLogStore(self):
return ClientTxnLog(self.ledger_dir)
def handleOneNodeMsg(self, wrappedMsg, excludeFromCli=None) -> None:
msg, sender = wrappedMsg
# excludeGetTxns = (msg.get(OP_FIELD_NAME) == REPLY and
# msg[f.RESULT.nm].get(TXN_TYPE) == GET_TXNS)
excludeReqAcks = msg.get(OP_FIELD_NAME) == REQACK
excludeReqNacks = msg.get(OP_FIELD_NAME) == REQNACK
excludeReply = msg.get(OP_FIELD_NAME) == REPLY
excludeReject = msg.get(OP_FIELD_NAME) == REJECT
excludeFromCli = excludeFromCli or excludeReqAcks or excludeReqNacks \
or excludeReply or excludeReject
super().handleOneNodeMsg(wrappedMsg, excludeFromCli)
if OP_FIELD_NAME not in msg:
logger.error("Op absent in message {}".format(msg))
def requestConfirmed(self, key) -> bool:
return self.txnLog.hasTxnWithReqId(key)
def hasConsensus(self, identifier: str, reqId: int) -> Optional[str]:
return super().hasConsensus(identifier, reqId)
def prepare_for_state(self, result):
request_type = result[TYPE]
if request_type == GET_NYM:
return domain.prepare_get_nym_for_state(result)
if request_type == GET_ATTR:
attr_type, path, value, hashed_value, value_bytes = \
domain.prepare_get_attr_for_state(result)
return path, value_bytes
if request_type == GET_CLAIM_DEF:
return domain.prepare_get_claim_def_for_state(result)
if request_type == GET_SCHEMA:
return domain.prepare_get_schema_for_state(result)
raise ValueError("Cannot make state key for "
"request of type {}"
.format(request_type))
def getTxnsByType(self, txnType):
return self.txnLog.getTxnsByType(txnType)
# TODO: Just for now. Remove it later
def doAttrDisclose(self, origin, target, txnId, key):
box = libnacl.public.Box(b58decode(origin), b58decode(target))
data = json.dumps({TXN_ID: txnId, SKEY: key})
nonce, boxedMsg = box.encrypt(data.encode(), pack_nonce=False)
op = {
TARGET_NYM: target,
TXN_TYPE: DISCLO,
NONCE: b58encode(nonce).decode("utf-8"),
DATA: b58encode(boxedMsg).decode("utf-8")
}
self.submit(op, identifier=origin)
def doGetAttributeTxn(self, identifier, attrName):
op = {
TARGET_NYM: identifier,
TXN_TYPE: GET_ATTR,
DATA: json.dumps({"name": attrName})
}
self.submit(op, identifier=identifier)
@staticmethod
def _getDecryptedData(encData, key):
data = bytes(bytearray.fromhex(encData))
rawKey = bytes(bytearray.fromhex(key))
box = libnacl.secret.SecretBox(rawKey)
decData = box.decrypt(data).decode()
return json.loads(decData)
def hasNym(self, nym):
for txn in self.txnLog.getTxnsByType(NYM):
if get_type(txn) == NYM:
return True
return False
def _statusChanged(self, old, new):
super()._statusChanged(old, new)
def start(self, loop):
super().start(loop)
if self.hasAnonCreds and self.status not in Status.going():
self.peerStack.start()
async def prod(self, limit) -> int:
s = await super().prod(limit)
if self.hasAnonCreds:
return s + await self.peerStack.service(limit)
else:
return s | indy_client/client/client.py | import os
import json
import uuid
from collections import deque
from typing import Dict, Union, Tuple, Optional, Callable
from base58 import b58decode, b58encode
from plenum.client.client import Client as PlenumClient
from plenum.common.error import fault
from plenum.common.txn_util import get_type
from stp_core.common.log import getlogger
from plenum.common.startable import Status
from plenum.common.constants import REPLY, NAME, VERSION, REQACK, REQNACK, \
TXN_ID, TARGET_NYM, NONCE, STEWARD, OP_FIELD_NAME, REJECT, TYPE
from plenum.common.types import f
from plenum.common.util import libnacl
from plenum.server.router import Router
from stp_core.network.auth_mode import AuthMode
from stp_zmq.simple_zstack import SimpleZStack
from indy_common.constants import TXN_TYPE, ATTRIB, DATA, GET_NYM, ROLE, \
NYM, GET_TXNS, LAST_TXN, TXNS, SCHEMA, CLAIM_DEF, SKEY, DISCLO, \
GET_ATTR, TRUST_ANCHOR, GET_CLAIM_DEF, GET_SCHEMA
from indy_client.persistence.client_req_rep_store_file import ClientReqRepStoreFile
from indy_client.persistence.client_txn_log import ClientTxnLog
from indy_common.config_util import getConfig
from stp_core.types import HA
from indy_common.state import domain
from indy_client.agent.jsonpickle_util import setUpJsonpickle
from indy_client.client.wallet.migration import migrate_indy_wallet_raw
from indy_common.plugin_helper import writeAnonCredPlugin
from plenum.client.wallet import WALLET_RAW_MIGRATORS
logger = getlogger()
class Client(PlenumClient):
anoncredsAreSetUp = False
def __init__(self,
name: str=None,
nodeReg: Dict[str, HA]=None,
ha: Union[HA, Tuple[str, int]]=None,
peerHA: Union[HA, Tuple[str, int]]=None,
basedirpath: str=None,
config=None,
sighex: str=None):
self.config = config or getConfig()
self.setupAnoncreds()
basedirpath = basedirpath or os.path.join(self.config.CLI_NETWORK_DIR, self.config.NETWORK_NAME)
super().__init__(name,
nodeReg,
ha,
basedirpath,
config=config,
sighex=sighex)
self.autoDiscloseAttributes = False
self.requestedPendingTxns = False
self.hasAnonCreds = bool(peerHA)
if self.hasAnonCreds:
self.peerHA = peerHA if isinstance(peerHA, HA) else HA(*peerHA)
stackargs = dict(name=self.stackName,
ha=peerHA,
main=True,
auth_mode=AuthMode.ALLOW_ANY.value)
self.peerMsgRoutes = []
self.peerMsgRouter = Router(*self.peerMsgRoutes)
self.peerStack = self.peerStackClass(
stackargs, msgHandler=self.handlePeerMessage)
self.peerStack.sign = self.sign
self.peerInbox = deque()
# To let client send this transactions to just one node
self._read_only_requests = {GET_NYM,
GET_ATTR,
GET_CLAIM_DEF,
GET_SCHEMA}
@property
def peerStackClass(self):
return SimpleZStack
def setupAnoncreds(self):
if self.anoncredsAreSetUp is False:
writeAnonCredPlugin(os.path.expanduser(self.config.CLI_BASE_DIR))
# This is to setup anoncreds wallet related custom jsonpickle handlers to
# serialize/deserialize it properly
setUpJsonpickle()
WALLET_RAW_MIGRATORS.append(migrate_indy_wallet_raw)
self.anoncredsAreSetUp = True
def handlePeerMessage(self, msg):
"""
Use the peerMsgRouter to pass the messages to the correct
function that handles them
:param msg: the P2P client message.
"""
return self.peerMsgRouter.handle(msg)
def getReqRepStore(self):
return ClientReqRepStoreFile(self.ledger_dir)
def getTxnLogStore(self):
return ClientTxnLog(self.ledger_dir)
def handleOneNodeMsg(self, wrappedMsg, excludeFromCli=None) -> None:
msg, sender = wrappedMsg
# excludeGetTxns = (msg.get(OP_FIELD_NAME) == REPLY and
# msg[f.RESULT.nm].get(TXN_TYPE) == GET_TXNS)
excludeReqAcks = msg.get(OP_FIELD_NAME) == REQACK
excludeReqNacks = msg.get(OP_FIELD_NAME) == REQNACK
excludeReply = msg.get(OP_FIELD_NAME) == REPLY
excludeReject = msg.get(OP_FIELD_NAME) == REJECT
excludeFromCli = excludeFromCli or excludeReqAcks or excludeReqNacks \
or excludeReply or excludeReject
super().handleOneNodeMsg(wrappedMsg, excludeFromCli)
if OP_FIELD_NAME not in msg:
logger.error("Op absent in message {}".format(msg))
def requestConfirmed(self, key) -> bool:
return self.txnLog.hasTxnWithReqId(key)
def hasConsensus(self, identifier: str, reqId: int) -> Optional[str]:
return super().hasConsensus(identifier, reqId)
def prepare_for_state(self, result):
request_type = result[TYPE]
if request_type == GET_NYM:
return domain.prepare_get_nym_for_state(result)
if request_type == GET_ATTR:
attr_type, path, value, hashed_value, value_bytes = \
domain.prepare_get_attr_for_state(result)
return path, value_bytes
if request_type == GET_CLAIM_DEF:
return domain.prepare_get_claim_def_for_state(result)
if request_type == GET_SCHEMA:
return domain.prepare_get_schema_for_state(result)
raise ValueError("Cannot make state key for "
"request of type {}"
.format(request_type))
def getTxnsByType(self, txnType):
return self.txnLog.getTxnsByType(txnType)
# TODO: Just for now. Remove it later
def doAttrDisclose(self, origin, target, txnId, key):
box = libnacl.public.Box(b58decode(origin), b58decode(target))
data = json.dumps({TXN_ID: txnId, SKEY: key})
nonce, boxedMsg = box.encrypt(data.encode(), pack_nonce=False)
op = {
TARGET_NYM: target,
TXN_TYPE: DISCLO,
NONCE: b58encode(nonce).decode("utf-8"),
DATA: b58encode(boxedMsg).decode("utf-8")
}
self.submit(op, identifier=origin)
def doGetAttributeTxn(self, identifier, attrName):
op = {
TARGET_NYM: identifier,
TXN_TYPE: GET_ATTR,
DATA: json.dumps({"name": attrName})
}
self.submit(op, identifier=identifier)
@staticmethod
def _getDecryptedData(encData, key):
data = bytes(bytearray.fromhex(encData))
rawKey = bytes(bytearray.fromhex(key))
box = libnacl.secret.SecretBox(rawKey)
decData = box.decrypt(data).decode()
return json.loads(decData)
def hasNym(self, nym):
for txn in self.txnLog.getTxnsByType(NYM):
if get_type(txn) == NYM:
return True
return False
def _statusChanged(self, old, new):
super()._statusChanged(old, new)
def start(self, loop):
super().start(loop)
if self.hasAnonCreds and self.status not in Status.going():
self.peerStack.start()
async def prod(self, limit) -> int:
s = await super().prod(limit)
if self.hasAnonCreds:
return s + await self.peerStack.service(limit)
else:
return s | 0.562417 | 0.098686 |
import re
import os
import sublime
import sublime_plugin
# Master switch for plugin debug output; flip to True when developing.
DEBUG = False
def debug_message(message):
    """Print *message* to the Sublime console, but only when DEBUG is on.

    :param message: anything; it is coerced with str() before printing.
    """
    if not DEBUG:
        return  # bug fix: was `pass`, which fell through and printed unconditionally
    print('DEBUG phpunitkit: %s' % str(message))
class PluginSettings():
    """Accessor for '<name>.*' settings with an in-memory transient overlay.

    Persistent values are read from the active view's settings under the
    key prefix '<name>.'; transient values live only for the editor session.
    """
    def __init__(self, name):
        self.name = name            # settings key prefix, e.g. 'phpunit'
        self.loaded = False         # set True once plugin_loaded() has run
        self.transient_data = {}    # session-only overrides
    def on_load(self):
        """Mark the settings as usable; called from plugin_loaded()."""
        if self.loaded:
            return
        self.loaded = True
    def get(self, key):
        """Return the persistent setting *key*.

        :raises RuntimeError: if the plugin has not finished loading, or
            if no setting named '<name>.<key>' exists on the active view.
        """
        if not self.loaded:
            raise RuntimeError('Plugin settings not loaded')
        window = sublime.active_window()
        if window is not None:
            view = window.active_view()
            if view is not None:
                settings = view.settings()
                if settings.has(self.name + '.' + key):
                    return settings.get(self.name + '.' + key)
        raise RuntimeError('Unknown plugin setting "%s"' % key)
    def get_transient(self, key, default=None):
        """Return the transient value for *key*, falling back to the
        persistent setting, then to *default*."""
        if key in self.transient_data:
            return self.transient_data[key]
        try:
            return self.get(key)
        except Exception:  # bug fix: bare `except:` also trapped SystemExit/KeyboardInterrupt
            return default
    def set_transient(self, key, value):
        """Set a session-only override for *key* (never persisted)."""
        self.transient_data[key] = value
# Single shared settings instance used by every command in this plugin.
plugin_settings = PluginSettings('phpunit')
# Sublime Text lifecycle hook: invoked once the plugin host API is ready.
def plugin_loaded():
    plugin_settings.on_load()
class PHPUnitConfigurationFileFinder():
    """
    Find the first PHPUnit configuration file, either
    phpunit.xml or phpunit.xml.dist, in {file_name}
    directory or the nearest common ancestor directory
    in {folders}.
    """
    def find(self, file_name, folders):
        """
        Finds the PHPUnit configuration file.

        :param file_name: absolute path of the file being edited
        :param folders: the window's open folders
        :returns: path of phpunit.xml(.dist), or None if invalid/not found
        """
        # Validate inputs before logging: the original logged first, so
        # len(None) raised TypeError whenever folders was None.
        if not isinstance(file_name, str) or not file_name:
            return None
        if not isinstance(folders, list) or not folders:
            return None
        debug_message('Find PHPUnit configuration file for %s in %s (%d)' % (file_name, folders, len(folders)))
        # Walk up from the file's directory, but never above the common
        # prefix of the open folders.
        ancestor_folders = []
        common_prefix = os.path.commonprefix(folders)
        parent = os.path.dirname(file_name)
        while parent not in ancestor_folders and parent.startswith(common_prefix):
            ancestor_folders.append(parent)
            parent = os.path.dirname(parent)
        # Deepest directory first.
        ancestor_folders.sort(reverse=True)
        debug_message('  Found %d common ancestor folder%s %s' % (len(ancestor_folders), '' if len(ancestor_folders) == 1 else 's', ancestor_folders))
        for folder in ancestor_folders:
            debug_message('  Searching folder: %s' % folder)
            # Renamed loop variable: the original shadowed the file_name parameter.
            for candidate in ['phpunit.xml', 'phpunit.xml.dist']:
                phpunit_configuration_file = os.path.join(folder, candidate)
                debug_message('  Checking: %s' % phpunit_configuration_file)
                if os.path.isfile(phpunit_configuration_file):
                    debug_message('  Found PHPUnit configuration file: %s' % phpunit_configuration_file)
                    return phpunit_configuration_file
        debug_message('  PHPUnit Configuration file not found')
        return None
    def find_dirname(self, file_name, folders):
        """Like find(), but returns the configuration file's directory."""
        phpunit_configuration_file = self.find(file_name, folders)
        if phpunit_configuration_file:
            return os.path.dirname(phpunit_configuration_file)
        return None
def is_valid_php_identifier(string):
    """Return a truthy match object when *string* is a bare PHP identifier.

    Uses re.fullmatch so that a trailing newline — which '$' under
    re.match would tolerate — is rejected.
    """
    return re.fullmatch(r'[a-zA-Z_][a-zA-Z0-9_]*', string)
class ViewHelpers():
    """Helpers that inspect a Sublime Text view holding PHP source."""
    def __init__(self, view):
        # view: the sublime.View this helper operates on
        self.view = view
    def contains_phpunit_test_case(self):
        """
        Returns true if view contains a PHPUnit test-case; otherwise false
        """
        # Convention-based: a test-case is any class whose name ends in 'Test'.
        for php_class in self.find_php_classes():
            if php_class[-4:] == 'Test':
                return True
        return False
    def find_php_classes(self):
        """
        Returns an array of classes (class names) defined in the view
        """
        # Class names are harvested from the syntax highlighting scopes,
        # then sanity-checked as PHP identifiers.
        classes = []
        for class_as_region in self.view.find_by_selector('source.php entity.name.type.class'):
            class_as_string = self.view.substr(class_as_region)
            if is_valid_php_identifier(class_as_string):
                classes.append(class_as_string)
        # Quick fix for ST build >= 3114 because the default PHP package
        # changed the scope on class entities.
        if not classes:
            for class_as_region in self.view.find_by_selector('source.php entity.name.class'):
                class_as_string = self.view.substr(class_as_region)
                if is_valid_php_identifier(class_as_string):
                    classes.append(class_as_string)
        return classes
    def find_first_switchable(self):
        """
        Returns the first switchable; otherwise None

        A "switchable" is the counterpart symbol: FooTest for Foo, and Foo
        for FooTest.  Symbols in open files take precedence over the index.
        """
        file_name = self.view.file_name()
        debug_message('Find first switchable for %s' % file_name)
        classes = self.find_php_classes()
        debug_message('  Found %d PHP class%s %s in %s' % (len(classes), '' if len(classes) == 1 else 'es', classes, file_name))
        for class_name in classes:
            if class_name[-4:] == "Test":
                lookup_symbol = class_name[:-4]
            else:
                lookup_symbol = class_name + "Test"
            debug_message('  Switchable symbol: %s' % lookup_symbol)
            switchables_in_open_files = self.view.window().lookup_symbol_in_open_files(lookup_symbol)
            switchables_in_index = self.view.window().lookup_symbol_in_index(lookup_symbol)
            debug_message('    Found %d switchable symbol%s in open files %s' % (len(switchables_in_open_files), '' if len(switchables_in_open_files) == 1 else 's', str(switchables_in_open_files)))
            debug_message('    Found %d switchable symbol%s in index %s' % (len(switchables_in_index), '' if len(switchables_in_index) == 1 else 's', str(switchables_in_index)))
            # Return the very first hit; falls through to None when neither
            # lookup produced anything for any class in the view.
            for open_file in switchables_in_open_files:
                debug_message('    Found switchable symbol in open file %s' % str(open_file))
                return open_file
            for index in switchables_in_index:
                debug_message('    Found switchable symbol in index %s' % str(index))
                return index
    def find_first_switchable_file(self):
        """
        Returns the first switchable file; otherwise None
        """
        first_switchable = self.find_first_switchable()
        if not first_switchable:
            return None
        def normalise_path(path):
            # Older builds (< 3118) returned symbol paths in a unix-like form
            # on Windows; rewrite '/C/...' to 'C:/...' and flip separators.
            if int(sublime.version()) < 3118:
                if sublime.platform() == "windows":
                    path = re.sub(r"/([A-Za-z])/(.+)", r"\1:/\2", path)
                    path = re.sub(r"/", r"\\", path)
            return path
        # lookup_symbol_* results are (path, relative_path, rowcol) tuples.
        return normalise_path(first_switchable[0])
    def get_current_function(self):
        """Return the name of the function enclosing the caret, or None."""
        sel = self.view.sel()[0]
        function_regions = self.view.find_by_selector('entity.name.function')
        cf = None
        # Scan backwards: the nearest function-name region that starts
        # before the caret is taken to be the enclosing function.
        for r in reversed(function_regions):
            if r.a < sel.a:
                cf = self.view.substr(r)
                break
        return cf
class PHPUnitTextUITestRunner():
    """Builds and executes a `phpunit` command line via the `exec` build
    system of the given window, remembering the arguments per window so
    the last run can be repeated."""
    def __init__(self, window):
        # window: the sublime.Window that hosts the run and its output panel
        self.window = window
    def run(self, args=None):
        """Run PHPUnit; *args* is an optional dict of _run() keyword args."""
        if args:
            debug_message('PHPUnitTextUITestRunner::run %s' % (args))
            self._run(**args)
        else:
            debug_message('PHPUnitTextUITestRunner::run {}')
            self._run()
    def _run(self, working_dir=None, unit_test_or_directory=None, options = None):
        """Resolve working dir, merge options, build and exec the command.

        :param working_dir: directory containing phpunit.xml(.dist);
            auto-discovered from the active file when omitted.
        :param unit_test_or_directory: test file/dir to run; whole suite
            when omitted.
        :param options: dict of PHPUnit CLI options (short or long names).
        """
        view = self.window.active_view()
        if not view:
            return
        # Working directory
        if not working_dir:
            working_dir = PHPUnitConfigurationFileFinder().find_dirname(view.file_name(), self.window.folders())
            if not working_dir:
                debug_message('Could not find a PHPUnit working directory')
                return
            debug_message('Found PHPUnit working directory: %s' % working_dir)
        if not os.path.isdir(working_dir):
            debug_message('PHPUnit working directory does not exist or is not a valid directory: %s' % working_dir)
            return
        debug_message('PHPUnit working directory: %s' % working_dir)
        # Unit test or directory — made relative so the command stays short.
        if unit_test_or_directory:
            if not os.path.isfile(unit_test_or_directory) and not os.path.isdir(unit_test_or_directory):
                debug_message('PHPUnit test or directory is invalid: %s' % unit_test_or_directory)
                return
            unit_test_or_directory = os.path.relpath(unit_test_or_directory, working_dir)
            debug_message('PHPUnit test or directory: %s' % unit_test_or_directory)
        # PHPUnit options
        # Order of Precedence
        # * User specific "phpunit.options" setting
        # * Project specific "phpunit.options" setting
        # * toggled "transient/session" settings
        # * this command's argument
        # Merge keeps the first value seen per key (argument wins, then
        # transient, then persistent settings).
        if options is None:
            options = {}
        for k, v in plugin_settings.get_transient('options', {}).items():
            if k not in options:
                options[k] = v
        for k, v in plugin_settings.get('options').items():
            if k not in options:
                options[k] = v
        debug_message('PHPUnit options %s' % str(options))
        # PHPUnit bin: prefer the Composer-installed binary when enabled
        # and present in the project.
        phpunit_bin = 'phpunit'
        if plugin_settings.get('composer'):
            relative_composer_phpunit_bin = os.path.join('vendor', 'bin', 'phpunit')
            composer_phpunit_bin = os.path.join(working_dir, relative_composer_phpunit_bin)
            if os.path.isfile(composer_phpunit_bin):
                debug_message('Found Composer PHPUnit bin: %s' % composer_phpunit_bin)
                phpunit_bin = relative_composer_phpunit_bin
        debug_message('PHPUnit bin: %s' % phpunit_bin)
        # Execute Command.  Option encoding: a False value drops the option;
        # True emits a bare flag; short (1-char) options may carry a list of
        # values, each emitted separately; long options append one value.
        cmd = phpunit_bin
        for k, v in options.items():
            if not v == False:
                if len(k) == 1:
                    # NOTE(review): this inner check is redundant — the outer
                    # `if not v == False` already excluded False.
                    if not v == False:
                        if v == True:
                            cmd += " -%s" % (k)
                        else:
                            if isinstance(v, list):
                                for _v in v:
                                    cmd += " -%s \"%s\"" % (k, _v)
                            else:
                                cmd += " -%s \"%s\"" % (k, v)
                else:
                    cmd += " --" + k
                    if not v == True:
                        cmd += " \"%s\"" % (v)
        if unit_test_or_directory:
            cmd += " " + unit_test_or_directory
        debug_message('exec cmd: %s' % cmd)
        # Write out every buffer (active window) with changes and a file name.
        if plugin_settings.get('save_all_on_run'):
            for view in self.window.views():
                if view.is_dirty() and view.file_name():
                    view.run_command('save')
        # Run through the built-in exec command; file_regex lets Sublime
        # jump to failure locations from the output panel.
        self.window.run_command('exec', {
            'cmd': cmd,
            'file_regex': '([a-zA-Z0-9\\.\\/_-]+)(?: on line |\:)([0-9]+)$',
            'quiet': not bool(DEBUG),
            'shell': True,
            'syntax': 'Packages/phpunitkit/test-results.hidden-tmLanguage',
            'word_wrap': False,
            'working_dir': working_dir
        })
        # Save last run arguments (for current window)
        plugin_settings.set_transient('__window__' + str(self.window.id()) + '__run_last_test_args', {
            'working_dir': working_dir,
            'unit_test_or_directory': unit_test_or_directory,
            'options': options
        })
        # Configure color scheme
        # NOTE(review): if the save-all loop above ran, `view` was rebound to
        # the last window view, so the fallback color scheme may come from a
        # view other than the active one — verify intent.
        panel_settings = self.window.create_output_panel('exec').settings()
        panel_settings.set('color_scheme',
            plugin_settings.get('color_scheme')
            if plugin_settings.get('color_scheme')
            else view.settings().get('color_scheme'))
    def run_last_test(self):
        """Repeat the previous run in this window, if there was one."""
        # get last run arguments (for current window)
        args = plugin_settings.get_transient('__window__' + str(self.window.id()) + '__run_last_test_args')
        if args:
            self.run(args)
class PhpunitRunAllTests(sublime_plugin.WindowCommand):
    """Window command: run the whole PHPUnit test suite."""
    def run(self):
        runner = PHPUnitTextUITestRunner(self.window)
        runner.run()
class PhpunitRunLastTestCommand(sublime_plugin.WindowCommand):
    """Window command: repeat the previous test run with the same arguments."""
    def run(self):
        runner = PHPUnitTextUITestRunner(self.window)
        runner.run_last_test()
class PhpunitRunSingleFileCommand(sublime_plugin.WindowCommand):
    """Window command: run the test case for the current file.

    When the current file is itself a test case it is run directly;
    otherwise the corresponding *Test counterpart file is looked up.
    """
    def run(self):
        view = self.window.active_view()
        if not view:
            return
        helpers = ViewHelpers(view)
        if helpers.contains_phpunit_test_case():
            debug_message('Found test case in %s' % view.file_name())
            unit_test = view.file_name()
        else:
            debug_message('No test case found in %s' % view.file_name())
            # @todo how to check that the switchable contains a testcase?
            unit_test = helpers.find_first_switchable_file()
        if not unit_test:
            debug_message('Could not find a PHPUnit test case or a switchable test case')
            return
        PHPUnitTextUITestRunner(self.window).run({
            "unit_test_or_directory": unit_test,
            "options": {}
        })
class PhpunitRunSingleTestCommand(sublime_plugin.WindowCommand):
    """Window command: run only the test method enclosing the caret."""
    def run(self):
        view = self.window.active_view()
        if not view:
            return
        current_function = ViewHelpers(view).get_current_function()
        if not current_function:
            debug_message('No current function')
            return
        debug_message('Current function is %s' % current_function)
        # Anchor on the method name; the optional suffix matches PHPUnit's
        # "with data set ..." naming for data-provider cases.
        test_filter = '::(' + current_function + ')( with data set .+)?$'
        PHPUnitTextUITestRunner(self.window).run({
            "unit_test_or_directory": "",
            "options": {'filter': test_filter}
        })
class PhpunitSwitchFile(sublime_plugin.WindowCommand):
    """Window command: jump between a class and its test case, placing the
    two files side-by-side in a two-column layout."""
    def run(self):
        current_view = self.window.active_view()
        if not current_view:
            return
        first_switchable = ViewHelpers(current_view).find_first_switchable()
        if not first_switchable:
            sublime.status_message('No PHPUnit switchable found for "%s"' % current_view.file_name())
            return
        debug_message('Switching from %s to %s' % (current_view.file_name(), first_switchable))
        # first_switchable[0] is the counterpart's file path.
        self.window.open_file(first_switchable[0])
        switched_view = self.window.active_view()
        if current_view == switched_view: # looks like the class and test-case are in the same view
            return
        # split in two with test case and class under test side-by-side
        if self.window.num_groups() == 1:
            self.window.run_command('set_layout', {
                "cols": [0.0, 0.5, 1.0],
                "rows": [0.0, 1.0],
                "cells": [[0, 0, 1, 1], [1, 0, 2, 1]]
            })
        # If both views landed in the same group, move the switched one to
        # the other column.
        current_view_index = self.window.get_view_index(current_view)
        switched_view_index = self.window.get_view_index(switched_view)
        if self.window.num_groups() <= 2 and current_view_index[0] == switched_view_index[0]:
            if current_view_index[0] == 0:
                self.window.set_view_index(switched_view, 1, 0)
            else:
                self.window.set_view_index(switched_view, 0, 0)
        # ensure focus is not lost from either view
        self.window.focus_view(current_view)
        self.window.focus_view(switched_view)
class PhpunitToggleLongOption(sublime_plugin.WindowCommand):
    """Window command: toggle a long PHPUnit CLI option for this session."""
    def run(self, option):
        options = plugin_settings.get_transient('options', {})
        if option in options:
            options[option] = not bool(options[option])
        else:
            options[option] = True
        plugin_settings.set_transient('options', options)
class PhpunitOpenHtmlCodeCoverageInBrowser(sublime_plugin.WindowCommand):
    """Window command: open build/coverage/index.html in the default browser."""
    def run(self):
        view = self.window.active_view()
        if not view:
            return
        working_dir = PHPUnitConfigurationFileFinder().find_dirname(view.file_name(), self.window.folders())
        if not working_dir:
            sublime.status_message('Could not find a PHPUnit working directory')
            return
        # Conventional location of the HTML coverage report.
        coverage_html_index_html_file = os.path.join(working_dir, 'build/coverage/index.html')
        if not os.path.exists(coverage_html_index_html_file):
            sublime.status_message('Could not find PHPUnit HTML code coverage %s' % coverage_html_index_html_file)
            return
        # Imported lazily: only needed when this command actually runs.
        import webbrowser
        # Bug fix: this line was corrupted by stray appended text
        # ("| plugin.py | import re"); restored the clean call.
        webbrowser.open_new_tab('file://' + coverage_html_index_html_file)
import os
import sublime
import sublime_plugin
# Master switch for plugin debug output; flip to True when developing.
DEBUG = False
def debug_message(message):
    """Print *message* to the Sublime console, but only when DEBUG is on."""
    if not DEBUG:
        return  # bug fix: was `pass`, which fell through and printed unconditionally
    print('DEBUG phpunitkit: %s' % str(message))
class PluginSettings():
    """Accessor for '<name>.*' settings with an in-memory transient overlay.

    Persistent values are read from the active view's settings under the
    key prefix '<name>.'; transient values live only for the editor session.
    """
    def __init__(self, name):
        self.name = name            # settings key prefix, e.g. 'phpunit'
        self.loaded = False         # set True once plugin_loaded() has run
        self.transient_data = {}    # session-only overrides
    def on_load(self):
        """Mark the settings as usable; called from plugin_loaded()."""
        if self.loaded:
            return
        self.loaded = True
    def get(self, key):
        """Return the persistent setting *key*.

        :raises RuntimeError: if the plugin has not finished loading, or
            if no setting named '<name>.<key>' exists on the active view.
        """
        if not self.loaded:
            raise RuntimeError('Plugin settings not loaded')
        window = sublime.active_window()
        if window is not None:
            view = window.active_view()
            if view is not None:
                settings = view.settings()
                if settings.has(self.name + '.' + key):
                    return settings.get(self.name + '.' + key)
        raise RuntimeError('Unknown plugin setting "%s"' % key)
    def get_transient(self, key, default=None):
        """Return the transient value for *key*, falling back to the
        persistent setting, then to *default*."""
        if key in self.transient_data:
            return self.transient_data[key]
        try:
            return self.get(key)
        except Exception:  # bug fix: bare `except:` also trapped SystemExit/KeyboardInterrupt
            return default
    def set_transient(self, key, value):
        """Set a session-only override for *key* (never persisted)."""
        self.transient_data[key] = value
# Single shared settings instance used by every command in this plugin.
plugin_settings = PluginSettings('phpunit')
# Sublime Text lifecycle hook: invoked once the plugin host API is ready.
def plugin_loaded():
    plugin_settings.on_load()
class PHPUnitConfigurationFileFinder():
    """
    Find the first PHPUnit configuration file, either
    phpunit.xml or phpunit.xml.dist, in {file_name}
    directory or the nearest common ancestor directory
    in {folders}.
    """
    def find(self, file_name, folders):
        """
        Finds the PHPUnit configuration file.

        :param file_name: absolute path of the file being edited
        :param folders: the window's open folders
        :returns: path of phpunit.xml(.dist), or None if invalid/not found
        """
        # Validate inputs before logging: the original logged first, so
        # len(None) raised TypeError whenever folders was None.
        if not isinstance(file_name, str) or not file_name:
            return None
        if not isinstance(folders, list) or not folders:
            return None
        debug_message('Find PHPUnit configuration file for %s in %s (%d)' % (file_name, folders, len(folders)))
        # Walk up from the file's directory, but never above the common
        # prefix of the open folders.
        ancestor_folders = []
        common_prefix = os.path.commonprefix(folders)
        parent = os.path.dirname(file_name)
        while parent not in ancestor_folders and parent.startswith(common_prefix):
            ancestor_folders.append(parent)
            parent = os.path.dirname(parent)
        # Deepest directory first.
        ancestor_folders.sort(reverse=True)
        debug_message('  Found %d common ancestor folder%s %s' % (len(ancestor_folders), '' if len(ancestor_folders) == 1 else 's', ancestor_folders))
        for folder in ancestor_folders:
            debug_message('  Searching folder: %s' % folder)
            # Renamed loop variable: the original shadowed the file_name parameter.
            for candidate in ['phpunit.xml', 'phpunit.xml.dist']:
                phpunit_configuration_file = os.path.join(folder, candidate)
                debug_message('  Checking: %s' % phpunit_configuration_file)
                if os.path.isfile(phpunit_configuration_file):
                    debug_message('  Found PHPUnit configuration file: %s' % phpunit_configuration_file)
                    return phpunit_configuration_file
        debug_message('  PHPUnit Configuration file not found')
        return None
    def find_dirname(self, file_name, folders):
        """Like find(), but returns the configuration file's directory."""
        phpunit_configuration_file = self.find(file_name, folders)
        if phpunit_configuration_file:
            return os.path.dirname(phpunit_configuration_file)
        return None
def is_valid_php_identifier(string):
    """Return a truthy match object when *string* is a bare PHP identifier.

    Uses re.fullmatch so that a trailing newline — which '$' under
    re.match would tolerate — is rejected.
    """
    return re.fullmatch(r'[a-zA-Z_][a-zA-Z0-9_]*', string)
class ViewHelpers():
    """Helpers that inspect a Sublime Text view holding PHP source."""
    def __init__(self, view):
        # view: the sublime.View this helper operates on
        self.view = view
    def contains_phpunit_test_case(self):
        """
        Returns true if view contains a PHPUnit test-case; otherwise false
        """
        # Convention-based: a test-case is any class whose name ends in 'Test'.
        for php_class in self.find_php_classes():
            if php_class[-4:] == 'Test':
                return True
        return False
    def find_php_classes(self):
        """
        Returns an array of classes (class names) defined in the view
        """
        # Class names are harvested from the syntax highlighting scopes,
        # then sanity-checked as PHP identifiers.
        classes = []
        for class_as_region in self.view.find_by_selector('source.php entity.name.type.class'):
            class_as_string = self.view.substr(class_as_region)
            if is_valid_php_identifier(class_as_string):
                classes.append(class_as_string)
        # Quick fix for ST build >= 3114 because the default PHP package
        # changed the scope on class entities.
        if not classes:
            for class_as_region in self.view.find_by_selector('source.php entity.name.class'):
                class_as_string = self.view.substr(class_as_region)
                if is_valid_php_identifier(class_as_string):
                    classes.append(class_as_string)
        return classes
    def find_first_switchable(self):
        """
        Returns the first switchable; otherwise None

        A "switchable" is the counterpart symbol: FooTest for Foo, and Foo
        for FooTest.  Symbols in open files take precedence over the index.
        """
        file_name = self.view.file_name()
        debug_message('Find first switchable for %s' % file_name)
        classes = self.find_php_classes()
        debug_message('  Found %d PHP class%s %s in %s' % (len(classes), '' if len(classes) == 1 else 'es', classes, file_name))
        for class_name in classes:
            if class_name[-4:] == "Test":
                lookup_symbol = class_name[:-4]
            else:
                lookup_symbol = class_name + "Test"
            debug_message('  Switchable symbol: %s' % lookup_symbol)
            switchables_in_open_files = self.view.window().lookup_symbol_in_open_files(lookup_symbol)
            switchables_in_index = self.view.window().lookup_symbol_in_index(lookup_symbol)
            debug_message('    Found %d switchable symbol%s in open files %s' % (len(switchables_in_open_files), '' if len(switchables_in_open_files) == 1 else 's', str(switchables_in_open_files)))
            debug_message('    Found %d switchable symbol%s in index %s' % (len(switchables_in_index), '' if len(switchables_in_index) == 1 else 's', str(switchables_in_index)))
            # Return the very first hit; falls through to None when neither
            # lookup produced anything for any class in the view.
            for open_file in switchables_in_open_files:
                debug_message('    Found switchable symbol in open file %s' % str(open_file))
                return open_file
            for index in switchables_in_index:
                debug_message('    Found switchable symbol in index %s' % str(index))
                return index
    def find_first_switchable_file(self):
        """
        Returns the first switchable file; otherwise None
        """
        first_switchable = self.find_first_switchable()
        if not first_switchable:
            return None
        def normalise_path(path):
            # Older builds (< 3118) returned symbol paths in a unix-like form
            # on Windows; rewrite '/C/...' to 'C:/...' and flip separators.
            if int(sublime.version()) < 3118:
                if sublime.platform() == "windows":
                    path = re.sub(r"/([A-Za-z])/(.+)", r"\1:/\2", path)
                    path = re.sub(r"/", r"\\", path)
            return path
        # lookup_symbol_* results are (path, relative_path, rowcol) tuples.
        return normalise_path(first_switchable[0])
    def get_current_function(self):
        """Return the name of the function enclosing the caret, or None."""
        sel = self.view.sel()[0]
        function_regions = self.view.find_by_selector('entity.name.function')
        cf = None
        # Scan backwards: the nearest function-name region that starts
        # before the caret is taken to be the enclosing function.
        for r in reversed(function_regions):
            if r.a < sel.a:
                cf = self.view.substr(r)
                break
        return cf
class PHPUnitTextUITestRunner():
def __init__(self, window):
self.window = window
def run(self, args=None):
if args:
debug_message('PHPUnitTextUITestRunner::run %s' % (args))
self._run(**args)
else:
debug_message('PHPUnitTextUITestRunner::run {}')
self._run()
def _run(self, working_dir=None, unit_test_or_directory=None, options = None):
view = self.window.active_view()
if not view:
return
# Working directory
if not working_dir:
working_dir = PHPUnitConfigurationFileFinder().find_dirname(view.file_name(), self.window.folders())
if not working_dir:
debug_message('Could not find a PHPUnit working directory')
return
debug_message('Found PHPUnit working directory: %s' % working_dir)
if not os.path.isdir(working_dir):
debug_message('PHPUnit working directory does not exist or is not a valid directory: %s' % working_dir)
return
debug_message('PHPUnit working directory: %s' % working_dir)
# Unit test or directory
if unit_test_or_directory:
if not os.path.isfile(unit_test_or_directory) and not os.path.isdir(unit_test_or_directory):
debug_message('PHPUnit test or directory is invalid: %s' % unit_test_or_directory)
return
unit_test_or_directory = os.path.relpath(unit_test_or_directory, working_dir)
debug_message('PHPUnit test or directory: %s' % unit_test_or_directory)
# PHPUnit options
# Order of Precedence
# * User specific "phpunit.options" setting
# * Project specific "phpunit.options" setting
# * toggled "transient/session" settings
# * this command's argument
if options is None:
options = {}
for k, v in plugin_settings.get_transient('options', {}).items():
if k not in options:
options[k] = v
for k, v in plugin_settings.get('options').items():
if k not in options:
options[k] = v
debug_message('PHPUnit options %s' % str(options))
# PHPUnit bin
phpunit_bin = 'phpunit'
if plugin_settings.get('composer'):
relative_composer_phpunit_bin = os.path.join('vendor', 'bin', 'phpunit')
composer_phpunit_bin = os.path.join(working_dir, relative_composer_phpunit_bin)
if os.path.isfile(composer_phpunit_bin):
debug_message('Found Composer PHPUnit bin: %s' % composer_phpunit_bin)
phpunit_bin = relative_composer_phpunit_bin
debug_message('PHPUnit bin: %s' % phpunit_bin)
# Execute Command
cmd = phpunit_bin
for k, v in options.items():
if not v == False:
if len(k) == 1:
if not v == False:
if v == True:
cmd += " -%s" % (k)
else:
if isinstance(v, list):
for _v in v:
cmd += " -%s \"%s\"" % (k, _v)
else:
cmd += " -%s \"%s\"" % (k, v)
else:
cmd += " --" + k
if not v == True:
cmd += " \"%s\"" % (v)
if unit_test_or_directory:
cmd += " " + unit_test_or_directory
debug_message('exec cmd: %s' % cmd)
# Write out every buffer (active window) with changes and a file name.
if plugin_settings.get('save_all_on_run'):
for view in self.window.views():
if view.is_dirty() and view.file_name():
view.run_command('save')
self.window.run_command('exec', {
'cmd': cmd,
'file_regex': '([a-zA-Z0-9\\.\\/_-]+)(?: on line |\:)([0-9]+)$',
'quiet': not bool(DEBUG),
'shell': True,
'syntax': 'Packages/phpunitkit/test-results.hidden-tmLanguage',
'word_wrap': False,
'working_dir': working_dir
})
# Save last run arguments (for current window)
plugin_settings.set_transient('__window__' + str(self.window.id()) + '__run_last_test_args', {
'working_dir': working_dir,
'unit_test_or_directory': unit_test_or_directory,
'options': options
})
# Configure color scheme
panel_settings = self.window.create_output_panel('exec').settings()
panel_settings.set('color_scheme',
plugin_settings.get('color_scheme')
if plugin_settings.get('color_scheme')
else view.settings().get('color_scheme'))
def run_last_test(self):
# get last run arguments (for current window)
args = plugin_settings.get_transient('__window__' + str(self.window.id()) + '__run_last_test_args')
if args:
self.run(args)
class PhpunitRunAllTests(sublime_plugin.WindowCommand):
def run(self):
PHPUnitTextUITestRunner(self.window).run()
class PhpunitRunLastTestCommand(sublime_plugin.WindowCommand):
def run(self):
PHPUnitTextUITestRunner(self.window).run_last_test()
class PhpunitRunSingleFileCommand(sublime_plugin.WindowCommand):
def run(self):
view = self.window.active_view()
if not view:
return
view_helpers = ViewHelpers(view)
if view_helpers.contains_phpunit_test_case():
debug_message('Found test case in %s' % view.file_name())
unit_test = view.file_name()
options = {}
else:
debug_message('No test case found in %s' % view.file_name())
unit_test = view_helpers.find_first_switchable_file()
options = {}
# @todo how to check that the switchable contains a testcase?
if not unit_test:
debug_message('Could not find a PHPUnit test case or a switchable test case')
return
PHPUnitTextUITestRunner(self.window).run({
"unit_test_or_directory": unit_test,
"options": options
})
class PhpunitRunSingleTestCommand(sublime_plugin.WindowCommand):
def run(self):
view = self.window.active_view()
if not view:
return
view_helpers = ViewHelpers(view)
current_function = view_helpers.get_current_function()
if not current_function:
debug_message('No current function')
return
debug_message('Current function is %s' % current_function)
options = {
'filter': '::(' + current_function + ')( with data set .+)?$'
}
PHPUnitTextUITestRunner(self.window).run({
"unit_test_or_directory": "",
"options": options
})
class PhpunitSwitchFile(sublime_plugin.WindowCommand):
def run(self):
current_view = self.window.active_view()
if not current_view:
return
first_switchable = ViewHelpers(current_view).find_first_switchable()
if not first_switchable:
sublime.status_message('No PHPUnit switchable found for "%s"' % current_view.file_name())
return
debug_message('Switching from %s to %s' % (current_view.file_name(), first_switchable))
self.window.open_file(first_switchable[0])
switched_view = self.window.active_view()
if current_view == switched_view: # looks like the class and test-case are in the same view
return
# split in two with test case and class under test side-by-side
if self.window.num_groups() == 1:
self.window.run_command('set_layout', {
"cols": [0.0, 0.5, 1.0],
"rows": [0.0, 1.0],
"cells": [[0, 0, 1, 1], [1, 0, 2, 1]]
})
current_view_index = self.window.get_view_index(current_view)
switched_view_index = self.window.get_view_index(switched_view)
if self.window.num_groups() <= 2 and current_view_index[0] == switched_view_index[0]:
if current_view_index[0] == 0:
self.window.set_view_index(switched_view, 1, 0)
else:
self.window.set_view_index(switched_view, 0, 0)
# ensure focus is not lost from either view
self.window.focus_view(current_view)
self.window.focus_view(switched_view)
class PhpunitToggleLongOption(sublime_plugin.WindowCommand):
def run(self, option):
options = plugin_settings.get_transient('options', {})
options[option] = not bool(options[option]) if option in options else True
plugin_settings.set_transient('options', options)
class PhpunitOpenHtmlCodeCoverageInBrowser(sublime_plugin.WindowCommand):
def run(self):
view = self.window.active_view()
if not view:
return
working_dir = PHPUnitConfigurationFileFinder().find_dirname(view.file_name(), self.window.folders())
if not working_dir:
sublime.status_message('Could not find a PHPUnit working directory')
return
coverage_html_index_html_file = os.path.join(working_dir, 'build/coverage/index.html')
if not os.path.exists(coverage_html_index_html_file):
sublime.status_message('Could not find PHPUnit HTML code coverage %s' % coverage_html_index_html_file)
return
import webbrowser
webbrowser.open_new_tab('file://' + coverage_html_index_html_file) | 0.401923 | 0.074097 |
import string
import random
from datetime import datetime
from hashlib import blake2b as blake
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
ENCODE_KEY = string.digits + string.ascii_letters
NEGATIVE_SYMBOL = "Z"
def create_random_string(n: int) -> str:
return ''.join(random.choices(string.ascii_letters + string.digits, k=n))
def generate_random_id(n: int, preclusion: set) -> str:
random_id = create_random_string(n)
while random_id in preclusion:
random_id = create_random_string(n)
return random_id
def strip_html_symbols(text: str) -> str:
return text.replace("&", "&").replace("<", "<").replace(">", ">")
def make_html_bold(text: str) -> str:
return f"<b>{strip_html_symbols(text)}</b>"
def make_html_italic(text: str) -> str:
return f"<i>{strip_html_symbols(text)}</i>"
def make_html_bold_first_line(text: str) -> str:
text_split = text.split("\n", 1)
output = make_html_bold(text_split[0])
return output + "\n" + strip_html_symbols(text_split[1]) if len(text_split) > 1 else output
def encode(num: int, base=32) -> str:
if num == 0:
return "0"
num, code = (num, "") if num > 0 else (-num, NEGATIVE_SYMBOL)
while num > 0:
num, i = divmod(num, base)
code += ENCODE_KEY[i]
return code
def decode(code: str, base=32) -> int:
if code.startswith(NEGATIVE_SYMBOL):
code, factor = code[1:], -1
else:
factor = 1
num = 0
for i, value in enumerate(code):
num += ENCODE_KEY.find(value) * base ** i
return num * factor
def simple_hash(text: str, salt="", length=16, variance=True) -> str:
if variance:
time_variance = datetime.now().strftime("%H%d%m%y")
salt_bytes = bytes(f"{time_variance}{salt}", "ascii")
else:
salt_bytes = bytes(f"{salt}", "ascii")
hasher = blake(key=salt_bytes, digest_size=16)
hasher.update(bytes(text, "ascii"))
digest = hasher.hexdigest()
encoded_digest = encode(int(digest, 16), base=62)
return encoded_digest[:length]
def build_button(text: str, subject: str, action: str, identifier: str) -> InlineKeyboardButton:
data = f"{subject} {action} {identifier}"
return InlineKeyboardButton(text, callback_data=data)
def build_switch_button(text: str, placeholder: str, to_self=False) -> InlineKeyboardButton:
return InlineKeyboardButton(text, switch_inline_query_current_chat=placeholder) if to_self \
else InlineKeyboardButton(text, switch_inline_query=placeholder)
def build_single_button_markup(text: str, action: str) -> InlineKeyboardMarkup:
data = f"{action}"
button = InlineKeyboardButton(text, callback_data=data)
return InlineKeyboardMarkup([[button]])
def build_single_switch_button_markup(text: str, placeholder: str) -> InlineKeyboardMarkup:
button = InlineKeyboardButton(text, switch_inline_query=placeholder)
return InlineKeyboardMarkup([[button]])
def build_single_link_button_markup(text: str, link: str) -> InlineKeyboardMarkup:
button = InlineKeyboardButton(text, url=link)
return InlineKeyboardMarkup([[button]])
def format_date(date: datetime, date_format="%B %d, %Y") -> str:
return date.strftime(date_format) | util.py | import string
import random
from datetime import datetime
from hashlib import blake2b as blake
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
ENCODE_KEY = string.digits + string.ascii_letters
NEGATIVE_SYMBOL = "Z"
def create_random_string(n: int) -> str:
return ''.join(random.choices(string.ascii_letters + string.digits, k=n))
def generate_random_id(n: int, preclusion: set) -> str:
random_id = create_random_string(n)
while random_id in preclusion:
random_id = create_random_string(n)
return random_id
def strip_html_symbols(text: str) -> str:
return text.replace("&", "&").replace("<", "<").replace(">", ">")
def make_html_bold(text: str) -> str:
return f"<b>{strip_html_symbols(text)}</b>"
def make_html_italic(text: str) -> str:
return f"<i>{strip_html_symbols(text)}</i>"
def make_html_bold_first_line(text: str) -> str:
text_split = text.split("\n", 1)
output = make_html_bold(text_split[0])
return output + "\n" + strip_html_symbols(text_split[1]) if len(text_split) > 1 else output
def encode(num: int, base=32) -> str:
if num == 0:
return "0"
num, code = (num, "") if num > 0 else (-num, NEGATIVE_SYMBOL)
while num > 0:
num, i = divmod(num, base)
code += ENCODE_KEY[i]
return code
def decode(code: str, base=32) -> int:
if code.startswith(NEGATIVE_SYMBOL):
code, factor = code[1:], -1
else:
factor = 1
num = 0
for i, value in enumerate(code):
num += ENCODE_KEY.find(value) * base ** i
return num * factor
def simple_hash(text: str, salt="", length=16, variance=True) -> str:
if variance:
time_variance = datetime.now().strftime("%H%d%m%y")
salt_bytes = bytes(f"{time_variance}{salt}", "ascii")
else:
salt_bytes = bytes(f"{salt}", "ascii")
hasher = blake(key=salt_bytes, digest_size=16)
hasher.update(bytes(text, "ascii"))
digest = hasher.hexdigest()
encoded_digest = encode(int(digest, 16), base=62)
return encoded_digest[:length]
def build_button(text: str, subject: str, action: str, identifier: str) -> InlineKeyboardButton:
data = f"{subject} {action} {identifier}"
return InlineKeyboardButton(text, callback_data=data)
def build_switch_button(text: str, placeholder: str, to_self=False) -> InlineKeyboardButton:
return InlineKeyboardButton(text, switch_inline_query_current_chat=placeholder) if to_self \
else InlineKeyboardButton(text, switch_inline_query=placeholder)
def build_single_button_markup(text: str, action: str) -> InlineKeyboardMarkup:
data = f"{action}"
button = InlineKeyboardButton(text, callback_data=data)
return InlineKeyboardMarkup([[button]])
def build_single_switch_button_markup(text: str, placeholder: str) -> InlineKeyboardMarkup:
button = InlineKeyboardButton(text, switch_inline_query=placeholder)
return InlineKeyboardMarkup([[button]])
def build_single_link_button_markup(text: str, link: str) -> InlineKeyboardMarkup:
button = InlineKeyboardButton(text, url=link)
return InlineKeyboardMarkup([[button]])
def format_date(date: datetime, date_format="%B %d, %Y") -> str:
return date.strftime(date_format) | 0.53607 | 0.206154 |
import io
import os
import time
import gym
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
from datetime import datetime
from threading import Thread
from rl_visualization.app import start_app
from math import sqrt, ceil
sns.set(style='whitegrid')
sns.set_context('paper')
class VisualizationEnv(gym.Wrapper):
def __init__(self, env, agent=None, steps_lookback=1000, episodic=True, features_names=None, actions_names=None, refresh_time=20, path='./logs'):
"""Gym Env wrapper for visualization
Args:
env (gym.Env): Gym Env to be wrapped
"""
super().__init__(env)
self.env = env
self.agent = agent
self.steps_lookback = steps_lookback
self.episodic = episodic
self.user_plots = {}
self.user_plots_values = {}
if isinstance(self.observation_space, gym.spaces.Discrete):
self.state_dim = self.observation_space.n
elif isinstance(self.observation_space, gym.spaces.Box):
self.state_dim = self.observation_space.shape[0]
else:
exit('Observation space not supported.')
if isinstance(self.action_space, gym.spaces.Discrete):
self.action_dim = self.action_space.n
elif isinstance(self.action_space, gym.spaces.Box):
self.action_dim = self.action_space.shape[0]
else:
exit('Action space not supported')
if features_names is not None:
self.features_names = features_names
else:
self.features_names = ['feature_'+str(i) for i in range(self.state_dim)]
if actions_names is not None:
self.actions_names = actions_names
else:
self.actions_names = ['action'+str(i) for i in range(self.action_dim)]
if not os.path.exists(path):
os.mkdir(path)
self.filepath = os.path.join(path, 'rl_vis' + str(datetime.now()).split('.')[0] + "." + 'csv')
self.refresh_time = refresh_time
self.delay = 0
self.experiences = []
self.epsilon = []
self.sa_counter = Counter()
self.obs = None
self.start_app()
def set_agent(self, agent):
self.agent = agent
def step(self, action):
next_obs, reward, done, info = self.env.step(action)
if self.delay > 0:
time.sleep(self.delay)
self.experiences.append((self.obs, action, reward, next_obs, done))
if self.agent is not None and hasattr(self.agent, 'q_table'):
self.sa_counter.update([(self.env.encode(self.obs), action)])
self.obs = next_obs
for plot in self.user_plots:
self.user_plots_values[plot].append(self.user_plots[plot]())
return next_obs, reward, done, info
def reset(self):
self.obs = self.env.reset()
return self.obs
def start_app(self):
self.app_process = Thread(target=start_app, args=(self,))
self.app_process.start()
def get_available_plots(self):
plots = []
if len(self.experiences) == 0:
return plots
if self.agent is not None and hasattr(self.agent, 'q_table'):
plots.append('Q-table')
plots.append('Visit Count')
self.q_table_to_df()
plots.append('Rewards')
if self.episodic:
plots.append('Episode Rewards')
plots.extend(['Features Distributions', 'Actions Distributions'])
plots.extend(self.user_plots.keys())
return plots
def add_plot(self, title, get_func):
self.user_plots[title] = get_func
self.user_plots_values[title] = []
def get_userplot(self, title):
f, ax = plt.subplots(figsize=(14, 8))
plt.title(title)
plt.xlabel('step')
plt.plot(self.user_plots_values[title])
plt.tight_layout()
bytes_image = io.BytesIO()
plt.savefig(bytes_image, format='png')
plt.close()
bytes_image.seek(0)
return bytes_image
def get_featuresdistribution(self):
f, ax = plt.subplots(figsize=(14, 8))
plt.title('Features Distribution')
d = []
for exp in self.experiences[-self.steps_lookback:]:
s = exp[0]
d.append({self.features_names[i]: s[i] for i in range(self.state_dim)})
df = pd.DataFrame(d)
n = ceil(sqrt(self.state_dim))
for i in range(self.state_dim):
plt.subplot(1 if n == self.state_dim else n, n, i+1)
sns.distplot(df[self.features_names[i]], hist=True, color="b", kde_kws={"shade": True})
plt.tight_layout()
return self.plot_to_bytes(plt)
def get_actionsdistribution(self):
f, ax = plt.subplots()
plt.title('Actions Distribution')
if not hasattr(self.experiences[0][1], '__len__'): # int, float or numpy.int
d = []
for exp in self.experiences[-self.steps_lookback:]:
a = exp[1]
d.append({'Action': self.actions_names[a]})
df = pd.DataFrame(d)
sns.catplot(x="Action", kind="count", data=df)
else:
d = []
for exp in self.experiences[-self.steps_lookback:]:
s = exp[1]
d.append({self.actions_names[i]: s[i] for i in range(self.action_dim)})
df = pd.DataFrame(d)
n = ceil(sqrt(self.action_dim))
for i in range(self.action_dim):
plt.subplot(1 if n == self.state_dim else n, n, i+1)
sns.distplot(df[self.actions_names[i]], hist=True, color="r", kde_kws={"shade": True})
plt.tight_layout()
return self.plot_to_bytes(plt)
def get_qtable_png(self):
f, ax = plt.subplots(figsize=(14, 8))
plt.title('Q-table')
df = self.df.pivot(index='State', columns='Action', values='q')
sns.heatmap(df, annot=True, fmt="g", cmap="PiYG", linewidths=.5, center=0.0)
plt.tight_layout()
return self.plot_to_bytes(plt)
def get_visitcount(self):
f, ax = plt.subplots(figsize=(14, 8))
plt.title('Visit Count')
df = self.df.pivot(index='State', columns='Action', values='count')
sns.heatmap(df, annot=True, cmap="YlGnBu", linewidths=.5)
plt.tight_layout()
return self.plot_to_bytes(plt)
def get_rewards(self):
f, ax = plt.subplots(figsize=(14, 8))
plt.title('Rewards')
plt.xlabel('step')
plt.plot([exp[2] for exp in self.experiences], color='g')
plt.tight_layout()
return self.plot_to_bytes(plt)
def get_episoderewards(self):
f, ax = plt.subplots(figsize=(14, 8))
plt.title('Episode Rewards')
d = []
ep_reward = 0
for i in range(1, len(self.experiences)):
if self.experiences[i][4]:
d.append({'step': i, 'Episode Rewards': ep_reward})
ep_reward = 0
else:
ep_reward += self.experiences[i][2]
if len(d) > 0:
sns.lineplot(x='step', y='Episode Rewards', data=pd.DataFrame(d), color='orange')
else:
return None
plt.tight_layout()
return self.plot_to_bytes(plt)
def q_table_to_df(self, num_rows=20):
df = []
for exp in self.experiences[-num_rows:]:
s = self.env.encode(exp[0])
for i, q in enumerate(self.agent.q_table[s]):
df.append({'State': str(self.env.radix_decode(s)), 'Action': self.actions_names[i], 'q': q, 'count': self.sa_counter[(s, i)]})
df = pd.DataFrame(df)
df.drop_duplicates(subset=None, keep='first', inplace=True)
self.df = df
def plot_to_bytes(self, plot):
bytes_image = io.BytesIO()
plt.savefig(bytes_image, format='png')
plt.close()
bytes_image.seek(0)
return bytes_image
def join(self):
self.app_process.join() | rl_visualization/visualization_env.py | import io
import os
import time
import gym
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
from datetime import datetime
from threading import Thread
from rl_visualization.app import start_app
from math import sqrt, ceil
sns.set(style='whitegrid')
sns.set_context('paper')
class VisualizationEnv(gym.Wrapper):
def __init__(self, env, agent=None, steps_lookback=1000, episodic=True, features_names=None, actions_names=None, refresh_time=20, path='./logs'):
"""Gym Env wrapper for visualization
Args:
env (gym.Env): Gym Env to be wrapped
"""
super().__init__(env)
self.env = env
self.agent = agent
self.steps_lookback = steps_lookback
self.episodic = episodic
self.user_plots = {}
self.user_plots_values = {}
if isinstance(self.observation_space, gym.spaces.Discrete):
self.state_dim = self.observation_space.n
elif isinstance(self.observation_space, gym.spaces.Box):
self.state_dim = self.observation_space.shape[0]
else:
exit('Observation space not supported.')
if isinstance(self.action_space, gym.spaces.Discrete):
self.action_dim = self.action_space.n
elif isinstance(self.action_space, gym.spaces.Box):
self.action_dim = self.action_space.shape[0]
else:
exit('Action space not supported')
if features_names is not None:
self.features_names = features_names
else:
self.features_names = ['feature_'+str(i) for i in range(self.state_dim)]
if actions_names is not None:
self.actions_names = actions_names
else:
self.actions_names = ['action'+str(i) for i in range(self.action_dim)]
if not os.path.exists(path):
os.mkdir(path)
self.filepath = os.path.join(path, 'rl_vis' + str(datetime.now()).split('.')[0] + "." + 'csv')
self.refresh_time = refresh_time
self.delay = 0
self.experiences = []
self.epsilon = []
self.sa_counter = Counter()
self.obs = None
self.start_app()
def set_agent(self, agent):
self.agent = agent
def step(self, action):
next_obs, reward, done, info = self.env.step(action)
if self.delay > 0:
time.sleep(self.delay)
self.experiences.append((self.obs, action, reward, next_obs, done))
if self.agent is not None and hasattr(self.agent, 'q_table'):
self.sa_counter.update([(self.env.encode(self.obs), action)])
self.obs = next_obs
for plot in self.user_plots:
self.user_plots_values[plot].append(self.user_plots[plot]())
return next_obs, reward, done, info
def reset(self):
self.obs = self.env.reset()
return self.obs
def start_app(self):
self.app_process = Thread(target=start_app, args=(self,))
self.app_process.start()
def get_available_plots(self):
plots = []
if len(self.experiences) == 0:
return plots
if self.agent is not None and hasattr(self.agent, 'q_table'):
plots.append('Q-table')
plots.append('Visit Count')
self.q_table_to_df()
plots.append('Rewards')
if self.episodic:
plots.append('Episode Rewards')
plots.extend(['Features Distributions', 'Actions Distributions'])
plots.extend(self.user_plots.keys())
return plots
def add_plot(self, title, get_func):
self.user_plots[title] = get_func
self.user_plots_values[title] = []
def get_userplot(self, title):
f, ax = plt.subplots(figsize=(14, 8))
plt.title(title)
plt.xlabel('step')
plt.plot(self.user_plots_values[title])
plt.tight_layout()
bytes_image = io.BytesIO()
plt.savefig(bytes_image, format='png')
plt.close()
bytes_image.seek(0)
return bytes_image
def get_featuresdistribution(self):
f, ax = plt.subplots(figsize=(14, 8))
plt.title('Features Distribution')
d = []
for exp in self.experiences[-self.steps_lookback:]:
s = exp[0]
d.append({self.features_names[i]: s[i] for i in range(self.state_dim)})
df = pd.DataFrame(d)
n = ceil(sqrt(self.state_dim))
for i in range(self.state_dim):
plt.subplot(1 if n == self.state_dim else n, n, i+1)
sns.distplot(df[self.features_names[i]], hist=True, color="b", kde_kws={"shade": True})
plt.tight_layout()
return self.plot_to_bytes(plt)
def get_actionsdistribution(self):
f, ax = plt.subplots()
plt.title('Actions Distribution')
if not hasattr(self.experiences[0][1], '__len__'): # int, float or numpy.int
d = []
for exp in self.experiences[-self.steps_lookback:]:
a = exp[1]
d.append({'Action': self.actions_names[a]})
df = pd.DataFrame(d)
sns.catplot(x="Action", kind="count", data=df)
else:
d = []
for exp in self.experiences[-self.steps_lookback:]:
s = exp[1]
d.append({self.actions_names[i]: s[i] for i in range(self.action_dim)})
df = pd.DataFrame(d)
n = ceil(sqrt(self.action_dim))
for i in range(self.action_dim):
plt.subplot(1 if n == self.state_dim else n, n, i+1)
sns.distplot(df[self.actions_names[i]], hist=True, color="r", kde_kws={"shade": True})
plt.tight_layout()
return self.plot_to_bytes(plt)
def get_qtable_png(self):
f, ax = plt.subplots(figsize=(14, 8))
plt.title('Q-table')
df = self.df.pivot(index='State', columns='Action', values='q')
sns.heatmap(df, annot=True, fmt="g", cmap="PiYG", linewidths=.5, center=0.0)
plt.tight_layout()
return self.plot_to_bytes(plt)
def get_visitcount(self):
f, ax = plt.subplots(figsize=(14, 8))
plt.title('Visit Count')
df = self.df.pivot(index='State', columns='Action', values='count')
sns.heatmap(df, annot=True, cmap="YlGnBu", linewidths=.5)
plt.tight_layout()
return self.plot_to_bytes(plt)
def get_rewards(self):
f, ax = plt.subplots(figsize=(14, 8))
plt.title('Rewards')
plt.xlabel('step')
plt.plot([exp[2] for exp in self.experiences], color='g')
plt.tight_layout()
return self.plot_to_bytes(plt)
def get_episoderewards(self):
f, ax = plt.subplots(figsize=(14, 8))
plt.title('Episode Rewards')
d = []
ep_reward = 0
for i in range(1, len(self.experiences)):
if self.experiences[i][4]:
d.append({'step': i, 'Episode Rewards': ep_reward})
ep_reward = 0
else:
ep_reward += self.experiences[i][2]
if len(d) > 0:
sns.lineplot(x='step', y='Episode Rewards', data=pd.DataFrame(d), color='orange')
else:
return None
plt.tight_layout()
return self.plot_to_bytes(plt)
def q_table_to_df(self, num_rows=20):
df = []
for exp in self.experiences[-num_rows:]:
s = self.env.encode(exp[0])
for i, q in enumerate(self.agent.q_table[s]):
df.append({'State': str(self.env.radix_decode(s)), 'Action': self.actions_names[i], 'q': q, 'count': self.sa_counter[(s, i)]})
df = pd.DataFrame(df)
df.drop_duplicates(subset=None, keep='first', inplace=True)
self.df = df
def plot_to_bytes(self, plot):
bytes_image = io.BytesIO()
plt.savefig(bytes_image, format='png')
plt.close()
bytes_image.seek(0)
return bytes_image
def join(self):
self.app_process.join() | 0.596903 | 0.304701 |
import json
import requests
import warnings
import html
import re
# Force PendingDeprecationWarning (ignored by default in CPython) to always be
# shown, so users actually see the API v1 deprecation notices emitted below.
warnings.filterwarnings('always', '.*', PendingDeprecationWarning)
class HTTPException(Exception):
    """Exception raised when the Vectra API returns an error response.

    The message is constructed by extracting the most specific error
    detail available from the response body, falling back to the raw
    response content when the body is not JSON or has an unknown shape.
    """

    def __init__(self, response):
        """
        :param response: requests.Response object returned by a failed API call
        """
        try:
            r = response.json()
            # The API reports errors in several shapes depending on endpoint/version.
            if 'detail' in r:
                detail = r['detail']
            elif 'errors' in r:
                detail = r['errors'][0]['title']
            elif '_meta' in r:
                detail = r['_meta']['message']
            else:
                detail = response.content
        except Exception:
            # Body was not valid JSON (or lacked expected structure) - use raw bytes
            detail = response.content
        body = 'Status code: {code} - {detail}'.format(code=str(response.status_code), detail=detail)
        super().__init__(body)
def request_error_handler(func):
    """Decorator converting unsuccessful HTTP responses into HTTPException.

    Responses with status 200, 201 or 204 are passed through unchanged;
    any other status raises HTTPException built from the response.
    """
    def request_handler(self, *args, **kwargs):
        response = func(self, *args, **kwargs)
        if response.status_code not in (200, 201, 204):
            raise HTTPException(response)
        return response
    return request_handler
def validate_api_v2(func):
    """Decorator restricting a client method to API v2.

    Raises NotImplementedError when the wrapped method is invoked on a
    client authenticated against API v1.
    """
    def api_validator(self, *args, **kwargs):
        if self.version != 2:
            raise NotImplementedError('Method only accessible via v2 of API')
        return func(self, *args, **kwargs)
    return api_validator
def deprecation(message):
    """Emit *message* as a PendingDeprecationWarning."""
    warnings.warn(message, PendingDeprecationWarning)
def param_deprecation(key):
    """Warn that the query parameter *key* belongs to the deprecated API v1.

    :param key: name of the query parameter being deprecated
    """
    # Fixed typo in the user-visible message: 'annouced' -> 'announced'
    message = '{0} will be deprecated with Vectra API v1 which will be announced in an upcoming release'.format(key)
    warnings.warn(message, PendingDeprecationWarning)
class VectraClient(object):
def __init__(self, url=None, token=None, user=None, password=None, verify=False):
"""
Initialize Vectra client
:param url: IP or hostname of Vectra brain (ex https://www.example.com) - required
:param token: API token for authentication when using API v2*
:param user: Username to authenticate to Vectra brain when using API v1*
:param password: Password when using username to authenticate using API v1*
:param verify: Verify SSL (default: False) - optional
*Either token or user are required
"""
self.url = url
self.version = 2 if token else 1
self.verify = verify
url = VectraClient._remove_trailing_slashes(url)
if token:
self.url = '{url}/api/v2'.format(url=url)
self.headers = {
'Authorization': "Token " + token.strip(),
'Content-Type': "application/json",
'Cache-Control': "no-cache"
}
elif user and password:
self.url = '{url}/api'.format(url=url)
self.auth = (user, password)
deprecation('Deprecation of the Vectra API v1 will be announced in an upcoming release. Migrate to API v2'
' when possible')
else:
raise RuntimeError("At least one form of authentication is required. Please provide a token or username"
" and password")
@staticmethod
def _remove_trailing_slashes(url):
url = url[:-1] if url.endswith('/') else url
return url
@staticmethod
def _generate_campaign_params(args):
"""
Generate query parameters for campaigns based on provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['fields', 'dst_ip', 'target_domain', 'state', 'name', 'last_updated_gte',
'note_modified_timestamp_gte','page', 'page_size']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid campaign query parameter'.format(str(k)))
return params
@staticmethod
def _generate_host_params(args):
"""
Generate query parameters for hosts based on provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['active_traffic', 'all', 'c_score', 'c_score_gte', 'certainty', 'certainty_gte',
'fields', 'has_active_traffic', 'include_detection_summaries', 'is_key_asset', 'is_targeting_key_asset',
'key_asset', 'last_detection_timestamp', 'last_source', 'mac_address', 'max_id', 'min_id',
'name', 'note_modified_timestamp_gte', 'ordering','page', 'page_size', 'privilege_category',
'privilege_level', 'privilege_level_gte', 'state', 't_score', 't_score_gte', 'tags',
'targets_key_asset', 'threat', 'threat_gte']
deprecated_keys = ['c_score', 'c_score_gte', 'key_asset', 't_score', 't_score_gte', 'targets_key_asset']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid host query parameter'.format(str(k)))
if k in deprecated_keys: param_deprecation(k)
return params
@staticmethod
def _generate_host_by_id_params(args):
"""
Generate query parameters for host based on provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['fields', 'include_external', 'include_ldap']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid host query parameter'.format(str(k)))
return params
@staticmethod
def _generate_detection_params(args):
"""
Generate query parameters for detections based on provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['c_score', 'c_score_gte', 'category', 'certainty', 'certainty_gte', 'description',
'detection', 'detection_category', 'detection_type', 'fields', 'host_id', 'is_targeting_key_asset',
'is_triaged', 'last_timestamp', 'max_id', 'min_id', 'note_modified_timestamp_gte', 'ordering',
'page', 'page_size', 'src_ip', 'state', 't_score', 't_score_gte', 'tags', 'targets_key_asset',
'threat', 'threat_gte']
deprecated_keys = ['c_score', 'c_score_gte', 'category', 'detection', 't_score', 't_score_gte', 'targets_key_asset']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid detection query parameter'.format(str(k)))
if k in deprecated_keys: param_deprecation(k)
return params
@staticmethod
def _generate_group_params(args):
"""
Generate query parameters for groups based on provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['description', 'domains', 'host_ids', 'host_names', 'last_modified_by',
'last_modified_timestamp', 'name', 'page', 'page_size', 'type']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid group query parameter'.format(str(k)))
return params
@staticmethod
def _generate_rule_params(args):
    """
    Build the query-parameter dict for rule requests.
    :param args: dict of candidate query parameters
    :raises ValueError: if a key is not a supported query parameter
    :rtype: dict
    """
    allowed = ('contains', 'fields', 'include_templates', 'page', 'page_size', 'ordering')
    for key in args:
        if key not in allowed:
            raise ValueError('argument {} is an invalid rule query parameter'.format(str(key)))
    # Drop parameters that were not supplied
    return {key: value for key, value in args.items() if value is not None}
@staticmethod
def _generate_rule_by_id_params(args):
    """
    Build the query-parameter dict for a rule-by-id request.
    :param args: dict of candidate query parameters
    :raises ValueError: if a key is not a supported query parameter
    :rtype: dict
    """
    for key in args:
        if key != 'fields':
            raise ValueError('argument {} is an invalid rule query parameter'.format(str(key)))
    # Drop parameters that were not supplied
    return {key: value for key, value in args.items() if value is not None}
@staticmethod
def _generate_user_params(args):
    """
    Build the query-parameter dict for user requests.
    :param args: dict of candidate query parameters
    :raises ValueError: if a key is not a supported query parameter
    :rtype: dict
    """
    allowed = ('username', 'role', 'account_type', 'authentication_profile', 'last_login_gte')
    for key in args:
        if key not in allowed:
            raise ValueError('argument {} is an invalid user query parameter'.format(str(key)))
    # Drop parameters that were not supplied
    return {key: value for key, value in args.items() if value is not None}
@staticmethod
def _generate_ip_address_params(args):
    """
    Build the query-parameter dict for ip address queries.
    :param args: dict of candidate query parameters
    :raises ValueError: if a key is not a supported query parameter
    :rtype: dict
    """
    allowed = ('include_ipv4', 'include_ipv6')
    for key in args:
        if key not in allowed:
            raise ValueError('argument {} is an invalid ip address query parameter'.format(str(key)))
    # Drop parameters that were not supplied
    return {key: value for key, value in args.items() if value is not None}
@staticmethod
def _generate_subnet_params(args):
    """
    Build the query-parameter dict for subnet queries.
    :param args: dict of candidate query parameters
    :raises ValueError: if a key is not a supported query parameter
    :rtype: dict
    """
    allowed = ('ordering', 'search')
    for key in args:
        if key not in allowed:
            raise ValueError('argument {} is an invalid subnet query parameter'.format(str(key)))
    # Drop parameters that were not supplied
    return {key: value for key, value in args.items() if value is not None}
@staticmethod
def _generate_internal_network_params(args):
    """
    Generate query parameters for internal network queries based on provided args
    :param args: dict of keys to generate query params
    :raises ValueError: if a key is not a supported query parameter
    :rtype: dict
    """
    params = {}
    valid_keys = ['include_ipv4', 'include_ipv6']
    for k, v in args.items():
        if k in valid_keys:
            # Skip parameters that were not supplied
            if v is not None:
                params[k] = v
        else:
            raise ValueError('argument {} is an invalid internal network query parameter'.format(str(k)))
    return params
@validate_api_v2
@request_error_handler
def _get_request(self, url, **kwargs):
    """
    Issue a GET against an absolute URL; used to follow pagination links.
    :param url: absolute URL to request
    :param kwargs: forwarded as query parameters
    :rtype: requests.Response
    """
    query = dict(kwargs)
    if self.version == 2:
        return requests.get(url, headers=self.headers, params=query, verify=self.verify)
    return requests.get(url, auth=self.auth, params=query, verify=self.verify)
@validate_api_v2
@request_error_handler
def get_campaigns(self, **kwargs):
    """
    Query all campaigns - all parameters are optional
    :param dst_ip: filter on campaign destination IP
    :param target_domain: filter on campaign destination domain
    :param state: campaign state, possible values are: init, active, closed, closed_never_active
    :param name: filter on campaign name
    :param last_updated_gte: return only campaigns with a last updated timestamp gte (datetime)
    :param note_modified_timestamp_gte: return only campaigns with a last updated timestamp on their note gte (datetime)
    :param fields: comma separated string of fields to be filtered and returned
        possible values are: id, dst_ip, target_domain, state, name, last_updated,
        note, note_modified_by, note_modified_timestamp
    :param page: page number to return (int)
    :param page_size: number of objects to return in response (int)
    :rtype: requests.Response
    """
    endpoint = '{url}/campaigns'.format(url=self.url)
    query = self._generate_campaign_params(kwargs)
    return requests.get(endpoint, headers=self.headers, params=query, verify=self.verify)
def get_all_campaigns(self, **kwargs):
    """
    Generator to retrieve all campaigns page by page - all parameters are optional
    Accepts the same query parameters as get_campaigns(); see that method for details.
    :rtype: generator of requests.Response
    """
    response = requests.get('{url}/campaigns'.format(url=self.url), headers=self.headers,
        params=self._generate_campaign_params(kwargs), verify=self.verify)
    yield response
    # Follow the API's pagination links until exhausted
    while response.json()['next']:
        response = self._get_request(url=response.json()['next'])
        yield response
@validate_api_v2
@request_error_handler
def get_campaign_by_id(self, campaign_id=None, **kwargs):
    """
    Get campaign by id
    :param campaign_id: id of the campaign to retrieve - required
    :raises ValueError: if campaign_id is not supplied
    NOTE(review): **kwargs is accepted but not forwarded to the request - confirm intended
    """
    if not campaign_id:
        raise ValueError('Campaign id required')
    endpoint = '{url}/campaigns/{id}'.format(url=self.url, id=campaign_id)
    return requests.get(endpoint, headers=self.headers, verify=self.verify)
@request_error_handler
def get_hosts(self, **kwargs):
    """
    Query all hosts - all parameters are optional
    :param all: if set to False, endpoint will only return hosts that have active detections, active traffic or are marked as key assets - default False
    :param active_traffic: only return hosts that have seen traffic in the last 2 hours (bool)
    :param c_score: certainty score (int) - will be removed with deprecation of v1 of api
    :param c_score_gte: certainty score greater than or equal to (int) - will be removed with deprecation of v1 of api
    :param certainty: certainty score (int)
    :param certainty_gte: certainty score greater than or equal to (int)
    :param fields: comma separated string of fields to be filtered and returned
        possible values are: id,name,active_traffic,has_active_traffic,t_score,threat,c_score,
        certainty,severity,last_source,ip,previous_ips,last_detection_timestamp,key_asset,
        is_key_asset,state,targets_key_asset,is_targeting_key_asset,detection_set,
        host_artifact_set,sensor,sensor_name,tags,note,note_modified_by,note_modified_timestamp,
        url,host_url,last_modified,assigned_to,assigned_date,groups,has_custom_model,privilege_level,
        privilege_category,probable_owner,detection_profile
    :param has_active_traffic: host has active traffic (bool)
    :param include_detection_summaries: include detection summary in response (bool)
    :param is_key_asset: host is key asset (bool)
    :param is_targeting_key_asset: host is targeting key asset (bool)
    :param key_asset: host is key asset (bool) - will be removed with deprecation of v1 of api
    :param last_detection_timestamp: timestamp of last detection on this host (datetime)
    :param last_source: registered ip address of host
    :param mac_address: registered mac address of host
    :param max_id: maximum ID of host returned
    :param min_id: minimum ID of host returned
    :param name: registered name of host
    :param note_modified_timestamp_gte: note last modified timestamp greater than or equal to (datetime)
    :param ordering: field to use to order response
    :param page: page number to return (int)
    :param page_size: number of objects to return in response (int)
    :param privilege_category: privilege category of host (low/medium/high)
    :param privilege_level: privilege level of host (0-10)
    :param privilege_level_gte: privilege level of host greater than or equal to (int)
    :param state: state of host (active/inactive)
    :param t_score: threat score (int) - will be removed with deprecation of v1 of api
    :param t_score_gte: threat score greater than or equal to (int) - will be removed with deprecation of v1 of api
    :param tags: tags assigned to host
    :param targets_key_asset: host is targeting key asset (bool)
    :param threat: threat score (int)
    :param threat_gte: threat score greater than or equal to (int)
    :rtype: requests.Response
    """
    # v2 authenticates with token headers, v1 with basic auth
    if self.version == 2:
        return requests.get('{url}/hosts'.format(url=self.url), headers=self.headers,
            params=self._generate_host_params(kwargs), verify=self.verify)
    else:
        return requests.get('{url}/hosts'.format(url=self.url), auth=self.auth,
            params=self._generate_host_params(kwargs), verify=self.verify)
def get_all_hosts(self, **kwargs):
    """
    Generator to retrieve all hosts page by page - all parameters are optional
    Accepts the same query parameters as get_hosts(); see that method for the
    full list of supported filters.
    :rtype: generator of requests.Response
    """
    resp = requests.get('{url}/hosts'.format(url=self.url), headers=self.headers,
        params=self._generate_host_params(kwargs), verify=self.verify)
    yield resp
    # Follow the API's pagination links until exhausted
    while resp.json()['next']:
        resp = self._get_request(url=resp.json()['next'])
        yield resp
@request_error_handler
def get_host_by_id(self, host_id=None, **kwargs):
    """
    Get host by id
    :param host_id: host id - required
    :param include_external: include fields regarding external connectors (e.g. CrowdStrike) - optional
    :param include_ldap: include LDAP context pulled over AD connector - optional
    :param fields: comma separated string of fields to be filtered and returned - optional
        possible values are: active_traffic, assigned_date, assigned_to, c_score, campaign_summaries,
        carbon_black, certainty, crowdstrike, detection_profile, detection_set, detection_summaries,
        groups, has_active_traffic, has_custom_model, has_shell_knocker_learnings, host_artifact_set,
        host_luid, host_session_luid, host_url, id, ip, is_key_asset, is_targeting_key_asset, key_asset,
        last_detection_timestamp, last_modified, last_seen, last_source, ldap, name, note, note_modified_by,
        note_modified_timestamp, previous_ips, privilege_category, privilege_level, probable_owner, sensor,
        sensor_name, severity, shell_knocker, state, suspicious_admin_learnings, t_score, tags, targets_key_asset,
        threat, url, vcenter
    :raises ValueError: if host_id is not supplied
    """
    if not host_id:
        raise ValueError('Host id required')
    endpoint = '{url}/hosts/{id}'.format(url=self.url, id=host_id)
    query = self._generate_host_by_id_params(kwargs)
    # v2 authenticates with token headers, v1 with basic auth
    if self.version == 2:
        return requests.get(endpoint, headers=self.headers, params=query, verify=self.verify)
    return requests.get(endpoint, auth=self.auth, params=query, verify=self.verify)
@validate_api_v2
@request_error_handler
def set_key_asset(self, host_id=None, set=True):
    """
    (Un)set host as key asset
    :param host_id: id of host needing to be set - required
    :param set: set flag to true if setting host as key asset
        NOTE: the parameter name shadows the builtin 'set' but is kept for API compatibility
    :raises ValueError: if host_id is not supplied
    """
    if not host_id:
        raise ValueError('Host id required')
    # API expects the flag as the strings 'true'/'false'
    payload = {'key_asset': 'true' if set else 'false'}
    return requests.patch('{url}/hosts/{id}'.format(url=self.url, id=host_id), headers=self.headers, json=payload,
        verify=self.verify)
@validate_api_v2
@request_error_handler
def get_host_tags(self, host_id=None):
    """
    Get host tags
    :param host_id: ID of the host for which to retrieve the tags
    :raises ValueError: if host_id is not supplied
    """
    if not host_id:
        raise ValueError('Host id required')
    # Honor the client's TLS verification setting instead of hardcoding verify=False
    return requests.get('{url}/tagging/host/{id}'.format(url=self.url, id=host_id), headers=self.headers,
        verify=self.verify)
@validate_api_v2
@request_error_handler
def set_host_tags(self, host_id=None, tags=None, append=False):
    """
    Set host tags
    :param host_id: ID of the host to tag
    :param tags: list of tags to add to host (defaults to empty list)
    :param append: overwrites existing list if set to False, appends to existing tags if set to True
        Set to empty list to clear tags (default: False)
    :raises ValueError: if host_id is not supplied
    :raises TypeError: if tags is not a list
    """
    if not host_id:
        raise ValueError('Host id required')
    # None stands in for an empty list to avoid a shared mutable default argument
    if tags is None:
        tags = []
    if not isinstance(tags, list):
        raise TypeError('tags must be of type list')
    if append:
        current_list = self.get_host_tags(host_id=host_id).json()['tags']
        payload = {
            "tags": current_list + tags
        }
    else:
        payload = {
            "tags": tags
        }
    return requests.patch('{url}/tagging/host/{id}'.format(url=self.url, id=host_id), headers=self.headers,
        json=payload, verify=self.verify)
@validate_api_v2
@request_error_handler
def bulk_set_hosts_tag(self, tag, host_ids):
    """
    Set a tag in bulk on multiple hosts. Only one tag can be set at a time
    :param tag: tag to set
    :param host_ids: IDs of the hosts for which to set the tag
    :raises TypeError: if host_ids is not a list
    """
    if not isinstance(host_ids, list):
        raise TypeError('Host IDs must be of type list')
    payload = {
        'objectIds': host_ids,
        'tag': tag
    }
    # Honor the client's TLS verification setting instead of hardcoding verify=False
    return requests.post('{url}/tagging/host'.format(url=self.url), headers=self.headers, json=payload,
        verify=self.verify)
@validate_api_v2
@request_error_handler
def bulk_delete_hosts_tag(self, tag, host_ids):
    """
    Delete a tag in bulk on multiple hosts. Only one tag can be deleted at a time
    :param tag: tag to delete
    :param host_ids: IDs of the hosts on which to delete the tag
    :raises TypeError: if host_ids is not a list
    """
    if not isinstance(host_ids, list):
        raise TypeError('Host IDs must be of type list')
    payload = {
        'objectIds': host_ids,
        'tag': tag
    }
    # Honor the client's TLS verification setting instead of hardcoding verify=False
    return requests.delete('{url}/tagging/host'.format(url=self.url), headers=self.headers, json=payload,
        verify=self.verify)
@validate_api_v2
@request_error_handler
def get_host_note(self, host_id=None):
    """
    Get host notes
    :param host_id: ID of the host for which to retrieve the note
    For consistency we return a requests.models.Response object
    As we do not want to return the complete host body, we alter the response content
    :raises ValueError: if host_id is not supplied
    """
    if not host_id:
        raise ValueError('Host id required')
    response = requests.get('{url}/hosts/{id}'.format(url=self.url, id=host_id), headers=self.headers, verify=self.verify)
    if response.status_code == 200:
        # API endpoint returns HTML escaped characters
        note = response.json()['note']
        note = html.unescape(note) if note else ''
        body = {'status': 'success', 'host_id': str(host_id), 'note': note}
        response._content = json.dumps(body).encode('utf-8')
    return response
@validate_api_v2
@request_error_handler
def set_host_note(self, host_id=None, note='', append=False):
    """
    Set host note
    :param host_id: ID of the host on which to set the note
    :param note: content of the note to set
    :param append: overwrites existing note if set to False, appends if set to True
        Set to empty note string to clear host note
    :raises ValueError: if host_id is not supplied
    :raises TypeError: if note is not a string
    """
    if not host_id:
        raise ValueError('Host id required')
    if not isinstance(note, str):
        raise TypeError('Note must be of type str')
    if append:
        existing = self.get_host_note(host_id=host_id).json()['note']
        # Join with a newline when both notes are non-empty; an empty new
        # note leaves the existing note untouched
        if existing and note:
            note = '{}{}{}'.format(existing, '\n', note)
        elif existing:
            note = existing
    payload = {"note": note}
    return requests.patch('{url}/hosts/{id}'.format(url=self.url, id=host_id), headers=self.headers, data=json.dumps(payload),
        verify=self.verify)
@request_error_handler
def get_detections(self, **kwargs):
    """
    Query all detections - all parameters are optional
    :param c_score: certainty score (int) - will be removed with deprecation of v1 of api
    :param c_score_gte: certainty score greater than or equal to (int) - will be removed with deprecation of v1 of api
    :param category: detection category - will be removed with deprecation of v1 of api
    :param certainty: certainty score (int)
    :param certainty_gte: certainty score greater than or equal to (int)
    :param detection: detection type
    :param detection_type: detection type
    :param detection_category: detection category
    :param description:
    :param fields: comma separated string of fields to be filtered and returned
        possible values are: id, url, detection_url, category, detection, detection_category,
        detection_type, custom_detection, description, src_ip, state, t_score, c_score,
        certainty, threat, first_timestamp, last_timestamp, targets_key_asset,
        is_targeting_key_asset, src_account, src_host, note, note_modified_by,
        note_modified_timestamp, sensor, sensor_name, tags, triage_rule_id, assigned_to,
        assigned_date, groups, is_marked_custom, is_custom_model
    :param host_id: detection id (int)
    :param is_targeting_key_asset: detection is targeting key asset (bool)
    :param is_triaged: detection is triaged
    :param last_timestamp: timestamp of last activity on detection (datetime)
    :param max_id: maximum ID of detection returned
    :param min_id: minimum ID of detection returned
    :param ordering: field used to sort response
    :param page: page number to return (int)
    :param page_size: number of objects to return in response (int)
    :param src_ip: source ip address of host attributed to detection
    :param state: state of detection (active/inactive)
    :param t_score: threat score (int) - will be removed with deprecation of v1 of api
    :param t_score_gte: threat score is greater than or equal to (int) - will be removed with deprecation of v1 of api
    :param tags: tags assigned to detection
    :param targets_key_asset: detection targets key asset (bool) - will be removed with deprecation of v1 of api
    :param threat: threat score (int)
    :param threat_gte: threat score is greater than or equal to (int)
    :param note_modified_timestamp_gte: note last modified timestamp greater than or equal to (datetime)
    :rtype: requests.Response
    """
    # v2 authenticates with token headers, v1 with basic auth
    if self.version == 2:
        return requests.get('{url}/detections'.format(url=self.url), headers=self.headers,
            params=self._generate_detection_params(kwargs), verify=self.verify)
    else:
        return requests.get('{url}/detections'.format(url=self.url), auth=self.auth,
            params=self._generate_detection_params(kwargs), verify=self.verify)
def get_all_detections(self, **kwargs):
    """
    Generator to retrieve all detections page by page - all parameters are optional
    Accepts the same query parameters as get_detections(); see that method for the
    full list of supported filters.
    :rtype: generator of requests.Response
    """
    response = requests.get('{url}/detections'.format(url=self.url), headers=self.headers,
        params=self._generate_detection_params(kwargs), verify=self.verify)
    yield response
    # Follow the API's pagination links until exhausted
    while response.json()['next']:
        response = self._get_request(url=response.json()['next'])
        yield response
@request_error_handler
def get_detection_by_id(self, detection_id=None, **kwargs):
    """
    Get detection by id
    :param detection_id: detection id - required
    :param fields: comma separated string of fields to be filtered and returned - optional
        possible values are: id, url, detection_url, category, detection, detection_category,
        detection_type, custom_detection, description, src_ip, state, t_score, c_score,
        certainty, threat, first_timestamp, last_timestamp, targets_key_asset,
        is_targeting_key_asset, src_account, src_host, note, note_modified_by,
        note_modified_timestamp, sensor, sensor_name, tags, triage_rule_id, assigned_to,
        assigned_date, groups, is_marked_custom, is_custom_model
    :raises ValueError: if detection_id is not supplied
    """
    if not detection_id:
        raise ValueError('Detection id required')
    endpoint = '{url}/detections/{id}'.format(url=self.url, id=detection_id)
    query = self._generate_detection_params(kwargs)
    # v2 authenticates with token headers, v1 with basic auth
    if self.version == 2:
        return requests.get(endpoint, headers=self.headers, params=query, verify=self.verify)
    return requests.get(endpoint, auth=self.auth, params=query, verify=self.verify)
@validate_api_v2
@request_error_handler
def mark_detections_fixed(self, detection_ids=None):
    """
    Mark detections as fixed
    :param detection_ids: list of detections to mark as fixed
    :raises ValueError: if detection_ids is not a list
    :rtype: requests.Response
    """
    if not isinstance(detection_ids, list):
        raise ValueError('Must provide a list of detection IDs to mark as fixed')
    return self._toggle_detections_fixed(detection_ids, fixed=True)
@validate_api_v2
@request_error_handler
def unmark_detections_fixed(self, detection_ids=None):
    """
    Unmark detections as fixed
    :param detection_ids: list of detections to unmark as fixed
    :raises ValueError: if detection_ids is not a list
    :rtype: requests.Response
    """
    if not isinstance(detection_ids, list):
        raise ValueError('Must provide a list of detection IDs to unmark as fixed')
    return self._toggle_detections_fixed(detection_ids, fixed=False)
def _toggle_detections_fixed(self, detection_ids, fixed):
    """
    Internal helper shared by mark/unmark_detections_fixed
    :param detection_ids: list of detection IDs to update
    :param fixed: True to mark fixed, False to unmark
    :rtype: requests.Response
    """
    # API expects the flag as the strings 'True'/'False'
    payload = {'detectionIdList': detection_ids, 'mark_as_fixed': str(fixed)}
    return requests.patch('{url}/detections'.format(url=self.url), json=payload, headers=self.headers,
        verify=self.verify)
@validate_api_v2
@request_error_handler
def mark_detections_custom(self, detection_ids=None, triage_category=None):
    """
    Mark detections as custom
    :param detection_ids: list of detection IDs to mark as custom (defaults to empty list)
    :param triage_category: custom name to give detection
    :raises ValueError: if detection_ids is not a list
    :rtype: requests.Response
    """
    # None stands in for an empty list to avoid a shared mutable default argument
    if detection_ids is None:
        detection_ids = []
    if not isinstance(detection_ids, list):
        raise ValueError('Must provide a list of detection IDs to mark as custom')
    payload = {
        "triage_category": triage_category,
        "detectionIdList": detection_ids
    }
    return requests.post('{url}/rules'.format(url=self.url), headers=self.headers, json=payload,
        verify=self.verify)
@validate_api_v2
@request_error_handler
def unmark_detections_custom(self, detection_ids=None):
    """
    Unmark detection as custom
    :param detection_ids: list of detection IDs to unmark as custom (defaults to empty list)
    :raises ValueError: if detection_ids is not a list
    :rtype: requests.Response
    """
    # None stands in for an empty list to avoid a shared mutable default argument
    if detection_ids is None:
        detection_ids = []
    if not isinstance(detection_ids, list):
        raise ValueError('Must provide a list of detection IDs to unmark as custom')
    payload = {
        "detectionIdList": detection_ids
    }
    response = requests.delete('{url}/rules'.format(url=self.url), headers=self.headers, json=payload,
        verify=self.verify)
    # DELETE returns an empty response, but we populate the response for consistency with the mark_as_fixed() function
    json_dict = {'_meta': {'message': 'Successfully unmarked detections', 'level': 'Success'}}
    response._content = json.dumps(json_dict).encode('utf-8')
    return response
@validate_api_v2
@request_error_handler
def get_detection_tags(self, detection_id=None):
    """
    Get detection tags
    :param detection_id: ID of the detection for which to retrieve the tags
    """
    # Honor the client's TLS verification setting instead of hardcoding verify=False
    return requests.get('{url}/tagging/detection/{id}'.format(url=self.url, id=detection_id), headers=self.headers,
        verify=self.verify)
@validate_api_v2
@request_error_handler
def set_detection_tags(self, detection_id=None, tags=None, append=False):
    """
    Set detection tags
    :param detection_id: ID of the detection to tag
    :param tags: list of tags to add to detection (defaults to empty list)
    :param append: overwrites existing list if set to False, appends to existing tags if set to True
        Set to empty list to clear all tags (default: False)
    :raises TypeError: if tags is not a list
    """
    # None stands in for an empty list to avoid a shared mutable default argument
    if tags is None:
        tags = []
    if not isinstance(tags, list):
        raise TypeError('tags must be of type list')
    if append:
        current_list = self.get_detection_tags(detection_id=detection_id).json()['tags']
        payload = {
            "tags": current_list + tags
        }
    else:
        payload = {
            "tags": tags
        }
    return requests.patch('{url}/tagging/detection/{id}'.format(url=self.url, id=detection_id), headers=self.headers,
        json=payload, verify=self.verify)
@validate_api_v2
@request_error_handler
def bulk_set_detections_tag(self, tag, detection_ids):
    """
    Set a tag in bulk on multiple detections. Only one tag can be set at a time
    :param tag: tag to set
    :param detection_ids: IDs of the detections for which to set the tag
    :raises TypeError: if detection_ids is not a list
    """
    if not isinstance(detection_ids, list):
        raise TypeError('Detection IDs must be of type list')
    payload = {
        'objectIds': detection_ids,
        'tag': tag
    }
    # Honor the client's TLS verification setting instead of hardcoding verify=False
    return requests.post('{url}/tagging/detection'.format(url=self.url), headers=self.headers, json=payload,
        verify=self.verify)
@validate_api_v2
@request_error_handler
def bulk_delete_detections_tag(self, tag, detection_ids):
    """
    Delete a tag in bulk on multiple detections. Only one tag can be deleted at a time
    :param tag: tag to delete
    :param detection_ids: IDs of the detections for which to delete the tag
    :raises TypeError: if detection_ids is not a list
    """
    if not isinstance(detection_ids, list):
        raise TypeError('Detection IDs must be of type list')
    payload = {
        'objectIds': detection_ids,
        'tag': tag
    }
    # Honor the client's TLS verification setting instead of hardcoding verify=False
    return requests.delete('{url}/tagging/detection'.format(url=self.url), headers=self.headers, json=payload,
        verify=self.verify)
@validate_api_v2
@request_error_handler
def get_detection_note(self, detection_id=None):
    """
    Get detection notes
    :param detection_id: ID of the detection for which to retrieve the note
    For consistency we return a requests.models.Response object
    As we do not want to return the complete detection body, we alter the response content
    """
    response = requests.get('{url}/detections/{id}'.format(url=self.url, id=detection_id), headers=self.headers, verify=self.verify)
    if response.status_code == 200:
        # API endpoint returns HTML escaped characters
        note = response.json()['note']
        note = html.unescape(note) if note else ''
        body = {'status': 'success', 'detection_id': str(detection_id), 'note': note}
        response._content = json.dumps(body).encode('utf-8')
    return response
@validate_api_v2
@request_error_handler
def set_detection_note(self, detection_id=None, note='', append=False):
    """
    Set detection note
    :param detection_id: ID of the detection on which to set the note
    :param note: content of the note to set
    :param append: overwrites existing note if set to False, appends if set to True
        Set to empty note string to clear detection note
    :raises TypeError: if note is not a string
    """
    if not isinstance(note, str):
        raise TypeError('Note must be of type str')
    if append:
        existing = self.get_detection_note(detection_id=detection_id).json()['note']
        # Join with a newline when both notes are non-empty; an empty new
        # note leaves the existing note untouched
        if existing and note:
            note = '{}{}{}'.format(existing, '\n', note)
        elif existing:
            note = existing
    payload = {"note": note}
    return requests.patch('{url}/detections/{id}'.format(url=self.url, id=detection_id), headers=self.headers, json=payload,
        verify=self.verify)
@validate_api_v2
def get_detection_pcap(self, detection_id=None, filename=None):
    """
    Download the pcap attached to a detection and write it to disk
    :param detection_id: ID of the detection for which to get a pcap
    :param filename: filename to write the pcap to. Will be overwritten if already exists.
    :raises HTTPException: if the API does not return a 2xx success status
    :rtype: requests.Response (content replaced with a small status JSON for consistency)
    """
    # Honor the client's TLS verification setting instead of hardcoding verify=False
    response = requests.get('{url}/detections/{id}/pcap'.format(url=self.url, id=detection_id),
        headers=self.headers, verify=self.verify)
    if response.status_code not in [200, 201, 204]:
        raise HTTPException(response)
    with open(filename, 'wb') as f:
        f.write(response.content)
    # Return a <Response> object for consistency
    json_dict = {'status': 'success', 'detection_id': str(detection_id), 'file_created': filename}
    response._content = json.dumps(json_dict).encode('utf-8')
    return response
# TODO add request_error_handler decorator as soon as get_rules_by_name() returns requests.Response object
@validate_api_v2
def get_rules(self, name=None, rule_id=None, **kwargs):
    """
    Query all rules
    :param name: name of rule to search (substring matching) - deprecated
    :param rule_id: ID of rule to return - deprecated
    :param contains:
    :param fields: comma separated string of fields to be filtered and returned
        possible values are: active_detections, all_hosts, category, created_timestamp, description,
        enabled, flex1, flex2, flex3, flex4, flex5, flex6, host, host_group, id, identity, ip,
        ip_group, is_whitelist, last_timestamp, priority, remote1_dns, remote1_dns_groups,
        remote1_ip, remote1_ip_groups, remote1_kerb_account, remote1_kerb_service, remote1_port,
        remote1_proto, remote2_dns, remote2_dns_groups, remote2_ip, remote2_ip_groups, remote2_port,
        remote2_proto, sensor_luid, smart_category, template, total_detections, type_vname, url
    :param include_templates: include rule templates, default is False
    :param ordering: field used to sort response
    :param page: page number to return (int)
    :param page_size: number of objects to return in response (int)
    """
    deprecation('Some rules are no longer compatible with the APIv2, please switch to the APIv2.1')
    # Legacy argument paths delegate to their dedicated replacements
    if name:
        deprecation('The "name" argument will be removed from this function, please use get_all_rules with the "contains" query parameter')
        return self.get_rules_by_name(triage_category=name)
    if rule_id:
        deprecation('The "rule_id" argument will be removed from this function, please use the corresponding get_rule_by_id function')
        return self.get_rule_by_id(rule_id)
    return requests.get('{url}/rules'.format(url=self.url), headers=self.headers,
        params=self._generate_rule_params(kwargs), verify=self.verify)
@validate_api_v2
@request_error_handler
def get_rule_by_id(self, rule_id, **kwargs):
"""
Get triage rules by id
:param rule_id: id of triage rule to retrieve
:param fields: comma separated string of fields to be filtered and returned
possible values are: active_detections, all_hosts, category, created_timestamp, description,
enabled, flex1, flex2, flex3, flex4, flex5, flex6, host, host_group, id, identity, ip,
ip_group, is_whitelist, last_timestamp, priority, remote1_dns, remote1_dns_groups,
remote1_ip, remote1_ip_groups, remote1_kerb_account, remote1_kerb_service, remote1_port,
remote1_proto, remote2_dns, remote2_dns_groups, remote2_ip, remote2_ip_groups, remote2_port,
remote2_proto, sensor_luid, smart_category, template, total_detections, type_vname, url
"""
if not rule_id:
raise ValueError('Rule id required')
deprecation('Some rules are no longer compatible with the APIv2, please switch to the APIv2.1')
return requests.get('{url}/rules/{id}'.format(url=self.url, id=rule_id), headers=self.headers,
params=self._generate_rule_by_id_params(kwargs), verify=False)
# TODO make return type requests.Reponse
@validate_api_v2
def get_rules_by_name(self, triage_category=None, description=None):
"""
Get triage rules by name or description
Condition are to be read as OR
:param triage_category: 'Triage as' field of filter
:param description: Description of the triage filter
:rtype list: to be backwards compatible
"""
search_query = triage_category if triage_category else description
response = self.get_rules(contains=search_query)
return response.json()['results']
@validate_api_v2
def get_all_rules(self, **kwargs):
"""
Generator to retrieve all rules page by page - all parameters are optional
:param contains:
:param fields: comma separated string of fields to be filtered and returned
possible values are: active_detections, all_hosts, category, created_timestamp, description,
enabled, flex1, flex2, flex3, flex4, flex5, flex6, host, host_group, id, identity, ip,
ip_group, is_whitelist, last_timestamp, priority, remote1_dns, remote1_dns_groups,
remote1_ip, remote1_ip_groups, remote1_kerb_account, remote1_kerb_service, remote1_port,
remote1_proto, remote2_dns, remote2_dns_groups, remote2_ip, remote2_ip_groups, remote2_port,
remote2_proto, sensor_luid, smart_category, template, total_detections, type_vname, url
:param include_templates: include rule templates, default is False
:param ordering: field used to sort response
:param page: page number to return (int)
:param page_size: number of object to return in repsonse (int)
"""
resp = requests.get('{url}/rules'.format(url=self.url), headers=self.headers,
params=self._generate_rule_params(kwargs), verify=self.verify)
yield resp
while resp.json()['next']:
resp = self._get_request(url = resp.json()['next'])
yield resp
    @validate_api_v2
    @request_error_handler
    def create_rule(self, detection_category=None, detection_type=None, triage_category=None, is_whitelist=False, **kwargs):
        """
        Create triage rule
        :param detection_category: detection category to triage [botnet activity, command & control, reconnaissance,
        lateral movement, exfiltration]
        :param detection_type: detection type to triage
        :param triage_category: name that will be used for triaged detection
        :param description: name of the triage rule
        :param is_whitelist: set to True if rule is to whitelist; opposed to tracking detections without scores (boolean)
        :param ip: list of ip addresses to apply to triage rule
        :param ip_group: list of IP groups IDs to add to rule
        :param host: list of host ids to apply to triage rule
        :param host_group: list of Host groups IDs to add to rule
        :param sensor_luid: list of sensor luids to triage
        :param priority: used to determine order of triage filters (int)
        :param all_hosts: apply triage rule to all hosts (boolean)
        :param remote1_ip: destination IP where this Triage filter will be applied to
        :param remote1_ip_groups: destination IP Groups where this Triage filter will be applied to
        :param remote1_proto: destination protocol where this Triage filter will be applied to
        :param remote1_port: destination port where this Triage filter will be applied to
        :param remote1_dns: destination FQDN where this Triage filter will apply to
        :param remote1_dns_groups: domain groups where this Triage filter will apply to
        :param remote2_ip: destination IP where this Triage filter will be applied to
        :param remote2_ip_groups: destination IP Groups where this Triage filter will be applied to
        :param remote2_proto: destination protocol where this Triage filter will be applied to
        :param remote2_port: destination port where this Triage filter will be applied to
        :param remote2_dns: destination FQDN where this Triage filter will apply to
        :param remote2_dns_groups: domain groups where this Triage filter will apply to
        :param account: accounts where this triage filter will apply to (list)
        :param named_pipe: (Suspicious Remote Execution) named pipes where this triage filter will apply to (list)
        :param uuid: (Suspicious Remote Execution) UUID where this triage filter will apply to (list)
        :param identity: (Kerberos detection) identity where this triage filter will apply to (list)
        :param service: (PAA detections) services where this triage filter will apply to (list)
        :param file_share: (Ransomware File Activity) file share where this triage filter will apply to - escape backslashes with "\" (list)
        :param file_extensions: (Ransomware File Activity) file extensions where this triage filter will apply to (list)
        :param rdp_client_name: (Suspicious Remote Desktop) RDP client name where this triage filter will apply to (list)
        :param rdp_client_token: (Suspicious Remote Desktop) RDP client token where this triage filter will apply to (list)
        :param keyboard_name: (Suspicious Remote Desktop) RDP keyboard name where this triage filter will apply to (list)
        :raises KeyError: if any of the three mandatory arguments is missing
        :raises ValueError: if detection_category or any keyword argument is not recognized
        :returns request object
        """
        # All three identifying fields are mandatory for rule creation
        if not all([detection_category, detection_type, triage_category]):
            raise KeyError("missing required parameter: "
                           "detection_category, detection_type, triage_category")
        # NOTE(review): validation lowercases the category but the payload keeps
        # the caller's original casing - presumably the API is case-insensitive;
        # confirm against the endpoint documentation.
        if detection_category.lower() not in ['botnet activity', 'command & control', 'reconnaissance', 'lateral movement', 'exfiltration']:
            raise ValueError("detection_category not recognized")
        payload = {
            'detection_category': detection_category,
            'detection': detection_type,
            'triage_category': triage_category,
            'is_whitelist': is_whitelist
        }
        # Whitelist of optional fields accepted by the rules endpoint
        valid_keys = ['description', 'is_whitelist', 'ip', 'ip_group', 'host', 'host_group',
                      'sensor_luid', 'priority', 'all_hosts', 'remote1_ip', 'remote1_ip_groups',
                      'remote1_proto', 'remote1_port', 'remote1_dns', 'remote1_dns_groups', 'remote2_ip',
                      'remote2_ip_groups', 'remote2_proto', 'remote2_port', 'remote2_dns',
                      'remote2_dns_groups', 'account', 'named_pipe', 'uuid', 'identity', 'service',
                      'file_share', 'file_extensions', 'rdp_client_name', 'rdp_client_token', 'keyboard_name']
        # Unknown keyword arguments are rejected rather than silently dropped
        for k, v in kwargs.items():
            if k in valid_keys:
                payload[k] = v
            else:
                raise ValueError('argument {} is an invalid field for rule creation'.format(str(k)))
        return requests.post('{url}/rules'.format(url=self.url), headers=self.headers, json=payload,
                             verify=self.verify)
    @validate_api_v2
    @request_error_handler
    def update_rule(self, rule_id=None, name=None, append=False, **kwargs):
        """
        Update triage rule
        :param rule_id: id of rule to update
        :param name: name of rule to update - deprecated, must match exactly one rule
        :param append: set to True if appending to existing list (boolean)
        :param description: name of the triage rule
        :param is_whitelist: set to True if rule is to whitelist; opposed to tracking detections without scores (boolean)
        :param ip: list of ip addresses to apply to triage rule
        :param ip_group: list of IP groups IDs to add to rule
        :param host: list of host ids to apply to triage rule
        :param host_group: list of Host groups IDs to add to rule
        :param sensor_luid: list of sensor luids to triage
        :param priority: used to determine order of triage filters (int)
        :param all_hosts: apply triage rule to all hosts (boolean)
        :param remote1_ip: destination IP where this Triage filter will be applied to
        :param remote1_ip_groups: destination IP Groups where this Triage filter will be applied to
        :param remote1_proto: destination protocol where this Triage filter will be applied to
        :param remote1_port: destination port where this Triage filter will be applied to
        :param remote1_dns: destination FQDN where this Triage filter will apply to
        :param remote1_dns_groups: domain groups where this Triage filter will apply to
        :param remote2_ip: destination IP where this Triage filter will be applied to
        :param remote2_ip_groups: destination IP Groups where this Triage filter will be applied to
        :param remote2_proto: destination protocol where this Triage filter will be applied to
        :param remote2_port: destination port where this Triage filter will be applied to
        :param remote2_dns: destination FQDN where this Triage filter will apply to
        :param remote2_dns_groups: domain groups where this Triage filter will apply to
        :param account: accounts where this triage filter will apply to (list)
        :param named_pipe: (Suspicious Remote Execution) named pipes where this triage filter will apply to (list)
        :param uuid: (Suspicious Remote Execution) UUID where this triage filter will apply to (list)
        :param identity: (Kerberos detection) identity where this triage filter will apply to (list)
        :param service: (PAA detections) services where this triage filter will apply to (list)
        :param file_share: (Ransomware File Activity) file share where this triage filter will apply to - escape backslashes with "\" (list)
        :param file_extensions: (Ransomware File Activity) file extensions where this triage filter will apply to (list)
        :param rdp_client_name: (Suspicious Remote Desktop) RDP client name where this triage filter will apply to (list)
        :param rdp_client_token: (Suspicious Remote Desktop) RDP client token where this triage filter will apply to (list)
        :param keyboard_name: (Suspicious Remote Desktop) RDP keyboard name where this triage filter will apply to (list)
        :raises ValueError: if neither rule_id nor name is provided, or an invalid field is passed
        :returns request object
        """
        # Fetch-modify-put: resolve the existing rule first, mutate it locally,
        # then PUT the whole object back.
        if name:
            deprecation('The "name" argument will be removed from this function, please use get_all_rules with the "contains" query parameter')
            matching_rules = self.get_rules_by_name(triage_category=name)
            # Name lookup must be unambiguous - exactly one match is required
            if len(matching_rules) > 1:
                raise Exception('More than one rule matching the name')
            elif len(matching_rules) < 1:
                raise Exception('No rule matching the search')
            else:
                rule = matching_rules[0]
        elif rule_id:
            rule = self.get_rule_by_id(rule_id=rule_id).json()
        else:
            raise ValueError("rule name or id must be provided")
        valid_keys = ['description', 'is_whitelist', 'ip', 'ip_group', 'host', 'host_group',
                      'sensor_luid', 'priority', 'all_hosts', 'remote1_ip', 'remote1_ip_groups',
                      'remote1_proto', 'remote1_port', 'remote1_dns', 'remote1_dns_groups', 'remote2_ip',
                      'remote2_ip_groups', 'remote2_proto', 'remote2_port', 'remote2_dns',
                      'remote2_dns_groups', 'account', 'named_pipe', 'uuid', 'identity', 'service',
                      'file_share', 'file_extensions', 'rdp_client_name', 'rdp_client_token', 'keyboard_name']
        for k, v in kwargs.items():
            if k in valid_keys:
                if append:
                    # append only extends existing list fields; scalar fields
                    # are replaced even when append=True
                    if isinstance(rule[k], list):
                        rule[k] += v
                    else:
                        rule[k] = v
                else:
                    rule[k] = v
            else:
                raise ValueError('invalid parameter provided: {}'.format(str(k)))
        return requests.put('{url}/rules/{id}'.format(url=self.url, id=rule['id']), headers=self.headers, json=rule,
                            verify=self.verify)
@validate_api_v2
@request_error_handler
def delete_rule(self, rule_id=None, restore_detections=True):
"""
Delete triage rule
:param rule_id:
:param restore_detections: restore previously triaged detections (bool) default behavior is to restore
detections
"""
if not rule_id:
raise ValueError('Rule id required')
params = {
'restore_detections': restore_detections
}
return requests.delete('{url}/rules/{id}'.format(url=self.url, id=rule_id), headers=self.headers, params=params,
verify=self.verify)
@validate_api_v2
@request_error_handler
def get_groups(self, **kwargs):
"""
Query all groups - all parameters are optional
:param description: description of groups to search
:param domains: search for groups containing those domains (list)
:param host_ids: search for groups containing those host IDs (list)
:param host_names: search for groups containing those hosts (list)
:param last_modified_by: username of last person to modify this group
:param last_modified_timestamp: timestamp of last modification of group (datetime)
:param name: name of groups to search
:param page: page number to return (int)
:param page_size: number of object to return in repsonse (int)
:param type: type of group to search (domain/host/ip)
"""
return requests.get('{url}/groups'.format(url=self.url), headers=self.headers,
params=self._generate_group_params(kwargs), verify=self.verify)
@validate_api_v2
def get_all_groups(self, **kwargs):
"""
Generator to retrieve all groups - all parameters are optional
:param description: description of groups to search
:param domains: search for groups containing those domains (list)
:param host_ids: search for groups containing those host IDs (list)
:param host_names: search for groups containing those hosts (list)
:param last_modified_by: username of last person to modify this group
:param last_modified_timestamp: timestamp of last modification of group (datetime)
:param name: name of groups to search
:param page: page number to return (int)
:param page_size: number of object to return in repsonse (int)
:param type: type of group to search (domain/host/ip)
"""
resp = requests.get('{url}/groups'.format(url=self.url), headers=self.headers,
params=self._generate_group_params(kwargs), verify=self.verify)
yield resp
while resp.json()['next']:
resp = self._get_request(url = resp.json()['next'])
yield resp
@validate_api_v2
@request_error_handler
def get_group_by_id(self, group_id):
"""
Get groups by id
:param rule_id: id of group to retrieve
"""
return requests.get('{url}/groups/{id}'.format(url=self.url, id=group_id), headers=self.headers, verify=False)
@validate_api_v2
def get_groups_by_name(self, name=None, description=None):
"""
Get groups by name or description
:param name: Name of group*
:param description: Description of the group*
*params are to be read as OR
"""
if name and description:
raise Exception('Can only provide a name OR a description')
if name:
response = self.get_groups(name=name)
return response.json()['results']
elif description:
response = self.get_groups(description=description)
return response.json()['results']
@validate_api_v2
@request_error_handler
def create_group(self, name=None, description='', type=None, members=[], rules=[], **kwargs):
"""
Create group
:param name: name of the group to create
:param description: description of the group
:param type: type of the group to create (domain/host/ip)
:param members: list of host ids to add to group
:param rules: list of triage rule ids to add to group
:rtype requests.Response:
"""
if not name:
raise ValueError("missing required parameter: name")
if not type:
raise ValueError("missing required parameter: type")
if type not in ['host', 'domain', 'ip']:
raise ValueError('parameter type must have value "domain", "ip" or "host"')
if not isinstance(members, list):
raise TypeError("members must be type: list")
if not isinstance(rules, list):
raise TypeError("rules must be type: list")
payload = {
"name": name,
"description": description,
"type": type,
"members": members,
"rules": rules,
}
for k, v in kwargs.items():
if not type(v) == list:
raise TypeError("{} must be of type: list".format(k))
payload[k] = v
return requests.post('{url}/groups'.format(url=self.url), headers=self.headers, json=payload,
verify=self.verify)
    @validate_api_v2
    @request_error_handler
    def update_group(self, group_id, append=False, **kwargs):
        """
        Update group
        :param group_id: id of group to update
        :param name: name of group
        :param description: description of the group
        :param type: type of the group (domain/host/ip)
        :param members: list of host ids to add to group
        :param rules: list of rule ids to add to group
        :param append: set to True if appending to existing list (boolean)
        :raises KeyError: if the group does not exist or an invalid field is passed
        :raises TypeError: if members or rules is not a list
        """
        valid_keys = ['name', 'description', 'type', 'members', 'rules']
        # Fetch-modify-patch: load the current group, mutate locally, PATCH back
        group = self.get_group_by_id(group_id = group_id).json()
        try:
            # NOTE: "id" shadows the builtin within this method
            id = group['id']
        except KeyError:
            raise KeyError('Group with id {} was not found'.format(str(group_id)))
        # Transform members into flat list as API returns dicts for host groups
        if group['type'] == 'host':
            members = set()
            for member in group['members']:
                members.add(member['id'])
            group['members'] = list(members)
        for k, v in kwargs.items():
            if k in valid_keys and v is not None:
                if k in ['members', 'rules'] and not isinstance(v, list):
                    raise TypeError('{} must be of type: list'.format(k))
                # With append=True, += extends list fields in place;
                # NOTE(review): for scalar fields (e.g. name) += concatenates
                # strings - presumably callers only append to members/rules
                if append:
                    group[k] += v
                else:
                    group[k] = v
            else:
                raise KeyError('Key {} is not valid'.format(k))
        # De-duplicate members before sending the update
        group['members'] = list(set(group['members']))
        return requests.patch('{url}/groups/{id}'.format(url=self.url, id=id), headers=self.headers, json=group,
                              verify=self.verify)
@validate_api_v2
@request_error_handler
def delete_group(self, group_id=None):
"""
Delete group
:param group_id:
detections
"""
return requests.delete('{url}/groups/{id}'.format(url=self.url, id=group_id), headers=self.headers, verify=self.verify)
@validate_api_v2
def get_all_users(self, **kwargs):
"""
Generator to query all users
:param username: filter by username
:param role: filter by role
:param account_type: filter by account type (local, ldap, radius or tacacs)
:param authentication_profile: filter by authentication profile
:param last_login_gte: filter for users that have logged in since the given timestamp
"""
resp = requests.get('{url}/users'.format(url=self.url), headers=self.headers,
params=self._generate_user_params(kwargs), verify=self.verify)
yield resp
while resp.json()['next']:
resp = self._get_request(url = resp.json()['next'])
yield resp
@validate_api_v2
@request_error_handler
def get_user_by_id(self, user_id=None):
"""
Get users by id
:param user: id of user to retrieve
"""
if not user_id:
raise ValueError('User id required')
return requests.get('{url}/users/{id}'.format(url=self.url, id=user_id), headers=self.headers,
verify=self.verify)
@validate_api_v2
@request_error_handler
def update_user(self, user_id=None, account_type=None, authentication_profile=None):
"""
Update the authentication type for a user
:param user_id: user ID
:param account_type: new user account type (local, ldap, radius, tacacs)
:param authentication_profile: authentication profile name
"""
if not user_id:
raise ValueError('User id required')
if not account_type in ['local', 'ldap', 'radius', 'tacacs']:
raise ValueError('Invalid account_type provided')
if not authentication_profile:
raise ValueError('Authentication profile required')
payload = {
'account_type': account_type,
'authentication_profile': authentication_profile
}
return requests.patch('{url}/users/{id}'.format(url=self.url, id=user_id), json=payload, headers=self.headers,
verify=self.verify)
@validate_api_v2
@request_error_handler
def get_proxies(self, proxy_id=None):
"""
Get all defined proxies
"""
if proxy_id:
deprecation('The "proxy_id" argument will be removed from this function, please use the get_proxy_by_id() function')
return self.get_proxy_by_id(proxy_id=proxy_id)
else:
return requests.get('{url}/proxies'.format(url=self.url), headers=self.headers, verify=self.verify)
@validate_api_v2
@request_error_handler
def get_proxy_by_id(self, proxy_id=None):
"""
Get proxy by id
:param proxy_id: id of proxy to retrieve - caution those are UUIDs not int
"""
if not proxy_id:
raise ValueError('Proxy id required')
return requests.get('{url}/proxies/{id}'.format(url=self.url, id=proxy_id), headers=self.headers,
verify=self.verify)
@validate_api_v2
@request_error_handler
def add_proxy(self, address=None, enable=True):
"""
Add a proxy to the proxy list
:param address: IP address of the proxy to add
:param enable: set to true to consider the IP as a proxy, false to never consider it as proxy
"""
payload = {
"proxy": {
"address": address,
"considerProxy": enable
}
}
return requests.post('{url}/proxies'.format(url=self.url), json=payload, headers=self.headers, verify=self.verify)
# TODO PATCH request modifies the proxy ID and 404 is actually a 500 - APP-10753
@validate_api_v2
@request_error_handler
def update_proxy(self, proxy_id=None, address=None, enable=True):
"""
Update an existing proxy in the system
:param proxy_id: ID of the proxy to update
:param address: IP address to set for this proxy
:param enable: set to true to consider the IP as a proxy, false to never consider it as proxy
CAUTION: the proxy ID (ressource identifier) gets modified by the PATCH request at the moment
CAUTION: PATCHing an invalid ID returns a HTTP 500 instead of 404 at the moment
"""
if not proxy_id:
raise ValueError('Proxy id required')
payload = {"proxy": {}}
if address is not None:
payload["proxy"]["address"] = address
if enable is not None:
payload["proxy"]["considerProxy"] = enable
return requests.patch('{url}/proxies/{id}'.format(url=self.url, id=proxy_id), json=payload, headers=self.headers,
verify=self.verify)
@validate_api_v2
@request_error_handler
def delete_proxy(self,proxy_id=None):
"""
Delete a proxy from the proxy list
:param proxy_id: ID of the proxy to delete
"""
return requests.delete('{url}/proxies/{id}'.format(url=self.url, id=proxy_id), headers=self.headers,
verify=self.verify)
@validate_api_v2
@request_error_handler
def create_feed(self, name=None, category=None, certainty=None, itype=None, duration=None):
"""
Creates new threat feed
***Values for category, type, and certainty are case sensitive***
:param name: name of threat feed
:param category: category that detection will register. supported values are lateral, exfil, and cnc
:param certainty: certainty applied to detection. Supported values are Low, Medium, High
:param itype: indicator type - supported values are Anonymize, Exfiltration, Malware Artifacts, and Watchlist
:param duration: days that the threat feed will be applied
:returns: request object
"""
if not category in ['lateral', 'exfil', 'cnc']:
raise ValueError('Invalid category provided: {}'.format(category))
if not certainty in ['Low', 'Medium', 'High']:
raise ValueError('Invalid certainty provided: {}'.format(str(certainty)))
if not itype in ['Anonymize', 'Exfiltration', 'Malware Artifacts', 'Watchlist']:
raise ValueError('Invalid itype provided: {}'.format(str(itype)))
payload = {
"threatFeed": {
"name": name,
"defaults": {
"category": category,
"certainty": certainty,
"indicatorType": itype,
"duration": duration
}
}
}
return requests.post('{url}/threatFeeds'.format(url=self.url), json=payload, headers=self.headers,
verify=self.verify)
@validate_api_v2
@request_error_handler
def delete_feed(self, feed_id=None):
"""
Deletes threat feed from Vectra
:param feed_id: id of threat feed (returned by get_feed_by_name())
"""
return requests.delete('{url}/threatFeeds/{id}'.format(url=self.url, id=feed_id),
headers=self.headers, verify=self.verify)
@validate_api_v2
@request_error_handler
def get_feeds(self):
"""
Gets list of currently configured threat feeds
"""
return requests.get('{url}/threatFeeds'.format(url=self.url), headers=self.headers, verify=self.verify)
@validate_api_v2
def get_feed_by_name(self, name=None):
"""
Gets configured threat feed by name and returns id (used in conjunction with updating and deleting feeds)
:param name: name of threat feed
"""
try:
response = requests.get('{url}/threatFeeds'.format(url=self.url), headers=self.headers, verify=self.verify)
except requests.ConnectionError:
raise Exception('Unable to connect to remote host')
if response.status_code == 200:
for feed in response.json()['threatFeeds']:
if feed['name'].lower() == name.lower():
return feed['id']
else:
raise HTTPException(response)
@validate_api_v2
@request_error_handler
def post_stix_file(self, feed_id=None, stix_file=None):
"""
Uploads STIX file to new threat feed or overwrites STIX file in existing threat feed
:param feed_id: id of threat feed (returned by get_feed_by_name)
:param stix_file: stix filename
"""
return requests.post('{url}/threatFeeds/{id}'.format(url=self.url, id=feed_id), headers=self.headers,
files={'file': open(stix_file)}, verify=self.verify)
@validate_api_v2
def advanced_search(self, stype=None, page_size=50, query=None):
"""
Advanced search
:param stype: search type (hosts, detections)
:param page_size: number of objects returned per page
:param advanced query (download the following guide for more details on query language
https://support.vectranetworks.com/hc/en-us/articles/360003225254-Search-Reference-Guide)
"""
if stype not in ["hosts", "detections"]:
raise ValueError("Supported values for stype are hosts or detections")
if not query:
raise ValueError('Query parameter is required')
params = {
'page_size': page_size,
'query_string': query
}
resp = requests.get('{url}/search/{stype}'.format(url=self.url, stype=stype), headers=self.headers,
params=params, verify=self.verify)
yield resp
while resp.json()['next']:
resp = self._get_request(url=resp.json()['next'])
yield resp
@validate_api_v2
def get_all_traffic_stats(self):
"""
Generator to get all traffic stats
"""
resp = requests.get('{url}/traffic'.format(url=self.url), headers=self.headers, verify=self.verify)
yield resp
while resp.json()['next']:
resp = self._get_request(url = resp.json()['next'])
yield resp
@validate_api_v2
def get_all_sensor_traffic_stats(self, sensor_luid=None):
"""
Generator to get all traffic stats from a sensor
:param sensor_luid: LUID of the sensor for which to get the stats. Can be retrived in the UI under Manage > Sensors
"""
if not sensor_luid:
raise ValueError('Sensor LUID required')
resp = requests.get('{url}/traffic/{luid}'.format(url=self.url, luid=sensor_luid), headers=self.headers, verify=self.verify)
yield resp
while resp.json()['next']:
resp = self._get_request(url = resp.json()['next'])
yield resp
@validate_api_v2
def get_all_subnets(self, **kwargs):
"""
Generator to get all subnets seen by the brain
:param ordering: ordering key of the results.
possible values are: subnet, hosts, firstSeen, lastSeen
:param search: only return subnets containing the search string
"""
resp = requests.get('{url}/subnets'.format(url=self.url), params=self._generate_subnet_params(kwargs),
headers=self.headers, verify=self.verify)
yield resp
while resp.json()['next']:
resp = self._get_request(url = resp.json()['next'])
yield resp
@validate_api_v2
def get_all_sensor_subnets(self, sensor_luid=None, **kwargs):
"""
Generator to get all subnets seen by a sensor
:param sensor_luid: LUID of the sensor for which to get the subnets seen - required
:param ordering: ordering key of the results.
possible values are: subnet, hosts, firstSeen, lastSeen
:param search: only return subnets containing the search string
"""
if not sensor_luid:
raise ValueError('Sensor LUID required')
resp = requests.get('{url}/subnets/{luid}'.format(url=self.url, luid=sensor_luid),
params=self._generate_subnet_params(kwargs), headers=self.headers, verify=self.verify)
yield resp
while resp.json()['next']:
resp = self._get_request(url = resp.json()['next'])
yield resp
# TODO see if the endpoint should become a generator
@validate_api_v2
@request_error_handler
def get_ip_addresses(self, **kwargs):
"""
Get all active IPs seen by the brain
CAUTION: this is not a generator
:param include_ipv4: Include IPv4 addresses - default True
:param include_ipv6: Include IPv6 addresses - default True
"""
return requests.get('{url}/ip_addresses'.format(url=self.url), params=self._generate_ip_address_params(kwargs),
headers=self.headers, verify=self.verify)
@validate_api_v2
@request_error_handler
def get_internal_networks(self):
"""
Get all internal networks configured on the brain
"""
return requests.get('{url}/settings/internal_network'.format(url=self.url),
headers=self.headers, verify=self.verify)
@validate_api_v2
@request_error_handler
def set_internal_networks(self, include=[], exclude=[], drop=[], append=True):
"""
Get all internal networks configured on the brain
Set account tags
:param include: list of subnets to add the internal subnets list
:param exclude: list of subnets to exclude from the internal subnets list
:param drop: list of subnets to add to the drop list
:param append: overwrites existing lists if set to False, appends to existing tags if set to True
"""
if append and all(isinstance(i, list) for i in [include, exclude, drop]):
current_list = self.get_internal_networks().json()
# We must make all entries unique
payload = {
'include': list(set(include).union(set(current_list['included_subnets']))),
'exclude': list(set(exclude).union(set(current_list['excluded_subnets']))),
'drop': list(set(drop).union(set(current_list['dropped_subnets'])))
}
elif all(isinstance(i, list) for i in [include, exclude, drop]):
payload = {
'include': include,
'exclude': exclude,
'drop': drop
}
else:
raise TypeError('subnets must be of type list')
return requests.post('{url}/settings/internal_network'.format(url=self.url),
json=payload, headers=self.headers, verify=self.verify)
# TODO see if check parameter has been fixed - APP-10753
@request_error_handler
def get_health_check(self, check=None):
"""
Get health statistics for the appliance
:param check: specific check to run - optional
CAUTION: the check parameter is broken for the time being
"""
if not check:
return requests.get('{url}/health'.format(url=self.url), headers=self.headers, verify=self.verify)
else:
if not isinstance(check, str):
raise ValueError('check need to be a string')
return requests.get('{url}/health/{check}'.format(url=self.url, check=check), headers=self.headers, verify=self.verify)
class VectraClientV2_1(VectraClient):
def __init__(self, url=None, token=None, verify=False):
"""
Initialize Vectra client
:param url: IP or hostname of Vectra brain (ex https://www.example.com) - required
:param token: API token for authentication when using API v2*
:param verify: Verify SSL (default: False) - optional
"""
super().__init__(url=url, token=token, verify=verify)
# Remove potential trailing slash
url = VectraClient._remove_trailing_slashes(url)
# Set endpoint to APIv2.1
self.url = '{url}/api/v2.1'.format(url=url)
@staticmethod
def _generate_account_params(args):
"""
Generate query parameters for accounts based provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['all', 'c_score', 'c_score_gte', 'certainty', 'certainty_gte', 'fields', 'first_seen',
'include_detection_summaries', 'last_seen', 'last_source', 'max_id', 'min_id', 'name',
'note_modified_timestamp_gte', 'ordering', 'page', 'page_size', 'privilege_category',
'privilege_level', 'privilege_level_gte', 'state', 't_score', 't_score_gte', 'tags',
'threat', 'threat_gte', 'uid']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid account query parameter'.format(str(k)))
return params
    @staticmethod
    def _generate_detect_usage_params(args):
        """
        Generate query parameters for detect usage query based on provided args.
        Accepted keys are 'start' and 'end'; values must begin with a
        YYYY-MM date prefix.
        :param args: dict of keys to generate query params
        :raises ValueError: if a key is unknown or a value fails date validation
        :rtype: dict
        """
        params = {}
        # NOTE(review): match() only anchors at the start of the string, so any
        # trailing characters after "YYYY-MM" are accepted - confirm whether a
        # full match is intended.
        search = re.compile('[0-9]{4}-[0-9]{2}')
        valid_keys = ['start', 'end']
        for k, v in args.items():
            if k in valid_keys:
                if v is not None:
                    # We validate the parameters here as the error thrown by the endpoint is not very verbose
                    if search.match(v):
                        params[k] = v
                    else:
                        raise ValueError('{} is not a valid date string for detect usage query'.format(str(v)))
            else:
                raise ValueError('argument {} is an invalid detect usage query parameter'.format(str(k)))
        return params
    def get_campaigns(self, **kwargs):
        # Hard-removed in APIv2.1: raises (does not warn) to force callers onto
        # the paginated get_all_campaigns()
        raise DeprecationWarning('This function has been deprecated in the Vectra API client v2.1. Please use get_all_campaigns() which supports pagination')
    def get_hosts(self, **kwargs):
        # Hard-removed in APIv2.1: raises (does not warn) to force callers onto
        # the paginated get_all_hosts()
        raise DeprecationWarning('This function has been deprecated in the Vectra API client v2.1. Please use get_all_hosts() which supports pagination')
    def get_detections(self, **kwargs):
        # Hard-removed in APIv2.1: raises (does not warn) to force callers onto
        # the paginated get_all_detections()
        raise DeprecationWarning('This function has been deprecated in the Vectra API client v2.1. Please use get_all_detections() which supports pagination')
def get_all_accounts(self, **kwargs):
    """
    Generator that yields one requests.Response per page of accounts.

    All parameters are optional and forwarded as query parameters when not None:
    all, c_score, c_score_gte, certainty, certainty_gte, fields, first_seen,
    include_detection_summaries, last_seen, last_source, max_id, min_id, name,
    note_modified_timestamp_gte, ordering, page, page_size, privilege_category,
    privilege_level, privilege_level_gte, state, t_score, t_score_gte, tags,
    threat, threat_gte, uid

    The *_score variants duplicate threat/certainty and will be removed with
    the deprecation of v1 of the api.
    """
    resp = requests.get('{url}/accounts'.format(url=self.url), headers=self.headers,
                        params=self._generate_account_params(kwargs), verify=self.verify)
    while True:
        yield resp
        next_page = resp.json()['next']
        if not next_page:
            break
        resp = self._get_request(url=next_page)
@request_error_handler
def get_account_by_id(self, account_id=None, **kwargs):
    """
    Get a single account by id.

    :param account_id: account id - required
    :param fields: comma separated string of fields to be filtered and returned - optional
        possible values are id, url, name, state, threat, certainty, severity, account_type,
        tags, note, note_modified_by, note_modified_timestamp, privilege_level, privilege_category,
        last_detection_timestamp, detection_set, probable_home
    :raises ValueError: when account_id is missing
    """
    if not account_id:
        raise ValueError('Account id required')
    account_url = '{url}/accounts/{id}'.format(url=self.url, id=account_id)
    return requests.get(account_url, headers=self.headers,
                        params=self._generate_account_params(kwargs), verify=self.verify)
@request_error_handler
def get_account_tags(self, account_id=None):
    """
    Get Account tags

    :param account_id: ID of the account for which to retrieve the tags
    """
    # Bug fix: honor the client's TLS verification setting instead of a
    # hard-coded verify=False, matching every other endpoint wrapper in this class.
    return requests.get('{url}/tagging/account/{id}'.format(url=self.url, id=account_id), headers=self.headers,
                        verify=self.verify)
@request_error_handler
def set_account_tags(self, account_id=None, tags=None, append=False):
    """
    Set account tags

    :param account_id: ID of the account for which to set the tags
    :param tags: list of tags to add to account; an empty list (the default) clears tags
    :param append: overwrites existing list if set to False, appends to existing tags if set to True
    :raises TypeError: if tags is not a list
    """
    # Bug fix: the old default tags=[] was a shared mutable default argument;
    # None now stands in for "empty list" and is normalized per call.
    tags = [] if tags is None else tags
    if not isinstance(tags, list):
        raise TypeError('tags must be of type list')
    if append:
        # Merge with the tags currently set on the account.
        current_list = self.get_account_tags(account_id=account_id).json()['tags']
        payload = {
            "tags": current_list + tags
        }
    else:
        payload = {
            "tags": tags
        }
    headers = self.headers.copy()
    headers.update({
        'Content-Type': "application/json",
        'Cache-Control': "no-cache"
    })
    return requests.patch('{url}/tagging/account/{id}'.format(url=self.url, id=account_id), headers=headers,
                          json=payload, verify=self.verify)
@request_error_handler
def bulk_set_accounts_tag(self, tag, account_ids):
    """
    Set a tag in bulk on multiple accounts. Only one tag can be set at a time

    :param tag: tag to set
    :param account_ids: IDs of the accounts for which to set the tag
    :raises TypeError: if account_ids is not a list
    """
    if not isinstance(account_ids, list):
        raise TypeError('account IDs must be of type list')
    payload = {
        'objectIds': account_ids,
        'tag': tag
    }
    # Bug fix: use the configured TLS verification instead of hard-coded verify=False.
    return requests.post('{url}/tagging/account'.format(url=self.url), headers=self.headers, json=payload,
                         verify=self.verify)
@request_error_handler
def bulk_delete_accounts_tag(self, tag, account_ids):
    """
    Delete a tag in bulk on multiple accounts. Only one tag can be deleted at a time

    :param tag: tag to delete
    :param account_ids: IDs of the accounts on which to delete the tag
    :raises TypeError: if account_ids is not a list
    """
    if not isinstance(account_ids, list):
        raise TypeError('account IDs must be of type list')
    payload = {
        'objectIds': account_ids,
        'tag': tag
    }
    # Bug fix: use the configured TLS verification instead of hard-coded verify=False.
    return requests.delete('{url}/tagging/account'.format(url=self.url), headers=self.headers, json=payload,
                           verify=self.verify)
@request_error_handler
def get_account_note(self, account_id=None):
    """
    Get account notes

    :param account_id: ID of the account for which to retrieve the note
    For consistency we return a requests.models.Response object
    As we do not want to return the complete host body, we alter the response content
    """
    account = requests.get('{url}/accounts/{id}'.format(url=self.url, id=account_id), headers=self.headers, verify=self.verify)
    if account.status_code == 200:
        account_note = account.json()['note']
        # API endpoint return HTML escaped characters
        account_note = html.unescape(account_note) if account_note else ''
        json_dict = {'status': 'success', 'account_id': str(account_id), 'note': account_note}
        # HACK: rewrite the private Response._content so callers see only the
        # note payload instead of the full account body; relies on requests internals.
        account._content = json.dumps(json_dict).encode('utf-8')
    return account
# TODO check if PATCH endpoint has been implemented on accounts
def set_account_note(self, account_id=None, note='', append=False):
    """Not supported yet: the appliance exposes no PATCH on /accounts."""
    raise NotImplementedError('The PATCH endpoint is not yet implemented on /accounts')
@request_error_handler
def get_locked_accounts(self):
    """
    Get list of account locked by Account Lockdown
    """
    lockdown_url = '{url}/lockdown/account'.format(url=self.url)
    return requests.get(lockdown_url, headers=self.headers, verify=self.verify)
def get_rules(self, **kwargs):
    """Removed in API client v2.1 - use the paginated get_all_rules() instead."""
    message = ('This function has been deprecated in the Vectra API client v2.1. '
               'Please use get_all_rules() which supports pagination')
    raise DeprecationWarning(message)
def advanced_search(self, stype=None, page_size=50, query=None):
    """
    Advanced search generator: yields one requests.Response per result page.

    :param stype: search type (hosts, detections, accounts)
    :param page_size: number of objects returned per page
    :param query: advanced query string (download the following guide for more details on
        query language: https://support.vectranetworks.com/hc/en-us/articles/360003225254-Search-Reference-Guide)
    :raises ValueError: on unsupported stype or missing query (raised on first iteration)
    """
    if stype not in ('hosts', 'detections', 'accounts'):
        raise ValueError("Supported values for stype are hosts, detections or accounts")
    if not query:
        raise ValueError('Query parameter is required')
    resp = requests.get('{url}/search/{stype}'.format(url=self.url, stype=stype), headers=self.headers,
                        params={'page_size': page_size, 'query_string': query}, verify=self.verify)
    while True:
        yield resp
        next_page = resp.json()['next']
        if not next_page:
            break
        resp = self._get_request(url=next_page)
@request_error_handler
def get_rule_by_id(self, rule_id, **kwargs):
    """
    Get triage rules by id

    :param rule_id: id of triage rule to retrieve
    :param fields: comma separated string of fields to be filtered and returned
        possible values are: active_detections, additional_conditions, created_timestamp,
        description, detection, detection_category, enabled, id, is_whitelist, last_timestamp,
        priority, source_conditions, template, total_detections, triage_category, url
    :raises ValueError: when rule_id is missing
    """
    if not rule_id:
        raise ValueError('Rule id required')
    # Bug fix: honor the client's TLS verification setting instead of a
    # hard-coded verify=False.
    return requests.get('{url}/rules/{id}'.format(url=self.url, id=rule_id), headers=self.headers,
                        params=self._generate_rule_by_id_params(kwargs), verify=self.verify)
def get_rules_by_name(self, triage_category=None, description=None):
    """Removed in API client v2.1 - filter get_all_rules() with 'contains' instead."""
    message = ('This function has been deprecated in the Vectra API client v2.1. '
               'Please use get_all_rules with the "contains" query parameter')
    raise DeprecationWarning(message)
def get_all_rules(self, **kwargs):
    """
    Generator that yields one requests.Response per page of triage rules.

    All parameters are optional:
    :param contains: search for rules containing this string (substring matching)
    :param fields: comma separated string of fields to be filtered and returned
        possible values are: active_detections, additional_conditions, created_timestamp,
        description, detection, detection_category, enabled, id, is_whitelist, last_timestamp,
        priority, source_conditions, template, total_detections, triage_category, url
    :param include_templates: include rule templates, default is False
    :param ordering: field used to sort response
    :param page: page number to return (int)
    :param page_size: number of objects to return in response (int)
    """
    resp = requests.get('{url}/rules'.format(url=self.url), headers=self.headers,
                        params=self._generate_rule_params(kwargs), verify=self.verify)
    while True:
        yield resp
        next_page = resp.json()['next']
        if not next_page:
            break
        resp = self._get_request(url=next_page)
#TODO wait on fix
# CAUTION: this returns an error 500 altough the rule has been created succesfully\
# when source_conditions and/or additional_conditions are empty - APP-11016
@request_error_handler
def create_rule(self, detection_category=None, detection_type=None, triage_category=None,
        source_conditions=None, additional_conditions=None, is_whitelist=False, **kwargs):
    """
    Create triage rule

    :param detection_category: detection category to triage - required
        possible values are: botnet activity, command & control, reconnaissance,
        lateral movement, exfiltration
    :param detection_type: detection type to triage - required
    :param triage_category: name that will be used for triaged detection - required
    :param source_conditions: tree-like conditional structure (JSON blob); defaults to {'OR': []}
        operators for leaf nodes: ANY_OF or NONE_OF
        operators for non-leaf nodes: AND or OR
        possible values for conditions: ip, host, account, sensor
        example leaf: {"ANY_OF": {"field": "ip", "values": [{"value": "10.45.91.184",
        "label": "10.45.91.184"}], "groups": [], "label": "IP"}}
    :param additional_conditions: same structure as source_conditions; defaults to {'OR': []}
        possible values for conditions: remote1_ip, remote1_ip_groups, remote1_proto,
        remote1_port, remote1_dns, remote1_dns_groups, remote2_ip, remote2_ip_groups,
        remote2_proto, remote2_port, remote2_dns, remote2_dns_groups, account, named_pipe,
        uuid, identity, service, file_share, file_extensions, rdp_client_name,
        rdp_client_token, keyboard_name
    :param is_whitelist: set to True if rule is a whitelist, opposed to tracking detections without scores (boolean)
    :returns: request object
    :raises ValueError: on missing required parameters or unrecognized detection_category
    """
    # Bug fix: the old defaults were shared mutable dicts ({'OR': []}) that a
    # caller mutating its conditions would corrupt for every later call.
    if source_conditions is None:
        source_conditions = {'OR': []}
    if additional_conditions is None:
        additional_conditions = {'OR': []}
    if not all([detection_category, detection_type, triage_category]):
        raise ValueError('Missing required parameter')
    if detection_category.lower() not in ['botnet activity', 'command & control', 'reconnaissance', 'lateral movement', 'exfiltration']:
        raise ValueError("detection_category not recognized")
    # NOTE(review): **kwargs (e.g. description, priority) are accepted but not
    # forwarded to the API, matching the original behavior - confirm intent.
    payload = {
        'detection_category': detection_category,
        'detection': detection_type,
        'triage_category': triage_category,
        'is_whitelist': is_whitelist,
        'source_conditions': source_conditions,
        'additional_conditions': additional_conditions
    }
    return requests.post('{url}/rules'.format(url=self.url), headers=self.headers, json=payload,
                         verify=self.verify)
#TODO wait on fix
# CAUTION: this returns an error 500 altough the rule has been updated succesfully\
# when source_conditions and/or additional_conditions are empty - APP-11016
# CAUTION2: API will error out if original rule has empty source or additional_conditions and\
# payload has non-empty conditions - APP-11016
@request_error_handler
def update_rule(self, rule_id=None, **kwargs):
    """
    Update triage rule

    :param rule_id: id of rule to update - required
    Accepted keyword arguments (all optional):
        triage_category: name that will be used for triaged detection
        source_conditions: tree-like conditional structure (JSON blob);
            leaf operators ANY_OF/NONE_OF, non-leaf operators AND/OR;
            condition fields: ip, host, account, sensor
        additional_conditions: same structure; condition fields: remote1_ip,
            remote1_ip_groups, remote1_proto, remote1_port, remote1_dns,
            remote1_dns_groups, remote2_ip, remote2_ip_groups, remote2_proto,
            remote2_port, remote2_dns, remote2_dns_groups, account, named_pipe,
            uuid, identity, service, file_share, file_extensions, rdp_client_name,
            rdp_client_token, keyboard_name
        is_whitelist: set to True if rule is a whitelist, opposed to tracking
            detections without scores (boolean)
        description: name of the triage rule
        priority: used to determine order of triage filters (int)
        enabled: is the rule currently enabled (boolean) - Not yet implemented!
    :returns: request object
    :raises ValueError: when rule_id is missing or an unknown keyword is passed
    """
    if not rule_id:
        raise ValueError("rule id must be provided")
    # Fetch the current rule, overlay the requested changes, then PUT it back whole.
    rule = self.get_rule_by_id(rule_id=rule_id).json()
    valid_keys = ('description', 'priority', 'enabled', 'triage_category',
                  'is_whitelist', 'source_conditions', 'additional_conditions')
    for key, value in kwargs.items():
        if key not in valid_keys:
            raise ValueError('invalid parameter provided: {}'.format(str(key)))
        rule[key] = value
    return requests.put('{url}/rules/{id}'.format(url=self.url, id=rule['id']), headers=self.headers, json=rule,
                        verify=self.verify)
def get_groups(self, **kwargs):
    """Removed in API client v2.1 - use the paginated get_all_groups() instead."""
    message = ('This function has been deprecated in the Vectra API client v2.1. '
               'Please use get_all_groups() which supports pagination')
    raise DeprecationWarning(message)
def get_groups_by_name(self, name=None, description=None):
    """Removed in API client v2.1 - filter get_all_groups() with 'description' instead."""
    message = ('This function has been deprecated in the Vectra API client v2.1. '
               'Please use get_all_groups with the "description" query parameter')
    raise DeprecationWarning(message)
def get_detect_usage(self, **kwargs):
    """
    Get average monthly IP count for Detect

    :param start: starting month for the usage statistics - format YYYY-mm
    :param end: end month for the usage statistics - format YYYY-mm
    Default is statistics from last month
    """
    # Repaired: the final line of this method was corrupted by an extraction
    # artifact ('| modules/vectra.py |') fused into the source.
    return requests.get('{url}/usage/detect'.format(url=self.url), params=self._generate_detect_usage_params(kwargs),
                        headers=self.headers, verify=self.verify)

import json
import requests
import warnings
import html
import re
warnings.filterwarnings('always', '.*', PendingDeprecationWarning)
class HTTPException(Exception):
    def __init__(self, response):
        """
        Custom exception class to report possible API errors

        The message is built from the most specific error field the response
        body exposes ('detail', 'errors', or '_meta'), falling back to the raw
        response content when the body cannot be parsed or matches none of them.
        """
        detail = response.content
        try:
            payload = response.json()
            if 'detail' in payload:
                detail = payload['detail']
            elif 'errors' in payload:
                detail = payload['errors'][0]['title']
            elif '_meta' in payload:
                detail = payload['_meta']['message']
        except Exception:
            detail = response.content
        body = 'Status code: {code} - {detail}'.format(code=str(response.status_code), detail=detail)
        super().__init__(body)
def request_error_handler(func):
    """Decorator: pass through 200/201/204 responses, raise HTTPException otherwise."""
    def request_handler(self, *args, **kwargs):
        response = func(self, *args, **kwargs)
        if response.status_code not in (200, 201, 204):
            raise HTTPException(response)
        return response
    return request_handler
def validate_api_v2(func):
    """Decorator: allow the call only when the client speaks API v2."""
    def api_validator(self, *args, **kwargs):
        if self.version != 2:
            raise NotImplementedError('Method only accessible via v2 of API')
        return func(self, *args, **kwargs)
    return api_validator
def deprecation(message):
    """Emit *message* as a PendingDeprecationWarning."""
    warnings.warn(message, PendingDeprecationWarning)
def param_deprecation(key):
    """
    Warn that query parameter *key* is tied to API v1 and will eventually go away.
    """
    # Typo fix in the user-facing warning: 'annouced' -> 'announced'.
    message = '{0} will be deprecated with Vectra API v1 which will be announced in an upcoming release'.format(key)
    warnings.warn(message, PendingDeprecationWarning)
class VectraClient(object):
def __init__(self, url=None, token=None, user=None, password=None, verify=False):
    """
    Initialize Vectra client

    :param url: IP or hostname of Vectra brain (ex https://www.example.com) - required
    :param token: API token for authentication when using API v2*
    :param user: Username to authenticate to Vectra brain when using API v1*
    :param password: Password when using username to authenticate using API v1*
    :param verify: Verify SSL (default: False) - optional
    *Either token or user are required
    :raises RuntimeError: when neither a token nor user/password credentials are supplied
    """
    self.url = url
    # API version is inferred from the credential kind: a token implies v2.
    self.version = 2 if token else 1
    self.verify = verify
    url = VectraClient._remove_trailing_slashes(url)
    if token:
        # v2 authenticates every request with a token header.
        self.url = '{url}/api/v2'.format(url=url)
        self.headers = {
            'Authorization': "Token " + token.strip(),
            'Content-Type': "application/json",
            'Cache-Control': "no-cache"
        }
    elif user and password:
        # v1 uses HTTP basic auth; kept only for backward compatibility.
        self.url = '{url}/api'.format(url=url)
        self.auth = (user, password)
        deprecation('Deprecation of the Vectra API v1 will be announced in an upcoming release. Migrate to API v2'
                    ' when possible')
    else:
        raise RuntimeError("At least one form of authentication is required. Please provide a token or username"
                           " and password")
@staticmethod
def _remove_trailing_slashes(url):
url = url[:-1] if url.endswith('/') else url
return url
@staticmethod
def _generate_campaign_params(args):
"""
Generate query parameters for campaigns based on provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['fields', 'dst_ip', 'target_domain', 'state', 'name', 'last_updated_gte',
'note_modified_timestamp_gte','page', 'page_size']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid campaign query parameter'.format(str(k)))
return params
@staticmethod
def _generate_host_params(args):
"""
Generate query parameters for hosts based on provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['active_traffic', 'all', 'c_score', 'c_score_gte', 'certainty', 'certainty_gte',
'fields', 'has_active_traffic', 'include_detection_summaries', 'is_key_asset', 'is_targeting_key_asset',
'key_asset', 'last_detection_timestamp', 'last_source', 'mac_address', 'max_id', 'min_id',
'name', 'note_modified_timestamp_gte', 'ordering','page', 'page_size', 'privilege_category',
'privilege_level', 'privilege_level_gte', 'state', 't_score', 't_score_gte', 'tags',
'targets_key_asset', 'threat', 'threat_gte']
deprecated_keys = ['c_score', 'c_score_gte', 'key_asset', 't_score', 't_score_gte', 'targets_key_asset']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid host query parameter'.format(str(k)))
if k in deprecated_keys: param_deprecation(k)
return params
@staticmethod
def _generate_host_by_id_params(args):
"""
Generate query parameters for host based on provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['fields', 'include_external', 'include_ldap']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid host query parameter'.format(str(k)))
return params
@staticmethod
def _generate_detection_params(args):
"""
Generate query parameters for detections based on provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['c_score', 'c_score_gte', 'category', 'certainty', 'certainty_gte', 'description',
'detection', 'detection_category', 'detection_type', 'fields', 'host_id', 'is_targeting_key_asset',
'is_triaged', 'last_timestamp', 'max_id', 'min_id', 'note_modified_timestamp_gte', 'ordering',
'page', 'page_size', 'src_ip', 'state', 't_score', 't_score_gte', 'tags', 'targets_key_asset',
'threat', 'threat_gte']
deprecated_keys = ['c_score', 'c_score_gte', 'category', 'detection', 't_score', 't_score_gte', 'targets_key_asset']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid detection query parameter'.format(str(k)))
if k in deprecated_keys: param_deprecation(k)
return params
@staticmethod
def _generate_group_params(args):
"""
Generate query parameters for groups based on provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['description', 'domains', 'host_ids', 'host_names', 'last_modified_by',
'last_modified_timestamp', 'name', 'page', 'page_size', 'type']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid group query parameter'.format(str(k)))
return params
@staticmethod
def _generate_rule_params(args):
"""
Generate query parameters for rules based on provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['contains', 'fields', 'include_templates', 'page', 'page_size', 'ordering']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid rule query parameter'.format(str(k)))
return params
@staticmethod
def _generate_rule_by_id_params(args):
"""
Generate query parameters for rule based on provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['fields']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid rule query parameter'.format(str(k)))
return params
@staticmethod
def _generate_user_params(args):
"""
Generate query parameters for users based on provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['username', 'role', 'account_type', 'authentication_profile', 'last_login_gte']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid user query parameter'.format(str(k)))
return params
@staticmethod
def _generate_ip_address_params(args):
"""
Generate query parameters for ip address queries based on provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['include_ipv4', 'include_ipv6']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid ip address query parameter'.format(str(k)))
return params
@staticmethod
def _generate_subnet_params(args):
"""
Generate query parameters for subnet queries based on provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['ordering', 'search']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid subnet query parameter'.format(str(k)))
return params
@staticmethod
def _generate_internal_network_params(args):
"""
Generate query parameters for internal network queries based on provided argsbased on provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['include_ipv4', 'include_ipv6']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid internal network query parameter'.format(str(k)))
return params
@validate_api_v2
@request_error_handler
def _get_request(self, url, **kwargs):
    """
    Do a get request on the provided URL
    This is used by paginated endpoints

    :param url: fully-qualified URL (typically the 'next' link of a previous page)
    :rtype: requests.Response
    """
    params = {}
    for k, v in kwargs.items():
        params[k] = v
    if self.version == 2:
        return requests.get(url, headers=self.headers, params=params, verify=self.verify)
    else:
        # NOTE(review): unreachable - @validate_api_v2 raises for any version != 2,
        # so this v1 basic-auth branch can never execute.
        return requests.get(url, auth=self.auth, params=params, verify=self.verify)
@validate_api_v2
@request_error_handler
def get_campaigns(self, **kwargs):
    """
    Query all campaigns - all parameters are optional

    :param dst_ip: filter on campaign destination IP
    :param target_domain: filter on campaign destination domain
    :param state: campaign state, possible values are: init, active, closed, closed_never_active
    :param name: filter on campaign name
    :param last_updated_gte: return only campaigns with a last updated timestamp gte (datetime)
    :param note_modified_timestamp_gte: return only campaigns with a last updated timestamp on their note gte (datetime)
    :param fields: comma separated string of fields to be filtered and returned
        possible values are: id, dst_ip, target_domain, state, name, last_updated,
        note, note_modified_by, note_modified_timestamp
    :param page: page number to return (int)
    :param page_size: number of objects to return in response (int)
    """
    campaigns_url = '{url}/campaigns'.format(url=self.url)
    return requests.get(campaigns_url, headers=self.headers,
                        params=self._generate_campaign_params(kwargs), verify=self.verify)
def get_all_campaigns(self, **kwargs):
    """
    Generator that yields one requests.Response per page of campaigns.

    :param dst_ip: filter on campaign destination IP
    :param target_domain: filter on campaign destination domain
    :param state: campaign state, possible values are: init, active, closed, closed_never_active
    :param name: filter on campaign name
    :param last_updated_gte: return only campaigns with a last updated timestamp gte (datetime)
    :param note_modified_timestamp_gte: return only campaigns with a last updated timestamp on their note gte (datetime)
    :param fields: comma separated string of fields to be filtered and returned
        possible values are: id, dst_ip, target_domain, state, name, last_updated,
        note, note_modified_by, note_modified_timestamp
    :param page: page number to return (int)
    :param page_size: number of objects to return in response (int)
    """
    resp = requests.get('{url}/campaigns'.format(url=self.url), headers=self.headers,
                        params=self._generate_campaign_params(kwargs), verify=self.verify)
    while True:
        yield resp
        next_page = resp.json()['next']
        if not next_page:
            break
        resp = self._get_request(url=next_page)
@validate_api_v2
@request_error_handler
def get_campaign_by_id(self, campaign_id=None, **kwargs):
    """
    Get campaign by id

    :param campaign_id: id of the campaign to retrieve - required
    :raises ValueError: when campaign_id is missing
    """
    if not campaign_id:
        raise ValueError('Campaign id required')
    campaign_url = '{url}/campaigns/{id}'.format(url=self.url, id=campaign_id)
    return requests.get(campaign_url, headers=self.headers, verify=self.verify)
@request_error_handler
def get_hosts(self, **kwargs):
    """
    Query all hosts - all parameters are optional

    :param all: if set to False, endpoint will only return hosts that have active detections, active traffic or are marked as key assets - default False
    :param active_traffic: only return hosts that have seen traffic in the last 2 hours (bool)
    :param c_score: certainty score (int) - will be removed with deprecation of v1 of api
    :param c_score_gte: certainty score greater than or equal to (int) - will be removed with deprecation of v1 of api
    :param certainty: certainty score (int)
    :param certainty_gte: certainty score greater than or equal to (int)
    :param fields: comma separated string of fields to be filtered and returned
        possible values are: id,name,active_traffic,has_active_traffic,t_score,threat,c_score,
        certainty,severity,last_source,ip,previous_ips,last_detection_timestamp,key_asset,
        is_key_asset,state,targets_key_asset,is_targeting_key_asset,detection_set,
        host_artifact_set,sensor,sensor_name,tags,note,note_modified_by,note_modified_timestamp,
        url,host_url,last_modified,assigned_to,assigned_date,groups,has_custom_model,privilege_level,
        privilege_category,probable_owner,detection_profile
    :param has_active_traffic: host has active traffic (bool)
    :param include_detection_summaries: include detection summary in response (bool)
    :param is_key_asset: host is key asset (bool)
    :param is_targeting_key_asset: host is targeting key asset (bool)
    :param key_asset: host is key asset (bool) - will be removed with deprecation of v1 of api
    :param last_detection_timestamp: timestamp of last detection on this host (datetime)
    :param last_source: registered ip address of host
    :param mac_address: registered mac address of host
    :param max_id: maximum ID of host returned
    :param min_id: minimum ID of host returned
    :param name: registered name of host
    :param note_modified_timestamp_gte: note last modified timestamp greater than or equal to (datetime)
    :param ordering: field to use to order response
    :param page: page number to return (int)
    :param page_size: number of objects to return in response (int)
    :param privilege_category: privilege category of host (low/medium/high)
    :param privilege_level: privilege level of host (0-10)
    :param privilege_level_gte: privilege level of host greater than or equal to (int)
    :param state: state of host (active/inactive)
    :param t_score: threat score (int) - will be removed with deprecation of v1 of api
    :param t_score_gte: threat score greater than or equal to (int) - will be removed with deprecation of v1 of api
    :param tags: tags assigned to host
    :param targets_key_asset: host is targeting key asset (bool)
    :param threat: threat score (int)
    :param threat_gte: threat score greater than or equal to (int)
    """
    # v2 authenticates with a token header, v1 with HTTP basic auth.
    if self.version == 2:
        return requests.get('{url}/hosts'.format(url=self.url), headers=self.headers,
                            params=self._generate_host_params(kwargs), verify=self.verify)
    else:
        return requests.get('{url}/hosts'.format(url=self.url), auth=self.auth,
                            params=self._generate_host_params(kwargs), verify=self.verify)
    def get_all_hosts(self, **kwargs):
        """
        Generator to retrieve all hosts page by page - all parameters are optional

        Yields one requests.Response per page and follows the JSON "next" link
        until the API reports no further pages.
        :param all: if set to False, endpoint will only return hosts that have active detections, active traffic or are marked as key assets - default False
        :param active_traffic: only return hosts that have seen traffic in the last 2 hours (bool)
        :param c_score: certainty score (int) - will be removed with deprecation of v1 of api
        :param c_score_gte: certainty score greater than or equal to (int) - will be removed with deprecation of v1 of api
        :param certainty: certainty score (int)
        :param certainty_gte: certainty score greater than or equal to (int)
        :param fields: comma separated string of fields to be filtered and returned
            possible values are: id,name,active_traffic,has_active_traffic,t_score,threat,c_score,
            certainty,severity,last_source,ip,previous_ips,last_detection_timestamp,key_asset,
            is_key_asset,state,targets_key_asset,is_targeting_key_asset,detection_set,
            host_artifact_set,sensor,sensor_name,tags,note,note_modified_by,note_modified_timestamp,
            url,host_url,last_modified,assigned_to,assigned_date,groups,has_custom_model,privilege_level,
            privilege_category,probable_owner,detection_profile
        :param has_active_traffic: host has active traffic (bool)
        :param include_detection_summaries: include detection summary in response (bool)
        :param is_key_asset: host is key asset (bool)
        :param is_targeting_key_asset: host is targeting key asset (bool)
        :param key_asset: host is key asset (bool) - will be removed with deprecation of v1 of api
        :param last_detection_timestamp: timestamp of last detection on this host (datetime)
        :param last_modified_timestamp_gte: last modified timestamp greater than or equal to (datetime)
        :param last_source: registered ip address of host
        :param mac_address: registered mac address of host
        :param max_id: maximum ID of host returned
        :param min_id: minimum ID of host returned
        :param name: registered name of host
        :param note_modified_timestamp_gte: note last modified timestamp greater than or equal to (datetime)
        :param ordering: field to use to order response
        :param page: page number to return (int)
        :param page_size: number of objects to return in response (int)
        :param privilege_category: privilege category of host (low/medium/high)
        :param privilege_level: privilege level of host (0-10)
        :param privilege_level_gte: privilege level of host greater than or equal to (int)
        :param state: state of host (active/inactive)
        :param t_score: threat score (int) - will be removed with deprecation of v1 of api
        :param t_score_gte: threat score greater than or equal to (int) - will be removed with deprecation of v1 of api
        :param tags: tags assigned to host
        :param targets_key_asset: host is targeting key asset (bool)
        :param threat: threat score (int)
        :param threat_gte: threat score greater than or equal to (int)
        """
        resp = requests.get('{url}/hosts'.format(url=self.url), headers=self.headers,
                            params=self._generate_host_params(kwargs), verify=self.verify)
        yield resp
        while resp.json()['next']:
            resp = self._get_request(url=resp.json()['next'])
            yield resp
@request_error_handler
def get_host_by_id(self, host_id=None, **kwargs):
"""
Get host by id
:param host_id: host id - required
:param include_external: include fields regarding external connectors (e.g. CrowdStrike) - optional
:param include_ldap: include LDAP context pulled over AD connector - optional
:param fields: comma separated string of fields to be filtered and returned - optional
possible values are: active_traffic, assigned_date, assigned_to, c_score, campaign_summaries,
carbon_black, certainty, crowdstrike, detection_profile, detection_set, detection_summaries,
groups, has_active_traffic, has_custom_model, has_shell_knocker_learnings, host_artifact_set,
host_luid, host_session_luid, host_url, id, ip, is_key_asset, is_targeting_key_asset, key_asset,
last_detection_timestamp, last_modified, last_seen, last_source, ldap, name, note, note_modified_by,
note_modified_timestamp, previous_ips, privilege_category, privilege_level, probable_owner, sensor,
sensor_name, severity, shell_knocker, state, suspicious_admin_learnings, t_score, tags, targets_key_asset,
threat, url, vcenter
"""
if not host_id:
raise ValueError('Host id required')
if self.version == 2:
return requests.get('{url}/hosts/{id}'.format(url=self.url, id=host_id), headers=self.headers,
params=self._generate_host_by_id_params(kwargs), verify=self.verify)
else:
return requests.get('{url}/hosts/{id}'.format(url=self.url, id=host_id), auth=self.auth,
params=self._generate_host_by_id_params(kwargs), verify=self.verify)
@validate_api_v2
@request_error_handler
def set_key_asset(self, host_id=None, set=True):
"""
(Un)set host as key asset
:param host_id: id of host needing to be set - required
:param set: set flag to true if setting host as key asset
"""
if not host_id:
raise ValueError('Host id required')
if set:
payload = {'key_asset':'true'}
else:
payload = {'key_asset':'false'}
return requests.patch('{url}/hosts/{id}'.format(url=self.url, id=host_id), headers=self.headers, json=payload,
verify=self.verify)
@validate_api_v2
@request_error_handler
def get_host_tags(self, host_id=None):
"""
Get host tags
:param host_id: ID of the host for which to retrieve the tags
"""
if not host_id:
raise ValueError('Host id required')
return requests.get('{url}/tagging/host/{id}'.format(url=self.url, id=host_id), headers=self.headers,
verify=False)
@validate_api_v2
@request_error_handler
def set_host_tags(self, host_id=None, tags=[], append=False):
"""
Set host tags
:param host_id:
:param tags: list of tags to add to host
:param append: overwrites existing list if set to False, appends to existing tags if set to True
Set to empty list to clear tags (default: False)
"""
if not host_id:
raise ValueError('Host id required')
if append and type(tags) == list:
current_list = self.get_host_tags(host_id=host_id).json()['tags']
payload = {
"tags": current_list + tags
}
elif type(tags) == list:
payload = {
"tags": tags
}
else:
raise TypeError('tags must be of type list')
return requests.patch('{url}/tagging/host/{id}'.format(url=self.url, id=host_id), headers=self.headers,
json=payload, verify=self.verify)
@validate_api_v2
@request_error_handler
def bulk_set_hosts_tag(self, tag, host_ids):
"""
Set a tag in bulk on multiple hosts. Only one tag can be set at a time
:param host_ids: IDs of the hosts for which to set the tag
"""
if not isinstance(host_ids, list):
raise TypeError('Host IDs must be of type list')
payload = {
'objectIds': host_ids,
'tag': tag
}
return requests.post('{url}/tagging/host'.format(url=self.url), headers=self.headers, json=payload,
verify=False)
@validate_api_v2
@request_error_handler
def bulk_delete_hosts_tag(self, tag, host_ids):
"""
Delete a tag in bulk on multiple hosts. Only one tag can be deleted at a time
:param host_ids: IDs of the hosts on which to delete the tag
"""
if not isinstance(host_ids, list):
raise TypeError('Host IDs must be of type list')
payload = {
'objectIds': host_ids,
'tag': tag
}
return requests.delete('{url}/tagging/host'.format(url=self.url), headers=self.headers, json=payload,
verify=False)
    @validate_api_v2
    @request_error_handler
    def get_host_note(self, host_id=None):
        """
        Get host notes
        :param host_id: ID of the host whose note should be retrieved
        :raises ValueError: if host_id is not provided
        For consistency we return a requests.models.Response object
        As we do not want to return the complete host body, we alter the response content
        """
        if not host_id:
            raise ValueError('Host id required')
        host = requests.get('{url}/hosts/{id}'.format(url=self.url, id=host_id), headers=self.headers, verify=self.verify)
        if host.status_code == 200:
            host_note = host.json()['note']
            # API endpoint return HTML escaped characters
            host_note = html.unescape(host_note) if host_note else ''
            # Replace the full host body with a minimal JSON payload carrying only the note
            json_dict = {'status': 'success', 'host_id': str(host_id), 'note': host_note}
            host._content = json.dumps(json_dict).encode('utf-8')
        return host
@validate_api_v2
@request_error_handler
def set_host_note(self, host_id=None, note='', append=False):
"""
Set host note
:param host_id:
:param note: content of the note to set
:param append: overwrites existing note if set to False, appends if set to True
Set to empty note string to clear host note
"""
if not host_id:
raise ValueError('Host id required')
if append and isinstance(note, str):
current_note = self.get_host_note(host_id=host_id).json()['note']
if current_note:
if len(note) > 0:
payload = {
"note": '{}{}{}'.format(current_note, '\n', note)
}
else:
payload = {
"note": current_note
}
else:
payload = {
"note": note
}
elif isinstance(note, str):
payload = {
"note": note
}
else:
raise TypeError('Note must be of type str')
return requests.patch('{url}/hosts/{id}'.format(url=self.url, id=host_id), headers=self.headers, data=json.dumps(payload),
verify=self.verify)
    @request_error_handler
    def get_detections(self, **kwargs):
        """
        Query all detections - all parameters are optional
        :param c_score: certainty score (int) - will be removed with deprecation of v1 of api
        :param c_score_gte: certainty score greater than or equal to (int) - will be removed with deprecation of v1 of api
        :param category: detection category - will be removed with deprecation of v1 of api
        :param certainty: certainty score (int)
        :param certainty_gte: certainty score greater than or equal to (int)
        :param detection: detection type
        :param detection_type: detection type
        :param detection_category: detection category
        :param description: description of the detection
        :param fields: comma separated string of fields to be filtered and returned
            possible values are: id, url, detection_url, category, detection, detection_category,
            detection_type, custom_detection, description, src_ip, state, t_score, c_score,
            certainty, threat, first_timestamp, last_timestamp, targets_key_asset,
            is_targeting_key_asset, src_account, src_host, note, note_modified_by,
            note_modified_timestamp, sensor, sensor_name, tags, triage_rule_id, assigned_to,
            assigned_date, groups, is_marked_custom, is_custom_model
        :param host_id: host id (int)
        :param is_targeting_key_asset: detection is targeting key asset (bool)
        :param is_triaged: detection is triaged
        :param last_timestamp: timestamp of last activity on detection (datetime)
        :param max_id: maximum ID of detection returned
        :param min_id: minimum ID of detection returned
        :param ordering: field used to sort response
        :param page: page number to return (int)
        :param page_size: number of objects to return in response (int)
        :param src_ip: source ip address of host attributed to detection
        :param state: state of detection (active/inactive)
        :param t_score: threat score (int) - will be removed with deprecation of v1 of api
        :param t_score_gte: threat score is greater than or equal to (int) - will be removed with deprecation of v1 of api
        :param tags: tags assigned to detection
        :param targets_key_asset: detection targets key asset (bool) - will be removed with deprecation of v1 of api
        :param threat: threat score (int)
        :param threat_gte: threat score is greater than or equal to (int)
        :param note_modified_timestamp_gte: note last modified timestamp greater than or equal to (datetime)
        """
        if self.version == 2:
            return requests.get('{url}/detections'.format(url=self.url), headers=self.headers,
                                params=self._generate_detection_params(kwargs), verify=self.verify)
        else:
            return requests.get('{url}/detections'.format(url=self.url), auth=self.auth,
                                params=self._generate_detection_params(kwargs), verify=self.verify)
    def get_all_detections(self, **kwargs):
        """
        Generator to retrieve all detections page by page - all parameters are optional

        Yields one requests.Response per page and follows the JSON "next" link
        until the API reports no further pages.
        :param c_score: certainty score (int) - will be removed with deprecation of v1 of api
        :param c_score_gte: certainty score greater than or equal to (int) - will be removed with deprecation of v1 of api
        :param category: detection category - will be removed with deprecation of v1 of api
        :param certainty: certainty score (int)
        :param certainty_gte: certainty score greater than or equal to (int)
        :param detection: detection type
        :param detection_type: detection type
        :param detection_category: detection category
        :param description: description of the detection
        :param fields: comma separated string of fields to be filtered and returned
            possible values are: id, url, detection_url, category, detection, detection_category,
            detection_type, custom_detection, description, src_ip, state, t_score, c_score,
            certainty, threat, first_timestamp, last_timestamp, targets_key_asset,
            is_targeting_key_asset, src_account, src_host, note, note_modified_by,
            note_modified_timestamp, sensor, sensor_name, tags, triage_rule_id, assigned_to,
            assigned_date, groups, is_marked_custom, is_custom_model
        :param host_id: host id (int)
        :param is_targeting_key_asset: detection is targeting key asset (bool)
        :param is_triaged: detection is triaged
        :param last_timestamp: timestamp of last activity on detection (datetime)
        :param max_id: maximum ID of detection returned
        :param min_id: minimum ID of detection returned
        :param ordering: field used to sort response
        :param page: page number to return (int)
        :param page_size: number of objects to return in response (int)
        :param src_ip: source ip address of host attributed to detection
        :param state: state of detection (active/inactive)
        :param t_score: threat score (int) - will be removed with deprecation of v1 of api
        :param t_score_gte: threat score is greater than or equal to (int) - will be removed with deprecation of v1 of api
        :param tags: tags assigned to detection
        :param targets_key_asset: detection targets key asset (bool) - will be removed with deprecation of v1 of api
        :param threat: threat score (int)
        :param threat_gte: threat score is greater than or equal to (int)
        :param note_modified_timestamp_gte: note last modified timestamp greater than or equal to (datetime)
        """
        resp = requests.get('{url}/detections'.format(url=self.url), headers=self.headers,
                            params=self._generate_detection_params(kwargs), verify=self.verify)
        yield resp
        while resp.json()['next']:
            resp = self._get_request(url = resp.json()['next'])
            yield resp
@request_error_handler
def get_detection_by_id(self, detection_id=None, **kwargs):
"""
Get detection by id
:param detection_id: detection id - required
:param fields: comma separated string of fields to be filtered and returned - optional
possible values are: id, url, detection_url, category, detection, detection_category,
detection_type, custom_detection, description, src_ip, state, t_score, c_score,
certainty, threat, first_timestamp, last_timestamp, targets_key_asset,
is_targeting_key_asset, src_account, src_host, note, note_modified_by,
note_modified_timestamp, sensor, sensor_name, tags, triage_rule_id, assigned_to,
assigned_date, groups, is_marked_custom, is_custom_model
"""
if not detection_id:
raise ValueError('Detection id required')
if self.version == 2:
return requests.get('{url}/detections/{id}'.format(url=self.url, id=detection_id), headers=self.headers,
params=self._generate_detection_params(kwargs), verify=self.verify)
else:
return requests.get('{url}/detections/{id}'.format(url=self.url, id=detection_id), auth=self.auth,
params=self._generate_detection_params(kwargs), verify=self.verify)
@validate_api_v2
@request_error_handler
def mark_detections_fixed(self, detection_ids=None):
"""
Mark detections as fixed
:param detection_ids: list of detections to mark as fixed
"""
if not isinstance(detection_ids, list):
raise ValueError('Must provide a list of detection IDs to mark as fixed')
return self._toggle_detections_fixed(detection_ids, fixed=True)
@validate_api_v2
@request_error_handler
def unmark_detections_fixed(self, detection_ids=None):
"""
Unmark detections as fixed
:param detection_ids: list of detections to unmark as fixed
"""
if not isinstance(detection_ids, list):
raise ValueError('Must provide a list of detection IDs to unmark as fixed')
return self._toggle_detections_fixed(detection_ids, fixed=False)
    def _toggle_detections_fixed(self, detection_ids, fixed):
        """
        Internal function to mark/unmark detections as fixed
        :param detection_ids: list of detection IDs to update
        :param fixed: True to mark the detections as fixed, False to unmark them
        """
        payload = {
            'detectionIdList': detection_ids,
            # API expects the strings 'True'/'False', not JSON booleans
            'mark_as_fixed': str(fixed)
        }
        return requests.patch('{url}/detections'.format(url=self.url), json=payload, headers=self.headers,
                              verify=self.verify)
@validate_api_v2
@request_error_handler
def mark_detections_custom(self, detection_ids=[], triage_category=None):
"""
Mark detections as custom
:param detection_ids: list of detection IDs to mark as custom
:param triage_category: custom name to give detection
:rtype: requests.Response
"""
if not isinstance(detection_ids, list):
raise ValueError('Must provide a list of detection IDs to mark as custom')
payload = {
"triage_category": triage_category,
"detectionIdList": detection_ids
}
return requests.post('{url}/rules'.format(url=self.url), headers=self.headers, json=payload,
verify=self.verify)
@validate_api_v2
@request_error_handler
def unmark_detections_custom(self, detection_ids=[]):
"""
Unmark detection as custom
:param detection_ids: list of detection IDs to unmark as custom
:rtype: requests.Response
"""
if not isinstance(detection_ids, list):
raise ValueError('Must provide a list of detection IDs to unmark as custom')
payload = {
"detectionIdList": detection_ids
}
response = requests.delete('{url}/rules'.format(url=self.url), headers=self.headers, json=payload,
verify=self.verify)
# DELETE returns an empty response, but we populate the response for consistency with the mark_as_fixed() function
json_dict = {'_meta': {'message': 'Successfully unmarked detections', 'level': 'Success'}}
response._content = json.dumps(json_dict).encode('utf-8')
return response
@validate_api_v2
@request_error_handler
def get_detection_tags(self, detection_id=None):
"""
Get detection tags
:param detection_id:
"""
return requests.get('{url}/tagging/detection/{id}'.format(url=self.url, id=detection_id), headers=self.headers,
verify=False)
@validate_api_v2
@request_error_handler
def set_detection_tags(self, detection_id=None, tags=[], append=False):
"""
Set detection tags
:param detection_id:
:param tags: list of tags to add to detection
:param append: overwrites existing list if set to False, appends to existing tags if set to True
Set to empty list to clear all tags (default: False)
"""
if append and type(tags) == list:
current_list = self.get_detection_tags(detection_id=detection_id).json()['tags']
payload = {
"tags": current_list + tags
}
elif type(tags) == list:
payload = {
"tags": tags
}
else:
raise TypeError('tags must be of type list')
return requests.patch('{url}/tagging/detection/{id}'.format(url=self.url, id=detection_id), headers=self.headers,
json=payload, verify=self.verify)
@validate_api_v2
@request_error_handler
def bulk_set_detections_tag(self, tag, detection_ids):
"""
Set a tag in bulk on multiple detections. Only one tag can be set at a time
:param detection_ids: IDs of the detections for which to set the tag
"""
if not isinstance(detection_ids, list):
raise TypeError('Detection IDs must be of type list')
payload = {
'objectIds': detection_ids,
'tag': tag
}
return requests.post('{url}/tagging/detection'.format(url=self.url), headers=self.headers, json=payload,
verify=False)
@validate_api_v2
@request_error_handler
def bulk_delete_detections_tag(self, tag, detection_ids):
"""
Delete a tag in bulk on multiple detections. Only one tag can be deleted at a time
:param detection_ids: IDs of the detections for which to delete the tag
"""
if not isinstance(detection_ids, list):
raise TypeError('Detection IDs must be of type list')
payload = {
'objectIds': detection_ids,
'tag': tag
}
return requests.delete('{url}/tagging/detection'.format(url=self.url), headers=self.headers, json=payload,
verify=False)
    @validate_api_v2
    @request_error_handler
    def get_detection_note(self, detection_id=None):
        """
        Get detection notes
        :param detection_id: ID of the detection whose note should be retrieved
        For consistency we return a requests.models.Response object
        As we do not want to return the complete detection body, we alter the response content
        """
        detection = requests.get('{url}/detections/{id}'.format(url=self.url, id=detection_id), headers=self.headers, verify=self.verify)
        if detection.status_code == 200:
            detection_note = detection.json()['note']
            # API endpoint return HTML escaped characters
            detection_note = html.unescape(detection_note) if detection_note else ''
            # Replace the full detection body with a minimal JSON payload carrying only the note
            json_dict = {'status': 'success', 'detection_id': str(detection_id), 'note': detection_note}
            detection._content = json.dumps(json_dict).encode('utf-8')
        return detection
@validate_api_v2
@request_error_handler
def set_detection_note(self, detection_id=None, note='', append=False):
"""
Set detection note
:param detection_id:
:param note: content of the note to set
:param append: overwrites existing note if set to False, appends if set to True
Set to empty note string to clear detection note
"""
if append and isinstance(note, str):
current_note = self.get_detection_note(detection_id=detection_id).json()['note']
if current_note:
if len(note) > 0:
payload = {
"note": '{}{}{}'.format(current_note, '\n', note)
}
else:
payload = {
"note": current_note
}
else:
payload = {
"note": note
}
elif isinstance(note, str):
payload = {
"note": note
}
else:
raise TypeError('Note must be of type str')
return requests.patch('{url}/detections/{id}'.format(url=self.url, id=detection_id), headers=self.headers, json=payload,
verify=self.verify)
@validate_api_v2
def get_detection_pcap(self, detection_id=None, filename=None):
"""
Get detection pcap
:param detection_id: ID of the detection for which to get a pcap
:param filename: filename to write the pcap to. Will be overwriten if already exists.
"""
response = requests.get('{url}/detections/{id}/pcap'.format(url=self.url, id=detection_id), headers=self.headers,
verify=False)
if response.status_code not in [200, 201, 204]:
raise HTTPException(response)
with open(filename, 'wb') as f:
f.write(response.content)
# Return a <Response> object for consistency
json_dict = {'status': 'success', 'detection_id': str(detection_id), 'file_created': filename}
response._content = json.dumps(json_dict).encode('utf-8')
return response
    # TODO add request_error_handler decorator as soon as get_rules_by_name() returns requests.Response object
    @validate_api_v2
    def get_rules(self, name=None, rule_id=None, **kwargs):
        """
        Query all rules
        :param name: name of rule to search (substring matching) - deprecated, use get_all_rules with "contains"
        :param rule_id: ID of rule to return - deprecated, use get_rule_by_id
        :param contains: substring to match against rule names
        :param fields: comma separated string of fields to be filtered and returned
            possible values are: active_detections, all_hosts, category, created_timestamp, description,
            enabled, flex1, flex2, flex3, flex4, flex5, flex6, host, host_group, id, identity, ip,
            ip_group, is_whitelist, last_timestamp, priority, remote1_dns, remote1_dns_groups,
            remote1_ip, remote1_ip_groups, remote1_kerb_account, remote1_kerb_service, remote1_port,
            remote1_proto, remote2_dns, remote2_dns_groups, remote2_ip, remote2_ip_groups, remote2_port,
            remote2_proto, sensor_luid, smart_category, template, total_detections, type_vname, url
        :param include_templates: include rule templates, default is False
        :param ordering: field used to sort response
        :param page: page number to return (int)
        :param page_size: number of objects to return in response (int)
        """
        deprecation('Some rules are no longer compatible with the APIv2, please switch to the APIv2.1')
        if name:
            deprecation('The "name" argument will be removed from this function, please use get_all_rules with the "contains" query parameter')
            return self.get_rules_by_name(triage_category=name)
        elif rule_id:
            deprecation('The "rule_id" argument will be removed from this function, please use the corresponding get_rule_by_id function')
            return self.get_rule_by_id(rule_id)
        else:
            return requests.get('{url}/rules'.format(url=self.url), headers=self.headers,
                                params=self._generate_rule_params(kwargs), verify=self.verify)
@validate_api_v2
@request_error_handler
def get_rule_by_id(self, rule_id, **kwargs):
"""
Get triage rules by id
:param rule_id: id of triage rule to retrieve
:param fields: comma separated string of fields to be filtered and returned
possible values are: active_detections, all_hosts, category, created_timestamp, description,
enabled, flex1, flex2, flex3, flex4, flex5, flex6, host, host_group, id, identity, ip,
ip_group, is_whitelist, last_timestamp, priority, remote1_dns, remote1_dns_groups,
remote1_ip, remote1_ip_groups, remote1_kerb_account, remote1_kerb_service, remote1_port,
remote1_proto, remote2_dns, remote2_dns_groups, remote2_ip, remote2_ip_groups, remote2_port,
remote2_proto, sensor_luid, smart_category, template, total_detections, type_vname, url
"""
if not rule_id:
raise ValueError('Rule id required')
deprecation('Some rules are no longer compatible with the APIv2, please switch to the APIv2.1')
return requests.get('{url}/rules/{id}'.format(url=self.url, id=rule_id), headers=self.headers,
params=self._generate_rule_by_id_params(kwargs), verify=False)
# TODO make return type requests.Reponse
@validate_api_v2
def get_rules_by_name(self, triage_category=None, description=None):
"""
Get triage rules by name or description
Condition are to be read as OR
:param triage_category: 'Triage as' field of filter
:param description: Description of the triage filter
:rtype list: to be backwards compatible
"""
search_query = triage_category if triage_category else description
response = self.get_rules(contains=search_query)
return response.json()['results']
    @validate_api_v2
    def get_all_rules(self, **kwargs):
        """
        Generator to retrieve all rules page by page - all parameters are optional

        Yields one requests.Response per page and follows the JSON "next" link
        until the API reports no further pages.
        :param contains: substring to match against rule names
        :param fields: comma separated string of fields to be filtered and returned
            possible values are: active_detections, all_hosts, category, created_timestamp, description,
            enabled, flex1, flex2, flex3, flex4, flex5, flex6, host, host_group, id, identity, ip,
            ip_group, is_whitelist, last_timestamp, priority, remote1_dns, remote1_dns_groups,
            remote1_ip, remote1_ip_groups, remote1_kerb_account, remote1_kerb_service, remote1_port,
            remote1_proto, remote2_dns, remote2_dns_groups, remote2_ip, remote2_ip_groups, remote2_port,
            remote2_proto, sensor_luid, smart_category, template, total_detections, type_vname, url
        :param include_templates: include rule templates, default is False
        :param ordering: field used to sort response
        :param page: page number to return (int)
        :param page_size: number of objects to return in response (int)
        """
        resp = requests.get('{url}/rules'.format(url=self.url), headers=self.headers,
                            params=self._generate_rule_params(kwargs), verify=self.verify)
        yield resp
        while resp.json()['next']:
            resp = self._get_request(url = resp.json()['next'])
            yield resp
@validate_api_v2
@request_error_handler
def create_rule(self, detection_category=None, detection_type=None, triage_category=None, is_whitelist=False, **kwargs):
"""
Create triage rule
:param detection_category: detection category to triage [botnet activity, command & control, reconnaissance,
lateral movement, exfiltration]
:param detection_type: detection type to triage
:param triage_category: name that will be used for triaged detection
:param description: name of the triage rule
:param is_whitelist: set to True if rule is to whitelist; opposed to tracking detections without scores (boolean)
:param ip: list of ip addresses to apply to triage rule
:param ip_group: list of IP groups IDs to add to rule
:param host: list of host ids to apply to triage rule
:param host_group: list of Host groups IDs to add to rule
:param sensor_luid: list of sensor luids to triage
:param priority: used to determine order of triage filters (int)
:param all_hosts: apply triage rule to all hosts (boolean)
:param remote1_ip: destination IP where this Triage filter will be applied to
:param remote1_ip_groups: destination IP Groups where this Triage filter will be applied to
:param remote1_proto: destination protocol where this Triage filter will be applied to
:param remote1_port: destination port where this Triage filter will be applied to
:param remote1_dns: destination FQDN where this Triage filter will apply to
:param remote1_dns_groups: domain groups where this Triage filter will apply to
:param remote2_ip: destination IP where this Triage filter will be applied to
:param remote2_ip_groups: destination IP Groups where this Triage filter will be applied to
:param remote2_proto: destination protocol where this Triage filter will be applied to
:param remote2_port: destination port where this Triage filter will be applied to
:param remote2_dns: destination FQDN where this Triage filter will apply to
:param remote2_dns_groups: domain groups where this Triage filter will apply to
:param account: accounts where this triage filter will apply to (list)
:param named_pipe: (Suspicious Remote Execution) named pipes where this triage filter will apply to (list)
:param uuid: (Suspicious Remote Execution) UUID where this triage filter will apply to (list)
:param identity: (Kerberos detection) identity where this triage filter will apply to (list)
:param service: (PAA detections) services where this triage filter will apply to (list)
:param file_share: (Ransomware File Activity) file share where this triage filter will apply to - escape backslashes with "\" (list)
:param file_extensions: (Ransomware File Activity) file extensions where this triage filter will apply to (list)
:param rdp_client_name: (Suspicious Remote Desktop) RDP client name where this triage filter will apply to (list)
:param rdp_client_token: (Suspicious Remote Desktop) RDP client token where this triage filter will apply to (list)
:param keyboard_name: (Suspicious Remote Desktop) RDP keyboard name where this triage filter will apply to (list)
:returns request object
"""
if not all([detection_category, detection_type, triage_category]):
raise KeyError("missing required parameter: "
"detection_category, detection_type, triage_category")
if detection_category.lower() not in ['botnet activity', 'command & control', 'reconnaissance', 'lateral movement', 'exfiltration']:
raise ValueError("detection_category not recognized")
payload = {
'detection_category': detection_category,
'detection': detection_type,
'triage_category': triage_category,
'is_whitelist': is_whitelist
}
valid_keys = ['description', 'is_whitelist', 'ip', 'ip_group', 'host', 'host_group',
'sensor_luid', 'priority', 'all_hosts', 'remote1_ip', 'remote1_ip_groups',
'remote1_proto', 'remote1_port', 'remote1_dns', 'remote1_dns_groups', 'remote2_ip',
'remote2_ip_groups', 'remote2_proto', 'remote2_port', 'remote2_dns',
'remote2_dns_groups', 'account', 'named_pipe', 'uuid', 'identity', 'service',
'file_share', 'file_extensions', 'rdp_client_name', 'rdp_client_token', 'keyboard_name']
for k, v in kwargs.items():
if k in valid_keys:
payload[k] = v
else:
raise ValueError('argument {} is an invalid field for rule creation'.format(str(k)))
return requests.post('{url}/rules'.format(url=self.url), headers=self.headers, json=payload,
verify=self.verify)
    @validate_api_v2
    @request_error_handler
    def update_rule(self, rule_id=None, name=None, append=False, **kwargs):
        """
        Update an existing triage rule.

        Fetches the current rule body, merges the provided keyword arguments
        into it, then PUTs the whole object back.
        :param rule_id: id of rule to update
        :param name: name of rule to update (deprecated - use rule_id)
        :param append: set to True if appending to existing list (boolean)
        :param description: name of the triage rule
        :param is_whitelist: set to True if rule is to whitelist; opposed to tracking detections without scores (boolean)
        :param ip: list of ip addresses to apply to triage rule
        :param ip_group: list of IP groups IDs to add to rule
        :param host: list of host ids to apply to triage rule
        :param host_group: list of Host groups IDs to add to rule
        :param sensor_luid: list of sensor luids to triage
        :param priority: used to determine order of triage filters (int)
        :param all_hosts: apply triage rule to all hosts (boolean)
        :param remote1_ip: destination IP where this Triage filter will be applied to
        :param remote1_ip_groups: destination IP Groups where this Triage filter will be applied to
        :param remote1_proto: destination protocol where this Triage filter will be applied to
        :param remote1_port: destination port where this Triage filter will be applied to
        :param remote1_dns: destination FQDN where this Triage filter will apply to
        :param remote1_dns_groups: domain groups where this Triage filter will apply to
        :param remote2_ip: destination IP where this Triage filter will be applied to
        :param remote2_ip_groups: destination IP Groups where this Triage filter will be applied to
        :param remote2_proto: destination protocol where this Triage filter will be applied to
        :param remote2_port: destination port where this Triage filter will be applied to
        :param remote2_dns: destination FQDN where this Triage filter will apply to
        :param remote2_dns_groups: domain groups where this Triage filter will apply to
        :param account: accounts where this triage filter will apply to (list)
        :param named_pipe: (Suspicious Remote Execution) named pipes where this triage filter will apply to (list)
        :param uuid: (Suspicious Remote Execution) UUID where this triage filter will apply to (list)
        :param identity: (Kerberos detection) identity where this triage filter will apply to (list)
        :param service: (PAA detections) services where this triage filter will apply to (list)
        :param file_share: (Ransomware File Activity) file share where this triage filter will apply to - escape backslashes with "\" (list)
        :param file_extensions: (Ransomware File Activity) file extensions where this triage filter will apply to (list)
        :param rdp_client_name: (Suspicious Remote Desktop) RDP client name where this triage filter will apply to (list)
        :param rdp_client_token: (Suspicious Remote Desktop) RDP client token where this triage filter will apply to (list)
        :param keyboard_name: (Suspicious Remote Desktop) RDP keyboard name where this triage filter will apply to (list)
        :returns request object
        """
        if name:
            # Legacy lookup path: resolve the rule via its triage_category name.
            # Must match exactly one rule, otherwise we refuse to update.
            deprecation('The "name" argument will be removed from this function, please use get_all_rules with the "contains" query parameter')
            matching_rules = self.get_rules_by_name(triage_category=name)
            if len(matching_rules) > 1:
                raise Exception('More than one rule matching the name')
            elif len(matching_rules) < 1:
                raise Exception('No rule matching the search')
            else:
                rule = matching_rules[0]
        elif rule_id:
            rule = self.get_rule_by_id(rule_id=rule_id).json()
        else:
            raise ValueError("rule name or id must be provided")
        # Whitelist of fields callers may modify on the fetched rule body.
        valid_keys = ['description', 'is_whitelist', 'ip', 'ip_group', 'host', 'host_group',
            'sensor_luid', 'priority', 'all_hosts', 'remote1_ip', 'remote1_ip_groups',
            'remote1_proto', 'remote1_port', 'remote1_dns', 'remote1_dns_groups', 'remote2_ip',
            'remote2_ip_groups', 'remote2_proto', 'remote2_port', 'remote2_dns',
            'remote2_dns_groups', 'account', 'named_pipe', 'uuid', 'identity', 'service',
            'file_share', 'file_extensions', 'rdp_client_name', 'rdp_client_token', 'keyboard_name']
        for k, v in kwargs.items():
            if k in valid_keys:
                if append:
                    # Append semantics only make sense for list-valued fields;
                    # scalar fields are overwritten even when append=True.
                    if isinstance(rule[k], list):
                        rule[k] += v
                    else:
                        rule[k] = v
                else:
                    rule[k] = v
            else:
                raise ValueError('invalid parameter provided: {}'.format(str(k)))
        # PUT the complete (merged) rule body back to the API.
        return requests.put('{url}/rules/{id}'.format(url=self.url, id=rule['id']), headers=self.headers, json=rule,
            verify=self.verify)
@validate_api_v2
@request_error_handler
def delete_rule(self, rule_id=None, restore_detections=True):
"""
Delete triage rule
:param rule_id:
:param restore_detections: restore previously triaged detections (bool) default behavior is to restore
detections
"""
if not rule_id:
raise ValueError('Rule id required')
params = {
'restore_detections': restore_detections
}
return requests.delete('{url}/rules/{id}'.format(url=self.url, id=rule_id), headers=self.headers, params=params,
verify=self.verify)
@validate_api_v2
@request_error_handler
def get_groups(self, **kwargs):
"""
Query all groups - all parameters are optional
:param description: description of groups to search
:param domains: search for groups containing those domains (list)
:param host_ids: search for groups containing those host IDs (list)
:param host_names: search for groups containing those hosts (list)
:param last_modified_by: username of last person to modify this group
:param last_modified_timestamp: timestamp of last modification of group (datetime)
:param name: name of groups to search
:param page: page number to return (int)
:param page_size: number of object to return in repsonse (int)
:param type: type of group to search (domain/host/ip)
"""
return requests.get('{url}/groups'.format(url=self.url), headers=self.headers,
params=self._generate_group_params(kwargs), verify=self.verify)
@validate_api_v2
def get_all_groups(self, **kwargs):
"""
Generator to retrieve all groups - all parameters are optional
:param description: description of groups to search
:param domains: search for groups containing those domains (list)
:param host_ids: search for groups containing those host IDs (list)
:param host_names: search for groups containing those hosts (list)
:param last_modified_by: username of last person to modify this group
:param last_modified_timestamp: timestamp of last modification of group (datetime)
:param name: name of groups to search
:param page: page number to return (int)
:param page_size: number of object to return in repsonse (int)
:param type: type of group to search (domain/host/ip)
"""
resp = requests.get('{url}/groups'.format(url=self.url), headers=self.headers,
params=self._generate_group_params(kwargs), verify=self.verify)
yield resp
while resp.json()['next']:
resp = self._get_request(url = resp.json()['next'])
yield resp
@validate_api_v2
@request_error_handler
def get_group_by_id(self, group_id):
"""
Get groups by id
:param rule_id: id of group to retrieve
"""
return requests.get('{url}/groups/{id}'.format(url=self.url, id=group_id), headers=self.headers, verify=False)
@validate_api_v2
def get_groups_by_name(self, name=None, description=None):
"""
Get groups by name or description
:param name: Name of group*
:param description: Description of the group*
*params are to be read as OR
"""
if name and description:
raise Exception('Can only provide a name OR a description')
if name:
response = self.get_groups(name=name)
return response.json()['results']
elif description:
response = self.get_groups(description=description)
return response.json()['results']
@validate_api_v2
@request_error_handler
def create_group(self, name=None, description='', type=None, members=[], rules=[], **kwargs):
"""
Create group
:param name: name of the group to create
:param description: description of the group
:param type: type of the group to create (domain/host/ip)
:param members: list of host ids to add to group
:param rules: list of triage rule ids to add to group
:rtype requests.Response:
"""
if not name:
raise ValueError("missing required parameter: name")
if not type:
raise ValueError("missing required parameter: type")
if type not in ['host', 'domain', 'ip']:
raise ValueError('parameter type must have value "domain", "ip" or "host"')
if not isinstance(members, list):
raise TypeError("members must be type: list")
if not isinstance(rules, list):
raise TypeError("rules must be type: list")
payload = {
"name": name,
"description": description,
"type": type,
"members": members,
"rules": rules,
}
for k, v in kwargs.items():
if not type(v) == list:
raise TypeError("{} must be of type: list".format(k))
payload[k] = v
return requests.post('{url}/groups'.format(url=self.url), headers=self.headers, json=payload,
verify=self.verify)
    @validate_api_v2
    @request_error_handler
    def update_group(self, group_id, append=False, **kwargs):
        """
        Update a group: fetch the current group body, merge the provided
        keyword arguments into it, then PATCH the whole object back.
        :param group_id: id of group to update
        :param name: name of group
        :param description: description of the group
        :param type: type of the group (domain/host/ip)
        :param members: list of host ids to add to group
        :param rules: list of rule ids to add to group
        :param append: set to True if appending to existing list (boolean)
        :raises KeyError: when the group does not exist or an invalid field is passed
        :raises TypeError: when members/rules is not a list
        """
        valid_keys = ['name', 'description', 'type', 'members', 'rules']
        group = self.get_group_by_id(group_id = group_id).json()
        try:
            # NOTE: shadows the builtin `id`; only used locally for the URL below.
            id = group['id']
        except KeyError:
            raise KeyError('Group with id {} was not found'.format(str(group_id)))
        # Transform members into flat list as API returns dicts for host groups
        if group['type'] == 'host':
            members = set()
            for member in group['members']:
                members.add(member['id'])
            group['members'] = list(members)
        for k, v in kwargs.items():
            if k in valid_keys and v is not None:
                if k in ['members', 'rules'] and not isinstance(v, list):
                    raise TypeError('{} must be of type: list'.format(k))
                if append:
                    # += appends for list fields; for strings this concatenates,
                    # so append=True is only meaningful for members/rules.
                    group[k] += v
                else:
                    group[k] = v
            else:
                raise KeyError('Key {} is not valid'.format(k))
        # De-duplicate members before sending the merged body back.
        group['members'] = list(set(group['members']))
        return requests.patch('{url}/groups/{id}'.format(url=self.url, id=id), headers=self.headers, json=group,
            verify=self.verify)
@validate_api_v2
@request_error_handler
def delete_group(self, group_id=None):
"""
Delete group
:param group_id:
detections
"""
return requests.delete('{url}/groups/{id}'.format(url=self.url, id=group_id), headers=self.headers, verify=self.verify)
@validate_api_v2
def get_all_users(self, **kwargs):
"""
Generator to query all users
:param username: filter by username
:param role: filter by role
:param account_type: filter by account type (local, ldap, radius or tacacs)
:param authentication_profile: filter by authentication profile
:param last_login_gte: filter for users that have logged in since the given timestamp
"""
resp = requests.get('{url}/users'.format(url=self.url), headers=self.headers,
params=self._generate_user_params(kwargs), verify=self.verify)
yield resp
while resp.json()['next']:
resp = self._get_request(url = resp.json()['next'])
yield resp
@validate_api_v2
@request_error_handler
def get_user_by_id(self, user_id=None):
"""
Get users by id
:param user: id of user to retrieve
"""
if not user_id:
raise ValueError('User id required')
return requests.get('{url}/users/{id}'.format(url=self.url, id=user_id), headers=self.headers,
verify=self.verify)
@validate_api_v2
@request_error_handler
def update_user(self, user_id=None, account_type=None, authentication_profile=None):
"""
Update the authentication type for a user
:param user_id: user ID
:param account_type: new user account type (local, ldap, radius, tacacs)
:param authentication_profile: authentication profile name
"""
if not user_id:
raise ValueError('User id required')
if not account_type in ['local', 'ldap', 'radius', 'tacacs']:
raise ValueError('Invalid account_type provided')
if not authentication_profile:
raise ValueError('Authentication profile required')
payload = {
'account_type': account_type,
'authentication_profile': authentication_profile
}
return requests.patch('{url}/users/{id}'.format(url=self.url, id=user_id), json=payload, headers=self.headers,
verify=self.verify)
@validate_api_v2
@request_error_handler
def get_proxies(self, proxy_id=None):
"""
Get all defined proxies
"""
if proxy_id:
deprecation('The "proxy_id" argument will be removed from this function, please use the get_proxy_by_id() function')
return self.get_proxy_by_id(proxy_id=proxy_id)
else:
return requests.get('{url}/proxies'.format(url=self.url), headers=self.headers, verify=self.verify)
@validate_api_v2
@request_error_handler
def get_proxy_by_id(self, proxy_id=None):
"""
Get proxy by id
:param proxy_id: id of proxy to retrieve - caution those are UUIDs not int
"""
if not proxy_id:
raise ValueError('Proxy id required')
return requests.get('{url}/proxies/{id}'.format(url=self.url, id=proxy_id), headers=self.headers,
verify=self.verify)
@validate_api_v2
@request_error_handler
def add_proxy(self, address=None, enable=True):
"""
Add a proxy to the proxy list
:param address: IP address of the proxy to add
:param enable: set to true to consider the IP as a proxy, false to never consider it as proxy
"""
payload = {
"proxy": {
"address": address,
"considerProxy": enable
}
}
return requests.post('{url}/proxies'.format(url=self.url), json=payload, headers=self.headers, verify=self.verify)
# TODO PATCH request modifies the proxy ID and 404 is actually a 500 - APP-10753
@validate_api_v2
@request_error_handler
def update_proxy(self, proxy_id=None, address=None, enable=True):
"""
Update an existing proxy in the system
:param proxy_id: ID of the proxy to update
:param address: IP address to set for this proxy
:param enable: set to true to consider the IP as a proxy, false to never consider it as proxy
CAUTION: the proxy ID (ressource identifier) gets modified by the PATCH request at the moment
CAUTION: PATCHing an invalid ID returns a HTTP 500 instead of 404 at the moment
"""
if not proxy_id:
raise ValueError('Proxy id required')
payload = {"proxy": {}}
if address is not None:
payload["proxy"]["address"] = address
if enable is not None:
payload["proxy"]["considerProxy"] = enable
return requests.patch('{url}/proxies/{id}'.format(url=self.url, id=proxy_id), json=payload, headers=self.headers,
verify=self.verify)
@validate_api_v2
@request_error_handler
def delete_proxy(self,proxy_id=None):
"""
Delete a proxy from the proxy list
:param proxy_id: ID of the proxy to delete
"""
return requests.delete('{url}/proxies/{id}'.format(url=self.url, id=proxy_id), headers=self.headers,
verify=self.verify)
@validate_api_v2
@request_error_handler
def create_feed(self, name=None, category=None, certainty=None, itype=None, duration=None):
"""
Creates new threat feed
***Values for category, type, and certainty are case sensitive***
:param name: name of threat feed
:param category: category that detection will register. supported values are lateral, exfil, and cnc
:param certainty: certainty applied to detection. Supported values are Low, Medium, High
:param itype: indicator type - supported values are Anonymize, Exfiltration, Malware Artifacts, and Watchlist
:param duration: days that the threat feed will be applied
:returns: request object
"""
if not category in ['lateral', 'exfil', 'cnc']:
raise ValueError('Invalid category provided: {}'.format(category))
if not certainty in ['Low', 'Medium', 'High']:
raise ValueError('Invalid certainty provided: {}'.format(str(certainty)))
if not itype in ['Anonymize', 'Exfiltration', 'Malware Artifacts', 'Watchlist']:
raise ValueError('Invalid itype provided: {}'.format(str(itype)))
payload = {
"threatFeed": {
"name": name,
"defaults": {
"category": category,
"certainty": certainty,
"indicatorType": itype,
"duration": duration
}
}
}
return requests.post('{url}/threatFeeds'.format(url=self.url), json=payload, headers=self.headers,
verify=self.verify)
@validate_api_v2
@request_error_handler
def delete_feed(self, feed_id=None):
"""
Deletes threat feed from Vectra
:param feed_id: id of threat feed (returned by get_feed_by_name())
"""
return requests.delete('{url}/threatFeeds/{id}'.format(url=self.url, id=feed_id),
headers=self.headers, verify=self.verify)
@validate_api_v2
@request_error_handler
def get_feeds(self):
"""
Gets list of currently configured threat feeds
"""
return requests.get('{url}/threatFeeds'.format(url=self.url), headers=self.headers, verify=self.verify)
@validate_api_v2
def get_feed_by_name(self, name=None):
"""
Gets configured threat feed by name and returns id (used in conjunction with updating and deleting feeds)
:param name: name of threat feed
"""
try:
response = requests.get('{url}/threatFeeds'.format(url=self.url), headers=self.headers, verify=self.verify)
except requests.ConnectionError:
raise Exception('Unable to connect to remote host')
if response.status_code == 200:
for feed in response.json()['threatFeeds']:
if feed['name'].lower() == name.lower():
return feed['id']
else:
raise HTTPException(response)
@validate_api_v2
@request_error_handler
def post_stix_file(self, feed_id=None, stix_file=None):
"""
Uploads STIX file to new threat feed or overwrites STIX file in existing threat feed
:param feed_id: id of threat feed (returned by get_feed_by_name)
:param stix_file: stix filename
"""
return requests.post('{url}/threatFeeds/{id}'.format(url=self.url, id=feed_id), headers=self.headers,
files={'file': open(stix_file)}, verify=self.verify)
@validate_api_v2
def advanced_search(self, stype=None, page_size=50, query=None):
"""
Advanced search
:param stype: search type (hosts, detections)
:param page_size: number of objects returned per page
:param advanced query (download the following guide for more details on query language
https://support.vectranetworks.com/hc/en-us/articles/360003225254-Search-Reference-Guide)
"""
if stype not in ["hosts", "detections"]:
raise ValueError("Supported values for stype are hosts or detections")
if not query:
raise ValueError('Query parameter is required')
params = {
'page_size': page_size,
'query_string': query
}
resp = requests.get('{url}/search/{stype}'.format(url=self.url, stype=stype), headers=self.headers,
params=params, verify=self.verify)
yield resp
while resp.json()['next']:
resp = self._get_request(url=resp.json()['next'])
yield resp
@validate_api_v2
def get_all_traffic_stats(self):
"""
Generator to get all traffic stats
"""
resp = requests.get('{url}/traffic'.format(url=self.url), headers=self.headers, verify=self.verify)
yield resp
while resp.json()['next']:
resp = self._get_request(url = resp.json()['next'])
yield resp
@validate_api_v2
def get_all_sensor_traffic_stats(self, sensor_luid=None):
"""
Generator to get all traffic stats from a sensor
:param sensor_luid: LUID of the sensor for which to get the stats. Can be retrived in the UI under Manage > Sensors
"""
if not sensor_luid:
raise ValueError('Sensor LUID required')
resp = requests.get('{url}/traffic/{luid}'.format(url=self.url, luid=sensor_luid), headers=self.headers, verify=self.verify)
yield resp
while resp.json()['next']:
resp = self._get_request(url = resp.json()['next'])
yield resp
@validate_api_v2
def get_all_subnets(self, **kwargs):
"""
Generator to get all subnets seen by the brain
:param ordering: ordering key of the results.
possible values are: subnet, hosts, firstSeen, lastSeen
:param search: only return subnets containing the search string
"""
resp = requests.get('{url}/subnets'.format(url=self.url), params=self._generate_subnet_params(kwargs),
headers=self.headers, verify=self.verify)
yield resp
while resp.json()['next']:
resp = self._get_request(url = resp.json()['next'])
yield resp
@validate_api_v2
def get_all_sensor_subnets(self, sensor_luid=None, **kwargs):
"""
Generator to get all subnets seen by a sensor
:param sensor_luid: LUID of the sensor for which to get the subnets seen - required
:param ordering: ordering key of the results.
possible values are: subnet, hosts, firstSeen, lastSeen
:param search: only return subnets containing the search string
"""
if not sensor_luid:
raise ValueError('Sensor LUID required')
resp = requests.get('{url}/subnets/{luid}'.format(url=self.url, luid=sensor_luid),
params=self._generate_subnet_params(kwargs), headers=self.headers, verify=self.verify)
yield resp
while resp.json()['next']:
resp = self._get_request(url = resp.json()['next'])
yield resp
# TODO see if the endpoint should become a generator
@validate_api_v2
@request_error_handler
def get_ip_addresses(self, **kwargs):
"""
Get all active IPs seen by the brain
CAUTION: this is not a generator
:param include_ipv4: Include IPv4 addresses - default True
:param include_ipv6: Include IPv6 addresses - default True
"""
return requests.get('{url}/ip_addresses'.format(url=self.url), params=self._generate_ip_address_params(kwargs),
headers=self.headers, verify=self.verify)
@validate_api_v2
@request_error_handler
def get_internal_networks(self):
"""
Get all internal networks configured on the brain
"""
return requests.get('{url}/settings/internal_network'.format(url=self.url),
headers=self.headers, verify=self.verify)
@validate_api_v2
@request_error_handler
def set_internal_networks(self, include=[], exclude=[], drop=[], append=True):
"""
Get all internal networks configured on the brain
Set account tags
:param include: list of subnets to add the internal subnets list
:param exclude: list of subnets to exclude from the internal subnets list
:param drop: list of subnets to add to the drop list
:param append: overwrites existing lists if set to False, appends to existing tags if set to True
"""
if append and all(isinstance(i, list) for i in [include, exclude, drop]):
current_list = self.get_internal_networks().json()
# We must make all entries unique
payload = {
'include': list(set(include).union(set(current_list['included_subnets']))),
'exclude': list(set(exclude).union(set(current_list['excluded_subnets']))),
'drop': list(set(drop).union(set(current_list['dropped_subnets'])))
}
elif all(isinstance(i, list) for i in [include, exclude, drop]):
payload = {
'include': include,
'exclude': exclude,
'drop': drop
}
else:
raise TypeError('subnets must be of type list')
return requests.post('{url}/settings/internal_network'.format(url=self.url),
json=payload, headers=self.headers, verify=self.verify)
# TODO see if check parameter has been fixed - APP-10753
@request_error_handler
def get_health_check(self, check=None):
"""
Get health statistics for the appliance
:param check: specific check to run - optional
CAUTION: the check parameter is broken for the time being
"""
if not check:
return requests.get('{url}/health'.format(url=self.url), headers=self.headers, verify=self.verify)
else:
if not isinstance(check, str):
raise ValueError('check need to be a string')
return requests.get('{url}/health/{check}'.format(url=self.url, check=check), headers=self.headers, verify=self.verify)
class VectraClientV2_1(VectraClient):
def __init__(self, url=None, token=None, verify=False):
"""
Initialize Vectra client
:param url: IP or hostname of Vectra brain (ex https://www.example.com) - required
:param token: API token for authentication when using API v2*
:param verify: Verify SSL (default: False) - optional
"""
super().__init__(url=url, token=token, verify=verify)
# Remove potential trailing slash
url = VectraClient._remove_trailing_slashes(url)
# Set endpoint to APIv2.1
self.url = '{url}/api/v2.1'.format(url=url)
@staticmethod
def _generate_account_params(args):
"""
Generate query parameters for accounts based provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['all', 'c_score', 'c_score_gte', 'certainty', 'certainty_gte', 'fields', 'first_seen',
'include_detection_summaries', 'last_seen', 'last_source', 'max_id', 'min_id', 'name',
'note_modified_timestamp_gte', 'ordering', 'page', 'page_size', 'privilege_category',
'privilege_level', 'privilege_level_gte', 'state', 't_score', 't_score_gte', 'tags',
'threat', 'threat_gte', 'uid']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid account query parameter'.format(str(k)))
return params
@staticmethod
def _generate_detect_usage_params(args):
"""
Generate query parameters for detect usage query based on provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
search = re.compile('[0-9]{4}-[0-9]{2}')
valid_keys = ['start', 'end']
for k, v in args.items():
if k in valid_keys:
if v is not None:
# We validate the parameters here as the error thrown by the endpoint is not very verbose
if search.match(v):
params[k] = v
else:
raise ValueError('{} is not a valid date string for detect usage query'.format(str(v)))
else:
raise ValueError('argument {} is an invalid detect usage query parameter'.format(str(k)))
return params
    def get_campaigns(self, **kwargs):
        """Removed in API v2.1; always raises (note: raised, not warned) pointing to get_all_campaigns()."""
        raise DeprecationWarning('This function has been deprecated in the Vectra API client v2.1. Please use get_all_campaigns() which supports pagination')
    def get_hosts(self, **kwargs):
        """Removed in API v2.1; always raises (note: raised, not warned) pointing to get_all_hosts()."""
        raise DeprecationWarning('This function has been deprecated in the Vectra API client v2.1. Please use get_all_hosts() which supports pagination')
    def get_detections(self, **kwargs):
        """Removed in API v2.1; always raises (note: raised, not warned) pointing to get_all_detections()."""
        raise DeprecationWarning('This function has been deprecated in the Vectra API client v2.1. Please use get_all_detections() which supports pagination')
def get_all_accounts(self, **kwargs):
"""
Generator to retrieve all accounts - all parameters are optional
:param all: does nothing
:param c_score: certainty score (int) - will be removed with deprecation of v1 of api
:param c_score_gte: certainty score greater than or equal to (int) - will be removed with deprecation of v1 of api
:param certainty: certainty score (int)
:param certainty_gte: certainty score greater than or equal to (int)
:param fields: comma separated string of fields to be filtered and returned
possible values are id, url, name, state, threat, certainty, severity, account_type,
tags, note, note_modified_by, note_modified_timestamp, privilege_level, privilege_category,
last_detection_timestamp, detection_set, probable_home
:param first_seen: first seen timestamp of the account (datetime)
:param include_detection_summaries: include detection summary in response (bool)
:param last_seen: last seen timestamp of the account (datetime)
:param last_source: registered ip address of host
:param max_id: maximum ID of account returned
:param min_id: minimum ID of account returned
:param name: registered name of host
:param note_modified_timestamp_gte: note last modified timestamp greater than or equal to (datetime)
:param ordering: field to use to order response
:param page: page number to return (int)
:param page_size: number of object to return in repsonse (int)
:param privilege_category: privilege category of account (low/medium/high)
:param privilege_level: privilege level of account (0-10)
:param privilege_level_gte: privilege of account level greater than or equal to (int)
:param state: state of host (active/inactive)
:param t_score: threat score (int) - will be removed with deprecation of v1 of api
:param t_score_gte: threat score greater than or equal to (int) - will be removed with deprection of v1 of api
:param tags: tags assigned to host
:param threat: threat score (int)
:param threat_gte: threat score greater than or equal to (int)
"""
resp = requests.get('{url}/accounts'.format(url=self.url), headers=self.headers,
params=self._generate_account_params(kwargs), verify=self.verify)
yield resp
while resp.json()['next']:
resp = self._get_request(url=resp.json()['next'])
yield resp
@request_error_handler
def get_account_by_id(self, account_id=None, **kwargs):
"""
Get account by id
:param account_id: account id - required
:param fields: comma separated string of fields to be filtered and returned - optional
possible values are id, url, name, state, threat, certainty, severity, account_type,
tags, note, note_modified_by, note_modified_timestamp, privilege_level, privilege_category,
last_detection_timestamp, detection_set, probable_home
"""
if not account_id:
raise ValueError('Account id required')
return requests.get('{url}/accounts/{id}'.format(url=self.url, id=account_id), headers=self.headers,
params=self._generate_account_params(kwargs), verify=self.verify)
@request_error_handler
def get_account_tags(self, account_id=None):
"""
Get Account tags
:param account_id: ID of the account for which to retrieve the tags
"""
return requests.get('{url}/tagging/account/{id}'.format(url=self.url, id=account_id), headers=self.headers,
verify=False)
@request_error_handler
def set_account_tags(self, account_id=None, tags=[], append=False):
"""
Set account tags
:param account_id: ID of the account for which to set the tags
:param tags: list of tags to add to account
:param append: overwrites existing list if set to False, appends to existing tags if set to True
Set to empty list to clear tags (default: False)
"""
if append and type(tags) == list:
current_list = self.get_account_tags(account_id=account_id).json()['tags']
payload = {
"tags": current_list + tags
}
elif type(tags) == list:
payload = {
"tags": tags
}
else:
raise TypeError('tags must be of type list')
headers = self.headers.copy()
headers.update({
'Content-Type': "application/json",
'Cache-Control': "no-cache"
})
return requests.patch('{url}/tagging/account/{id}'.format(url=self.url, id=account_id), headers=headers,
json=payload, verify=self.verify)
@request_error_handler
def bulk_set_accounts_tag(self, tag, account_ids):
"""
Set a tag in bulk on multiple accounts. Only one tag can be set at a time
:param account_ids: IDs of the accounts for which to set the tag
"""
if not isinstance(account_ids, list):
raise TypeError('account IDs must be of type list')
payload = {
'objectIds': account_ids,
'tag': tag
}
return requests.post('{url}/tagging/account'.format(url=self.url), headers=self.headers, json=payload,
verify=False)
@request_error_handler
def bulk_delete_accounts_tag(self, tag, account_ids):
"""
Delete a tag in bulk on multiple accounts. Only one tag can be deleted at a time
:param account_ids: IDs of the accounts on which to delete the tag
"""
if not isinstance(account_ids, list):
raise TypeError('account IDs must be of type list')
payload = {
'objectIds': account_ids,
'tag': tag
}
return requests.delete('{url}/tagging/account'.format(url=self.url), headers=self.headers, json=payload,
verify=False)
    @request_error_handler
    def get_account_note(self, account_id=None):
        """
        Get account notes.
        :param account_id: ID of the account for which to retrieve the note
        For consistency we return a requests.models.Response object
        As we do not want to return the complete host body, we alter the response content
        """
        account = requests.get('{url}/accounts/{id}'.format(url=self.url, id=account_id), headers=self.headers, verify=self.verify)
        if account.status_code == 200:
            account_note = account.json()['note']
            # API endpoint return HTML escaped characters
            account_note = html.unescape(account_note) if account_note else ''
            json_dict = {'status': 'success', 'account_id': str(account_id), 'note': account_note}
            # NOTE: rewrites the private Response._content so callers see only the
            # note payload instead of the full account body; relies on requests'
            # internal representation (bytes) staying stable.
            account._content = json.dumps(json_dict).encode('utf-8')
        return account
    # TODO check if PATCH endpoint has been implemented on accounts
    def set_account_note(self, account_id=None, note='', append=False):
        """Not supported: the accounts PATCH endpoint does not exist yet; always raises NotImplementedError."""
        raise NotImplementedError('The PATCH endpoint is not yet implemented on /accounts')
@request_error_handler
def get_locked_accounts(self):
"""
Get list of account locked by Account Lockdown
"""
return requests.get('{url}/lockdown/account'.format(url=self.url), headers=self.headers, verify=self.verify)
    def get_rules(self, **kwargs):
        """Removed in API v2.1; always raises (note: raised, not warned) pointing to get_all_rules()."""
        raise DeprecationWarning('This function has been deprecated in the Vectra API client v2.1. Please use get_all_rules() which supports pagination')
def advanced_search(self, stype=None, page_size=50, query=None):
"""
Advanced search
:param stype: search type (hosts, detections, accounts)
:param page_size: number of objects returned per page
:param advanced query (download the following guide for more details on query language
https://support.vectranetworks.com/hc/en-us/articles/360003225254-Search-Reference-Guide)
"""
if stype not in ['hosts', 'detections', 'accounts']:
raise ValueError("Supported values for stype are hosts, detections or accounts")
if not query:
raise ValueError('Query parameter is required')
params = {
'page_size': page_size,
'query_string': query
}
resp = requests.get('{url}/search/{stype}'.format(url=self.url, stype=stype), headers=self.headers,
params=params, verify=self.verify)
yield resp
while resp.json()['next']:
resp = self._get_request(url=resp.json()['next'])
yield resp
@request_error_handler
def get_rule_by_id(self, rule_id, **kwargs):
"""
Get triage rules by id
:param rule_id: id of triage rule to retrieve
:param fields: comma separated string of fields to be filtered and returned
possible values are: active_detections, additional_conditions, created_timestamp,
description, detection, detection_category, enabled, id, is_whitelist, last_timestamp,
priority, source_conditions, template, total_detections, triage_category, url
"""
if not rule_id:
raise ValueError('Rule id required')
return requests.get('{url}/rules/{id}'.format(url=self.url, id=rule_id), headers=self.headers,
params=self._generate_rule_by_id_params(kwargs), verify=False)
def get_rules_by_name(self, triage_category=None, description=None):
raise DeprecationWarning('This function has been deprecated in the Vectra API client v2.1. Please use get_all_rules with the "contains" query parameter')
def get_all_rules(self, **kwargs):
"""
Generator to retrieve all rules page by page - all parameters are optional
:param contains: search for rules containing this string (substring matching)
:param fields: comma separated string of fields to be filtered and returned
possible values are: active_detections, additional_conditions, created_timestamp,
description, detection, detection_category, enabled, id, is_whitelist, last_timestamp,
priority, source_conditions, template, total_detections, triage_category, url
:param include_templates: include rule templates, default is False
:param ordering: field used to sort response
:param page: page number to return (int)
:param page_size: number of object to return in repsonse (int)
"""
resp = requests.get('{url}/rules'.format(url=self.url), headers=self.headers,
params=self._generate_rule_params(kwargs), verify=self.verify)
yield resp
while resp.json()['next']:
resp = self._get_request(url = resp.json()['next'])
yield resp
#TODO wait on fix
# CAUTION: this returns an error 500 altough the rule has been created succesfully\
# when source_conditions and/or additional_conditions are empty - APP-11016
@request_error_handler
def create_rule(self, detection_category=None, detection_type=None, triage_category=None,
source_conditions={'OR':[]}, additional_conditions={'OR':[]}, is_whitelist=False, **kwargs):
"""
Create triage rule
:param detection_category: detection category to triage
possible values are: botnet activity, command & control, reconnaissance,
lateral movement, exfiltration
:param detection_type: detection type to triage
:param triage_category: name that will be used for triaged detection
:param source_conditions: JSON blobs to represent a tree-like conditional structure
operators for leaf nodes: ANY_OF or NONE_OF
operators for non-leaf nodes: AND or OR
possible value for conditions: ip, host, account, sensor
Here is an example of a payload:
"sourceConditions": {
"OR": [
{
"AND": [
{
"ANY_OF": {
"field": "ip",
"values": [
{
"value": "10.45.91.184",
"label": "10.45.91.184"
}
],
"groups": [],
"label": "IP"
}
}
]
}
]
}
}
:param additional_conditions: JSON blobs to represent a tree-like conditional structure
operators for leaf nodes: ANY_OF or NONE_OF
operators for non-leaf nodes: AND or OR
possible value for conditions: remote1_ip, remote1_ip_groups, remote1_proto, remote1_port,
remote1_dns, remote1_dns_groups, remote2_ip, remote2_ip_groups, remote2_proto, remote2_port,
remote2_dns, remote2_dns_groups, account, named_pipe, uuid, identity, service, file_share,
file_extensions, rdp_client_name, rdp_client_token, keyboard_name
Here is an example of a payload:
"additionalConditions": {
"OR": [
{
"AND": [
{
"ANY_OF": {
"field": "remote1_ip",
"values": [
{
"value": "10.1.52.71",
"label": "10.1.52.71"
}
],
"groups": [],
"label": "External Target IP"
}
}
]
}
]
}
:param is_whitelist: set to True if rule is a whitelist, opposed to tracking detections without scores (boolean)
:param description: name of the triage rule - optional
:param priority: used to determine order of triage filters (int) - optional
:returns request object
"""
if not all([detection_category, detection_type, triage_category]):
raise ValueError('Missing required parameter')
if detection_category.lower() not in ['botnet activity', 'command & control', 'reconnaissance', 'lateral movement', 'exfiltration']:
raise ValueError("detection_category not recognized")
payload = {
'detection_category': detection_category,
'detection': detection_type,
'triage_category': triage_category,
'is_whitelist': is_whitelist,
'source_conditions': source_conditions,
'additional_conditions': additional_conditions
}
return requests.post('{url}/rules'.format(url=self.url), headers=self.headers, json=payload,
verify=self.verify)
#TODO wait on fix
# CAUTION: this returns an error 500 altough the rule has been updated succesfully\
# when source_conditions and/or additional_conditions are empty - APP-11016
# CAUTION2: API will error out if original rule has empty source or additional_conditions and\
# payload has non-empty conditions - APP-11016
@request_error_handler
def update_rule(self, rule_id=None, **kwargs):
"""
Update triage rule
:param rule_id: id of rule to update
:param triage_category: name that will be used for triaged detection
:param source_conditions: JSON blobs to represent a tree-like conditional structure
operators for leaf nodes: ANY_OF or NONE_OF
operators for non-leaf nodes: AND or OR
possible value for conditions: ip, host, account, sensor
Here is an example of a payload:
"sourceConditions": {
"OR": [
{
"AND": [
{
"ANY_OF": {
"field": "ip",
"values": [
{
"value": "10.45.91.184",
"label": "10.45.91.184"
}
],
"groups": [],
"label": "IP"
}
}
]
}
]
}
}
:param additional_conditions: JSON blobs to represent a tree-like conditional structure
operators for leaf nodes: ANY_OF or NONE_OF
operators for non-leaf nodes: AND or OR
possible value for conditions: remote1_ip, remote1_ip_groups, remote1_proto, remote1_port,
remote1_dns, remote1_dns_groups, remote2_ip, remote2_ip_groups, remote2_proto, remote2_port,
remote2_dns, remote2_dns_groups, account, named_pipe, uuid, identity, service, file_share,
file_extensions, rdp_client_name, rdp_client_token, keyboard_name
Here is an example of a payload:
"additionalConditions": {
"OR": [
{
"AND": [
{
"ANY_OF": {
"field": "remote1_ip",
"values": [
{
"value": "10.1.52.71",
"label": "10.1.52.71"
}
],
"groups": [],
"label": "External Target IP"
}
}
]
}
]
}
:param is_whitelist: set to True if rule is a whitelist, opposed to tracking detections without scores (boolean)
:param description: name of the triage rule - optional
:param priority: used to determine order of triage filters (int) - optional
:param enabled: is the rule currently enables (boolean) - optional - Not yet implemented!
:returns request object
"""
if rule_id:
rule = self.get_rule_by_id(rule_id=rule_id).json()
else:
raise ValueError("rule id must be provided")
valid_keys = ['description', 'priority', 'enabled', 'triage_category',
'is_whitelist', 'source_conditions', 'additional_conditions']
for k, v in kwargs.items():
if k in valid_keys:
rule[k] = v
else:
raise ValueError('invalid parameter provided: {}'.format(str(k)))
return requests.put('{url}/rules/{id}'.format(url=self.url, id=rule['id']), headers=self.headers, json=rule,
verify=self.verify)
def get_groups(self, **kwargs):
raise DeprecationWarning('This function has been deprecated in the Vectra API client v2.1. Please use get_all_groups() which supports pagination')
def get_groups_by_name(self, name=None, description=None):
raise DeprecationWarning('This function has been deprecated in the Vectra API client v2.1. Please use get_all_groups with the "description" query parameter')
def get_detect_usage(self, **kwargs):
"""
Get average montly IP count for Detect
:param start: starting month for the usage statistics - format YYYY-mm
:param end: end month for the usage statistics - format YYYY-mm
Default is statistics from last month
"""
return requests.get('{url}/usage/detect'.format(url=self.url), params=self._generate_detect_usage_params(kwargs),
headers=self.headers, verify=self.verify) | 0.594551 | 0.14627 |
"""API module to serve cluster host service calls."""
import datetime
import endpoints
from protorpc import message_types
from protorpc import messages
from protorpc import remote
from tradefed_cluster.util import ndb_shim as ndb
from tradefed_cluster import api_common
from tradefed_cluster import api_messages
from tradefed_cluster import common
from tradefed_cluster import datastore_entities
from tradefed_cluster import datastore_util
from tradefed_cluster import device_manager
from tradefed_cluster import harness_image_metadata_syncer
from tradefed_cluster import host_event
from tradefed_cluster import note_manager
# Default page sizes for the paginated list endpoints below.
_DEFAULT_LIST_NOTES_COUNT = 10
_DEFAULT_LIST_HOST_COUNT = 100
_DEFAULT_LIST_HISTORIES_COUNT = 100
_DEFAULT_LIST_CONFIGS_COUNT = 100
# Host-update event name/state constants. Not referenced in this chunk;
# presumably used by host update state publishing elsewhere — TODO confirm.
_HOST_UPDATE_STATE_CHANGED_EVENT_NAME = "HOST_UPDATE_STATE_CHANGED"
_HOST_UPDATE_STATE_PENDING = "PENDING"
def _CheckTimestamp(t1, operator, t2):
  """Compare two timestamps with the given comparison operator.

  Args:
    t1: left-hand side timestamp.
    operator: a common.Operator enum value.
    t2: right-hand side timestamp.

  Returns:
    bool, the result of comparing t1 against t2 with the operator.

  Raises:
    ValueError: if the operator is not supported.
  """
  comparators = {
      common.Operator.EQUAL: lambda a, b: a == b,
      common.Operator.LESS_THAN: lambda a, b: a < b,
      common.Operator.LESS_THAN_OR_EQUAL: lambda a, b: a <= b,
      common.Operator.GREATER_THAN: lambda a, b: a > b,
      common.Operator.GREATER_THAN_OR_EQUAL: lambda a, b: a >= b,
  }
  compare = comparators.get(operator)
  if compare is None:
    raise ValueError('Operator "%s" is not supported.' % operator)
  return compare(t1, t2)
@api_common.tradefed_cluster_api.api_class(resource_name="hosts", path="hosts")
class ClusterHostApi(remote.Service):
"""A class for cluster host API service."""
HOST_LIST_RESOURCE = endpoints.ResourceContainer(
message_types.VoidMessage,
lab_name=messages.StringField(1),
include_hidden=messages.BooleanField(2, default=False),
include_devices=messages.BooleanField(3, default=False),
assignee=messages.StringField(4),
is_bad=messages.BooleanField(5),
hostnames=messages.StringField(6, repeated=True),
host_groups=messages.StringField(7, repeated=True),
test_harnesses=messages.StringField(8, repeated=True),
test_harness_versions=messages.StringField(9, repeated=True),
pools=messages.StringField(10, repeated=True),
host_states=messages.EnumField(api_messages.HostState, 11, repeated=True),
flated_extra_info=messages.StringField(12),
cursor=messages.StringField(13),
count=messages.IntegerField(
14, variant=messages.Variant.INT32, default=_DEFAULT_LIST_HOST_COUNT),
timestamp_operator=messages.EnumField(common.Operator, 15),
timestamp=message_types.DateTimeField(16),
recovery_states=messages.StringField(17, repeated=True),
# TODO: Please use test_harnesses, this field is deprecated.
test_harness=messages.StringField(18, repeated=True),
host_update_states=messages.EnumField(
api_messages.HostUpdateState, 19, repeated=True))
@endpoints.method(
HOST_LIST_RESOURCE,
api_messages.HostInfoCollection,
path="/hosts",
http_method="GET",
name="list")
@api_common.with_ndb_context
def ListHosts(self, request):
"""Fetches a list of hosts.
Args:
request: an API request.
Returns:
a HostInfoCollection object.
"""
if ((request.timestamp and not request.timestamp_operator) or
(not request.timestamp and request.timestamp_operator)):
raise endpoints.BadRequestException(
'"timestamp" and "timestamp_operator" must be set at the same time.')
query = datastore_entities.HostInfo.query()
if request.lab_name:
query = query.filter(
datastore_entities.HostInfo.lab_name == request.lab_name)
if request.assignee:
query = query.filter(
datastore_entities.HostInfo.assignee == request.assignee)
if request.is_bad is not None:
query = query.filter(datastore_entities.HostInfo.is_bad == request.is_bad)
if not request.include_hidden:
query = query.filter(datastore_entities.HostInfo.hidden == False)
if request.flated_extra_info:
query = query.filter(datastore_entities.HostInfo.flated_extra_info ==
request.flated_extra_info)
if len(request.host_groups) == 1:
query = query.filter(
datastore_entities.HostInfo.host_group == request.host_groups[0])
if len(request.hostnames) == 1:
query = query.filter(
datastore_entities.HostInfo.hostname == request.hostnames[0])
test_harnesses = request.test_harness + request.test_harnesses
if len(test_harnesses) == 1:
query = query.filter(
datastore_entities.HostInfo.test_harness == test_harnesses[0])
if len(request.test_harness_versions) == 1:
query = query.filter(
datastore_entities.HostInfo.test_harness_version ==
request.test_harness_versions[0])
if len(request.pools) == 1:
query = query.filter(
datastore_entities.HostInfo.pools == request.pools[0])
if len(request.host_states) == 1:
query = query.filter(
datastore_entities.HostInfo.host_state == request.host_states[0])
if len(request.recovery_states) == 1:
query = query.filter(
datastore_entities.HostInfo.recovery_state
== request.recovery_states[0])
hostnames_with_requested_update_states = set()
if request.host_update_states:
update_state_query = datastore_entities.HostUpdateState.query().filter(
datastore_entities.HostUpdateState.state.IN(
request.host_update_states))
hostnames_with_requested_update_states = set(
update_state.hostname for update_state in update_state_query.fetch(
projection=[datastore_entities.HostUpdateState.hostname]))
def _PostFilter(host):
if request.host_groups and host.host_group not in request.host_groups:
return
if request.hostnames and host.hostname not in request.hostnames:
return
if (test_harnesses and
host.test_harness not in test_harnesses):
return
if (request.test_harness_versions and
host.test_harness_version not in request.test_harness_versions):
return
if request.pools and not set(host.pools).intersection(set(request.pools)):
return
if request.host_states and host.host_state not in request.host_states:
return
if (request.recovery_states and
host.recovery_state not in request.recovery_states):
return
if request.timestamp:
if not host.timestamp:
return
return _CheckTimestamp(
host.timestamp, request.timestamp_operator, request.timestamp)
if request.host_update_states:
if host.hostname not in hostnames_with_requested_update_states:
return
return True
if request.timestamp:
query = query.order(
datastore_entities.HostInfo.timestamp,
datastore_entities.HostInfo.key)
else:
query = query.order(datastore_entities.HostInfo.key)
hosts, prev_cursor, next_cursor = datastore_util.FetchPage(
query, request.count, request.cursor, result_filter=_PostFilter)
host_update_state_keys = [
ndb.Key(datastore_entities.HostUpdateState, host.hostname)
for host in hosts]
host_update_states = ndb.get_multi(host_update_state_keys)
host_infos = []
for host, host_update_state in zip(hosts, host_update_states):
devices = []
if request.include_devices:
device_query = datastore_entities.DeviceInfo.query(ancestor=host.key)
if not request.include_hidden:
device_query = device_query.filter(
datastore_entities.DeviceInfo.hidden == False) devices = device_query.fetch()
host_infos.append(datastore_entities.ToMessage(
host, devices=devices,
host_update_state_entity=host_update_state))
return api_messages.HostInfoCollection(
host_infos=host_infos,
more=bool(next_cursor),
next_cursor=next_cursor,
prev_cursor=prev_cursor)
HOST_GET_RESOURCE = endpoints.ResourceContainer(
message_types.VoidMessage,
hostname=messages.StringField(1, required=True),
include_notes=messages.BooleanField(2, default=False),
include_hidden=messages.BooleanField(3, default=False),
include_host_state_history=messages.BooleanField(4, default=False),
host_state_history_limit=messages.IntegerField(
5, default=device_manager.DEFAULT_HOST_HISTORY_SIZE),
)
@endpoints.method(
HOST_GET_RESOURCE,
api_messages.HostInfo,
path="{hostname}",
http_method="GET",
name="get")
@api_common.with_ndb_context
def GetHost(self, request):
"""Fetches the information and notes of a given hostname.
Args:
request: an API request.
Returns:
a HostInfo object.
Raises:
endpoints.NotFoundException: If the given host does not exist.
endpoint.BadRequestException: If request includes history info with
negative limit.
"""
hostname = request.hostname
host = device_manager.GetHost(hostname)
if not host:
raise endpoints.NotFoundException("Host %s does not exist." % hostname)
device_query = datastore_entities.DeviceInfo.query(ancestor=host.key)
if not request.include_hidden:
device_query = device_query.filter(
datastore_entities.DeviceInfo.hidden == False) devices = device_query.fetch()
host_update_state = ndb.Key(
datastore_entities.HostUpdateState, hostname).get()
host_info = datastore_entities.ToMessage(
host, devices=devices, host_update_state_entity=host_update_state)
# TODO: deprecate "include_notes".
if request.include_notes:
host_notes = (
datastore_entities.Note.query().filter(
datastore_entities.Note.type == common.NoteType.HOST_NOTE).filter(
datastore_entities.Note.hostname == hostname).order(
-datastore_entities.Note.timestamp))
host_info.notes = [
datastore_entities.ToMessage(note) for note in host_notes
]
if request.include_host_state_history:
history_states = None
limit = request.host_state_history_limit
try:
history_states = device_manager.GetHostStateHistory(
hostname, limit=limit)
except ValueError as err:
raise endpoints.BadRequestException(err)
host_state_history = [
datastore_entities.ToMessage(state) for state in history_states
]
host_info.state_history = host_state_history
return host_info
# TODO: deprecate "NewNote" endpoint.
NEW_NOTE_RESOURCE = endpoints.ResourceContainer(
hostname=messages.StringField(1, required=True),
user=messages.StringField(2, required=True),
message=messages.StringField(3),
offline_reason=messages.StringField(4),
recovery_action=messages.StringField(5),
offline_reason_id=messages.IntegerField(6),
recovery_action_id=messages.IntegerField(7),
lab_name=messages.StringField(8),
timestamp=message_types.DateTimeField(9, required=True),
)
@endpoints.method(
NEW_NOTE_RESOURCE,
api_messages.Note,
path="{hostname}/note",
http_method="POST",
name="newNote")
@api_common.with_ndb_context
def NewNote(self, request):
"""Submits a note for this host.
Args:
request: an API request.
Returns:
a VoidMessage
"""
timestamp = request.timestamp
# Datastore only accepts UTC times. Doing a conversion if necessary.
if timestamp.utcoffset() is not None:
timestamp = timestamp.replace(tzinfo=None) - timestamp.utcoffset()
note = datastore_entities.Note(
type=common.NoteType.HOST_NOTE,
hostname=request.hostname,
user=request.user,
timestamp=timestamp,
message=request.message,
offline_reason=request.offline_reason,
recovery_action=request.recovery_action)
note.put()
return datastore_entities.ToMessage(note)
NOTE_ADD_OR_UPDATE_RESOURCE = endpoints.ResourceContainer(
hostname=messages.StringField(1, required=True),
id=messages.IntegerField(2),
user=messages.StringField(3, required=True),
message=messages.StringField(4),
offline_reason=messages.StringField(5),
recovery_action=messages.StringField(6),
offline_reason_id=messages.IntegerField(7),
recovery_action_id=messages.IntegerField(8),
lab_name=messages.StringField(9),
event_time=message_types.DateTimeField(10),
)
  @endpoints.method(
      NOTE_ADD_OR_UPDATE_RESOURCE,
      api_messages.Note,
      path="{hostname}/notes",
      http_method="POST",
      name="addOrUpdateNote")
  @api_common.with_ndb_context
  def AddOrUpdateNote(self, request):
    """Add or update a host note.

    Args:
      request: an API request.

    Returns:
      an api_messages.Note.

    Raises:
      endpoints.BadRequestException: if the offline reason or recovery action
        parameters are rejected by note_manager.
    """
    time_now = datetime.datetime.utcnow()
    # Loads the existing note when request.id is given, otherwise creates one.
    host_note_entity = datastore_util.GetOrCreateEntity(
        datastore_entities.Note,
        entity_id=request.id,
        hostname=request.hostname,
        type=common.NoteType.HOST_NOTE)
    host_note_entity.populate(
        user=request.user,
        message=request.message,
        timestamp=time_now,
        event_time=request.event_time)
    entities_to_update = [host_note_entity]
    # Resolve the offline reason from either an id or free-form content.
    try:
      offline_reason_entity = note_manager.PreparePredefinedMessageForNote(
          common.PredefinedMessageType.HOST_OFFLINE_REASON,
          message_id=request.offline_reason_id,
          lab_name=request.lab_name,
          content=request.offline_reason)
    except note_manager.InvalidParameterError as err:
      raise endpoints.BadRequestException("Invalid offline reason: [%s]" % err)
    if offline_reason_entity:
      host_note_entity.offline_reason = offline_reason_entity.content
      entities_to_update.append(offline_reason_entity)
    # Same resolution for the recovery action.
    try:
      recovery_action_entity = note_manager.PreparePredefinedMessageForNote(
          common.PredefinedMessageType.HOST_RECOVERY_ACTION,
          message_id=request.recovery_action_id,
          lab_name=request.lab_name,
          content=request.recovery_action)
    except note_manager.InvalidParameterError as err:
      raise endpoints.BadRequestException("Invalid recovery action: [%s]" % err)
    if recovery_action_entity:
      host_note_entity.recovery_action = recovery_action_entity.content
      entities_to_update.append(recovery_action_entity)
    # Persist the note and predefined-message entities together, then publish
    # a note event.
    keys = ndb.put_multi(entities_to_update)
    host_note_msg = datastore_entities.ToMessage(host_note_entity)
    host_note_event_msg = api_messages.NoteEvent(
        note=host_note_msg, lab_name=request.lab_name)
    note_manager.PublishMessage(host_note_event_msg,
                                common.PublishEventType.HOST_NOTE_EVENT)
    # keys[0] is host_note_entity's key (first in entities_to_update).
    note_key = keys[0]
    if request.id != note_key.id():
      # If ids are different, then a new note is created, we should create
      # a history snapshot.
      device_manager.CreateAndSaveHostInfoHistoryFromHostNote(
          request.hostname, note_key.id())
    return host_note_msg
  @endpoints.method(
      api_messages.BatchUpdateNotesWithPredefinedMessageRequest,
      api_messages.NoteCollection,
      path="notes:batchUpdateNotesWithPredefinedMessage",
      http_method="POST",
      name="batchUpdateNotesWithPredefinedMessage")
  @api_common.with_ndb_context
  def BatchUpdateNotesWithPredefinedMessage(self, request):
    """Batch update notes with the same predefined message.

    Args:
      request: an API request.

    Returns:
      an api_messages.NoteCollection object.

    Raises:
      endpoints.BadRequestException: if the offline reason or recovery action
        parameters are rejected by note_manager.
    """
    time_now = datetime.datetime.utcnow()
    # Load or create one note entity per request entry; all of them share the
    # request-level user/message/event_time.
    host_note_entities = []
    for note in request.notes:
      note_id = int(note.id) if note.id is not None else None
      host_note_entity = datastore_util.GetOrCreateEntity(
          datastore_entities.Note,
          entity_id=note_id,
          hostname=note.hostname,
          type=common.NoteType.HOST_NOTE)
      host_note_entity.populate(
          user=request.user,
          message=request.message,
          timestamp=time_now,
          event_time=request.event_time)
      host_note_entities.append(host_note_entity)
    # delta_count bumps the predefined message's usage count by the number of
    # notes being updated.
    try:
      offline_reason_entity = note_manager.PreparePredefinedMessageForNote(
          common.PredefinedMessageType.HOST_OFFLINE_REASON,
          message_id=request.offline_reason_id,
          lab_name=request.lab_name,
          content=request.offline_reason,
          delta_count=len(host_note_entities))
    except note_manager.InvalidParameterError as err:
      raise endpoints.BadRequestException("Invalid offline reason: [%s]" % err)
    if offline_reason_entity:
      for host_note_entity in host_note_entities:
        host_note_entity.offline_reason = offline_reason_entity.content
      offline_reason_entity.put()
    try:
      recovery_action_entity = note_manager.PreparePredefinedMessageForNote(
          common.PredefinedMessageType.HOST_RECOVERY_ACTION,
          message_id=request.recovery_action_id,
          lab_name=request.lab_name,
          content=request.recovery_action,
          delta_count=len(host_note_entities))
    except note_manager.InvalidParameterError as err:
      raise endpoints.BadRequestException("Invalid recovery action: [%s]" % err)
    if recovery_action_entity:
      for host_note_entity in host_note_entities:
        host_note_entity.recovery_action = recovery_action_entity.content
      recovery_action_entity.put()
    # Persist all notes, then re-read them so the messages reflect stored ids.
    note_keys = ndb.put_multi(host_note_entities)
    host_note_entities = ndb.get_multi(note_keys)
    note_msgs = []
    for host_note_entity in host_note_entities:
      host_note_msg = datastore_entities.ToMessage(host_note_entity)
      note_msgs.append(host_note_msg)
      # Publish one note event per updated note.
      host_note_event_msg = api_messages.NoteEvent(
          note=host_note_msg,
          lab_name=request.lab_name)
      note_manager.PublishMessage(
          host_note_event_msg, common.PublishEventType.HOST_NOTE_EVENT)
    for request_note, updated_note_key in zip(request.notes, note_keys):
      if not request_note.id:
        # If ids are not provided, then a new note is created, we should create
        # a history snapshot.
        device_manager.CreateAndSaveHostInfoHistoryFromHostNote(
            request_note.hostname, updated_note_key.id())
    return api_messages.NoteCollection(
        notes=note_msgs, more=False, next_cursor=None, prev_cursor=None)
NOTES_BATCH_GET_RESOURCE = endpoints.ResourceContainer(
hostname=messages.StringField(1, required=True),
ids=messages.IntegerField(2, repeated=True),
)
@endpoints.method(
NOTES_BATCH_GET_RESOURCE,
api_messages.NoteCollection,
path="{hostname}/notes:batchGet",
http_method="GET",
name="batchGetNotes")
@api_common.with_ndb_context
def BatchGetNotes(self, request):
"""Batch get notes of a host.
Args:
request: an API request.
Request Params:
hostname: string, the name of a lab host.
ids: a list of strings, the ids of notes to batch get.
Returns:
an api_messages.NoteCollection object.
"""
keys = [
ndb.Key(datastore_entities.Note, entity_id)
for entity_id in request.ids
]
note_entities = ndb.get_multi(keys)
note_msgs = [
datastore_entities.ToMessage(entity)
for entity in note_entities
if entity and entity.hostname == request.hostname
]
return api_messages.NoteCollection(
notes=note_msgs, more=False, next_cursor=None, prev_cursor=None)
NOTES_LIST_RESOURCE = endpoints.ResourceContainer(
hostname=messages.StringField(1, required=True),
count=messages.IntegerField(2, default=_DEFAULT_LIST_NOTES_COUNT),
cursor=messages.StringField(3),
backwards=messages.BooleanField(4, default=False),
include_device_notes=messages.BooleanField(5, default=False),
)
NOTES_DELETE_RESOURCE = endpoints.ResourceContainer(
hostname=messages.StringField(1, required=True),
ids=messages.IntegerField(2, repeated=True),
)
@endpoints.method(
NOTES_DELETE_RESOURCE,
message_types.VoidMessage,
path="{hostname}/notes",
http_method="DELETE",
name="batchDeleteNotes")
@api_common.with_ndb_context
def BatchDeleteNotes(self, request):
"""Delete notes of a host.
Args:
request: an API request.
Request Params:
hostname: string, the name of a lab host.
ids: a list of strings, the ids of notes to delete.
Returns:
a message_types.VoidMessage object.
Raises:
endpoints.BadRequestException, when request does not match existing notes.
"""
keys = [
ndb.Key(datastore_entities.Note, entity_id)
for entity_id in request.ids
]
note_entities = ndb.get_multi(keys)
for key, note_entity in zip(keys, note_entities):
if not note_entity or note_entity.hostname != request.hostname:
raise endpoints.BadRequestException(
"Note<id:{0}> does not exist under host<{1}>.".format(
key.id(), note_entity.hostname))
for key in keys:
key.delete()
return message_types.VoidMessage()
@endpoints.method(
NOTES_LIST_RESOURCE,
api_messages.NoteCollection,
path="{hostname}/notes",
http_method="GET",
name="listNotes")
@api_common.with_ndb_context
def ListNotes(self, request):
"""List notes of a host.
Args:
request: an API request.
Returns:
an api_messages.NoteCollection object.
"""
query = (
datastore_entities.Note.query()
.filter(datastore_entities.Note.hostname == request.hostname)
.order(-datastore_entities.Note.timestamp))
if not request.include_device_notes:
query = query.filter(
datastore_entities.Note.type == common.NoteType.HOST_NOTE)
note_entities, prev_cursor, next_cursor = datastore_util.FetchPage(
query, request.count, request.cursor, backwards=request.backwards)
note_msgs = [
datastore_entities.ToMessage(entity) for entity in note_entities
]
return api_messages.NoteCollection(
notes=note_msgs,
more=bool(next_cursor),
next_cursor=next_cursor,
prev_cursor=prev_cursor)
ASSIGN_HOSTS_RESOURCE = endpoints.ResourceContainer(
hostnames=messages.StringField(1, repeated=True),
assignee=messages.StringField(2, required=True))
@endpoints.method(
ASSIGN_HOSTS_RESOURCE,
message_types.VoidMessage,
path="assign",
http_method="POST",
name="assign")
@api_common.with_ndb_context
def Assign(self, request):
"""Mark the hosts as recover.
TODO: deprecated, use set_recovery_state
Args:
request: request with a list of hostnames and an assignee.
Returns:
message_types.VoidMessage
"""
device_manager.AssignHosts(request.hostnames, request.assignee)
return message_types.VoidMessage()
UNASSIGN_HOSTS_RESOURCE = endpoints.ResourceContainer(
hostnames=messages.StringField(1, repeated=True))
@endpoints.method(
UNASSIGN_HOSTS_RESOURCE,
message_types.VoidMessage,
path="unassign",
http_method="POST",
name="unassign")
@api_common.with_ndb_context
def Unassign(self, request):
"""Mark the hosts as recover.
TODO: deprecated, use set_recovery_state
Args:
request: request with a list of hostnames.
Returns:
message_types.VoidMessage
"""
device_manager.AssignHosts(request.hostnames, None)
return message_types.VoidMessage()
@endpoints.method(
api_messages.HostRecoveryStateRequests,
message_types.VoidMessage,
path="batchSetRecoveryState",
http_method="POST",
name="batchSetRecoveryState")
@api_common.with_ndb_context
def BatchSetRecoveryState(self, request):
"""Batch set recovery state for hosts.
Args:
request: a HostRecoveryStateRequests.
Returns:
message_types.VoidMessage
"""
device_manager.SetHostsRecoveryState(request.host_recovery_state_requests)
return message_types.VoidMessage()
HOSTNAME_RESOURCE = endpoints.ResourceContainer(
hostname=messages.StringField(1, required=True),)
@endpoints.method(
HOSTNAME_RESOURCE,
api_messages.HostInfo,
path="{hostname}/remove",
http_method="POST",
name="remove")
@api_common.with_ndb_context
def Remove(self, request):
"""Remove this host.
Args:
request: an API request.
Returns:
an updated HostInfo
Raises:
endpoints.NotFoundException: If the given device does not exist.
"""
host = device_manager.HideHost(request.hostname)
if not host:
raise endpoints.NotFoundException("Host %s does not exist." %
request.hostname)
return datastore_entities.ToMessage(host)
@endpoints.method(
HOSTNAME_RESOURCE,
api_messages.HostInfo,
path="{hostname}/restore",
http_method="POST",
name="restore")
@api_common.with_ndb_context
def Restore(self, request):
"""Restore this host.
Args:
request: an API request.
Returns:
an updated HostInfo
Raises:
endpoints.NotFoundException: If the given device does not exist.
"""
host = device_manager.RestoreHost(request.hostname)
if not host:
raise endpoints.NotFoundException("Host %s does not exist." %
request.hostname)
return datastore_entities.ToMessage(host)
HISTORIES_LIST_RESOURCE = endpoints.ResourceContainer(
hostname=messages.StringField(1, required=True),
count=messages.IntegerField(2, default=_DEFAULT_LIST_HISTORIES_COUNT),
cursor=messages.StringField(3),
backwards=messages.BooleanField(4, default=False),
)
  # NOTE(review): the methods below duplicate definitions that appear again
  # later in this file — looks like an accidental duplication; confirm intent.
  @endpoints.method(
      HISTORIES_LIST_RESOURCE,
      api_messages.HostInfoHistoryCollection,
      path="{hostname}/histories",
      http_method="GET",
      name="listHistories")
  @api_common.with_ndb_context
  def ListHistories(self, request):
    """List histories of a host.

    Args:
      request: an API request.
    Returns:
      an api_messages.HostInfoHistoryCollection object.
    """
    # Histories live in the HostInfo entity group, ordered newest first.
    query = (
        datastore_entities.HostInfoHistory.query(
            ancestor=ndb.Key(datastore_entities.HostInfo, request.hostname))
        .order(-datastore_entities.HostInfoHistory.timestamp))
    histories, prev_cursor, next_cursor = datastore_util.FetchPage(
        query, request.count, request.cursor, backwards=request.backwards)
    history_msgs = [
        datastore_entities.ToMessage(entity) for entity in histories
    ]
    return api_messages.HostInfoHistoryCollection(
        histories=history_msgs,
        next_cursor=next_cursor,
        prev_cursor=prev_cursor)

  CONFIGS_LIST_RESOURCE = endpoints.ResourceContainer(
      lab_name=messages.StringField(1),
      count=messages.IntegerField(2, default=_DEFAULT_LIST_CONFIGS_COUNT),
      cursor=messages.StringField(3),
  )

  @endpoints.method(
      CONFIGS_LIST_RESOURCE,
      api_messages.HostConfigCollection,
      path="configs",
      http_method="GET",
      name="listHostConfigs")
  @api_common.with_ndb_context
  def ListHostConfigs(self, request):
    """List host configs.

    Args:
      request: an API request.
    Returns:
      an api_messages.HostConfigCollection object.
    """
    query = datastore_entities.HostConfig.query()
    # lab_name is optional; without it all labs' configs are listed.
    if request.lab_name:
      query = query.filter(
          datastore_entities.HostConfig.lab_name == request.lab_name)
    host_configs, _, next_cursor = datastore_util.FetchPage(
        query, request.count, request.cursor)
    host_config_msgs = [datastore_entities.ToMessage(host_config)
                        for host_config in host_configs]
    return api_messages.HostConfigCollection(
        host_configs=host_config_msgs, next_cursor=next_cursor)

  METADATA_GET_RESOURCE = endpoints.ResourceContainer(
      hostname=messages.StringField(1, required=True),
  )

  @endpoints.method(
      METADATA_GET_RESOURCE,
      api_messages.HostMetadata,
      path="{hostname}/metadata",
      http_method="GET",
      name="getMetadata")
  @api_common.with_ndb_context
  def GetMetadata(self, request):
    """Get a host metadata.

    Args:
      request: an API request.
    Returns:
      an api_messages.HostMetadata object.
    """
    metadata = datastore_entities.HostMetadata.get_by_id(request.hostname)
    if not metadata:
      # Transient entity used only to build the response; it is not persisted.
      metadata = datastore_entities.HostMetadata(hostname=request.hostname)
    metadata_msg = datastore_entities.ToMessage(metadata)
    return metadata_msg

  METADATA_PATCH_RESOURCE = endpoints.ResourceContainer(
      hostname=messages.StringField(1, required=True),
      test_harness_image=messages.StringField(2),
  )

  @endpoints.method(
      METADATA_PATCH_RESOURCE,
      api_messages.HostMetadata,
      path="{hostname}/metadata",
      http_method="PATCH",
      name="patchMetadata")
  @api_common.with_ndb_context
  def PatchMetadata(self, request):
    """Patch a host metadata.

    Args:
      request: an API request.
    Returns:
      an api_messages.HostMetadata object.
    """
    metadata = datastore_entities.HostMetadata.get_by_id(request.hostname)
    if not metadata:
      metadata = datastore_entities.HostMetadata(
          id=request.hostname,
          hostname=request.hostname)
    # Only test_harness_image is patchable; empty values leave it unchanged.
    if request.test_harness_image:
      metadata.populate(test_harness_image=request.test_harness_image)
      metadata.put()
    metadata_msg = datastore_entities.ToMessage(metadata)
    return metadata_msg

  BATCH_SET_TEST_HARNESS_IMAGES_RESOURCE = endpoints.ResourceContainer(
      hostnames=messages.StringField(1, repeated=True),
      test_harness_image=messages.StringField(2),
      user=messages.StringField(3),
  )

  @endpoints.method(
      BATCH_SET_TEST_HARNESS_IMAGES_RESOURCE,
      message_types.VoidMessage,
      path="hostMetadata:batchUpdate",
      http_method="POST",
      name="batchUpdateHostMetadata")
  @api_common.with_ndb_context
  def BatchUpdateHostMetadata(self, request):
    """Update HostMetadata on multiple hosts.

    Args:
      request: an API request.
    Request Params:
      hostname: list of strings, the name of hosts.
      test_harness_image: string, the url to test harness image.
      user: string, the user sending the request.
    Returns:
      a message_types.VoidMessage object.
    Raises:
      endpoints.BadRequestException, when request does not match existing hosts.
    """
    host_configs = ndb.get_multi(
        ndb.Key(datastore_entities.HostConfig, hostname)
        for hostname in request.hostnames)
    host_metadatas = ndb.get_multi(
        ndb.Key(datastore_entities.HostMetadata, hostname)
        for hostname in request.hostnames)
    hosts_no_permission = []
    hosts_not_enabled = []
    metadatas_to_update = []
    for hostname, config, metadata in zip(
        request.hostnames, host_configs, host_metadatas):
      # Hosts must opt in to UI-driven updates via their HostConfig.
      if not config or not config.enable_ui_update:
        hosts_not_enabled.append(hostname)
        continue
      if request.user not in config.owners:
        hosts_no_permission.append(hostname)
        continue
      if not metadata:
        metadata = datastore_entities.HostMetadata(
            id=hostname, hostname=hostname)
      # Only emit a state-changed event when the image actually changes.
      if not harness_image_metadata_syncer.AreHarnessImagesEqual(
          metadata.test_harness_image, request.test_harness_image):
        event = host_event.HostEvent(
            time=datetime.datetime.utcnow(),
            type=_HOST_UPDATE_STATE_CHANGED_EVENT_NAME,
            hostname=hostname,
            host_update_state=_HOST_UPDATE_STATE_PENDING,
            data={"host_update_target_image": request.test_harness_image})
        device_manager.HandleDeviceSnapshotWithNDB(event)
      metadata.populate(test_harness_image=request.test_harness_image)
      metadatas_to_update.append(metadata)
    ndb.put_multi(metadatas_to_update)
    # Valid hosts are updated even if others fail validation; the error
    # below reports only the hosts that were skipped.
    if not hosts_no_permission and not hosts_not_enabled:
      return message_types.VoidMessage()
    error_message = ""
    if hosts_no_permission:
      error_message += (
          "Request user %s is not in the owner list of hosts [%s]. "
          % (request.user, ", ".join(hosts_no_permission)))
    if hosts_not_enabled:
      error_message += ("Hosts [%s] are not enabled to be updated from UI. "
                        % ", ".join(hosts_not_enabled))
    raise endpoints.BadRequestException(error_message)
import datetime
import endpoints
from protorpc import message_types
from protorpc import messages
from protorpc import remote
from tradefed_cluster.util import ndb_shim as ndb
from tradefed_cluster import api_common
from tradefed_cluster import api_messages
from tradefed_cluster import common
from tradefed_cluster import datastore_entities
from tradefed_cluster import datastore_util
from tradefed_cluster import device_manager
from tradefed_cluster import harness_image_metadata_syncer
from tradefed_cluster import host_event
from tradefed_cluster import note_manager
# Default page sizes for the paginated list endpoints below.
_DEFAULT_LIST_NOTES_COUNT = 10
_DEFAULT_LIST_HOST_COUNT = 100
_DEFAULT_LIST_HISTORIES_COUNT = 100
_DEFAULT_LIST_CONFIGS_COUNT = 100
# Event name/state emitted when a UI-triggered test harness update is queued.
_HOST_UPDATE_STATE_CHANGED_EVENT_NAME = "HOST_UPDATE_STATE_CHANGED"
_HOST_UPDATE_STATE_PENDING = "PENDING"
def _CheckTimestamp(t1, operator, t2):
  """Compare two timestamps with the requested comparison operator.

  Args:
    t1: left-hand timestamp.
    operator: a common.Operator enum value selecting the comparison.
    t2: right-hand timestamp.
  Returns:
    bool, the result of ``t1 <operator> t2``.
  Raises:
    ValueError: if the operator is not one of the supported comparisons.
  """
  results = {
      common.Operator.EQUAL: t1 == t2,
      common.Operator.LESS_THAN: t1 < t2,
      common.Operator.LESS_THAN_OR_EQUAL: t1 <= t2,
      common.Operator.GREATER_THAN: t1 > t2,
      common.Operator.GREATER_THAN_OR_EQUAL: t1 >= t2,
  }
  try:
    return results[operator]
  except KeyError:
    raise ValueError('Operator "%s" is not supported.' % operator)
@api_common.tradefed_cluster_api.api_class(resource_name="hosts", path="hosts")
class ClusterHostApi(remote.Service):
"""A class for cluster host API service."""
HOST_LIST_RESOURCE = endpoints.ResourceContainer(
message_types.VoidMessage,
lab_name=messages.StringField(1),
include_hidden=messages.BooleanField(2, default=False),
include_devices=messages.BooleanField(3, default=False),
assignee=messages.StringField(4),
is_bad=messages.BooleanField(5),
hostnames=messages.StringField(6, repeated=True),
host_groups=messages.StringField(7, repeated=True),
test_harnesses=messages.StringField(8, repeated=True),
test_harness_versions=messages.StringField(9, repeated=True),
pools=messages.StringField(10, repeated=True),
host_states=messages.EnumField(api_messages.HostState, 11, repeated=True),
flated_extra_info=messages.StringField(12),
cursor=messages.StringField(13),
count=messages.IntegerField(
14, variant=messages.Variant.INT32, default=_DEFAULT_LIST_HOST_COUNT),
timestamp_operator=messages.EnumField(common.Operator, 15),
timestamp=message_types.DateTimeField(16),
recovery_states=messages.StringField(17, repeated=True),
# TODO: Please use test_harnesses, this field is deprecated.
test_harness=messages.StringField(18, repeated=True),
host_update_states=messages.EnumField(
api_messages.HostUpdateState, 19, repeated=True))
@endpoints.method(
HOST_LIST_RESOURCE,
api_messages.HostInfoCollection,
path="/hosts",
http_method="GET",
name="list")
@api_common.with_ndb_context
def ListHosts(self, request):
"""Fetches a list of hosts.
Args:
request: an API request.
Returns:
a HostInfoCollection object.
"""
if ((request.timestamp and not request.timestamp_operator) or
(not request.timestamp and request.timestamp_operator)):
raise endpoints.BadRequestException(
'"timestamp" and "timestamp_operator" must be set at the same time.')
query = datastore_entities.HostInfo.query()
if request.lab_name:
query = query.filter(
datastore_entities.HostInfo.lab_name == request.lab_name)
if request.assignee:
query = query.filter(
datastore_entities.HostInfo.assignee == request.assignee)
if request.is_bad is not None:
query = query.filter(datastore_entities.HostInfo.is_bad == request.is_bad)
if not request.include_hidden:
query = query.filter(datastore_entities.HostInfo.hidden == False)
if request.flated_extra_info:
query = query.filter(datastore_entities.HostInfo.flated_extra_info ==
request.flated_extra_info)
if len(request.host_groups) == 1:
query = query.filter(
datastore_entities.HostInfo.host_group == request.host_groups[0])
if len(request.hostnames) == 1:
query = query.filter(
datastore_entities.HostInfo.hostname == request.hostnames[0])
test_harnesses = request.test_harness + request.test_harnesses
if len(test_harnesses) == 1:
query = query.filter(
datastore_entities.HostInfo.test_harness == test_harnesses[0])
if len(request.test_harness_versions) == 1:
query = query.filter(
datastore_entities.HostInfo.test_harness_version ==
request.test_harness_versions[0])
if len(request.pools) == 1:
query = query.filter(
datastore_entities.HostInfo.pools == request.pools[0])
if len(request.host_states) == 1:
query = query.filter(
datastore_entities.HostInfo.host_state == request.host_states[0])
if len(request.recovery_states) == 1:
query = query.filter(
datastore_entities.HostInfo.recovery_state
== request.recovery_states[0])
hostnames_with_requested_update_states = set()
if request.host_update_states:
update_state_query = datastore_entities.HostUpdateState.query().filter(
datastore_entities.HostUpdateState.state.IN(
request.host_update_states))
hostnames_with_requested_update_states = set(
update_state.hostname for update_state in update_state_query.fetch(
projection=[datastore_entities.HostUpdateState.hostname]))
def _PostFilter(host):
if request.host_groups and host.host_group not in request.host_groups:
return
if request.hostnames and host.hostname not in request.hostnames:
return
if (test_harnesses and
host.test_harness not in test_harnesses):
return
if (request.test_harness_versions and
host.test_harness_version not in request.test_harness_versions):
return
if request.pools and not set(host.pools).intersection(set(request.pools)):
return
if request.host_states and host.host_state not in request.host_states:
return
if (request.recovery_states and
host.recovery_state not in request.recovery_states):
return
if request.timestamp:
if not host.timestamp:
return
return _CheckTimestamp(
host.timestamp, request.timestamp_operator, request.timestamp)
if request.host_update_states:
if host.hostname not in hostnames_with_requested_update_states:
return
return True
if request.timestamp:
query = query.order(
datastore_entities.HostInfo.timestamp,
datastore_entities.HostInfo.key)
else:
query = query.order(datastore_entities.HostInfo.key)
hosts, prev_cursor, next_cursor = datastore_util.FetchPage(
query, request.count, request.cursor, result_filter=_PostFilter)
host_update_state_keys = [
ndb.Key(datastore_entities.HostUpdateState, host.hostname)
for host in hosts]
host_update_states = ndb.get_multi(host_update_state_keys)
host_infos = []
for host, host_update_state in zip(hosts, host_update_states):
devices = []
if request.include_devices:
device_query = datastore_entities.DeviceInfo.query(ancestor=host.key)
if not request.include_hidden:
device_query = device_query.filter(
datastore_entities.DeviceInfo.hidden == False) devices = device_query.fetch()
host_infos.append(datastore_entities.ToMessage(
host, devices=devices,
host_update_state_entity=host_update_state))
return api_messages.HostInfoCollection(
host_infos=host_infos,
more=bool(next_cursor),
next_cursor=next_cursor,
prev_cursor=prev_cursor)
HOST_GET_RESOURCE = endpoints.ResourceContainer(
message_types.VoidMessage,
hostname=messages.StringField(1, required=True),
include_notes=messages.BooleanField(2, default=False),
include_hidden=messages.BooleanField(3, default=False),
include_host_state_history=messages.BooleanField(4, default=False),
host_state_history_limit=messages.IntegerField(
5, default=device_manager.DEFAULT_HOST_HISTORY_SIZE),
)
@endpoints.method(
HOST_GET_RESOURCE,
api_messages.HostInfo,
path="{hostname}",
http_method="GET",
name="get")
@api_common.with_ndb_context
def GetHost(self, request):
"""Fetches the information and notes of a given hostname.
Args:
request: an API request.
Returns:
a HostInfo object.
Raises:
endpoints.NotFoundException: If the given host does not exist.
endpoint.BadRequestException: If request includes history info with
negative limit.
"""
hostname = request.hostname
host = device_manager.GetHost(hostname)
if not host:
raise endpoints.NotFoundException("Host %s does not exist." % hostname)
device_query = datastore_entities.DeviceInfo.query(ancestor=host.key)
if not request.include_hidden:
device_query = device_query.filter(
datastore_entities.DeviceInfo.hidden == False) devices = device_query.fetch()
host_update_state = ndb.Key(
datastore_entities.HostUpdateState, hostname).get()
host_info = datastore_entities.ToMessage(
host, devices=devices, host_update_state_entity=host_update_state)
# TODO: deprecate "include_notes".
if request.include_notes:
host_notes = (
datastore_entities.Note.query().filter(
datastore_entities.Note.type == common.NoteType.HOST_NOTE).filter(
datastore_entities.Note.hostname == hostname).order(
-datastore_entities.Note.timestamp))
host_info.notes = [
datastore_entities.ToMessage(note) for note in host_notes
]
if request.include_host_state_history:
history_states = None
limit = request.host_state_history_limit
try:
history_states = device_manager.GetHostStateHistory(
hostname, limit=limit)
except ValueError as err:
raise endpoints.BadRequestException(err)
host_state_history = [
datastore_entities.ToMessage(state) for state in history_states
]
host_info.state_history = host_state_history
return host_info
  # TODO: deprecate "NewNote" endpoint.
  NEW_NOTE_RESOURCE = endpoints.ResourceContainer(
      hostname=messages.StringField(1, required=True),
      user=messages.StringField(2, required=True),
      message=messages.StringField(3),
      offline_reason=messages.StringField(4),
      recovery_action=messages.StringField(5),
      offline_reason_id=messages.IntegerField(6),
      recovery_action_id=messages.IntegerField(7),
      lab_name=messages.StringField(8),
      timestamp=message_types.DateTimeField(9, required=True),
  )

  @endpoints.method(
      NEW_NOTE_RESOURCE,
      api_messages.Note,
      path="{hostname}/note",
      http_method="POST",
      name="newNote")
  @api_common.with_ndb_context
  def NewNote(self, request):
    """Submits a note for this host.

    Args:
      request: an API request.
    Returns:
      an api_messages.Note for the stored note.
    """
    timestamp = request.timestamp
    # Datastore only accepts UTC times. Doing a conversion if necessary.
    if timestamp.utcoffset() is not None:
      timestamp = timestamp.replace(tzinfo=None) - timestamp.utcoffset()
    note = datastore_entities.Note(
        type=common.NoteType.HOST_NOTE,
        hostname=request.hostname,
        user=request.user,
        timestamp=timestamp,
        message=request.message,
        offline_reason=request.offline_reason,
        recovery_action=request.recovery_action)
    note.put()
    return datastore_entities.ToMessage(note)
  NOTE_ADD_OR_UPDATE_RESOURCE = endpoints.ResourceContainer(
      hostname=messages.StringField(1, required=True),
      id=messages.IntegerField(2),
      user=messages.StringField(3, required=True),
      message=messages.StringField(4),
      offline_reason=messages.StringField(5),
      recovery_action=messages.StringField(6),
      offline_reason_id=messages.IntegerField(7),
      recovery_action_id=messages.IntegerField(8),
      lab_name=messages.StringField(9),
      event_time=message_types.DateTimeField(10),
  )

  @endpoints.method(
      NOTE_ADD_OR_UPDATE_RESOURCE,
      api_messages.Note,
      path="{hostname}/notes",
      http_method="POST",
      name="addOrUpdateNote")
  @api_common.with_ndb_context
  def AddOrUpdateNote(self, request):
    """Add or update a host note.

    A request without "id" creates a new note; with "id" it updates the
    existing note in place.

    Args:
      request: an API request.
    Returns:
      an api_messages.Note.
    Raises:
      endpoints.BadRequestException: if the offline reason or recovery action
        parameters are invalid.
    """
    time_now = datetime.datetime.utcnow()
    host_note_entity = datastore_util.GetOrCreateEntity(
        datastore_entities.Note,
        entity_id=request.id,
        hostname=request.hostname,
        type=common.NoteType.HOST_NOTE)
    host_note_entity.populate(
        user=request.user,
        message=request.message,
        timestamp=time_now,
        event_time=request.event_time)
    entities_to_update = [host_note_entity]
    # Resolve the offline reason either from an existing predefined message id
    # or from free-form content (which may create a new predefined message).
    try:
      offline_reason_entity = note_manager.PreparePredefinedMessageForNote(
          common.PredefinedMessageType.HOST_OFFLINE_REASON,
          message_id=request.offline_reason_id,
          lab_name=request.lab_name,
          content=request.offline_reason)
    except note_manager.InvalidParameterError as err:
      raise endpoints.BadRequestException("Invalid offline reason: [%s]" % err)
    if offline_reason_entity:
      host_note_entity.offline_reason = offline_reason_entity.content
      entities_to_update.append(offline_reason_entity)
    try:
      recovery_action_entity = note_manager.PreparePredefinedMessageForNote(
          common.PredefinedMessageType.HOST_RECOVERY_ACTION,
          message_id=request.recovery_action_id,
          lab_name=request.lab_name,
          content=request.recovery_action)
    except note_manager.InvalidParameterError as err:
      raise endpoints.BadRequestException("Invalid recovery action: [%s]" % err)
    if recovery_action_entity:
      host_note_entity.recovery_action = recovery_action_entity.content
      entities_to_update.append(recovery_action_entity)
    keys = ndb.put_multi(entities_to_update)
    host_note_msg = datastore_entities.ToMessage(host_note_entity)
    # Publish the note event so downstream subscribers see the change.
    host_note_event_msg = api_messages.NoteEvent(
        note=host_note_msg, lab_name=request.lab_name)
    note_manager.PublishMessage(host_note_event_msg,
                                common.PublishEventType.HOST_NOTE_EVENT)
    note_key = keys[0]
    if request.id != note_key.id():
      # If ids are different, then a new note is created, we should create
      # a history snapshot.
      device_manager.CreateAndSaveHostInfoHistoryFromHostNote(
          request.hostname, note_key.id())
    return host_note_msg
  @endpoints.method(
      api_messages.BatchUpdateNotesWithPredefinedMessageRequest,
      api_messages.NoteCollection,
      path="notes:batchUpdateNotesWithPredefinedMessage",
      http_method="POST",
      name="batchUpdateNotesWithPredefinedMessage")
  @api_common.with_ndb_context
  def BatchUpdateNotesWithPredefinedMessage(self, request):
    """Batch update notes with the same predefined message.

    Args:
      request: an API request.
    Returns:
      an api_messages.NoteCollection object.
    Raises:
      endpoints.BadRequestException: if the offline reason or recovery action
        parameters are invalid.
    """
    time_now = datetime.datetime.utcnow()
    host_note_entities = []
    for note in request.notes:
      # note.id may arrive as a string; normalize to int for the datastore key.
      note_id = int(note.id) if note.id is not None else None
      host_note_entity = datastore_util.GetOrCreateEntity(
          datastore_entities.Note,
          entity_id=note_id,
          hostname=note.hostname,
          type=common.NoteType.HOST_NOTE)
      host_note_entity.populate(
          user=request.user,
          message=request.message,
          timestamp=time_now,
          event_time=request.event_time)
      host_note_entities.append(host_note_entity)
    # The predefined message use-count is increased once per affected note.
    try:
      offline_reason_entity = note_manager.PreparePredefinedMessageForNote(
          common.PredefinedMessageType.HOST_OFFLINE_REASON,
          message_id=request.offline_reason_id,
          lab_name=request.lab_name,
          content=request.offline_reason,
          delta_count=len(host_note_entities))
    except note_manager.InvalidParameterError as err:
      raise endpoints.BadRequestException("Invalid offline reason: [%s]" % err)
    if offline_reason_entity:
      for host_note_entity in host_note_entities:
        host_note_entity.offline_reason = offline_reason_entity.content
      offline_reason_entity.put()
    try:
      recovery_action_entity = note_manager.PreparePredefinedMessageForNote(
          common.PredefinedMessageType.HOST_RECOVERY_ACTION,
          message_id=request.recovery_action_id,
          lab_name=request.lab_name,
          content=request.recovery_action,
          delta_count=len(host_note_entities))
    except note_manager.InvalidParameterError as err:
      raise endpoints.BadRequestException("Invalid recovery action: [%s]" % err)
    if recovery_action_entity:
      for host_note_entity in host_note_entities:
        host_note_entity.recovery_action = recovery_action_entity.content
      recovery_action_entity.put()
    note_keys = ndb.put_multi(host_note_entities)
    # Re-read the stored notes so the response reflects what was persisted.
    host_note_entities = ndb.get_multi(note_keys)
    note_msgs = []
    for host_note_entity in host_note_entities:
      host_note_msg = datastore_entities.ToMessage(host_note_entity)
      note_msgs.append(host_note_msg)
      host_note_event_msg = api_messages.NoteEvent(
          note=host_note_msg,
          lab_name=request.lab_name)
      note_manager.PublishMessage(
          host_note_event_msg, common.PublishEventType.HOST_NOTE_EVENT)
    for request_note, updated_note_key in zip(request.notes, note_keys):
      if not request_note.id:
        # If ids are not provided, then a new note is created, we should create
        # a history snapshot.
        device_manager.CreateAndSaveHostInfoHistoryFromHostNote(
            request_note.hostname, updated_note_key.id())
    return api_messages.NoteCollection(
        notes=note_msgs, more=False, next_cursor=None, prev_cursor=None)
  NOTES_BATCH_GET_RESOURCE = endpoints.ResourceContainer(
      hostname=messages.StringField(1, required=True),
      ids=messages.IntegerField(2, repeated=True),
  )

  @endpoints.method(
      NOTES_BATCH_GET_RESOURCE,
      api_messages.NoteCollection,
      path="{hostname}/notes:batchGet",
      http_method="GET",
      name="batchGetNotes")
  @api_common.with_ndb_context
  def BatchGetNotes(self, request):
    """Batch get notes of a host.

    Args:
      request: an API request.
    Request Params:
      hostname: string, the name of a lab host.
      ids: a list of strings, the ids of notes to batch get.
    Returns:
      an api_messages.NoteCollection object.
    """
    keys = [
        ndb.Key(datastore_entities.Note, entity_id)
        for entity_id in request.ids
    ]
    note_entities = ndb.get_multi(keys)
    # Ids that are missing or belong to a different host are silently dropped.
    note_msgs = [
        datastore_entities.ToMessage(entity)
        for entity in note_entities
        if entity and entity.hostname == request.hostname
    ]
    return api_messages.NoteCollection(
        notes=note_msgs, more=False, next_cursor=None, prev_cursor=None)
NOTES_LIST_RESOURCE = endpoints.ResourceContainer(
hostname=messages.StringField(1, required=True),
count=messages.IntegerField(2, default=_DEFAULT_LIST_NOTES_COUNT),
cursor=messages.StringField(3),
backwards=messages.BooleanField(4, default=False),
include_device_notes=messages.BooleanField(5, default=False),
)
NOTES_DELETE_RESOURCE = endpoints.ResourceContainer(
hostname=messages.StringField(1, required=True),
ids=messages.IntegerField(2, repeated=True),
)
@endpoints.method(
NOTES_DELETE_RESOURCE,
message_types.VoidMessage,
path="{hostname}/notes",
http_method="DELETE",
name="batchDeleteNotes")
@api_common.with_ndb_context
def BatchDeleteNotes(self, request):
"""Delete notes of a host.
Args:
request: an API request.
Request Params:
hostname: string, the name of a lab host.
ids: a list of strings, the ids of notes to delete.
Returns:
a message_types.VoidMessage object.
Raises:
endpoints.BadRequestException, when request does not match existing notes.
"""
keys = [
ndb.Key(datastore_entities.Note, entity_id)
for entity_id in request.ids
]
note_entities = ndb.get_multi(keys)
for key, note_entity in zip(keys, note_entities):
if not note_entity or note_entity.hostname != request.hostname:
raise endpoints.BadRequestException(
"Note<id:{0}> does not exist under host<{1}>.".format(
key.id(), note_entity.hostname))
for key in keys:
key.delete()
return message_types.VoidMessage()
  @endpoints.method(
      NOTES_LIST_RESOURCE,
      api_messages.NoteCollection,
      path="{hostname}/notes",
      http_method="GET",
      name="listNotes")
  @api_common.with_ndb_context
  def ListNotes(self, request):
    """List notes of a host.

    Args:
      request: an API request.
    Returns:
      an api_messages.NoteCollection object.
    """
    # Newest notes first.
    query = (
        datastore_entities.Note.query()
        .filter(datastore_entities.Note.hostname == request.hostname)
        .order(-datastore_entities.Note.timestamp))
    # By default only host-level notes are returned; device notes on this
    # host are included on request.
    if not request.include_device_notes:
      query = query.filter(
          datastore_entities.Note.type == common.NoteType.HOST_NOTE)
    note_entities, prev_cursor, next_cursor = datastore_util.FetchPage(
        query, request.count, request.cursor, backwards=request.backwards)
    note_msgs = [
        datastore_entities.ToMessage(entity) for entity in note_entities
    ]
    return api_messages.NoteCollection(
        notes=note_msgs,
        more=bool(next_cursor),
        next_cursor=next_cursor,
        prev_cursor=prev_cursor)
ASSIGN_HOSTS_RESOURCE = endpoints.ResourceContainer(
hostnames=messages.StringField(1, repeated=True),
assignee=messages.StringField(2, required=True))
@endpoints.method(
ASSIGN_HOSTS_RESOURCE,
message_types.VoidMessage,
path="assign",
http_method="POST",
name="assign")
@api_common.with_ndb_context
def Assign(self, request):
"""Mark the hosts as recover.
TODO: deprecated, use set_recovery_state
Args:
request: request with a list of hostnames and an assignee.
Returns:
message_types.VoidMessage
"""
device_manager.AssignHosts(request.hostnames, request.assignee)
return message_types.VoidMessage()
UNASSIGN_HOSTS_RESOURCE = endpoints.ResourceContainer(
hostnames=messages.StringField(1, repeated=True))
@endpoints.method(
UNASSIGN_HOSTS_RESOURCE,
message_types.VoidMessage,
path="unassign",
http_method="POST",
name="unassign")
@api_common.with_ndb_context
def Unassign(self, request):
"""Mark the hosts as recover.
TODO: deprecated, use set_recovery_state
Args:
request: request with a list of hostnames.
Returns:
message_types.VoidMessage
"""
device_manager.AssignHosts(request.hostnames, None)
return message_types.VoidMessage()
@endpoints.method(
api_messages.HostRecoveryStateRequests,
message_types.VoidMessage,
path="batchSetRecoveryState",
http_method="POST",
name="batchSetRecoveryState")
@api_common.with_ndb_context
def BatchSetRecoveryState(self, request):
"""Batch set recovery state for hosts.
Args:
request: a HostRecoveryStateRequests.
Returns:
message_types.VoidMessage
"""
device_manager.SetHostsRecoveryState(request.host_recovery_state_requests)
return message_types.VoidMessage()
HOSTNAME_RESOURCE = endpoints.ResourceContainer(
hostname=messages.StringField(1, required=True),)
@endpoints.method(
HOSTNAME_RESOURCE,
api_messages.HostInfo,
path="{hostname}/remove",
http_method="POST",
name="remove")
@api_common.with_ndb_context
def Remove(self, request):
"""Remove this host.
Args:
request: an API request.
Returns:
an updated HostInfo
Raises:
endpoints.NotFoundException: If the given device does not exist.
"""
host = device_manager.HideHost(request.hostname)
if not host:
raise endpoints.NotFoundException("Host %s does not exist." %
request.hostname)
return datastore_entities.ToMessage(host)
@endpoints.method(
HOSTNAME_RESOURCE,
api_messages.HostInfo,
path="{hostname}/restore",
http_method="POST",
name="restore")
@api_common.with_ndb_context
def Restore(self, request):
"""Restore this host.
Args:
request: an API request.
Returns:
an updated HostInfo
Raises:
endpoints.NotFoundException: If the given device does not exist.
"""
host = device_manager.RestoreHost(request.hostname)
if not host:
raise endpoints.NotFoundException("Host %s does not exist." %
request.hostname)
return datastore_entities.ToMessage(host)
  HISTORIES_LIST_RESOURCE = endpoints.ResourceContainer(
      hostname=messages.StringField(1, required=True),
      count=messages.IntegerField(2, default=_DEFAULT_LIST_HISTORIES_COUNT),
      cursor=messages.StringField(3),
      backwards=messages.BooleanField(4, default=False),
  )

  @endpoints.method(
      HISTORIES_LIST_RESOURCE,
      api_messages.HostInfoHistoryCollection,
      path="{hostname}/histories",
      http_method="GET",
      name="listHistories")
  @api_common.with_ndb_context
  def ListHistories(self, request):
    """List histories of a host.

    Args:
      request: an API request.
    Returns:
      an api_messages.HostInfoHistoryCollection object.
    """
    # Histories live in the HostInfo entity group, ordered newest first.
    query = (
        datastore_entities.HostInfoHistory.query(
            ancestor=ndb.Key(datastore_entities.HostInfo, request.hostname))
        .order(-datastore_entities.HostInfoHistory.timestamp))
    histories, prev_cursor, next_cursor = datastore_util.FetchPage(
        query, request.count, request.cursor, backwards=request.backwards)
    history_msgs = [
        datastore_entities.ToMessage(entity) for entity in histories
    ]
    return api_messages.HostInfoHistoryCollection(
        histories=history_msgs,
        next_cursor=next_cursor,
        prev_cursor=prev_cursor)
  CONFIGS_LIST_RESOURCE = endpoints.ResourceContainer(
      lab_name=messages.StringField(1),
      count=messages.IntegerField(2, default=_DEFAULT_LIST_CONFIGS_COUNT),
      cursor=messages.StringField(3),
  )

  @endpoints.method(
      CONFIGS_LIST_RESOURCE,
      api_messages.HostConfigCollection,
      path="configs",
      http_method="GET",
      name="listHostConfigs")
  @api_common.with_ndb_context
  def ListHostConfigs(self, request):
    """List host configs.

    Args:
      request: an API request.
    Returns:
      an api_messages.HostConfigCollection object.
    """
    query = datastore_entities.HostConfig.query()
    # lab_name is optional; without it, configs for all labs are listed.
    if request.lab_name:
      query = query.filter(
          datastore_entities.HostConfig.lab_name == request.lab_name)
    host_configs, _, next_cursor = datastore_util.FetchPage(
        query, request.count, request.cursor)
    host_config_msgs = [datastore_entities.ToMessage(host_config)
                        for host_config in host_configs]
    return api_messages.HostConfigCollection(
        host_configs=host_config_msgs, next_cursor=next_cursor)
METADATA_GET_RESOURCE = endpoints.ResourceContainer(
hostname=messages.StringField(1, required=True),
)
@endpoints.method(
METADATA_GET_RESOURCE,
api_messages.HostMetadata,
path="{hostname}/metadata",
http_method="GET",
name="getMetadata")
@api_common.with_ndb_context
def GetMetadata(self, request):
"""Get a host metadata.
Args:
request: an API request.
Returns:
an api_messages.HostMetadata object.
"""
metadata = datastore_entities.HostMetadata.get_by_id(request.hostname)
if not metadata:
metadata = datastore_entities.HostMetadata(hostname=request.hostname)
metadata_msg = datastore_entities.ToMessage(metadata)
return metadata_msg
  METADATA_PATCH_RESOURCE = endpoints.ResourceContainer(
      hostname=messages.StringField(1, required=True),
      test_harness_image=messages.StringField(2),
  )

  @endpoints.method(
      METADATA_PATCH_RESOURCE,
      api_messages.HostMetadata,
      path="{hostname}/metadata",
      http_method="PATCH",
      name="patchMetadata")
  @api_common.with_ndb_context
  def PatchMetadata(self, request):
    """Patch a host metadata.

    Args:
      request: an API request.
    Returns:
      an api_messages.HostMetadata object.
    """
    metadata = datastore_entities.HostMetadata.get_by_id(request.hostname)
    if not metadata:
      metadata = datastore_entities.HostMetadata(
          id=request.hostname,
          hostname=request.hostname)
    # Only test_harness_image is patchable; an empty value is a no-op and
    # nothing is written.
    if request.test_harness_image:
      metadata.populate(test_harness_image=request.test_harness_image)
      metadata.put()
    metadata_msg = datastore_entities.ToMessage(metadata)
    return metadata_msg
  BATCH_SET_TEST_HARNESS_IMAGES_RESOURCE = endpoints.ResourceContainer(
      hostnames=messages.StringField(1, repeated=True),
      test_harness_image=messages.StringField(2),
      user=messages.StringField(3),
  )

  @endpoints.method(
      BATCH_SET_TEST_HARNESS_IMAGES_RESOURCE,
      message_types.VoidMessage,
      path="hostMetadata:batchUpdate",
      http_method="POST",
      name="batchUpdateHostMetadata")
  @api_common.with_ndb_context
  def BatchUpdateHostMetadata(self, request):
    """Update HostMetadata on multiple hosts.

    Args:
      request: an API request.
    Request Params:
      hostname: list of strings, the name of hosts.
      test_harness_image: string, the url to test harness image.
      user: string, the user sending the request.
    Returns:
      a message_types.VoidMessage object.
    Raises:
      endpoints.BadRequestException, when request does not match existing hosts.
    """
    host_configs = ndb.get_multi(
        ndb.Key(datastore_entities.HostConfig, hostname)
        for hostname in request.hostnames)
    host_metadatas = ndb.get_multi(
        ndb.Key(datastore_entities.HostMetadata, hostname)
        for hostname in request.hostnames)
    hosts_no_permission = []
    hosts_not_enabled = []
    metadatas_to_update = []
    for hostname, config, metadata in zip(
        request.hostnames, host_configs, host_metadatas):
      # Hosts must opt in to UI-driven updates via their HostConfig, and the
      # requesting user must be an owner.
      if not config or not config.enable_ui_update:
        hosts_not_enabled.append(hostname)
        continue
      if request.user not in config.owners:
        hosts_no_permission.append(hostname)
        continue
      if not metadata:
        metadata = datastore_entities.HostMetadata(
            id=hostname, hostname=hostname)
      # Emit a HOST_UPDATE_STATE_CHANGED event only when the target image
      # actually differs from the current one.
      if not harness_image_metadata_syncer.AreHarnessImagesEqual(
          metadata.test_harness_image, request.test_harness_image):
        event = host_event.HostEvent(
            time=datetime.datetime.utcnow(),
            type=_HOST_UPDATE_STATE_CHANGED_EVENT_NAME,
            hostname=hostname,
            host_update_state=_HOST_UPDATE_STATE_PENDING,
            data={"host_update_target_image": request.test_harness_image})
        device_manager.HandleDeviceSnapshotWithNDB(event)
      metadata.populate(test_harness_image=request.test_harness_image)
      metadatas_to_update.append(metadata)
    ndb.put_multi(metadatas_to_update)
    # Valid hosts are updated even when some fail validation; the error
    # below lists only the skipped hosts.
    if not hosts_no_permission and not hosts_not_enabled:
      return message_types.VoidMessage()
    error_message = ""
    if hosts_no_permission:
      error_message += (
          "Request user %s is not in the owner list of hosts [%s]. "
          % (request.user, ", ".join(hosts_no_permission)))
    if hosts_not_enabled:
      error_message += ("Hosts [%s] are not enabled to be updated from UI. "
                        % ", ".join(hosts_not_enabled))
    raise endpoints.BadRequestException(error_message)
from nicos.core import Attach, HasTimeout, Moveable, Override, PositionError, \
Readable, oneof, status
class SR7Shutter(HasTimeout, Moveable):
    """PUMA secondary shutter.

    Remote control is deliberately restricted: software may only *close*
    the shutter; the open positions S1/S2/S3 are reported read-only via
    the attached indicator devices.
    """

    attached_devices = {
        'sr7cl': Attach('status of SR7 shutter closed/open', Readable),
        'sr7p1': Attach('status of SR7 position 1', Readable),
        'sr7p2': Attach('status of SR7 position 2', Readable),
        'sr7p3': Attach('status of SR7 position 3', Readable),
        'sr7set': Attach('emergency close of all shutters', Moveable),
    }

    parameter_overrides = {
        'unit': Override(mandatory=False, default=''),
        'timeout': Override(mandatory=False, default=5),
    }

    positions = ['close', 'S1', 'S2', 'S3']
    valuetype = oneof(*positions)

    def doIsAllowed(self, pos):
        # Only closing is permitted from software.
        if pos == 'close':
            return True, ''
        return False, 'can only close shutter from remote'

    def doStart(self, target):
        if self.read(0) == target:
            return  # already at the requested position
        self._attached_sr7set.start(self.positions.index(target))
        if self.wait() != target:
            raise PositionError(self, 'device returned wrong position')
        self.log.info('SR7: %s', target)

    def doRead(self, maxage=0):
        if self.doStatus()[0] != status.OK:
            raise PositionError(self, 'SR7 shutter moving or undefined')
        # In a settled state exactly one indicator is expected to be active.
        for value, indicator in (('close', self._attached_sr7cl),
                                 ('S1', self._attached_sr7p1),
                                 ('S2', self._attached_sr7p2),
                                 ('S3', self._attached_sr7p3)):
            if indicator.doRead() == 1:
                return value

    def doStatus(self, maxage=0):
        closed = self._attached_sr7cl.doRead()
        flags = (self._attached_sr7p1.doRead(),
                 self._attached_sr7p2.doRead(),
                 self._attached_sr7p3.doRead())
        # All three position indicators active means the shutter is moving.
        if flags == (1, 1, 1):
            return status.BUSY, 'moving'
        if closed == 1 or 1 in flags:
            return status.OK, 'idle'
        return status.ERROR, 'undefined position'
Readable, oneof, status
class SR7Shutter(HasTimeout, Moveable):
    """PUMA secondary shutter.

    Remote control is deliberately restricted: software may only *close*
    the shutter; the open positions S1/S2/S3 are reported read-only via
    the attached indicator devices.
    """

    attached_devices = {
        'sr7cl': Attach('status of SR7 shutter closed/open', Readable),
        'sr7p1': Attach('status of SR7 position 1', Readable),
        'sr7p2': Attach('status of SR7 position 2', Readable),
        'sr7p3': Attach('status of SR7 position 3', Readable),
        'sr7set': Attach('emergency close of all shutters', Moveable),
    }

    parameter_overrides = {
        'unit': Override(mandatory=False, default=''),
        'timeout': Override(mandatory=False, default=5),
    }

    positions = ['close', 'S1', 'S2', 'S3']
    valuetype = oneof(*positions)

    def doIsAllowed(self, pos):
        # Only closing is permitted from software.
        if pos == 'close':
            return True, ''
        return False, 'can only close shutter from remote'

    def doStart(self, target):
        if self.read(0) == target:
            return  # already at the requested position
        self._attached_sr7set.start(self.positions.index(target))
        if self.wait() != target:
            raise PositionError(self, 'device returned wrong position')
        self.log.info('SR7: %s', target)

    def doRead(self, maxage=0):
        if self.doStatus()[0] != status.OK:
            raise PositionError(self, 'SR7 shutter moving or undefined')
        # In a settled state exactly one indicator is expected to be active.
        for value, indicator in (('close', self._attached_sr7cl),
                                 ('S1', self._attached_sr7p1),
                                 ('S2', self._attached_sr7p2),
                                 ('S3', self._attached_sr7p3)):
            if indicator.doRead() == 1:
                return value

    def doStatus(self, maxage=0):
        closed = self._attached_sr7cl.doRead()
        flags = (self._attached_sr7p1.doRead(),
                 self._attached_sr7p2.doRead(),
                 self._attached_sr7p3.doRead())
        # All three position indicators active means the shutter is moving.
        if flags == (1, 1, 1):
            return status.BUSY, 'moving'
        if closed == 1 or 1 in flags:
            return status.OK, 'idle'
        return status.ERROR, 'undefined position'
import json
import warnings
import kulado
import kulado.runtime
from .. import utilities, tables
class OutputMssql(kulado.CustomResource):
    """Manages a Stream Analytics Output to a Microsoft SQL Server Database.

    All properties except ``name`` are required; each raises ``TypeError``
    when omitted, mirroring the provider schema.
    """
    database: kulado.Output[str]
    name: kulado.Output[str]
    """
    The name of the Stream Output. Changing this forces a new resource to be created.
    """
    password: kulado.Output[str]
    """
    Password used together with username, to login to the Microsoft SQL Server. Changing this forces a new resource to be created.
    """
    resource_group_name: kulado.Output[str]
    """
    The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created.
    """
    server: kulado.Output[str]
    """
    The SQL server url. Changing this forces a new resource to be created.
    """
    stream_analytics_job_name: kulado.Output[str]
    """
    The name of the Stream Analytics Job. Changing this forces a new resource to be created.
    """
    table: kulado.Output[str]
    """
    Table in the database that the output points to. Changing this forces a new resource to be created.
    """
    user: kulado.Output[str]
    """
    Username used to login to the Microsoft SQL Server. Changing this forces a new resource to be created.
    """
    # BUG FIX: the signature previously read ``password=<PASSWORD>`` (a
    # redaction artifact and a syntax error); ``password=None`` restores the
    # required-property pattern used by every other argument.
    def __init__(__self__, resource_name, opts=None, database=None, name=None, password=None, resource_group_name=None, server=None, stream_analytics_job_name=None, table=None, user=None, __name__=None, __opts__=None):
        """
        Manages a Stream Analytics Output to Microsoft SQL Server Database.

        :param str resource_name: The name of the resource.
        :param kulado.ResourceOptions opts: Options for the resource.
        :param kulado.Input[str] name: The name of the Stream Output. Changing this forces a new resource to be created.
        :param kulado.Input[str] password: Password used together with username, to login to the Microsoft SQL Server. Changing this forces a new resource to be created.
        :param kulado.Input[str] resource_group_name: The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created.
        :param kulado.Input[str] server: The SQL server url. Changing this forces a new resource to be created.
        :param kulado.Input[str] stream_analytics_job_name: The name of the Stream Analytics Job. Changing this forces a new resource to be created.
        :param kulado.Input[str] table: Table in the database that the output points to. Changing this forces a new resource to be created.
        :param kulado.Input[str] user: Username used to login to the Microsoft SQL Server. Changing this forces a new resource to be created.

        > This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/stream_analytics_output_mssql.html.markdown.
        """
        # Legacy keyword aliases, kept for backwards compatibility.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if not resource_name:
            raise TypeError('Missing resource name argument (for URN creation)')
        if not isinstance(resource_name, str):
            raise TypeError('Expected resource name to be a string')
        if opts and not isinstance(opts, kulado.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')

        __props__ = dict()

        # Every property except ``name`` is mandatory.
        if database is None:
            raise TypeError("Missing required property 'database'")
        __props__['database'] = database
        __props__['name'] = name
        if password is None:
            raise TypeError("Missing required property 'password'")
        __props__['password'] = password
        if resource_group_name is None:
            raise TypeError("Missing required property 'resource_group_name'")
        __props__['resource_group_name'] = resource_group_name
        if server is None:
            raise TypeError("Missing required property 'server'")
        __props__['server'] = server
        if stream_analytics_job_name is None:
            raise TypeError("Missing required property 'stream_analytics_job_name'")
        __props__['stream_analytics_job_name'] = stream_analytics_job_name
        if table is None:
            raise TypeError("Missing required property 'table'")
        __props__['table'] = table
        if user is None:
            raise TypeError("Missing required property 'user'")
        __props__['user'] = user
        super(OutputMssql, __self__).__init__(
            'azure:streamanalytics/outputMssql:OutputMssql',
            resource_name,
            __props__,
            opts)

    def translate_output_property(self, prop):
        # Provider properties are camelCase; SDK attributes are snake_case.
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
import json
import warnings
import kulado
import kulado.runtime
from .. import utilities, tables
class OutputMssql(kulado.CustomResource):
    """Manages a Stream Analytics Output to a Microsoft SQL Server Database.

    All properties except ``name`` are required; each raises ``TypeError``
    when omitted, mirroring the provider schema.
    """
    database: kulado.Output[str]
    name: kulado.Output[str]
    """
    The name of the Stream Output. Changing this forces a new resource to be created.
    """
    password: kulado.Output[str]
    """
    Password used together with username, to login to the Microsoft SQL Server. Changing this forces a new resource to be created.
    """
    resource_group_name: kulado.Output[str]
    """
    The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created.
    """
    server: kulado.Output[str]
    """
    The SQL server url. Changing this forces a new resource to be created.
    """
    stream_analytics_job_name: kulado.Output[str]
    """
    The name of the Stream Analytics Job. Changing this forces a new resource to be created.
    """
    table: kulado.Output[str]
    """
    Table in the database that the output points to. Changing this forces a new resource to be created.
    """
    user: kulado.Output[str]
    """
    Username used to login to the Microsoft SQL Server. Changing this forces a new resource to be created.
    """
    # BUG FIX: the signature previously read ``password=<PASSWORD>`` (a
    # redaction artifact and a syntax error); ``password=None`` restores the
    # required-property pattern used by every other argument.
    def __init__(__self__, resource_name, opts=None, database=None, name=None, password=None, resource_group_name=None, server=None, stream_analytics_job_name=None, table=None, user=None, __name__=None, __opts__=None):
        """
        Manages a Stream Analytics Output to Microsoft SQL Server Database.

        :param str resource_name: The name of the resource.
        :param kulado.ResourceOptions opts: Options for the resource.
        :param kulado.Input[str] name: The name of the Stream Output. Changing this forces a new resource to be created.
        :param kulado.Input[str] password: Password used together with username, to login to the Microsoft SQL Server. Changing this forces a new resource to be created.
        :param kulado.Input[str] resource_group_name: The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created.
        :param kulado.Input[str] server: The SQL server url. Changing this forces a new resource to be created.
        :param kulado.Input[str] stream_analytics_job_name: The name of the Stream Analytics Job. Changing this forces a new resource to be created.
        :param kulado.Input[str] table: Table in the database that the output points to. Changing this forces a new resource to be created.
        :param kulado.Input[str] user: Username used to login to the Microsoft SQL Server. Changing this forces a new resource to be created.

        > This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/stream_analytics_output_mssql.html.markdown.
        """
        # Legacy keyword aliases, kept for backwards compatibility.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if not resource_name:
            raise TypeError('Missing resource name argument (for URN creation)')
        if not isinstance(resource_name, str):
            raise TypeError('Expected resource name to be a string')
        if opts and not isinstance(opts, kulado.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')

        __props__ = dict()

        # Every property except ``name`` is mandatory.
        if database is None:
            raise TypeError("Missing required property 'database'")
        __props__['database'] = database
        __props__['name'] = name
        if password is None:
            raise TypeError("Missing required property 'password'")
        __props__['password'] = password
        if resource_group_name is None:
            raise TypeError("Missing required property 'resource_group_name'")
        __props__['resource_group_name'] = resource_group_name
        if server is None:
            raise TypeError("Missing required property 'server'")
        __props__['server'] = server
        if stream_analytics_job_name is None:
            raise TypeError("Missing required property 'stream_analytics_job_name'")
        __props__['stream_analytics_job_name'] = stream_analytics_job_name
        if table is None:
            raise TypeError("Missing required property 'table'")
        __props__['table'] = table
        if user is None:
            raise TypeError("Missing required property 'user'")
        __props__['user'] = user
        super(OutputMssql, __self__).__init__(
            'azure:streamanalytics/outputMssql:OutputMssql',
            resource_name,
            __props__,
            opts)

    def translate_output_property(self, prop):
        # Provider properties are camelCase; SDK attributes are snake_case.
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
import matplotlib as plb
plb.use('Agg')
# python3 compareClock.py --standard /data/acs/pea/example/EX03/standard/igs20624.clk --test /data/acs/pea/example/EX03/standard/aus20624.clk
from matplotlib import rcParams
import matplotlib.pyplot as plt
import numpy as np
import os
from pathlib import Path
import math
from numpy import loadtxt
from matplotlib.ticker import MultipleLocator
import argparse
import datetime as dt
import re
#==============================================================================
def parsePPPOutputFile(pppfile):
    """Collect REC_POS records from a pea .PPP/TRACE grep file.

    :param pppfile: path of the file containing ``REC_POS`` lines.
    :returns: ``{'recPos': {'stns': [names...],
                            '<stn>': {'X': [...], 'Y': [...], 'Z': [...]}}}``
        (empty dict when no REC_POS line matched).
    """
    # Raw string avoids invalid-escape-sequence warnings; compiled once per call.
    rec_pos_rgx = re.compile(
        r'REC_POS\s+(\w+)\s+(\d+)\s+(-?[\d\.]*)\s+(-?[\d\.]*)\s+(-?[\d\.]*)')
    axis_by_index = {0: 'X', 1: 'Y', 2: 'Z'}
    output = {}
    with open(pppfile) as fstandard:
        for line in fstandard:
            match = rec_pos_rgx.search(line.rstrip())
            if not match:
                continue
            rec = output.setdefault('recPos', {'stns': []})
            stn = match[1]
            if stn not in rec['stns']:
                rec['stns'].append(stn)
                rec[stn] = {'X': [], 'Y': [], 'Z': []}
            axis = axis_by_index.get(int(match[2]))
            # Component indices other than 0/1/2 are ignored, as before.
            if axis is not None:
                # BUG FIX: np.float() was removed in NumPy 1.24; use builtin float.
                rec[stn][axis].append(float(match[3]))
    return output
def get_sinex_recpos(snxfile, station):
    """Return the receiver position (REC POS) of *station* from a SINEX file.

    Input
        snxfile - path of the SINEX file - str
        station - station name in 4 char format: SSSS - str
    Output
        (X, Y, Z)             - receiver position coordinates - tuple of floats
        (STD_X, STD_Y, STD_Z) - standard deviations of the coordinates
    Raises
        ValueError when the station is not present in the SOLUTION/ESTIMATE
        block.

    BUG FIXES vs. the previous version:
      * ``while`` + ``readline()`` spun forever at EOF when the
        ``+SOLUTION/ESTIMATE`` marker was missing; iterating the file object
        always terminates.
      * a missing station printed a message and then crashed with
        ``UnboundLocalError`` at the return; now a clear ValueError is raised.
    """
    vals = {}
    stds = {}
    with open(Path(snxfile), 'r') as f:
        in_estimates = False
        for line in f:
            if '+SOLUTION/ESTIMATE' in line:
                in_estimates = True
                continue
            if not in_estimates:
                continue
            if '-SOLUTION/ESTIMATE' in line:
                break
            # Fixed-column SINEX layout: parameter type at [7:11],
            # station code at [14:18], estimate at [47:68], std-dev at [69:].
            if line[14:18] != station:
                continue
            param = line[7:11]
            if param in ('STAX', 'STAY', 'STAZ'):
                vals[param] = float(line[47:68])
                stds[param] = float(line[69:-1])
                if len(vals) == 3:
                    break
    if len(vals) != 3:
        raise ValueError(
            'station %r not found in SINEX file %s' % (station, snxfile))
    return ((vals['STAX'], vals['STAY'], vals['STAZ']),
            (stds['STAX'], stds['STAY'], stds['STAZ']))
def get_date(pppfile, multi=False):
    """Extract the date token from *pppfile*'s name for use in figure names.

    The last purely-numeric underscore-separated token wins; a two-character
    token containing 'D' (e.g. '2D') marks a multi-day run and is appended
    as ``_multi_<tok>``.  Falls back to characters 7+ of the second token
    when no numeric token exists.
    """
    stem = pppfile.split('.')[-2].split('/')[-1]
    tokens = stem.split('_')
    idate = None
    for tok in tokens:
        try:
            idate = int(tok)
        except ValueError:
            if len(tok) == 2 and 'D' in tok:
                multi = tok
    if idate is None:
        idate = int(tokens[1][7:])
    if multi:
        return str(idate) + f'_multi_{multi}'
    return str(idate)
def plot_PPP(pppfile, snxfile, multi=False):
    """Produce two per-station position plots from a .PPP file.

    For every station found in *pppfile*:
      * estimated position minus its median  -> ``<stn>_pos__<date>.png``
      * estimated position minus SINEX soln  -> ``<stn>_snx_pos_<date>.png``
    Figures are saved to the current working directory; the file names of
    the last station plotted are returned.
    """
    results = parsePPPOutputFile(pppfile)
    for stn in results['recPos']['stns']:
        snx, stds = get_sinex_recpos(snxfile, stn)
        print("Plotting station:", stn)
        date_str = get_date(pppfile, multi=multi) if multi else get_date(pppfile)

        x_series = results['recPos'][stn]['X'][:]
        y_series = results['recPos'][stn]['Y'][:]
        z_series = results['recPos'][stn]['Z'][:]

        # Figure 1: deviation from the median estimated position.
        fig1, ax1 = plt.subplots(1, 1, figsize=(13, 8))
        plt.title(stn + " Estimated pos - median position_" + date_str[:4]
                  + '_DOY' + date_str[4:7] + '_H' + date_str[7:])
        ax1.plot(x_series - np.median(x_series), label='X')
        ax1.plot(y_series - np.median(y_series), label='Y')
        ax1.plot(z_series - np.median(z_series), label='Z')
        ax1.legend(loc='best')
        ax1.grid(True)
        ax1.set_xlabel('Epoch')
        ax1.set_ylabel('Difference (m)')
        figsavename1 = stn + f'_pos__{date_str}.png'
        fig1.savefig(figsavename1)
        print("Saved the plot:", figsavename1)

        # Figure 2: deviation from the SINEX reference solution.
        fig2, ax2 = plt.subplots(1, 1, figsize=(13, 8))
        plt.title(stn + " Estimated pos - sinex position_" + date_str[:4]
                  + '_DOY' + date_str[4:7] + '_H' + date_str[7:])
        ax2.plot(np.array(x_series) - snx[0], label='X')
        ax2.plot(np.array(y_series) - snx[1], label='Y')
        ax2.plot(np.array(z_series) - snx[2], label='Z')
        # symlog keeps small residuals readable next to metre-level offsets.
        plt.yscale('symlog', linthresh=0.1)
        ax2.grid(True)
        ax2.legend(loc='best')
        ax2.set_xlabel('Epoch')
        ax2.set_ylabel('Difference (m)')
        figsavename2 = stn + f'_snx_pos_{date_str}.png'
        fig2.savefig(figsavename2)
        print("Saved the plot:", figsavename2)
    return figsavename1, figsavename2
#==============================================================================
# The parser is created at import time; arguments are only registered (and
# parsed) when the file runs as a script.
parser = argparse.ArgumentParser(
    description='Compare clock solutions from processing runs of the pea',
    epilog='''\
pppPlot.py:\nthis script takes the output from the pea and plots the differences between the median of the estimated coordinates, and then those provided in the IGS SINEX solution.
To run, first grep the reported receiver positions reported in the station specific trace files to a seperate file
> grep "REC_POS" /data/acs/pea/output/exs/EX01_IF/EX01_IF-ALIC201919900.TRACE > ALIC_201919900.PPP
> pppPlot.py --ppp output/EX01_IF_PPP/ALIC_201919900.PPP --snx proc/exs/products/igs19P2062.snx
you should be able to view the plots ALIC
#==============================================================================
''')

if __name__ == "__main__":
    parser.add_argument('--ppp', dest='pppfile', default='', help="Directory of PPP file")
    parser.add_argument('--snx', dest='snxfile', default='', help="Directory of SNX file")
    cli_args = parser.parse_args()
    if not cli_args.pppfile:
        print("Usage: pppPlot.py --ppp <output/EX01_IF_PPP/ALIC_201919900.PPP> --snx <products/igs19P2062.snx>")
        print("Outputs: <station>_pos.png, <station>_snx_pos.png")
        raise SystemExit
    plot_PPP(cli_args.pppfile, cli_args.snxfile)
plb.use('Agg')
# python3 compareClock.py --standard /data/acs/pea/example/EX03/standard/igs20624.clk --test /data/acs/pea/example/EX03/standard/aus20624.clk
from matplotlib import rcParams
import matplotlib.pyplot as plt
import numpy as np
import os
from pathlib import Path
import math
from numpy import loadtxt
from matplotlib.ticker import MultipleLocator
import argparse
import datetime as dt
import re
#==============================================================================
def parsePPPOutputFile(pppfile):
    """Collect REC_POS records from a pea .PPP/TRACE grep file.

    :param pppfile: path of the file containing ``REC_POS`` lines.
    :returns: ``{'recPos': {'stns': [names...],
                            '<stn>': {'X': [...], 'Y': [...], 'Z': [...]}}}``
        (empty dict when no REC_POS line matched).
    """
    # Raw string avoids invalid-escape-sequence warnings; compiled once per call.
    rec_pos_rgx = re.compile(
        r'REC_POS\s+(\w+)\s+(\d+)\s+(-?[\d\.]*)\s+(-?[\d\.]*)\s+(-?[\d\.]*)')
    axis_by_index = {0: 'X', 1: 'Y', 2: 'Z'}
    output = {}
    with open(pppfile) as fstandard:
        for line in fstandard:
            match = rec_pos_rgx.search(line.rstrip())
            if not match:
                continue
            rec = output.setdefault('recPos', {'stns': []})
            stn = match[1]
            if stn not in rec['stns']:
                rec['stns'].append(stn)
                rec[stn] = {'X': [], 'Y': [], 'Z': []}
            axis = axis_by_index.get(int(match[2]))
            # Component indices other than 0/1/2 are ignored, as before.
            if axis is not None:
                # BUG FIX: np.float() was removed in NumPy 1.24; use builtin float.
                rec[stn][axis].append(float(match[3]))
    return output
def get_sinex_recpos(snxfile, station):
    """Return the receiver position (REC POS) of *station* from a SINEX file.

    Input
        snxfile - path of the SINEX file - str
        station - station name in 4 char format: SSSS - str
    Output
        (X, Y, Z)             - receiver position coordinates - tuple of floats
        (STD_X, STD_Y, STD_Z) - standard deviations of the coordinates
    Raises
        ValueError when the station is not present in the SOLUTION/ESTIMATE
        block.

    BUG FIXES vs. the previous version:
      * ``while`` + ``readline()`` spun forever at EOF when the
        ``+SOLUTION/ESTIMATE`` marker was missing; iterating the file object
        always terminates.
      * a missing station printed a message and then crashed with
        ``UnboundLocalError`` at the return; now a clear ValueError is raised.
    """
    vals = {}
    stds = {}
    with open(Path(snxfile), 'r') as f:
        in_estimates = False
        for line in f:
            if '+SOLUTION/ESTIMATE' in line:
                in_estimates = True
                continue
            if not in_estimates:
                continue
            if '-SOLUTION/ESTIMATE' in line:
                break
            # Fixed-column SINEX layout: parameter type at [7:11],
            # station code at [14:18], estimate at [47:68], std-dev at [69:].
            if line[14:18] != station:
                continue
            param = line[7:11]
            if param in ('STAX', 'STAY', 'STAZ'):
                vals[param] = float(line[47:68])
                stds[param] = float(line[69:-1])
                if len(vals) == 3:
                    break
    if len(vals) != 3:
        raise ValueError(
            'station %r not found in SINEX file %s' % (station, snxfile))
    return ((vals['STAX'], vals['STAY'], vals['STAZ']),
            (stds['STAX'], stds['STAY'], stds['STAZ']))
def get_date(pppfile, multi=False):
    """Extract the date token from *pppfile*'s name for use in figure names.

    The last purely-numeric underscore-separated token wins; a two-character
    token containing 'D' (e.g. '2D') marks a multi-day run and is appended
    as ``_multi_<tok>``.  Falls back to characters 7+ of the second token
    when no numeric token exists.
    """
    stem = pppfile.split('.')[-2].split('/')[-1]
    tokens = stem.split('_')
    idate = None
    for tok in tokens:
        try:
            idate = int(tok)
        except ValueError:
            if len(tok) == 2 and 'D' in tok:
                multi = tok
    if idate is None:
        idate = int(tokens[1][7:])
    if multi:
        return str(idate) + f'_multi_{multi}'
    return str(idate)
def plot_PPP(pppfile, snxfile, multi=False):
    """Produce two per-station position plots from a .PPP file.

    For every station found in *pppfile*:
      * estimated position minus its median  -> ``<stn>_pos__<date>.png``
      * estimated position minus SINEX soln  -> ``<stn>_snx_pos_<date>.png``
    Figures are saved to the current working directory; the file names of
    the last station plotted are returned.
    """
    results = parsePPPOutputFile(pppfile)
    for stn in results['recPos']['stns']:
        snx, stds = get_sinex_recpos(snxfile, stn)
        print("Plotting station:", stn)
        date_str = get_date(pppfile, multi=multi) if multi else get_date(pppfile)

        x_series = results['recPos'][stn]['X'][:]
        y_series = results['recPos'][stn]['Y'][:]
        z_series = results['recPos'][stn]['Z'][:]

        # Figure 1: deviation from the median estimated position.
        fig1, ax1 = plt.subplots(1, 1, figsize=(13, 8))
        plt.title(stn + " Estimated pos - median position_" + date_str[:4]
                  + '_DOY' + date_str[4:7] + '_H' + date_str[7:])
        ax1.plot(x_series - np.median(x_series), label='X')
        ax1.plot(y_series - np.median(y_series), label='Y')
        ax1.plot(z_series - np.median(z_series), label='Z')
        ax1.legend(loc='best')
        ax1.grid(True)
        ax1.set_xlabel('Epoch')
        ax1.set_ylabel('Difference (m)')
        figsavename1 = stn + f'_pos__{date_str}.png'
        fig1.savefig(figsavename1)
        print("Saved the plot:", figsavename1)

        # Figure 2: deviation from the SINEX reference solution.
        fig2, ax2 = plt.subplots(1, 1, figsize=(13, 8))
        plt.title(stn + " Estimated pos - sinex position_" + date_str[:4]
                  + '_DOY' + date_str[4:7] + '_H' + date_str[7:])
        ax2.plot(np.array(x_series) - snx[0], label='X')
        ax2.plot(np.array(y_series) - snx[1], label='Y')
        ax2.plot(np.array(z_series) - snx[2], label='Z')
        # symlog keeps small residuals readable next to metre-level offsets.
        plt.yscale('symlog', linthresh=0.1)
        ax2.grid(True)
        ax2.legend(loc='best')
        ax2.set_xlabel('Epoch')
        ax2.set_ylabel('Difference (m)')
        figsavename2 = stn + f'_snx_pos_{date_str}.png'
        fig2.savefig(figsavename2)
        print("Saved the plot:", figsavename2)
    return figsavename1, figsavename2
#==============================================================================
# The parser is created at import time; arguments are only registered (and
# parsed) when the file runs as a script.
parser = argparse.ArgumentParser(
    description='Compare clock solutions from processing runs of the pea',
    epilog='''\
pppPlot.py:\nthis script takes the output from the pea and plots the differences between the median of the estimated coordinates, and then those provided in the IGS SINEX solution.
To run, first grep the reported receiver positions reported in the station specific trace files to a seperate file
> grep "REC_POS" /data/acs/pea/output/exs/EX01_IF/EX01_IF-ALIC201919900.TRACE > ALIC_201919900.PPP
> pppPlot.py --ppp output/EX01_IF_PPP/ALIC_201919900.PPP --snx proc/exs/products/igs19P2062.snx
you should be able to view the plots ALIC
#==============================================================================
''')

if __name__ == "__main__":
    parser.add_argument('--ppp', dest='pppfile', default='', help="Directory of PPP file")
    parser.add_argument('--snx', dest='snxfile', default='', help="Directory of SNX file")
    cli_args = parser.parse_args()
    if not cli_args.pppfile:
        print("Usage: pppPlot.py --ppp <output/EX01_IF_PPP/ALIC_201919900.PPP> --snx <products/igs19P2062.snx>")
        print("Outputs: <station>_pos.png, <station>_snx_pos.png")
        raise SystemExit
    plot_PPP(cli_args.pppfile, cli_args.snxfile)
from barbican.common import utils
from barbican.plugin.interface import certificate_manager as cert
LOG = utils.getLogger(__name__)
MSEC_UNTIL_CHECK_STATUS = 5000
class SimpleCertificatePlugin(cert.CertificatePluginBase):
    """Minimal certificate plugin used as the simple/default backend.

    Every operation logs the call and answers with a canned
    :class:`ResultDTO`, which makes it suitable for exercising the order
    workflow without a real CA.
    """

    def get_default_ca_name(self):
        """Return the display name of the default (simple) CA."""
        return "Simple CA"

    def get_default_signing_cert(self):
        """Return the placeholder body of the CA signing certificate."""
        return "XXXXXXXXXXXXXXXXX"

    def get_default_intermediates(self):
        """Return the placeholder body of the intermediate chain."""
        return "YYYYYYYYYYYYYYYY"

    def issue_certificate_request(self, order_id, order_meta, plugin_meta,
                                  barbican_meta_dto):
        """Open the initial certificate order with the CA.

        :param order_id: ID associated with the order
        :param order_meta: dict of meta-data associated with the order
        :param plugin_meta: plugin meta-data persisted by Barbican between
            calls; plugins may update/add entries here
        :param barbican_meta_dto: additional data needed to process the order
        :returns: a :class:`ResultDTO` describing the outcome
        :rtype: :class:`ResultDTO`
        """
        LOG.info('Invoking issue_certificate_request()')
        # Report "CA is working on it" and ask to be polled again later.
        return cert.ResultDTO(
            cert.CertificateStatus.WAITING_FOR_CA,
            retry_msec=MSEC_UNTIL_CHECK_STATUS)

    def modify_certificate_request(self, order_id, order_meta, plugin_meta,
                                   barbican_meta_dto):
        """Update the order meta-data.

        :param order_id: ID associated with the order
        :param order_meta: dict of meta-data associated with the order
        :param plugin_meta: plugin meta-data persisted by Barbican between
            calls; plugins may update/add entries here
        :param barbican_meta_dto: additional data needed to process the order
        :returns: a :class:`ResultDTO` describing the outcome
        :rtype: :class:`ResultDTO`
        """
        LOG.info('Invoking modify_certificate_request()')
        return cert.ResultDTO(cert.CertificateStatus.WAITING_FOR_CA)

    def cancel_certificate_request(self, order_id, order_meta, plugin_meta,
                                   barbican_meta_dto):
        """Cancel the order.

        :param order_id: ID associated with the order
        :param order_meta: dict of meta-data associated with the order
        :param plugin_meta: plugin meta-data persisted by Barbican between
            calls; plugins may update/add entries here
        :param barbican_meta_dto: additional data needed to process the order
        :returns: a :class:`ResultDTO` describing the outcome
        :rtype: :class:`ResultDTO`
        """
        LOG.info('Invoking cancel_certificate_request()')
        return cert.ResultDTO(cert.CertificateStatus.REQUEST_CANCELED)

    def check_certificate_status(self, order_id, order_meta, plugin_meta,
                                 barbican_meta_dto):
        """Check the status of the order.

        :param order_id: ID associated with the order
        :param order_meta: dict of meta-data associated with the order
        :param plugin_meta: plugin meta-data persisted by Barbican between
            calls; plugins may update/add entries here
        :param barbican_meta_dto: additional data needed to process the order
        :returns: a :class:`ResultDTO` describing the outcome
        :rtype: :class:`ResultDTO`
        """
        LOG.info('Invoking check_certificate_status()')
        # The simple backend always reports immediate success.
        return cert.ResultDTO(cert.CertificateStatus.CERTIFICATE_GENERATED)

    def supports(self, certificate_spec):
        """Report whether the plugin supports the given certificate type.

        :param certificate_spec: details of the certificate to generate
        :returns: True -- the simple plugin accepts every spec
        """
        return True

    def supported_request_types(self):
        """Return the Barbican-core request types this plugin handles."""
        return [cert.CertificateRequestType.CUSTOM_REQUEST,
                cert.CertificateRequestType.SIMPLE_CMC_REQUEST,
                cert.CertificateRequestType.FULL_CMC_REQUEST,
                cert.CertificateRequestType.STORED_KEY_REQUEST]
class SimpleCertificateEventPlugin(cert.CertificateEventPluginBase):
    """Event plugin that only logs the notifications it receives."""

    def notify_certificate_is_ready(
            self, project_id, order_ref, container_ref):
        """Log that a certificate has been generated and is ready to use.

        :param project_id: project ID associated with this certificate
        :param order_ref: HATEOAS reference URI to the submitted order
        :param container_ref: HATEOAS reference URI to the container storing
            the certificate
        :returns: None
        """
        LOG.info('Invoking notify_certificate_is_ready()')

    def notify_ca_is_unavailable(
            self, project_id, order_ref, error_msg, retry_in_msec):
        """Log that the certificate authority (CA) is not available.

        :param project_id: project ID associated with this order
        :param order_ref: HATEOAS reference URI to the submitted order
        :param error_msg: error message, if one is available
        :param retry_in_msec: delay before retrying the CA; 0 means no retry
        :returns: None
        """
        LOG.info('Invoking notify_ca_is_unavailable()')
from barbican.plugin.interface import certificate_manager as cert
LOG = utils.getLogger(__name__)
MSEC_UNTIL_CHECK_STATUS = 5000
class SimpleCertificatePlugin(cert.CertificatePluginBase):
    """Default certificate plugin that stands in for a real CA.

    Every order-lifecycle hook logs the call and reports a canned status,
    so the plugin is usable as a simple development/test backend.

    The four order hooks share the same signature:

    :param order_id: ID associated with the order
    :param order_meta: dict of meta-data associated with the order
    :param plugin_meta: plugin meta-data previously set by calls to this
        plugin; plugins may also update/add information here, which
        Barbican will persist on their behalf
    :param barbican_meta_dto: additional data needed to process the order
    :returns: a :class:`ResultDTO` describing the outcome
    """

    def get_default_ca_name(self):
        """Return the display name of the default CA."""
        return "Simple CA"

    def get_default_signing_cert(self):
        """Return the (placeholder) signing certificate of the default CA."""
        return "XXXXXXXXXXXXXXXXX"

    def get_default_intermediates(self):
        """Return the (placeholder) intermediate chain of the default CA."""
        return "YYYYYYYYYYYYYYYY"

    def issue_certificate_request(self, order_id, order_meta, plugin_meta,
                                  barbican_meta_dto):
        """Start a new order: report WAITING_FOR_CA and request polling."""
        LOG.info('Invoking issue_certificate_request()')
        return cert.ResultDTO(
            cert.CertificateStatus.WAITING_FOR_CA,
            retry_msec=MSEC_UNTIL_CHECK_STATUS)

    def modify_certificate_request(self, order_id, order_meta, plugin_meta,
                                   barbican_meta_dto):
        """Update an order's meta-data: still WAITING_FOR_CA."""
        LOG.info('Invoking modify_certificate_request()')
        return cert.ResultDTO(cert.CertificateStatus.WAITING_FOR_CA)

    def cancel_certificate_request(self, order_id, order_meta, plugin_meta,
                                   barbican_meta_dto):
        """Cancel an order: report it as REQUEST_CANCELED."""
        LOG.info('Invoking cancel_certificate_request()')
        return cert.ResultDTO(cert.CertificateStatus.REQUEST_CANCELED)

    def check_certificate_status(self, order_id, order_meta, plugin_meta,
                                 barbican_meta_dto):
        """Poll an order: report the certificate as generated."""
        LOG.info('Invoking check_certificate_status()')
        return cert.ResultDTO(cert.CertificateStatus.CERTIFICATE_GENERATED)

    def supports(self, certificate_spec):
        """Report whether the certificate type is supported (always True).

        :param certificate_spec: details of the certificate to generate
        :returns: True for every specification
        """
        return True

    def supported_request_types(self):
        """Return the list of request types this plugin accepts."""
        return [cert.CertificateRequestType.CUSTOM_REQUEST,
                cert.CertificateRequestType.SIMPLE_CMC_REQUEST,
                cert.CertificateRequestType.FULL_CMC_REQUEST,
                cert.CertificateRequestType.STORED_KEY_REQUEST]
class SimpleCertificateEventPlugin(cert.CertificateEventPluginBase):
    """Simple/default certificate event plugin."""
    # NOTE: both notification hooks below only emit a log line; there are
    # no side effects beyond logging.

    def notify_certificate_is_ready(
            self, project_id, order_ref, container_ref):
        """Notify that a certificate has been generated and is ready to use.

        :param project_id: Project ID associated with this certificate
        :param order_ref: HATEOAS reference URI to the submitted Barbican Order
        :param container_ref: HATEOAS reference URI to the Container storing
            the certificate
        :returns: None
        """
        LOG.info('Invoking notify_certificate_is_ready()')

    def notify_ca_is_unavailable(
            self, project_id, order_ref, error_msg, retry_in_msec):
        """Notify that the certificate authority (CA) isn't available.

        :param project_id: Project ID associated with this order
        :param order_ref: HATEOAS reference URI to the submitted Barbican Order
        :param error_msg: Error message if it is available
        :param retry_in_msec: Delay before attempting to talk to the CA again.
            If this is 0, then no attempt will be made.
        :returns: None
        """
        LOG.info('Invoking notify_ca_is_unavailable()')
from __future__ import print_function
import sys
from pyspark.sql import SparkSession
from pyspark.sql.functions import explode
from pyspark.sql.functions import split, desc, col
from pyspark.sql.types import StructType

# Streams tweet CSV rows from HDFS and reports, to the console, the top five
# author names ranked by their maximum friends/followers ratio.
if __name__ == "__main__":
    # NOTE(review): the socket-source argument handling below was disabled
    # by the author and is kept as an inert string literal (dead code).
    """if len(sys.argv) != 3:
print("Usage: structured_network_wordcount.py <hostname> <port>", file=sys.stderr)
sys.exit(-1)
host = sys.argv[1]
port = int(sys.argv[2])"""
    spark = SparkSession\
        .builder\
        .appName("StructuredNetworkWordCount")\
        .getOrCreate()
    # Original socket streaming source, disabled in favour of the CSV file
    # source below (also inert dead code kept as found).
    """lines = spark\
.readStream\
.format('socket')\
.option('host', host)\
.option('port', port)\
.load()"""
    # Input columns, in file order:
    #"ID","language","Date","source","len","likes","RTs","Hashtags","Usernames","Userid","name","Place","followers","friends"
    user_schema = StructType().add("ID", "string").add("language", "string").add("Date", "string").add("source", "string").add("len", "string").add("likes", "integer").add("RTs", "integer").add("Hashtags", "string").add("Usernames", "string").add("Userid", "string").add("name", "string").add("Place", "string").add("followers", "integer").add("friends", "integer")
    lines = spark\
        .readStream\
        .option("sep", ";")\
        .schema(user_schema)\
        .csv("hdfs://localhost:9000/stream")
    # Project each row down to the author name and the friends/followers
    # ratio; Spark names the derived column "(friends / followers)".
    words = lines.select(
        lines.name, lines.friends/lines.followers
    )
    # Per author, keep the max ratio; limit to five rows. The follow-up
    # select renames Spark's generated "max((friends / followers))" column.
    wordCounts = words.groupBy(lines.name).agg({"(friends / followers)": "max" }).limit(5)
    wordCounts = wordCounts.select(col("name"), col("max((friends / followers))").alias("FRRatio"))
    # Stream the full (complete-mode) result to the console; stop the query
    # after at most 120 seconds.
    query = wordCounts\
        .writeStream\
        .outputMode('complete')\
        .format('console')\
        .start()
    query.awaitTermination(120)
import sys
from pyspark.sql import SparkSession
from pyspark.sql.functions import explode
from pyspark.sql.functions import split, desc, col
from pyspark.sql.types import StructType
if __name__ == "__main__":
"""if len(sys.argv) != 3:
print("Usage: structured_network_wordcount.py <hostname> <port>", file=sys.stderr)
sys.exit(-1)
host = sys.argv[1]
port = int(sys.argv[2])"""
spark = SparkSession\
.builder\
.appName("StructuredNetworkWordCount")\
.getOrCreate()
# Create DataFrame representing the stream of input lines from connection to host:port
"""lines = spark\
.readStream\
.format('socket')\
.option('host', host)\
.option('port', port)\
.load()"""
#"ID","language","Date","source","len","likes","RTs","Hashtags","Usernames","Userid","name","Place","followers","friends"
user_schema = StructType().add("ID", "string").add("language", "string").add("Date", "string").add("source", "string").add("len", "string").add("likes", "integer").add("RTs", "integer").add("Hashtags", "string").add("Usernames", "string").add("Userid", "string").add("name", "string").add("Place", "string").add("followers", "integer").add("friends", "integer")
lines = spark\
.readStream\
.option("sep", ";")\
.schema(user_schema)\
.csv("hdfs://localhost:9000/stream")
# Split the lines into words
words = lines.select(
# explode turns each item in an array into a separate row
lines.name, lines.friends/lines.followers
)
# Generate running word count
wordCounts = words.groupBy(lines.name).agg({"(friends / followers)": "max" }).limit(5)
wordCounts = wordCounts.select(col("name"), col("max((friends / followers))").alias("FRRatio"))
# Start running the query that prints the running counts to the console
query = wordCounts\
.writeStream\
.outputMode('complete')\
.format('console')\
.start()
query.awaitTermination(120) | 0.326701 | 0.164114 |
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MultiCurrencyMoneyOpenApi import MultiCurrencyMoneyOpenApi
from alipay.aop.api.domain.MultiCurrencyMoneyOpenApi import MultiCurrencyMoneyOpenApi
class WriteOffLeftRightDetailOpenApiDTO(object):
    """DTO for the left/right pair of bills in a write-off operation.

    ``to_alipay_dict`` serialises only the fields that are set (truthy),
    in the wire-format key order; ``from_alipay_dict`` builds an instance
    back from such a dict.
    """

    # Wire-format key order; also drives __init__ and both dict methods.
    _ALL_FIELDS = (
        'inst_id',
        'ip_role_id',
        'left_apply_write_off_amt',
        'left_bill_no',
        'left_write_off_bill_type',
        'right_apply_write_off_amt',
        'right_bill_no',
        'right_write_off_bill_type',
    )

    def __init__(self):
        for field in self._ALL_FIELDS:
            setattr(self, '_' + field, None)

    def _plain_property(field):
        """Factory for a trivial get/set property over ``_<field>``."""
        attr = '_' + field

        def _get(self):
            return getattr(self, attr)

        def _set(self, value):
            setattr(self, attr, value)

        return property(_get, _set)

    def _money_property(field):
        """Factory for an amount property coercing dicts to money objects."""
        attr = '_' + field

        def _get(self):
            return getattr(self, attr)

        def _set(self, value):
            # Accept a ready-made money object or its serialised dict form.
            if isinstance(value, MultiCurrencyMoneyOpenApi):
                setattr(self, attr, value)
            else:
                setattr(self, attr, MultiCurrencyMoneyOpenApi.from_alipay_dict(value))

        return property(_get, _set)

    inst_id = _plain_property('inst_id')
    ip_role_id = _plain_property('ip_role_id')
    left_apply_write_off_amt = _money_property('left_apply_write_off_amt')
    left_bill_no = _plain_property('left_bill_no')
    left_write_off_bill_type = _plain_property('left_write_off_bill_type')
    right_apply_write_off_amt = _money_property('right_apply_write_off_amt')
    right_bill_no = _plain_property('right_bill_no')
    right_write_off_bill_type = _plain_property('right_write_off_bill_type')

    del _plain_property, _money_property

    def to_alipay_dict(self):
        """Serialise the set (truthy) fields to a plain dict, in wire order."""
        params = dict()
        for field in self._ALL_FIELDS:
            value = getattr(self, field)
            if not value:
                continue  # unset/falsy fields are omitted from the payload
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a DTO from a response dict; empty/None input yields None."""
        if not d:
            return None
        o = WriteOffLeftRightDetailOpenApiDTO()
        for field in WriteOffLeftRightDetailOpenApiDTO._ALL_FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MultiCurrencyMoneyOpenApi import MultiCurrencyMoneyOpenApi
from alipay.aop.api.domain.MultiCurrencyMoneyOpenApi import MultiCurrencyMoneyOpenApi
class WriteOffLeftRightDetailOpenApiDTO(object):
    """Left/right write-off bill pair, in Alipay OpenAPI wire format."""

    # Key order of the serialised payload.
    _FIELD_ORDER = (
        'inst_id',
        'ip_role_id',
        'left_apply_write_off_amt',
        'left_bill_no',
        'left_write_off_bill_type',
        'right_apply_write_off_amt',
        'right_bill_no',
        'right_write_off_bill_type',
    )

    def __init__(self):
        for name in self._FIELD_ORDER:
            setattr(self, '_' + name, None)

    def _make_field(name):
        """Plain pass-through property stored on ``_<name>``."""
        slot = '_' + name
        return property(
            lambda self: getattr(self, slot),
            lambda self, value: setattr(self, slot, value),
        )

    def _make_money_field(name):
        """Amount property: dict inputs are converted to money objects."""
        slot = '_' + name

        def _assign(self, value):
            if isinstance(value, MultiCurrencyMoneyOpenApi):
                setattr(self, slot, value)
            else:
                setattr(self, slot, MultiCurrencyMoneyOpenApi.from_alipay_dict(value))

        return property(lambda self: getattr(self, slot), _assign)

    inst_id = _make_field('inst_id')
    ip_role_id = _make_field('ip_role_id')
    left_apply_write_off_amt = _make_money_field('left_apply_write_off_amt')
    left_bill_no = _make_field('left_bill_no')
    left_write_off_bill_type = _make_field('left_write_off_bill_type')
    right_apply_write_off_amt = _make_money_field('right_apply_write_off_amt')
    right_bill_no = _make_field('right_bill_no')
    right_write_off_bill_type = _make_field('right_write_off_bill_type')

    del _make_field, _make_money_field

    @staticmethod
    def _serialize(value):
        """Delegate to nested DTOs that know how to serialise themselves."""
        if hasattr(value, 'to_alipay_dict'):
            return value.to_alipay_dict()
        return value

    def to_alipay_dict(self):
        """Serialise set (truthy) fields to a dict, preserving wire order."""
        return {
            name: self._serialize(value)
            for name in self._FIELD_ORDER
            if (value := getattr(self, name))
        }

    @staticmethod
    def from_alipay_dict(d):
        """Inverse of ``to_alipay_dict``; returns None for empty input."""
        if not d:
            return None
        o = WriteOffLeftRightDetailOpenApiDTO()
        for name in WriteOffLeftRightDetailOpenApiDTO._FIELD_ORDER:
            if name in d:
                setattr(o, name, d[name])
        return o
from Firefly import logging
from Firefly.components.zwave.device_types.switch import ZwaveSwitch
from Firefly.const import ACTION_OFF, ACTION_ON, SWITCH
from Firefly.services.alexa.alexa_const import ALEXA_SMARTPLUG
TITLE = 'Aeotec Smart Switch 5'
# State/value keys used by this device type.
BATTERY = 'battery'
ALARM = 'alarm'
POWER_METER = 'power_meter'
VOLTAGE_METER = 'voltage_meter'
CURRENT = 'power_current'
CURRENT_ENERGY_READING = 'current_energy_reading'
PREVIOUS_ENERGY_READING = 'previous_energy_reading'
VOLTAGE = 'voltage'
WATTS = 'watts'
# Commands accepted by, and values requestable from, this device.
COMMANDS = [ACTION_OFF, ACTION_ON]
REQUESTS = [SWITCH, CURRENT, VOLTAGE, WATTS]
# Default starting state for new instances (merged with caller overrides).
INITIAL_VALUES = {}
CAPABILITIES = {
    POWER_METER: True,
    SWITCH: True,
}
def Setup(firefly, package, **kwargs):
    """Build a ZwaveAeotecSwitch5 and register it with the Firefly core."""
    logging.message('Entering %s setup' % TITLE)
    component = ZwaveAeotecSwitch5(firefly, package, **kwargs)
    return firefly.install_component(component)
class ZwaveAeotecSwitch5(ZwaveSwitch):
    """Aeotec Smart Switch (DSC06106): Z-Wave switch with power metering.

    Wraps the generic ZwaveSwitch device type with this model's title,
    command/request lists and capability flags, and pushes the
    model-specific Z-Wave configuration parameters.
    """

    def __init__(self, firefly, package, **kwargs):
        """Merge caller-supplied initial values over the defaults and init.

        :param firefly: Firefly core instance.
        :param package: package this component was installed from.
        """
        # Always work on a copy so the module-level INITIAL_VALUES constant
        # is never shared with (or mutated through) an instance.
        initial_values = INITIAL_VALUES.copy()
        if kwargs.get('initial_values') is not None:
            initial_values.update(kwargs['initial_values'])
        kwargs.update({
            'initial_values': initial_values,
            'commands': COMMANDS,
            'requests': REQUESTS
        })
        super().__init__(firefly, package, TITLE, capabilities=CAPABILITIES, **kwargs)
        self.set_alexa_categories(ALEXA_SMARTPLUG)

    def update_device_config(self, **kwargs):
        """Push the desired Z-Wave configuration parameters to the device.

        Records the outcome in ``self._config_updated`` and bumps
        ``self._update_try_count`` so callers can retry until it sticks.
        This will be useful to make new default device configs.
        """
        # TODO: Pull these out into config values.
        # TODO: Copy this retry logic to all zwave devices.
        # REF:
        # https://aeotec.freshdesk.com/helpdesk/attachments/6009584529
        # https://github.com/OpenZWave/open-zwave/blob/master/config/aeotec/dsc06106.xml
        # Reporting-group configuration (params 101/102/111).
        # TODO: Document these.
        r1 = (101, 4)
        r2 = (102, 4)
        r3 = (111, 30)
        # Param 80: what to send on state change (0 = nothing, 1 = hail,
        # 2 = basic report).
        report_basic = 2
        report = (80, report_basic)
        # The settings below keep the device from reporting every 10 seconds:
        # param 90 makes it report only when a threshold below is crossed.
        min_change_watts = (91, 50)      # min change in watts [default 50]
        min_change_watts_pct = (92, 10)  # min change in watts % [default 10]
        report_on_wattage_change = (90, 1, 1)
        successful = self.verify_set_zwave_params([
            min_change_watts,
            min_change_watts_pct,
            r1,
            r2,
            r3,
            report,
            report_on_wattage_change
        ])
        self._update_try_count += 1
        self._config_updated = successful
from Firefly.components.zwave.device_types.switch import ZwaveSwitch
from Firefly.const import ACTION_OFF, ACTION_ON, SWITCH
from Firefly.services.alexa.alexa_const import ALEXA_SMARTPLUG
TITLE = 'Aeotec Smart Switch 5'
# State/value keys used by this device type.
BATTERY = 'battery'
ALARM = 'alarm'
POWER_METER = 'power_meter'
VOLTAGE_METER = 'voltage_meter'
CURRENT = 'power_current'
CURRENT_ENERGY_READING = 'current_energy_reading'
PREVIOUS_ENERGY_READING = 'previous_energy_reading'
VOLTAGE = 'voltage'
WATTS = 'watts'
# Commands accepted by, and values requestable from, this device.
COMMANDS = [ACTION_OFF, ACTION_ON]
REQUESTS = [SWITCH, CURRENT, VOLTAGE, WATTS]
# Default starting state for new instances.
INITIAL_VALUES = {}
CAPABILITIES = {
    POWER_METER: True,
    SWITCH: True,
}
def Setup(firefly, package, **kwargs):
    """Instantiate the switch component and register it with Firefly core."""
    logging.message('Entering %s setup' % TITLE)
    switch = ZwaveAeotecSwitch5(firefly, package, **kwargs)
    return firefly.install_component(switch)
class ZwaveAeotecSwitch5(ZwaveSwitch):
    """Aeotec Smart Switch 5 Z-Wave component (switch + power meter)."""

    def __init__(self, firefly, package, **kwargs):
        """Install the model-specific commands/requests and initial state."""
        overrides = kwargs.get('initial_values')
        if overrides is None:
            starting_state = INITIAL_VALUES
        else:
            starting_state = INITIAL_VALUES.copy()
            starting_state.update(overrides)
        kwargs.update({
            'initial_values': starting_state,
            'commands': COMMANDS,
            'requests': REQUESTS
        })
        super().__init__(firefly, package, TITLE, capabilities=CAPABILITIES, **kwargs)
        self.set_alexa_categories(ALEXA_SMARTPLUG)

    def update_device_config(self, **kwargs):
        """Write this model's Z-Wave config params and record the outcome.

        REF:
        https://aeotec.freshdesk.com/helpdesk/attachments/6009584529
        https://github.com/OpenZWave/open-zwave/blob/master/config/aeotec/dsc06106.xml
        """
        # TODO: pull these out into config values; copy this retry logic to
        # all zwave devices.
        desired_params = [
            (91, 50),    # min change in watts before a report [default 50]
            (92, 10),    # min change in watts % before a report [default 10]
            (101, 4),    # reporting-group params — TODO document (see REF)
            (102, 4),
            (111, 30),
            (80, 2),     # state-change report type: 2 = basic report
            (90, 1, 1),  # only report when the wattage thresholds trip
        ]
        successful = self.verify_set_zwave_params(desired_params)
        self._update_try_count += 1
        self._config_updated = successful
import asyncio
import binascii
from collections import OrderedDict
import copy
import logging
import RFXtrx as rfxtrxmod
import async_timeout
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.binary_sensor import DEVICE_CLASSES_SCHEMA
from homeassistant.const import (
CONF_COMMAND_OFF,
CONF_COMMAND_ON,
CONF_DEVICE,
CONF_DEVICE_CLASS,
CONF_DEVICE_ID,
CONF_DEVICES,
CONF_HOST,
CONF_PORT,
DEGREE,
ELECTRICAL_CURRENT_AMPERE,
ENERGY_KILO_WATT_HOUR,
EVENT_HOMEASSISTANT_STOP,
LENGTH_MILLIMETERS,
PERCENTAGE,
POWER_WATT,
PRESSURE_HPA,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
SPEED_METERS_PER_SECOND,
TEMP_CELSIUS,
TIME_HOURS,
UV_INDEX,
VOLT,
)
from homeassistant.core import callback
from homeassistant.exceptions import ConfigEntryNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.restore_state import RestoreEntity
from .const import (
ATTR_EVENT,
CONF_AUTOMATIC_ADD,
CONF_DATA_BITS,
CONF_DEBUG,
CONF_FIRE_EVENT,
CONF_OFF_DELAY,
CONF_REMOVE_DEVICE,
CONF_SIGNAL_REPETITIONS,
DEVICE_PACKET_TYPE_LIGHTING4,
EVENT_RFXTRX_EVENT,
SERVICE_SEND,
)
DOMAIN = "rfxtrx"

DEFAULT_SIGNAL_REPETITIONS = 1

# Dispatcher signal used to fan received events out to platform entities.
SIGNAL_EVENT = f"{DOMAIN}_event"

# Maps RFXtrx sensor value names to Home Assistant units (None = unitless).
DATA_TYPES = OrderedDict(
    [
        ("Temperature", TEMP_CELSIUS),
        ("Temperature2", TEMP_CELSIUS),
        ("Humidity", PERCENTAGE),
        ("Barometer", PRESSURE_HPA),
        ("Wind direction", DEGREE),
        ("Rain rate", f"{LENGTH_MILLIMETERS}/{TIME_HOURS}"),
        ("Energy usage", POWER_WATT),
        ("Total usage", ENERGY_KILO_WATT_HOUR),
        ("Sound", None),
        ("Sensor Status", None),
        ("Counter value", "count"),
        ("UV", UV_INDEX),
        ("Humidity status", None),
        ("Forecast", None),
        ("Forecast numeric", None),
        ("Rain total", LENGTH_MILLIMETERS),
        ("Wind average speed", SPEED_METERS_PER_SECOND),
        ("Wind gust", SPEED_METERS_PER_SECOND),
        ("Chill", TEMP_CELSIUS),
        ("Count", "count"),
        ("Current Ch. 1", ELECTRICAL_CURRENT_AMPERE),
        ("Current Ch. 2", ELECTRICAL_CURRENT_AMPERE),
        ("Current Ch. 3", ELECTRICAL_CURRENT_AMPERE),
        ("Voltage", VOLT),
        ("Current", ELECTRICAL_CURRENT_AMPERE),
        ("Battery numeric", PERCENTAGE),
        ("Rssi numeric", SIGNAL_STRENGTH_DECIBELS_MILLIWATT),
    ]
)

_LOGGER = logging.getLogger(__name__)

# hass.data keys for the transceiver object and the stop-event listener.
DATA_RFXOBJECT = "rfxobject"
DATA_LISTENER = "ha_stop"
def _bytearray_string(data):
    """Voluptuous validator: hex string -> bytearray (even length required)."""
    text = cv.string(data)
    try:
        return bytearray.fromhex(text)
    except ValueError as err:
        raise vol.Invalid(
            "Data must be a hex string with multiple of two characters"
        ) from err
def _ensure_device(value):
    """Coerce a None device entry into an empty, schema-validated config."""
    if value is not None:
        return DEVICE_DATA_SCHEMA(value)
    return DEVICE_DATA_SCHEMA({})
# Payload for the rfxtrx.send service: one raw hex packet.
SERVICE_SEND_SCHEMA = vol.Schema({ATTR_EVENT: _bytearray_string})

# Per-device options; CONF_OFF_DELAY is normalised to seconds.
DEVICE_DATA_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
        vol.Optional(CONF_FIRE_EVENT, default=False): cv.boolean,
        vol.Optional(CONF_OFF_DELAY): vol.All(
            cv.time_period, cv.positive_timedelta, lambda value: value.total_seconds()
        ),
        vol.Optional(CONF_DATA_BITS): cv.positive_int,
        vol.Optional(CONF_COMMAND_ON): cv.byte,
        vol.Optional(CONF_COMMAND_OFF): cv.byte,
        vol.Optional(CONF_SIGNAL_REPETITIONS, default=1): cv.positive_int,
    }
)

# Options shared by the serial-device and network variants below.
BASE_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_DEBUG): cv.boolean,
        vol.Optional(CONF_AUTOMATIC_ADD, default=False): cv.boolean,
        vol.Optional(CONF_DEVICES, default={}): {cv.string: _ensure_device},
    },
)

# Serial transceiver configuration.
DEVICE_SCHEMA = BASE_SCHEMA.extend({vol.Required(CONF_DEVICE): cv.string})

# Network transceiver configuration.
PORT_SCHEMA = BASE_SCHEMA.extend(
    {vol.Required(CONF_PORT): cv.port, vol.Optional(CONF_HOST): cv.string}
)

CONFIG_SCHEMA = vol.Schema(
    {DOMAIN: vol.All(cv.deprecated(CONF_DEBUG), vol.Any(DEVICE_SCHEMA, PORT_SCHEMA))},
    extra=vol.ALLOW_EXTRA,
)

# Entity platforms forwarded when a config entry is set up.
DOMAINS = ["switch", "sensor", "light", "binary_sensor", "cover"]
async def async_setup(hass, config):
    """Set up the RFXtrx component from YAML by importing a config entry."""
    if DOMAIN not in config:
        return True
    data = {
        CONF_HOST: config[DOMAIN].get(CONF_HOST),
        CONF_PORT: config[DOMAIN].get(CONF_PORT),
        CONF_DEVICE: config[DOMAIN].get(CONF_DEVICE),
        CONF_AUTOMATIC_ADD: config[DOMAIN].get(CONF_AUTOMATIC_ADD),
        CONF_DEVICES: config[DOMAIN][CONF_DEVICES],
    }
    # Derive each device's id from its event code so it ends up stored in
    # the ConfigEntry data (mutates the per-device config dicts in place).
    for event_code, event_config in data[CONF_DEVICES].items():
        event = get_rfx_object(event_code)
        if event is None:
            continue  # unparsable event code: leave the entry untouched
        device_id = get_device_id(
            event.device, data_bits=event_config.get(CONF_DATA_BITS)
        )
        event_config[CONF_DEVICE_ID] = device_id
    # Hand the assembled YAML config over to the config-entry import flow.
    hass.async_create_task(
        hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data=data,
        )
    )
    return True
async def async_setup_entry(hass, entry: config_entries.ConfigEntry):
    """Set up the RFXtrx component from a config entry."""
    hass.data.setdefault(DOMAIN, {})
    await async_setup_internal(hass, entry)
    # Forward the entry to every entity platform we provide.
    for platform in DOMAINS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, platform)
        )
    return True
async def async_unload_entry(hass, entry: config_entries.ConfigEntry):
    """Unload RFXtrx component."""
    # Unload every platform first; abort if any platform refuses to unload.
    if not all(
        await asyncio.gather(
            *[
                hass.config_entries.async_forward_entry_unload(entry, component)
                for component in DOMAINS
            ]
        )
    ):
        return False
    hass.services.async_remove(DOMAIN, SERVICE_SEND)
    # Remove the EVENT_HOMEASSISTANT_STOP listener registered at setup.
    listener = hass.data[DOMAIN][DATA_LISTENER]
    listener()
    # Closing the transceiver connection may block, so run it in the executor.
    rfx_object = hass.data[DOMAIN][DATA_RFXOBJECT]
    await hass.async_add_executor_job(rfx_object.close_connection)
    return True
def _create_rfx(config):
    """Construct the RFXtrx connection: TCP when a port is set, else serial."""
    if config[CONF_PORT] is None:
        return rfxtrxmod.Connect(config[CONF_DEVICE], None)
    # A port means a network-attached transceiver at (host, port).
    return rfxtrxmod.Connect(
        (config[CONF_HOST], config[CONF_PORT]),
        None,
        transport_protocol=rfxtrxmod.PyNetworkTransport,
    )
def _get_device_lookup(devices):
    """Index the device configurations by their derived device id."""
    lookup = {}
    for code, conf in devices.items():
        parsed = get_rfx_object(code)
        if parsed is None:
            continue  # skip entries whose event code cannot be parsed
        key = get_device_id(parsed.device, data_bits=conf.get(CONF_DATA_BITS))
        lookup[key] = conf
    return lookup
async def async_setup_internal(hass, entry: config_entries.ConfigEntry):
    """Set up the RFXtrx component."""
    config = entry.data
    # Initialize library; connecting can block, so bound it with a timeout.
    try:
        async with async_timeout.timeout(5):
            rfx_object = await hass.async_add_executor_job(_create_rfx, config)
    except asyncio.TimeoutError as err:
        raise ConfigEntryNotReady from err
    # Setup some per device config; shared (and mutated) by the closures below.
    devices = _get_device_lookup(config[CONF_DEVICES])

    # Declare the Handle event
    @callback
    def async_handle_receive(event):
        """Handle received messages from RFXtrx gateway."""
        # Events without an id cannot be attributed to a device; drop them.
        if not event.device.id_string:
            return
        event_data = {
            "packet_type": event.device.packettype,
            "sub_type": event.device.subtype,
            "type_string": event.device.type_string,
            "id_string": event.device.id_string,
            "data": binascii.hexlify(event.data).decode("ASCII"),
            "values": getattr(event, "values", None),
        }
        _LOGGER.debug("Receive RFXCOM event: %s", event_data)
        data_bits = get_device_data_bits(event.device, devices)
        device_id = get_device_id(event.device, data_bits=data_bits)
        # Register new devices when automatic add is enabled.
        if config[CONF_AUTOMATIC_ADD] and device_id not in devices:
            _add_device(event, device_id)
        # Callback to HA registered components.
        hass.helpers.dispatcher.async_dispatcher_send(SIGNAL_EVENT, event, device_id)
        # Signal event to any other listeners, if configured for this device.
        fire_event = devices.get(device_id, {}).get(CONF_FIRE_EVENT)
        if fire_event:
            hass.bus.async_fire(EVENT_RFXTRX_EVENT, event_data)

    @callback
    def _add_device(event, device_id):
        """Add a device to config entry."""
        config = DEVICE_DATA_SCHEMA({})
        config[CONF_DEVICE_ID] = device_id
        # Deep-copy the stored devices so the entry update sees new objects.
        data = entry.data.copy()
        data[CONF_DEVICES] = copy.deepcopy(entry.data[CONF_DEVICES])
        event_code = binascii.hexlify(event.data).decode("ASCII")
        data[CONF_DEVICES][event_code] = config
        hass.config_entries.async_update_entry(entry=entry, data=data)
        devices[device_id] = config

    def _shutdown_rfxtrx(event):
        """Close connection with RFXtrx."""
        rfx_object.close_connection()

    listener = hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _shutdown_rfxtrx)
    hass.data[DOMAIN][DATA_LISTENER] = listener
    hass.data[DOMAIN][DATA_RFXOBJECT] = rfx_object
    # The library fires the callback from its own thread; hand each event
    # back to the HA event loop.
    rfx_object.event_callback = lambda event: hass.add_job(async_handle_receive, event)

    def send(call):
        """Service handler: transmit a raw packet through the transceiver."""
        event = call.data[ATTR_EVENT]
        rfx_object.transport.send(event)

    hass.services.async_register(DOMAIN, SERVICE_SEND, send, schema=SERVICE_SEND_SCHEMA)
def get_rfx_object(packetid):
    """Return the RFXObject with the packetid.

    Args:
        packetid: hex string of a raw RFXtrx packet.

    Returns:
        A high-level rfxtrx event object, or None when the string is not
        valid hex or does not parse into a known low-level packet.
    """
    try:
        binarypacket = bytearray.fromhex(packetid)
    except ValueError:
        return None

    pkt = rfxtrxmod.lowlevel.parse(binarypacket)
    if pkt is None:
        return None
    # Wrap the low-level packet in the matching high-level event type.
    if isinstance(pkt, rfxtrxmod.lowlevel.SensorPacket):
        obj = rfxtrxmod.SensorEvent(pkt)
    elif isinstance(pkt, rfxtrxmod.lowlevel.Status):
        obj = rfxtrxmod.StatusEvent(pkt)
    else:
        obj = rfxtrxmod.ControlEvent(pkt)
    # Keep the raw packet bytes on the event (callers hexlify event.data).
    obj.data = binarypacket
    return obj
def get_pt2262_deviceid(device_id, nb_data_bits):
    """Extract and return the address bits from a Lighting4/PT2262 packet.

    Args:
        device_id: hex string of the full PT2262 id (address + data bits).
        nb_data_bits: number of low bits carrying command data, or None.

    Returns:
        The address part as hexlified ``bytes``, or None when
        ``nb_data_bits`` is None or ``device_id`` is not valid hex.
    """
    if nb_data_bits is None:
        # Explicit None for consistency with the invalid-hex branch below
        # (was a bare ``return``).
        return None
    try:
        data = bytearray.fromhex(device_id)
    except ValueError:
        return None
    # Zero the low nb_data_bits bits of the last byte: those carry the
    # command, not the address.
    mask = 0xFF & ~((1 << nb_data_bits) - 1)
    data[-1] &= mask
    return binascii.hexlify(data)
def get_pt2262_cmd(device_id, data_bits):
    """Return the command (low data bits) of a Lighting4/PT2262 packet as hex."""
    try:
        raw = bytearray.fromhex(device_id)
    except ValueError:
        return None
    # Only the low data_bits bits of the last byte are the command part.
    low_mask = 0xFF & ((1 << data_bits) - 1)
    return hex(raw[-1] & low_mask)
def get_device_data_bits(device, devices):
    """Deduce data bits for device based on a cache of device bits.

    Returns the configured CONF_DATA_BITS of the first known device whose
    masked id matches, or None (also for non-Lighting4 packets).
    """
    if device.packettype != DEVICE_PACKET_TYPE_LIGHTING4:
        return None
    for known_id, entity_config in devices.items():
        bits = entity_config.get(CONF_DATA_BITS)
        if get_device_id(device, bits) == known_id:
            return bits
    return None
def find_possible_pt2262_device(device_ids, device_id):
    """Look for a known id that shares a prefix with device_id.

    On a match the suggested PT2262 configuration is logged and the known
    id is returned; otherwise None.
    """
    for candidate in device_ids:
        if len(candidate) != len(device_id):
            continue
        # Index of the last position where the two ids still agree.
        last_equal = None
        for idx, (lhs, rhs) in enumerate(zip(candidate, device_id)):
            if lhs != rhs:
                break
            last_equal = idx
        if last_equal is None:
            # Ids differ already at the first character: not a match.
            continue
        # Number of trailing characters that differ (4 bits each).
        size = len(candidate) - last_equal - 1
        _LOGGER.info(
            "rfxtrx: found possible device %s for %s "
            "with the following configuration:\n"
            "data_bits=%d\n"
            "command_on=0x%s\n"
            "command_off=0x%s\n",
            device_id,
            candidate,
            size * 4,
            candidate[-size:],
            device_id[-size:],
        )
        return candidate
    return None
def get_device_id(device, data_bits=None):
    """Return the (packettype, subtype, id_string) tuple identifying device."""
    id_string = device.id_string
    # For PT2262 devices the data bits are masked out of the id so that
    # differing commands map to the same device.
    if data_bits and device.packettype == DEVICE_PACKET_TYPE_LIGHTING4:
        masked = get_pt2262_deviceid(id_string, data_bits)
        if masked:
            id_string = masked.decode("ASCII")
    return ("%x" % device.packettype, "%x" % device.subtype, id_string)
class RfxtrxEntity(RestoreEntity):
    """Represents a Rfxtrx device.

    Contains the common logic for Rfxtrx lights and switches.
    """

    def __init__(self, device, device_id, event=None):
        """Initialize the device.

        Args:
            device: rfxtrx device object (provides type_string/id_string).
            device_id: (packettype, subtype, id_string) tuple from get_device_id.
            event: optional last known event to restore state from.
        """
        self._name = f"{device.type_string} {device.id_string}"
        self._device = device
        self._event = event
        self._device_id = device_id
        # Unique id is the joined parts of the device_id tuple.
        self._unique_id = "_".join(x for x in self._device_id)

    async def async_added_to_hass(self):
        """Restore RFXtrx device state (ON/OFF)."""
        if self._event:
            # Re-apply the last known event so state survives a restart.
            self._apply_event(self._event)
        # Every gateway event is dispatched here; _handle_event filters.
        self.async_on_remove(
            self.hass.helpers.dispatcher.async_dispatcher_connect(
                SIGNAL_EVENT, self._handle_event
            )
        )
        # Allow the integration to remove this entity via a dispatcher signal.
        self.async_on_remove(
            self.hass.helpers.dispatcher.async_dispatcher_connect(
                f"{DOMAIN}_{CONF_REMOVE_DEVICE}_{self._device_id}", self.async_remove
            )
        )

    @property
    def should_poll(self):
        """No polling needed for a RFXtrx switch."""
        return False

    @property
    def name(self):
        """Return the name of the device if any."""
        return self._name

    @property
    def device_state_attributes(self):
        """Return the device state attributes."""
        if not self._event:
            return None
        # Expose the raw packet bytes of the last event as a hex string.
        return {ATTR_EVENT: "".join(f"{x:02x}" for x in self._event.data)}

    @property
    def assumed_state(self):
        """Return true if unable to access real state of entity."""
        return True

    @property
    def unique_id(self):
        """Return unique identifier of remote device."""
        return self._unique_id

    @property
    def device_info(self):
        """Return the device info."""
        return {
            "identifiers": {(DOMAIN, *self._device_id)},
            "name": f"{self._device.type_string} {self._device.id_string}",
            "model": self._device.type_string,
        }

    def _apply_event(self, event):
        """Apply a received event."""
        self._event = event

    @callback
    def _handle_event(self, event, device_id):
        """Handle a reception of data, overridden by other classes."""
class RfxtrxCommandEntity(RfxtrxEntity):
"""Represents a Rfxtrx device.
Contains the common logic for Rfxtrx lights and switches.
"""
def __init__(self, device, device_id, signal_repetitions=1, event=None):
"""Initialzie a switch or light device."""
super().__init__(device, device_id, event=event)
self.signal_repetitions = signal_repetitions
self._state = None
async def _async_send(self, fun, *args):
rfx_object = self.hass.data[DOMAIN][DATA_RFXOBJECT]
for _ in range(self.signal_repetitions):
await self.hass.async_add_executor_job(fun, rfx_object.transport, *args) | homeassistant/components/rfxtrx/__init__.py | import asyncio
import binascii
from collections import OrderedDict
import copy
import logging
import RFXtrx as rfxtrxmod
import async_timeout
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.binary_sensor import DEVICE_CLASSES_SCHEMA
from homeassistant.const import (
CONF_COMMAND_OFF,
CONF_COMMAND_ON,
CONF_DEVICE,
CONF_DEVICE_CLASS,
CONF_DEVICE_ID,
CONF_DEVICES,
CONF_HOST,
CONF_PORT,
DEGREE,
ELECTRICAL_CURRENT_AMPERE,
ENERGY_KILO_WATT_HOUR,
EVENT_HOMEASSISTANT_STOP,
LENGTH_MILLIMETERS,
PERCENTAGE,
POWER_WATT,
PRESSURE_HPA,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
SPEED_METERS_PER_SECOND,
TEMP_CELSIUS,
TIME_HOURS,
UV_INDEX,
VOLT,
)
from homeassistant.core import callback
from homeassistant.exceptions import ConfigEntryNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.restore_state import RestoreEntity
from .const import (
ATTR_EVENT,
CONF_AUTOMATIC_ADD,
CONF_DATA_BITS,
CONF_DEBUG,
CONF_FIRE_EVENT,
CONF_OFF_DELAY,
CONF_REMOVE_DEVICE,
CONF_SIGNAL_REPETITIONS,
DEVICE_PACKET_TYPE_LIGHTING4,
EVENT_RFXTRX_EVENT,
SERVICE_SEND,
)
DOMAIN = "rfxtrx"
DEFAULT_SIGNAL_REPETITIONS = 1
SIGNAL_EVENT = f"{DOMAIN}_event"
DATA_TYPES = OrderedDict(
[
("Temperature", TEMP_CELSIUS),
("Temperature2", TEMP_CELSIUS),
("Humidity", PERCENTAGE),
("Barometer", PRESSURE_HPA),
("Wind direction", DEGREE),
("Rain rate", f"{LENGTH_MILLIMETERS}/{TIME_HOURS}"),
("Energy usage", POWER_WATT),
("Total usage", ENERGY_KILO_WATT_HOUR),
("Sound", None),
("Sensor Status", None),
("Counter value", "count"),
("UV", UV_INDEX),
("Humidity status", None),
("Forecast", None),
("Forecast numeric", None),
("Rain total", LENGTH_MILLIMETERS),
("Wind average speed", SPEED_METERS_PER_SECOND),
("Wind gust", SPEED_METERS_PER_SECOND),
("Chill", TEMP_CELSIUS),
("Count", "count"),
("Current Ch. 1", ELECTRICAL_CURRENT_AMPERE),
("Current Ch. 2", ELECTRICAL_CURRENT_AMPERE),
("Current Ch. 3", ELECTRICAL_CURRENT_AMPERE),
("Voltage", VOLT),
("Current", ELECTRICAL_CURRENT_AMPERE),
("Battery numeric", PERCENTAGE),
("Rssi numeric", SIGNAL_STRENGTH_DECIBELS_MILLIWATT),
]
)
_LOGGER = logging.getLogger(__name__)
DATA_RFXOBJECT = "rfxobject"
DATA_LISTENER = "ha_stop"
def _bytearray_string(data):
    """Validate *data* as a hex string and return it as a bytearray."""
    text = cv.string(data)
    try:
        return bytearray.fromhex(text)
    except ValueError as err:
        raise vol.Invalid(
            "Data must be a hex string with multiple of two characters"
        ) from err
def _ensure_device(value):
    """Validate a device config dict, defaulting to an empty one for None."""
    if value is None:
        value = {}
    return DEVICE_DATA_SCHEMA(value)
SERVICE_SEND_SCHEMA = vol.Schema({ATTR_EVENT: _bytearray_string})
DEVICE_DATA_SCHEMA = vol.Schema(
{
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_FIRE_EVENT, default=False): cv.boolean,
vol.Optional(CONF_OFF_DELAY): vol.All(
cv.time_period, cv.positive_timedelta, lambda value: value.total_seconds()
),
vol.Optional(CONF_DATA_BITS): cv.positive_int,
vol.Optional(CONF_COMMAND_ON): cv.byte,
vol.Optional(CONF_COMMAND_OFF): cv.byte,
vol.Optional(CONF_SIGNAL_REPETITIONS, default=1): cv.positive_int,
}
)
BASE_SCHEMA = vol.Schema(
{
vol.Optional(CONF_DEBUG): cv.boolean,
vol.Optional(CONF_AUTOMATIC_ADD, default=False): cv.boolean,
vol.Optional(CONF_DEVICES, default={}): {cv.string: _ensure_device},
},
)
DEVICE_SCHEMA = BASE_SCHEMA.extend({vol.Required(CONF_DEVICE): cv.string})
PORT_SCHEMA = BASE_SCHEMA.extend(
{vol.Required(CONF_PORT): cv.port, vol.Optional(CONF_HOST): cv.string}
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.All(cv.deprecated(CONF_DEBUG), vol.Any(DEVICE_SCHEMA, PORT_SCHEMA))},
extra=vol.ALLOW_EXTRA,
)
DOMAINS = ["switch", "sensor", "light", "binary_sensor", "cover"]
async def async_setup(hass, config):
    """Set up the RFXtrx component from YAML configuration.

    Forwards the YAML config into a config-entry import flow so the
    integration is managed as a config entry afterwards.
    """
    if DOMAIN not in config:
        return True

    # Connection settings; host/port/device may be absent depending on
    # whether DEVICE_SCHEMA or PORT_SCHEMA validated the config.
    # CONF_DEVICES always exists (schema default {}).
    data = {
        CONF_HOST: config[DOMAIN].get(CONF_HOST),
        CONF_PORT: config[DOMAIN].get(CONF_PORT),
        CONF_DEVICE: config[DOMAIN].get(CONF_DEVICE),
        CONF_AUTOMATIC_ADD: config[DOMAIN].get(CONF_AUTOMATIC_ADD),
        CONF_DEVICES: config[DOMAIN][CONF_DEVICES],
    }

    # Read device_id from the event code add to the data that will end up in the ConfigEntry
    for event_code, event_config in data[CONF_DEVICES].items():
        event = get_rfx_object(event_code)
        if event is None:
            # Unparseable event code: keep the entry but skip id resolution.
            continue
        device_id = get_device_id(
            event.device, data_bits=event_config.get(CONF_DATA_BITS)
        )
        event_config[CONF_DEVICE_ID] = device_id

    hass.async_create_task(
        hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data=data,
        )
    )
    return True
async def async_setup_entry(hass, entry: config_entries.ConfigEntry):
    """Set up the RFXtrx component from a config entry."""
    hass.data.setdefault(DOMAIN, {})

    # Establish the connection and event handling first ...
    await async_setup_internal(hass, entry)

    # ... then forward setup to every supported platform.
    for domain in DOMAINS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, domain)
        )
    return True
async def async_unload_entry(hass, entry: config_entries.ConfigEntry):
    """Unload RFXtrx component."""
    # Unload every forwarded platform; abort if any unload fails.
    if not all(
        await asyncio.gather(
            *[
                hass.config_entries.async_forward_entry_unload(entry, component)
                for component in DOMAINS
            ]
        )
    ):
        return False

    hass.services.async_remove(DOMAIN, SERVICE_SEND)

    # Detach the EVENT_HOMEASSISTANT_STOP listener registered during setup.
    listener = hass.data[DOMAIN][DATA_LISTENER]
    listener()

    # Closing the serial/TCP connection may block, so run it in the executor.
    rfx_object = hass.data[DOMAIN][DATA_RFXOBJECT]
    await hass.async_add_executor_job(rfx_object.close_connection)
    return True
def _create_rfx(config):
    """Construct an RFXtrx connection object from a config mapping."""
    port = config[CONF_PORT]
    if port is None:
        # No port configured: connect over a local serial device.
        return rfxtrxmod.Connect(config[CONF_DEVICE], None)
    # A port implies a TCP connection to (host, port).
    return rfxtrxmod.Connect(
        (config[CONF_HOST], port),
        None,
        transport_protocol=rfxtrxmod.PyNetworkTransport,
    )
def _get_device_lookup(devices):
"""Get a lookup structure for devices."""
lookup = {}
for event_code, event_config in devices.items():
event = get_rfx_object(event_code)
if event is None:
continue
device_id = get_device_id(
event.device, data_bits=event_config.get(CONF_DATA_BITS)
)
lookup[device_id] = event_config
return lookup
async def async_setup_internal(hass, entry: config_entries.ConfigEntry):
    """Set up the RFXtrx component: transport, event handling, services."""
    config = entry.data

    # Initialize library
    try:
        async with async_timeout.timeout(5):
            # Opening the serial/TCP transport blocks; run in the executor.
            rfx_object = await hass.async_add_executor_job(_create_rfx, config)
    except asyncio.TimeoutError as err:
        raise ConfigEntryNotReady from err

    # Setup some per device config
    devices = _get_device_lookup(config[CONF_DEVICES])

    # Declare the Handle event
    @callback
    def async_handle_receive(event):
        """Handle received messages from RFXtrx gateway."""
        # Log RFXCOM event
        if not event.device.id_string:
            return
        event_data = {
            "packet_type": event.device.packettype,
            "sub_type": event.device.subtype,
            "type_string": event.device.type_string,
            "id_string": event.device.id_string,
            "data": binascii.hexlify(event.data).decode("ASCII"),
            "values": getattr(event, "values", None),
        }
        _LOGGER.debug("Receive RFXCOM event: %s", event_data)

        data_bits = get_device_data_bits(event.device, devices)
        device_id = get_device_id(event.device, data_bits=data_bits)

        # Register new devices
        if config[CONF_AUTOMATIC_ADD] and device_id not in devices:
            _add_device(event, device_id)

        # Callback to HA registered components.
        hass.helpers.dispatcher.async_dispatcher_send(SIGNAL_EVENT, event, device_id)

        # Signal event to any other listeners
        fire_event = devices.get(device_id, {}).get(CONF_FIRE_EVENT)
        if fire_event:
            hass.bus.async_fire(EVENT_RFXTRX_EVENT, event_data)

    @callback
    def _add_device(event, device_id):
        """Add a device to config entry."""
        config = DEVICE_DATA_SCHEMA({})
        config[CONF_DEVICE_ID] = device_id

        # Persist the new device into the config entry's stored data.
        data = entry.data.copy()
        data[CONF_DEVICES] = copy.deepcopy(entry.data[CONF_DEVICES])
        event_code = binascii.hexlify(event.data).decode("ASCII")
        data[CONF_DEVICES][event_code] = config
        hass.config_entries.async_update_entry(entry=entry, data=data)

        devices[device_id] = config

    def _shutdown_rfxtrx(event):
        """Close connection with RFXtrx."""
        rfx_object.close_connection()

    # Close the transport cleanly when Home Assistant stops.
    listener = hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _shutdown_rfxtrx)
    hass.data[DOMAIN][DATA_LISTENER] = listener
    hass.data[DOMAIN][DATA_RFXOBJECT] = rfx_object

    # Forward library events into Home Assistant via hass.add_job.
    rfx_object.event_callback = lambda event: hass.add_job(async_handle_receive, event)

    def send(call):
        """Send a raw packet on the transport (SERVICE_SEND handler)."""
        event = call.data[ATTR_EVENT]
        rfx_object.transport.send(event)

    hass.services.async_register(DOMAIN, SERVICE_SEND, send, schema=SERVICE_SEND_SCHEMA)
def get_rfx_object(packetid):
    """Return the RFX event object parsed from a hex packet id, or None."""
    try:
        raw = bytearray.fromhex(packetid)
    except ValueError:
        return None
    packet = rfxtrxmod.lowlevel.parse(raw)
    if packet is None:
        return None
    # Wrap the low-level packet in the matching high-level event type.
    if isinstance(packet, rfxtrxmod.lowlevel.SensorPacket):
        event = rfxtrxmod.SensorEvent(packet)
    elif isinstance(packet, rfxtrxmod.lowlevel.Status):
        event = rfxtrxmod.StatusEvent(packet)
    else:
        event = rfxtrxmod.ControlEvent(packet)
    # Keep the raw bytes on the event for callers that hexlify event.data.
    event.data = raw
    return event
def get_pt2262_deviceid(device_id, nb_data_bits):
    """Extract and return the address bits from a Lighting4/PT2262 packet."""
    if nb_data_bits is None:
        return
    try:
        payload = bytearray.fromhex(device_id)
    except ValueError:
        return None
    # The low nb_data_bits bits of the last byte are command data, not
    # address; zero them out before returning the address.
    address_mask = 0xFF & ~((1 << nb_data_bits) - 1)
    payload[-1] &= address_mask
    return binascii.hexlify(payload)
def get_pt2262_cmd(device_id, data_bits):
    """Extract and return the data bits from a Lighting4/PT2262 packet.

    Returns the low *data_bits* bits of the last byte as a hex string
    (e.g. '0xe'), or None when *device_id* is not valid hex.
    """
    try:
        data = bytearray.fromhex(device_id)
    except ValueError:
        return None
    # Keep only the low data bits of the last byte — the command part.
    mask = 0xFF & ((1 << data_bits) - 1)
    return hex(data[-1] & mask)
def get_device_data_bits(device, devices):
    """Deduce data bits for device based on a cache of device bits.

    Only Lighting4/PT2262 packets carry data bits; for any other packet
    type this returns None.
    """
    data_bits = None
    if device.packettype == DEVICE_PACKET_TYPE_LIGHTING4:
        for device_id, entity_config in devices.items():
            bits = entity_config.get(CONF_DATA_BITS)
            # A configured device matches when masking with its bit count
            # reproduces its stored device id.
            if get_device_id(device, bits) == device_id:
                data_bits = bits
                break
    return data_bits
def find_possible_pt2262_device(device_ids, device_id):
    """Look for the device which id matches the given device_id parameter.

    Compares *device_id* against every known id of the same length; on a
    shared prefix, logs a suggested PT2262 configuration and returns the
    matching known id.  Returns None when nothing matches.
    """
    for dev_id in device_ids:
        if len(dev_id) == len(device_id):
            size = None
            # Track the index of the last position where the ids agree.
            for i, (char1, char2) in enumerate(zip(dev_id, device_id)):
                if char1 != char2:
                    break
                size = i

            if size is not None:
                # Number of trailing characters that differ (4 bits each).
                size = len(dev_id) - size - 1
                # NOTE(review): if the two ids are fully identical, size
                # becomes 0 and the [-size:] slices below log the whole id
                # — confirm that is the intended log output.
                _LOGGER.info(
                    "rfxtrx: found possible device %s for %s "
                    "with the following configuration:\n"
                    "data_bits=%d\n"
                    "command_on=0x%s\n"
                    "command_off=0x%s\n",
                    device_id,
                    dev_id,
                    size * 4,
                    dev_id[-size:],
                    device_id[-size:],
                )
                return dev_id
    return None
def get_device_id(device, data_bits=None):
    """Calculate a device id for device."""
    id_string = device.id_string
    # Mask out the data bits for PT2262 devices so that different commands
    # from the same remote resolve to one device id.
    use_mask = bool(data_bits) and device.packettype == DEVICE_PACKET_TYPE_LIGHTING4
    if use_mask:
        masked = get_pt2262_deviceid(id_string, data_bits)
        if masked:
            id_string = masked.decode("ASCII")
    return (f"{device.packettype:x}", f"{device.subtype:x}", id_string)
class RfxtrxEntity(RestoreEntity):
    """Represents a Rfxtrx device.

    Contains the common logic for Rfxtrx lights and switches.
    """

    def __init__(self, device, device_id, event=None):
        """Initialize the device.

        Args:
            device: rfxtrx device object (provides type_string/id_string).
            device_id: (packettype, subtype, id_string) tuple from get_device_id.
            event: optional last known event to restore state from.
        """
        self._name = f"{device.type_string} {device.id_string}"
        self._device = device
        self._event = event
        self._device_id = device_id
        # Unique id is the joined parts of the device_id tuple.
        self._unique_id = "_".join(x for x in self._device_id)

    async def async_added_to_hass(self):
        """Restore RFXtrx device state (ON/OFF)."""
        if self._event:
            # Re-apply the last known event so state survives a restart.
            self._apply_event(self._event)
        # Every gateway event is dispatched here; _handle_event filters.
        self.async_on_remove(
            self.hass.helpers.dispatcher.async_dispatcher_connect(
                SIGNAL_EVENT, self._handle_event
            )
        )
        # Allow the integration to remove this entity via a dispatcher signal.
        self.async_on_remove(
            self.hass.helpers.dispatcher.async_dispatcher_connect(
                f"{DOMAIN}_{CONF_REMOVE_DEVICE}_{self._device_id}", self.async_remove
            )
        )

    @property
    def should_poll(self):
        """No polling needed for a RFXtrx switch."""
        return False

    @property
    def name(self):
        """Return the name of the device if any."""
        return self._name

    @property
    def device_state_attributes(self):
        """Return the device state attributes."""
        if not self._event:
            return None
        # Expose the raw packet bytes of the last event as a hex string.
        return {ATTR_EVENT: "".join(f"{x:02x}" for x in self._event.data)}

    @property
    def assumed_state(self):
        """Return true if unable to access real state of entity."""
        return True

    @property
    def unique_id(self):
        """Return unique identifier of remote device."""
        return self._unique_id

    @property
    def device_info(self):
        """Return the device info."""
        return {
            "identifiers": {(DOMAIN, *self._device_id)},
            "name": f"{self._device.type_string} {self._device.id_string}",
            "model": self._device.type_string,
        }

    def _apply_event(self, event):
        """Apply a received event."""
        self._event = event

    @callback
    def _handle_event(self, event, device_id):
        """Handle a reception of data, overridden by other classes."""
class RfxtrxCommandEntity(RfxtrxEntity):
"""Represents a Rfxtrx device.
Contains the common logic for Rfxtrx lights and switches.
"""
def __init__(self, device, device_id, signal_repetitions=1, event=None):
"""Initialzie a switch or light device."""
super().__init__(device, device_id, event=event)
self.signal_repetitions = signal_repetitions
self._state = None
async def _async_send(self, fun, *args):
rfx_object = self.hass.data[DOMAIN][DATA_RFXOBJECT]
for _ in range(self.signal_repetitions):
await self.hass.async_add_executor_job(fun, rfx_object.transport, *args) | 0.437103 | 0.12416 |
from .. import tf
from ..Arch import Discriminator
from ..Framework.GAN import loss_bce_gan
from ..Framework.SuperResolution import SuperResolution
from ..Util import Vgg, prelu
def _normalize(x):
return x / 127.5 - 1
def _denormalize(x):
return (x + 1) * 127.5
def _clip(image):
    """Clamp image values to [0, 255] and cast to uint8."""
    clamped = tf.clip_by_value(image, 0, 255)
    return tf.cast(clamped, 'uint8')
class SRGAN(SuperResolution):
"""Photo-Realistic Single Image Super-Resolution Using a Generative Adversarial Network
Args:
glayers: number of layers in generator.
dlayers: number of layers in discriminator.
vgg_layer: vgg feature layer name for perceptual loss.
init_epoch: number of initializing epochs.
mse_weight:
gan_weight:
vgg_weight:
"""
def __init__(self, glayers=16, dlayers=4, vgg_layer='block2_conv2',
init_epoch=100, mse_weight=1, gan_weight=1e-3,
use_vgg=False, vgg_weight=2e-6, name='srgan', **kwargs):
super(SRGAN, self).__init__(**kwargs)
self.name = name
self.g_layers = glayers
self.init_epoch = init_epoch
self.mse_weight = mse_weight
self.gan_weight = gan_weight
self.vgg_weight = vgg_weight
self.vgg_layer = vgg_layer
self.use_vgg = use_vgg
self.vgg = None
if self.use_vgg:
self.vgg = Vgg(False, 'vgg19')
self.D = Discriminator.dcgan_d(self, [None, None, self.channel], 64,
times_stride=dlayers, norm='bn',
name_or_scope='Critic')
def build_graph(self):
super(SRGAN, self).build_graph()
inputs_norm = _normalize(self.inputs_preproc[-1])
label_norm = _normalize(self.label[-1])
with tf.variable_scope(self.name):
shallow_feature = self.prelu_conv2d(inputs_norm, 64, 9)
x = shallow_feature
for _ in range(self.g_layers):
x = self.resblock(x, 64, 3, activation='prelu',
use_batchnorm=True)
x = self.bn_conv2d(x, 64, 3)
x += shallow_feature
x = self.conv2d(x, 256, 3)
sr = self.upscale(x, direct_output=False, activator=prelu)
sr = self.tanh_conv2d(sr, self.channel, 9)
self.outputs.append(_denormalize(sr))
disc_real = self.D(label_norm)
disc_fake = self.D(sr)
with tf.name_scope('Loss'):
loss_gen, loss_disc = loss_bce_gan(disc_real, disc_fake)
mse = tf.losses.mean_squared_error(label_norm, sr)
reg = tf.losses.get_regularization_losses()
loss = tf.add_n(
[mse * self.mse_weight, loss_gen * self.gan_weight] + reg)
if self.use_vgg:
vgg_real = self.vgg(self.label[-1], self.vgg_layer)
vgg_fake = self.vgg(self.outputs[-1], self.vgg_layer)
loss_vgg = tf.losses.mean_squared_error(
vgg_real, vgg_fake, self.vgg_weight)
loss += loss_vgg
var_g = tf.trainable_variables(self.name)
var_d = tf.trainable_variables('Critic')
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
opt_i = tf.train.AdamOptimizer(self.learning_rate).minimize(
mse, self.global_steps, var_list=var_g)
opt_g = tf.train.AdamOptimizer(self.learning_rate).minimize(
loss, self.global_steps, var_list=var_g)
opt_d = tf.train.AdamOptimizer(self.learning_rate).minimize(
loss_disc, var_list=var_d)
self.loss = [opt_i, opt_d, opt_g]
self.train_metric['g_loss'] = loss_gen
self.train_metric['d_loss'] = loss_disc
self.train_metric['loss'] = loss
self.metrics['psnr'] = tf.reduce_mean(
tf.image.psnr(self.label[-1], self.outputs[-1], 255))
self.metrics['ssim'] = tf.reduce_mean(
tf.image.ssim(self.label[-1], self.outputs[-1], 255))
def build_loss(self):
pass
def build_summary(self):
super(SRGAN, self).build_summary()
tf.summary.image('SR', _clip(self.outputs[-1]))
def build_saver(self):
var_d = tf.global_variables('Critic')
var_g = tf.global_variables(self.name)
loss = tf.global_variables('Loss')
steps = [self.global_steps]
self.savers.update({
'Critic': tf.train.Saver(var_d, max_to_keep=1),
'Gen': tf.train.Saver(var_g, max_to_keep=1),
'Misc': tf.train.Saver(loss + steps, max_to_keep=1),
})
def train_batch(self, feature, label, learning_rate=1e-4, **kwargs):
epoch = kwargs.get('epochs')
if epoch <= self.init_epoch:
loss = self.loss[0]
else:
loss = self.loss[1:]
return super(SRGAN, self).train_batch(feature, label, learning_rate,
loss=loss) | VSR/Backend/TF/Models/SrGan.py | from .. import tf
from ..Arch import Discriminator
from ..Framework.GAN import loss_bce_gan
from ..Framework.SuperResolution import SuperResolution
from ..Util import Vgg, prelu
def _normalize(x):
    """Scale 8-bit pixel values from [0, 255] to [-1, 1]."""
    return x / 127.5 - 1
def _denormalize(x):
    """Scale values from [-1, 1] back to [0, 255]."""
    return (x + 1) * 127.5
def _clip(image):
    """Clamp to [0, 255] and cast to uint8 (used for image summaries)."""
    return tf.cast(tf.clip_by_value(image, 0, 255), 'uint8')
class SRGAN(SuperResolution):
"""Photo-Realistic Single Image Super-Resolution Using a Generative Adversarial Network
Args:
glayers: number of layers in generator.
dlayers: number of layers in discriminator.
vgg_layer: vgg feature layer name for perceptual loss.
init_epoch: number of initializing epochs.
mse_weight:
gan_weight:
vgg_weight:
"""
def __init__(self, glayers=16, dlayers=4, vgg_layer='block2_conv2',
init_epoch=100, mse_weight=1, gan_weight=1e-3,
use_vgg=False, vgg_weight=2e-6, name='srgan', **kwargs):
super(SRGAN, self).__init__(**kwargs)
self.name = name
self.g_layers = glayers
self.init_epoch = init_epoch
self.mse_weight = mse_weight
self.gan_weight = gan_weight
self.vgg_weight = vgg_weight
self.vgg_layer = vgg_layer
self.use_vgg = use_vgg
self.vgg = None
if self.use_vgg:
self.vgg = Vgg(False, 'vgg19')
self.D = Discriminator.dcgan_d(self, [None, None, self.channel], 64,
times_stride=dlayers, norm='bn',
name_or_scope='Critic')
def build_graph(self):
super(SRGAN, self).build_graph()
inputs_norm = _normalize(self.inputs_preproc[-1])
label_norm = _normalize(self.label[-1])
with tf.variable_scope(self.name):
shallow_feature = self.prelu_conv2d(inputs_norm, 64, 9)
x = shallow_feature
for _ in range(self.g_layers):
x = self.resblock(x, 64, 3, activation='prelu',
use_batchnorm=True)
x = self.bn_conv2d(x, 64, 3)
x += shallow_feature
x = self.conv2d(x, 256, 3)
sr = self.upscale(x, direct_output=False, activator=prelu)
sr = self.tanh_conv2d(sr, self.channel, 9)
self.outputs.append(_denormalize(sr))
disc_real = self.D(label_norm)
disc_fake = self.D(sr)
with tf.name_scope('Loss'):
loss_gen, loss_disc = loss_bce_gan(disc_real, disc_fake)
mse = tf.losses.mean_squared_error(label_norm, sr)
reg = tf.losses.get_regularization_losses()
loss = tf.add_n(
[mse * self.mse_weight, loss_gen * self.gan_weight] + reg)
if self.use_vgg:
vgg_real = self.vgg(self.label[-1], self.vgg_layer)
vgg_fake = self.vgg(self.outputs[-1], self.vgg_layer)
loss_vgg = tf.losses.mean_squared_error(
vgg_real, vgg_fake, self.vgg_weight)
loss += loss_vgg
var_g = tf.trainable_variables(self.name)
var_d = tf.trainable_variables('Critic')
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
opt_i = tf.train.AdamOptimizer(self.learning_rate).minimize(
mse, self.global_steps, var_list=var_g)
opt_g = tf.train.AdamOptimizer(self.learning_rate).minimize(
loss, self.global_steps, var_list=var_g)
opt_d = tf.train.AdamOptimizer(self.learning_rate).minimize(
loss_disc, var_list=var_d)
self.loss = [opt_i, opt_d, opt_g]
self.train_metric['g_loss'] = loss_gen
self.train_metric['d_loss'] = loss_disc
self.train_metric['loss'] = loss
self.metrics['psnr'] = tf.reduce_mean(
tf.image.psnr(self.label[-1], self.outputs[-1], 255))
self.metrics['ssim'] = tf.reduce_mean(
tf.image.ssim(self.label[-1], self.outputs[-1], 255))
def build_loss(self):
pass
def build_summary(self):
super(SRGAN, self).build_summary()
tf.summary.image('SR', _clip(self.outputs[-1]))
def build_saver(self):
var_d = tf.global_variables('Critic')
var_g = tf.global_variables(self.name)
loss = tf.global_variables('Loss')
steps = [self.global_steps]
self.savers.update({
'Critic': tf.train.Saver(var_d, max_to_keep=1),
'Gen': tf.train.Saver(var_g, max_to_keep=1),
'Misc': tf.train.Saver(loss + steps, max_to_keep=1),
})
def train_batch(self, feature, label, learning_rate=1e-4, **kwargs):
epoch = kwargs.get('epochs')
if epoch <= self.init_epoch:
loss = self.loss[0]
else:
loss = self.loss[1:]
return super(SRGAN, self).train_batch(feature, label, learning_rate,
loss=loss) | 0.897367 | 0.233357 |
import tensorflow as tf
import numpy as np
import os
import random
def _single_process(image, label, specs, cropped_size):
    """Map function to process single instance of dataset object.

    Args:
        image: numpy array image object, (28, 28), 0 ~ 255 uint8;
        label: numpy array label, (,);
        specs: dataset specifications;
        cropped_size: image size after cropping;

    Returns:
        feature: a dictionary contains image (CHW float in [0, 1]) and
        one-hot label.
    """
    if specs['distort']:
        if cropped_size <= specs['image_size']:
            if specs['split'] == 'train':
                # random cropping
                image = tf.random_crop(image, [cropped_size, cropped_size])
                # random rotation within -15° ~ 15°
                # NOTE(review): random.uniform is evaluated once when the
                # dataset graph is built, so every example gets the same
                # rotation angle — confirm whether a per-example tf random
                # op was intended here.
                image = tf.contrib.image.rotate(
                    image, random.uniform(-0.26179938779, 0.26179938779))
                # expand image dimensions into (HWC)
                image = tf.expand_dims(image, -1)
            elif specs['split'] == 'test':
                # expand image dimensions into (HWC)
                image = tf.expand_dims(image, -1)
                # central cropping (requires HWC)
                image = tf.image.resize_image_with_crop_or_pad(
                    image, cropped_size, cropped_size)
    else:
        # expand image dimensions into (HWC)
        image = tf.expand_dims(image, -1)  # (HWC)
    # convert from 0 ~ 255 to 0. ~ 1.
    image = tf.cast(image, tf.float32) * (1. / 255.)
    # transpose image into (CHW)
    image = tf.transpose(image, [2, 0, 1])  # (CHW)
    feature = {
        'image': image,
        'label': tf.one_hot(label, 10)
    }
    return feature
def _feature_process(feature):
"""Map function to process batched data inside feature dictionary.
Args:
feature: a dictionary contains image, label, recons_image, recons_label.
Returns:
batched_feature: a dictionary contains images, labels, recons_images, recons_labels.
"""
batched_feature = {
'images': feature['image'],
'labels': feature['label'],
}
return batched_feature
def inputs(total_batch_size, num_gpus, max_epochs, cropped_size,
data_dir, split, distort=True):
"""Construct inputs for mnist dataset.
Args:
total_batch_size: total number of images per batch;
num_gpus: number of GPUs available to use;
max_epochs: maximum epochs to go through the model;
cropped_size: image size after cropping;
data_dir: path to the mnist tfrecords data directory;
split: 'train' or 'test', which split of dataset to read from;
distort: whether to distort the images, including random cropping, rotations.
Returns:
batched_dataset: Dataset object each instance is a feature dictionary
specs: dataset specifications.
"""
assert split == 'train' or split == 'test'
"""Dataset specs"""
specs = {
'split': split,
'total_size': None, # total size of one epoch
'steps_per_epoch': None, # number of steps per epoch
'total_batch_size': int(total_batch_size),
'num_gpus': int(num_gpus),
'batch_size': int(total_batch_size / num_gpus),
'max_epochs': int(max_epochs), # number of epochs to repeat
'image_size': 28,
'depth': 1,
'num_classes': 10,
'distort': distort
}
if cropped_size == None:
cropped_size = specs['image_size']
assert cropped_size <= specs['image_size']
"""Load data from numpy array file"""
with np.load(os.path.join(data_dir, 'mnist.npz')) as f:
images, labels = f['x_%s' % split], f['y_%s' % split]
# image: 0 ~ 255 uint8
# labels 0 ~ 9 uint8
assert images.shape[0] == labels.shape[0]
specs['total_size'] = int(images.shape[0])
specs['steps_per_epoch'] = int(specs['total_size'] // specs['total_batch_size'])
"""Process dataset object"""
# read from numpy array
dataset = tf.data.Dataset.from_tensor_slices((images, labels)) # ((28, 28), (,))
# prefetch examples
dataset = dataset.prefetch(
buffer_size=specs['batch_size']*specs['num_gpus']*2)
# shuffle (if 'train') and repeat `max_epochs`
if split == 'train':
dataset = dataset.apply(tf.contrib.data.shuffle_and_repeat(
buffer_size=specs['batch_size']*specs['num_gpus']*10,
count=specs['max_epochs']))
else:
dataset = dataset.repeat(specs['max_epochs'])
# process single example
dataset = dataset.map(
lambda image, label: _single_process(image, label, specs, cropped_size),
num_parallel_calls=3)
specs['image_size'] = cropped_size # after processed single example, the image size
# will be cropped into cropped_size.
# stack into batches
batched_dataset = dataset.batch(specs['batch_size'])
# process into feature
batched_dataset = batched_dataset.map(
_feature_process,
num_parallel_calls=3)
# prefetch to improve the performance
batched_dataset = batched_dataset.prefetch(specs['num_gpus'])
return batched_dataset, specs | input_data/mnist/mnist_input.py | import tensorflow as tf
import numpy as np
import os
import random
def _single_process(image, label, specs, cropped_size):
    """Map function to process single instance of dataset object.

    Args:
        image: numpy array image object, (28, 28), 0 ~ 255 uint8;
        label: numpy array label, (,);
        specs: dataset specifications;
        cropped_size: image size after cropping;

    Returns:
        feature: a dictionary contains image (CHW float in [0, 1]) and
        one-hot label.
    """
    if specs['distort']:
        if cropped_size <= specs['image_size']:
            if specs['split'] == 'train':
                # random cropping
                image = tf.random_crop(image, [cropped_size, cropped_size])
                # random rotation within -15° ~ 15°
                # NOTE(review): random.uniform is evaluated once when the
                # dataset graph is built, so every example gets the same
                # rotation angle — confirm whether a per-example tf random
                # op was intended here.
                image = tf.contrib.image.rotate(
                    image, random.uniform(-0.26179938779, 0.26179938779))
                # expand image dimensions into (HWC)
                image = tf.expand_dims(image, -1)
            elif specs['split'] == 'test':
                # expand image dimensions into (HWC)
                image = tf.expand_dims(image, -1)
                # central cropping (requires HWC)
                image = tf.image.resize_image_with_crop_or_pad(
                    image, cropped_size, cropped_size)
    else:
        # expand image dimensions into (HWC)
        image = tf.expand_dims(image, -1)  # (HWC)
    # convert from 0 ~ 255 to 0. ~ 1.
    image = tf.cast(image, tf.float32) * (1. / 255.)
    # transpose image into (CHW)
    image = tf.transpose(image, [2, 0, 1])  # (CHW)
    feature = {
        'image': image,
        'label': tf.one_hot(label, 10)
    }
    return feature
def _feature_process(feature):
"""Map function to process batched data inside feature dictionary.
Args:
feature: a dictionary contains image, label, recons_image, recons_label.
Returns:
batched_feature: a dictionary contains images, labels, recons_images, recons_labels.
"""
batched_feature = {
'images': feature['image'],
'labels': feature['label'],
}
return batched_feature
def inputs(total_batch_size, num_gpus, max_epochs, cropped_size,
           data_dir, split, distort=True):
    """Construct inputs for mnist dataset.

    Args:
        total_batch_size: total number of images per batch;
        num_gpus: number of GPUs available to use;
        max_epochs: maximum epochs to go through the model;
        cropped_size: image size after cropping; None means keep 28x28;
        data_dir: path to the mnist data directory (expects mnist.npz);
        split: 'train' or 'test', which split of dataset to read from;
        distort: whether to distort the images (random cropping, rotations).
    Returns:
        batched_dataset: Dataset object, each instance is a feature dictionary.
        specs: dataset specifications.
    """
    assert split == 'train' or split == 'test'
    # Dataset specs
    specs = {
        'split': split,
        'total_size': None,          # total size of one epoch
        'steps_per_epoch': None,     # number of steps per epoch
        'total_batch_size': int(total_batch_size),
        'num_gpus': int(num_gpus),
        'batch_size': int(total_batch_size / num_gpus),
        'max_epochs': int(max_epochs),  # number of epochs to repeat
        'image_size': 28,
        'depth': 1,
        'num_classes': 10,
        'distort': distort
    }
    # Fix: compare against None with 'is', not '==' (PEP 8 E711).
    if cropped_size is None:
        cropped_size = specs['image_size']
    assert cropped_size <= specs['image_size']
    # Load data from numpy array file
    with np.load(os.path.join(data_dir, 'mnist.npz')) as f:
        # images: 0 ~ 255 uint8; labels: 0 ~ 9 uint8
        images, labels = f['x_%s' % split], f['y_%s' % split]
    assert images.shape[0] == labels.shape[0]
    specs['total_size'] = int(images.shape[0])
    specs['steps_per_epoch'] = int(specs['total_size'] // specs['total_batch_size'])
    # Build the dataset pipeline from the in-memory arrays.
    dataset = tf.data.Dataset.from_tensor_slices((images, labels))  # ((28, 28), (,))
    # prefetch examples
    dataset = dataset.prefetch(
        buffer_size=specs['batch_size'] * specs['num_gpus'] * 2)
    # shuffle (train only) and repeat `max_epochs`
    if split == 'train':
        dataset = dataset.apply(tf.contrib.data.shuffle_and_repeat(
            buffer_size=specs['batch_size'] * specs['num_gpus'] * 10,
            count=specs['max_epochs']))
    else:
        dataset = dataset.repeat(specs['max_epochs'])
    # per-example processing: crop/rotate/scale/transpose
    dataset = dataset.map(
        lambda image, label: _single_process(image, label, specs, cropped_size),
        num_parallel_calls=3)
    specs['image_size'] = cropped_size  # images are cropped_size x cropped_size from here on
    # stack into batches
    batched_dataset = dataset.batch(specs['batch_size'])
    # rename per-example keys to batched plural keys
    batched_dataset = batched_dataset.map(
        _feature_process,
        num_parallel_calls=3)
    # prefetch to improve the performance
    batched_dataset = batched_dataset.prefetch(specs['num_gpus'])
    return batched_dataset, specs
import unittest
from controller.array_action import errors
from controller.array_action.svc_cli_result_reader import SVCListResultsReader
# Fixture blobs mimicking raw SVC CLI listing output: one "key value" pair per
# line; host_2 includes blank/whitespace lines and a value-less key.
host_1 = "\n".join(("id 1", "name host_1", "WWPN wwpn1", "protocol fc", "WWPN wwpn2"))
host_2 = "\n".join(("id 2", "name host_2", "", " ", "iscsi", "status not active"))
host_3 = "\n".join(("id 3", "name host_3", "iscsi iscsi1"))
class TestSVCListReader(unittest.TestCase):
    """Tests for SVCListResultsReader: parsing 'key value' CLI listings into host records."""

    def test_single_host_success(self):
        hosts_reader = SVCListResultsReader(host_1)
        hosts_list = list(hosts_reader)
        self.assertEqual(len(hosts_list), 1)
        self._assert_host_1(hosts_list[0])

    def test_multiple_hosts_success(self):
        hosts_count = 3
        # blank/whitespace lines between records must be tolerated
        hosts_raw_input = "\n".join((host_1, "\n \n", host_2, host_3, " "))
        assert_methods = [self._assert_host_1, self._assert_host_2, self._assert_host_3]
        hosts_reader = SVCListResultsReader(hosts_raw_input)
        hosts_list = list(hosts_reader)
        self.assertEqual(len(hosts_list), hosts_count)
        for i in range(hosts_count):
            assert_methods[i](hosts_list[i])

    def test_no_hosts_empty_input(self):
        hosts_reader = SVCListResultsReader("")
        hosts_list = list(hosts_reader)
        self.assertFalse(hosts_list)

    def test_no_hosts_whitespace_input(self):
        hosts_reader = SVCListResultsReader("\n\n\n")
        hosts_list = list(hosts_reader)
        self.assertFalse(hosts_list)

    def test_illegal_input(self):
        # a record must start with an 'id' line; 'name' first is invalid
        illegal_input = "\n".join(("name host_3", "id 3"))
        with self.assertRaises(errors.InvalidCliResponseError):
            SVCListResultsReader(illegal_input)

    def _assert_host_1(self, host):
        self.assertEqual(host.get("id"), "1")
        self.assertEqual(host.get("name"), "host_1")
        # repeated keys accumulate into a list
        self.assertEqual(host.get_as_list("WWPN"), ["wwpn1", "wwpn2"])
        self.assertEqual(host.get("protocol"), "fc")
        self.assertEqual(host.get_as_list("protocol"), ["fc"])
        self.assertEqual(host.get("non_existing_value", "no-value"), "no-value")
        self.assertEqual(host.get_as_list("non_existing_value"), [])

    def _assert_host_2(self, host):
        self.assertEqual(host.get("id"), "2")
        self.assertEqual(host.get("name"), "host_2")
        # a key with no value yields an empty string
        self.assertEqual(host.get("iscsi"), "")
        self.assertEqual(host.get("status"), "not active")

    def _assert_host_3(self, host):
        self.assertEqual(host.get("id"), "3")
        self.assertEqual(host.get("name"), "host_3")
        self.assertEqual(host.get("iscsi"), "iscsi1")
from controller.array_action import errors
from controller.array_action.svc_cli_result_reader import SVCListResultsReader
# NOTE(review): duplicate of the host fixtures defined earlier in this file —
# extraction artifact; these rebind the same names.
host_1 = "\n".join(("id 1", "name host_1", "WWPN wwpn1", "protocol fc", "WWPN wwpn2"))
host_2 = "\n".join(("id 2", "name host_2", "", " ", "iscsi", "status not active"))
host_3 = "\n".join(("id 3", "name host_3", "iscsi iscsi1"))
# NOTE(review): byte-for-byte duplicate of TestSVCListReader defined earlier in
# this file — extraction artifact; this later definition rebinds the name.
class TestSVCListReader(unittest.TestCase):
    """Tests for SVCListResultsReader: parsing 'key value' CLI listings into host records."""

    def test_single_host_success(self):
        hosts_reader = SVCListResultsReader(host_1)
        hosts_list = list(hosts_reader)
        self.assertEqual(len(hosts_list), 1)
        self._assert_host_1(hosts_list[0])

    def test_multiple_hosts_success(self):
        hosts_count = 3
        hosts_raw_input = "\n".join((host_1, "\n \n", host_2, host_3, " "))
        assert_methods = [self._assert_host_1, self._assert_host_2, self._assert_host_3]
        hosts_reader = SVCListResultsReader(hosts_raw_input)
        hosts_list = list(hosts_reader)
        self.assertEqual(len(hosts_list), hosts_count)
        for i in range(hosts_count):
            assert_methods[i](hosts_list[i])

    def test_no_hosts_empty_input(self):
        hosts_reader = SVCListResultsReader("")
        hosts_list = list(hosts_reader)
        self.assertFalse(hosts_list)

    def test_no_hosts_whitespace_input(self):
        hosts_reader = SVCListResultsReader("\n\n\n")
        hosts_list = list(hosts_reader)
        self.assertFalse(hosts_list)

    def test_illegal_input(self):
        illegal_input = "\n".join(("name host_3", "id 3"))
        with self.assertRaises(errors.InvalidCliResponseError):
            SVCListResultsReader(illegal_input)

    def _assert_host_1(self, host):
        self.assertEqual(host.get("id"), "1")
        self.assertEqual(host.get("name"), "host_1")
        self.assertEqual(host.get_as_list("WWPN"), ["wwpn1", "wwpn2"])
        self.assertEqual(host.get("protocol"), "fc")
        self.assertEqual(host.get_as_list("protocol"), ["fc"])
        self.assertEqual(host.get("non_existing_value", "no-value"), "no-value")
        self.assertEqual(host.get_as_list("non_existing_value"), [])

    def _assert_host_2(self, host):
        self.assertEqual(host.get("id"), "2")
        self.assertEqual(host.get("name"), "host_2")
        self.assertEqual(host.get("iscsi"), "")
        self.assertEqual(host.get("status"), "not active")

    def _assert_host_3(self, host):
        self.assertEqual(host.get("id"), "3")
        self.assertEqual(host.get("name"), "host_3")
        self.assertEqual(host.get("iscsi"), "iscsi1")
import unittest
from protein_inference.problem_network import ProblemNetwork
import networkx as nx
class ProblemNetworkTest(unittest.TestCase):
    """Tests for ProblemNetwork: node-attribute queries over a networkx graph.

    Convention used by the fixtures: nodes carry a 'protein' attribute where
    1 marks a protein node and 0 marks a peptide node.
    """

    def test_get_proteins_several(self):
        g = nx.Graph()
        g.add_nodes_from([1,2,3], protein = 1)
        pn = ProblemNetwork(g)
        self.assertEqual(pn.get_proteins(),[1,2,3])

    def test_get_proteins_none(self):
        g = nx.Graph()
        g.add_nodes_from([1,2,3], protein = 0)
        pn = ProblemNetwork(g)
        self.assertEqual(pn.get_proteins(),[])

    def test_get_peptides_several(self):
        g = nx.Graph()
        g.add_nodes_from([1,2,3], protein = 0)
        g.add_nodes_from([4,5], protein = 1)
        pn = ProblemNetwork(g)
        self.assertEqual(pn.get_peptides(),[1,2,3])

    def test_update_nodes_no_prior_att(self):
        g = nx.Graph()
        g.add_nodes_from([1,2,3], protein = 1)
        pn = ProblemNetwork(g)
        pn.update_nodes([1,2,3], "att",3)
        self.assertEqual(pn.network.nodes[1]["att"],3)

    def test_update_nodes_prior_atts(self):
        g = nx.Graph()
        g.add_nodes_from([1,2,3], protein = 1)
        pn = ProblemNetwork(g)
        pn.update_nodes([1,2,3], "att",3)
        # second update must overwrite only the listed nodes
        pn.update_nodes([2,3], "att",4)
        self.assertEqual(pn.network.nodes[1]["att"],3)
        self.assertEqual(pn.network.nodes[2]["att"],4)

    def test_pick_nodes_all(self):
        g = nx.Graph()
        g.add_nodes_from([1,2,3], protein = 1)
        pn = ProblemNetwork(g)
        self.assertEqual(pn.pick_nodes("protein",1), [1,2,3])

    def test_pick_nodes_none(self):
        g = nx.Graph()
        g.add_nodes_from([1,2,3], protein = 1)
        pn = ProblemNetwork(g)
        self.assertEqual(pn.pick_nodes("protein",0), [])

    def test_get_node_attribute_dict_all(self):
        g = nx.Graph()
        g.add_nodes_from([1,2,3], protein = 1)
        pn = ProblemNetwork(g)
        self.assertEqual(pn.get_node_attribute_dict("protein"), {1:1,2:1,3:1})

    def test_get_node_attribute_dict_some(self):
        g = nx.Graph()
        g.add_nodes_from([1,2,3], protein = 1)
        # nodes 4 and 5 lack the attribute and must be absent from the result
        g.add_nodes_from([4,5])
        pn = ProblemNetwork(g)
        self.assertEqual(pn.get_node_attribute_dict("protein"), {1:1,2:1,3:1})

    def test_get_node_attribute_dict_none(self):
        g = nx.Graph()
        g.add_nodes_from([1,2,3], protein = 1)
        pn = ProblemNetwork(g)
        self.assertEqual(pn.get_node_attribute_dict("peptide"), {})
from protein_inference.problem_network import ProblemNetwork
import networkx as nx
# NOTE(review): byte-for-byte duplicate of ProblemNetworkTest defined earlier in
# this file — extraction artifact; this later definition rebinds the name.
class ProblemNetworkTest(unittest.TestCase):
    """Tests for ProblemNetwork: node-attribute queries over a networkx graph."""

    def test_get_proteins_several(self):
        g = nx.Graph()
        g.add_nodes_from([1,2,3], protein = 1)
        pn = ProblemNetwork(g)
        self.assertEqual(pn.get_proteins(),[1,2,3])

    def test_get_proteins_none(self):
        g = nx.Graph()
        g.add_nodes_from([1,2,3], protein = 0)
        pn = ProblemNetwork(g)
        self.assertEqual(pn.get_proteins(),[])

    def test_get_peptides_several(self):
        g = nx.Graph()
        g.add_nodes_from([1,2,3], protein = 0)
        g.add_nodes_from([4,5], protein = 1)
        pn = ProblemNetwork(g)
        self.assertEqual(pn.get_peptides(),[1,2,3])

    def test_update_nodes_no_prior_att(self):
        g = nx.Graph()
        g.add_nodes_from([1,2,3], protein = 1)
        pn = ProblemNetwork(g)
        pn.update_nodes([1,2,3], "att",3)
        self.assertEqual(pn.network.nodes[1]["att"],3)

    def test_update_nodes_prior_atts(self):
        g = nx.Graph()
        g.add_nodes_from([1,2,3], protein = 1)
        pn = ProblemNetwork(g)
        pn.update_nodes([1,2,3], "att",3)
        pn.update_nodes([2,3], "att",4)
        self.assertEqual(pn.network.nodes[1]["att"],3)
        self.assertEqual(pn.network.nodes[2]["att"],4)

    def test_pick_nodes_all(self):
        g = nx.Graph()
        g.add_nodes_from([1,2,3], protein = 1)
        pn = ProblemNetwork(g)
        self.assertEqual(pn.pick_nodes("protein",1), [1,2,3])

    def test_pick_nodes_none(self):
        g = nx.Graph()
        g.add_nodes_from([1,2,3], protein = 1)
        pn = ProblemNetwork(g)
        self.assertEqual(pn.pick_nodes("protein",0), [])

    def test_get_node_attribute_dict_all(self):
        g = nx.Graph()
        g.add_nodes_from([1,2,3], protein = 1)
        pn = ProblemNetwork(g)
        self.assertEqual(pn.get_node_attribute_dict("protein"), {1:1,2:1,3:1})

    def test_get_node_attribute_dict_some(self):
        g = nx.Graph()
        g.add_nodes_from([1,2,3], protein = 1)
        g.add_nodes_from([4,5])
        pn = ProblemNetwork(g)
        self.assertEqual(pn.get_node_attribute_dict("protein"), {1:1,2:1,3:1})

    def test_get_node_attribute_dict_none(self):
        g = nx.Graph()
        g.add_nodes_from([1,2,3], protein = 1)
        pn = ProblemNetwork(g)
        self.assertEqual(pn.get_node_attribute_dict("peptide"), {})
import torch
from UnarySim.stream.gen import RNG, SourceGen, BSGen
class FSUHardtanh(torch.nn.Identity):
    """Unary-domain hardtanh used at inference time.

    In the unary/bit-stream domain the value range is already bounded, so the
    activation is a pure pass-through (identity).
    """
    def __init__(self):
        super().__init__()
class ScaleHardtanh(torch.nn.Hardtanh):
    """Binary-domain hardtanh for training and inference.

    Values inside [-1, +1] pass through unchanged; values outside are clipped
    to -1 / +1 (the torch.nn.Hardtanh defaults).
    """
    def __init__(self):
        super().__init__()
class tanhP1(torch.nn.Module):
    """
    this module is for combinational tanh. The module is able to compute tanh(ax), where a = 1 in this implementation.
    the detail can be found at "<NAME> and <NAME>. 2017. Computing Arithmetic Functions Using Stochastic Logic by Series Expansion. Transactions on Emerging Topics in Computing (2017).", fig.10.
    """
    def __init__(self,
                 mode="unipolar",
                 rng="Sobol",
                 rng_dim=1,
                 rng_width=8,
                 rtype=torch.float,
                 stype=torch.float,
                 btype=torch.float):
        super(tanhP1, self).__init__()
        self.bitwidth = rng_width
        self.mode = mode
        self.rng = rng
        self.rng_dim = rng_dim
        self.rtype = rtype
        self.stype = stype
        self.btype = btype
        assert mode == "unipolar", "Combinational tanhP1 needs unipolar mode."
        # One independent RNG stream per constant bit-stream generator
        # (consecutive Sobol dimensions keep the streams decorrelated).
        self.rng_2 = RNG(bitwidth=self.bitwidth, dim=self.rng_dim+0, rng=self.rng, rtype=self.rtype)()
        self.rng_3 = RNG(bitwidth=self.bitwidth, dim=self.rng_dim+1, rng=self.rng, rtype=self.rtype)()
        self.rng_4 = RNG(bitwidth=self.bitwidth, dim=self.rng_dim+2, rng=self.rng, rtype=self.rtype)()
        self.rng_5 = RNG(bitwidth=self.bitwidth, dim=self.rng_dim+3, rng=self.rng, rtype=self.rtype)()
        # constants used in computation
        # (presumably the series-expansion coefficients from fig.10 of the
        # cited paper — TODO confirm against the reference)
        self.n2_c = torch.tensor([62/153]).type(self.rtype)
        self.n3_c = torch.tensor([ 17/42]).type(self.rtype)
        self.n4_c = torch.tensor([ 2/5]).type(self.rtype)
        self.n5_c = torch.tensor([ 1/3]).type(self.rtype)
        self.sg_n2_c = SourceGen(self.n2_c, self.bitwidth, self.mode, self.rtype)()
        self.sg_n3_c = SourceGen(self.n3_c, self.bitwidth, self.mode, self.rtype)()
        self.sg_n4_c = SourceGen(self.n4_c, self.bitwidth, self.mode, self.rtype)()
        self.sg_n5_c = SourceGen(self.n5_c, self.bitwidth, self.mode, self.rtype)()
        self.bs_n2_c = BSGen(self.sg_n2_c, self.rng_2, self.stype)
        self.bs_n3_c = BSGen(self.sg_n3_c, self.rng_3, self.stype)
        self.bs_n4_c = BSGen(self.sg_n4_c, self.rng_4, self.stype)
        self.bs_n5_c = BSGen(self.sg_n5_c, self.rng_5, self.stype)
        # 4 dff in series
        # (input_d1..d8 and n_1_d1..d3 are delay registers, stored as
        # non-trainable parameters so they persist across forward calls)
        self.input_d1 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.input_d2 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.input_d3 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.input_d4 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.input_d5 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.input_d6 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.input_d7 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.input_d8 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.n_1_d1 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.n_1_d2 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.n_1_d3 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        # index into the constant bit-stream generators; advances every cycle
        self.bs_idx = torch.nn.Parameter(torch.zeros(1).type(torch.long), requires_grad=False)
    def tanh_comb_forward(self, input):
        # One clock cycle of the combinational circuit: AND/NAND cascade over
        # the current input bit, delayed input bits, and constant bit streams.
        n_1 = (input.type(torch.int8) & self.input_d4.type(torch.int8))
        # Operating units
        n_2_c = self.bs_n2_c(self.bs_idx)
        n_3_c = self.bs_n3_c(self.bs_idx)
        n_4_c = self.bs_n4_c(self.bs_idx)
        n_5_c = self.bs_n5_c(self.bs_idx)
        # 1 - (a & b) is a NAND in {0,1} arithmetic
        n_2 = 1 - (n_1 & n_2_c.type(torch.int8))
        n_3 = 1 - (n_2 & n_3_c.type(torch.int8) & self.n_1_d1.type(torch.int8))
        n_4 = 1 - (n_3 & n_4_c.type(torch.int8) & self.n_1_d2.type(torch.int8))
        n_5 = 1 - (n_4 & n_5_c.type(torch.int8) & self.n_1_d3.type(torch.int8))
        output = (n_5 & self.input_d8.type(torch.int8))
        # Update buffers and idx
        # NOTE: shift order matters — each register takes its predecessor's
        # value from this cycle before the predecessor is overwritten.
        self.n_1_d3.data = self.n_1_d2
        self.n_1_d2.data = self.n_1_d1
        self.n_1_d1.data = n_1
        self.input_d8.data = self.input_d7
        self.input_d7.data = self.input_d6
        self.input_d6.data = self.input_d5
        self.input_d5.data = self.input_d4
        self.input_d4.data = self.input_d3
        self.input_d3.data = self.input_d2
        self.input_d2.data = self.input_d1
        self.input_d1.data = input
        self.bs_idx.data = self.bs_idx + 1
        return output
    def forward(self, input_x):
        return self.tanh_comb_forward(input_x).type(self.stype)
class tanhPN(torch.nn.Module):
    """
    This module is for fsm tanh(Nx/2), positive N/2.
    Input is bipolar, output is bipolar.
    "Stochastic neural computation I: Computational elements"
    """
    def __init__(self,
                 mode="bipolar",
                 depth=5,
                 rtype=torch.float,
                 stype=torch.float,
                 btype=torch.float):
        super(tanhPN, self).__init__()
        self.depth = depth
        self.mode = mode
        self.rtype = rtype
        self.stype = stype
        self.btype = btype
        assert mode == "bipolar", "FSM tanhPNhalf needs bipolar mode."
        # N is the number of states
        # max: top saturation state; thd: output threshold (mid state);
        # cnt: current state, initialized to the middle of the chain.
        self.max = torch.nn.Parameter(torch.tensor([2**depth-1]).type(self.btype), requires_grad=False)
        self.thd = torch.nn.Parameter(torch.tensor([2**(depth-1)]).type(self.btype), requires_grad=False)
        self.cnt = torch.nn.Parameter(torch.tensor([2**(depth-1)]).type(self.btype), requires_grad=False)
    def tanh_fsm_forward(self, input):
        # Output bit is 1 while the counter sits in the upper half of the states.
        output = torch.zeros_like(input)
        output = output + torch.ge(self.cnt, self.thd.item()).type(self.stype)
        # Input bit 1 counts up, 0 counts down; saturate at [0, max].
        self.cnt.data = input.type(self.btype) * (self.cnt + 1) + (1 - input.type(self.btype)) * (self.cnt - 1)
        self.cnt.data = self.cnt.clamp(0, self.max.item())
        return output
    def forward(self, input):
        return self.tanh_fsm_forward(input)
from UnarySim.stream.gen import RNG, SourceGen, BSGen
# NOTE(review): duplicate of FSUHardtanh defined earlier in this file —
# extraction artifact; this later definition rebinds the name.
class FSUHardtanh(torch.nn.Identity):
    """
    This module is used for inference in unary domain.
    """
    def __init__(self):
        super(FSUHardtanh, self).__init__()
# NOTE(review): duplicate of ScaleHardtanh defined earlier in this file —
# extraction artifact; this later definition rebinds the name.
class ScaleHardtanh(torch.nn.Hardtanh):
    """
    Inputs within range [-1, +1] directly pass through, while inputs outsides will be clipped to -1 and +1.
    This module is used for training and inference in binary domain.
    """
    def __init__(self):
        super(ScaleHardtanh, self).__init__()
# NOTE(review): byte-for-byte duplicate of tanhP1 defined earlier in this file —
# extraction artifact; this later definition rebinds the name.
class tanhP1(torch.nn.Module):
    """
    this module is for combinational tanh. The module is able to compute tanh(ax), where a = 1 in this implementation.
    the detail can be found at "<NAME> and <NAME>. 2017. Computing Arithmetic Functions Using Stochastic Logic by Series Expansion. Transactions on Emerging Topics in Computing (2017).", fig.10.
    """
    def __init__(self,
                 mode="unipolar",
                 rng="Sobol",
                 rng_dim=1,
                 rng_width=8,
                 rtype=torch.float,
                 stype=torch.float,
                 btype=torch.float):
        super(tanhP1, self).__init__()
        self.bitwidth = rng_width
        self.mode = mode
        self.rng = rng
        self.rng_dim = rng_dim
        self.rtype = rtype
        self.stype = stype
        self.btype = btype
        assert mode == "unipolar", "Combinational tanhP1 needs unipolar mode."
        self.rng_2 = RNG(bitwidth=self.bitwidth, dim=self.rng_dim+0, rng=self.rng, rtype=self.rtype)()
        self.rng_3 = RNG(bitwidth=self.bitwidth, dim=self.rng_dim+1, rng=self.rng, rtype=self.rtype)()
        self.rng_4 = RNG(bitwidth=self.bitwidth, dim=self.rng_dim+2, rng=self.rng, rtype=self.rtype)()
        self.rng_5 = RNG(bitwidth=self.bitwidth, dim=self.rng_dim+3, rng=self.rng, rtype=self.rtype)()
        # constants used in computation
        self.n2_c = torch.tensor([62/153]).type(self.rtype)
        self.n3_c = torch.tensor([ 17/42]).type(self.rtype)
        self.n4_c = torch.tensor([ 2/5]).type(self.rtype)
        self.n5_c = torch.tensor([ 1/3]).type(self.rtype)
        self.sg_n2_c = SourceGen(self.n2_c, self.bitwidth, self.mode, self.rtype)()
        self.sg_n3_c = SourceGen(self.n3_c, self.bitwidth, self.mode, self.rtype)()
        self.sg_n4_c = SourceGen(self.n4_c, self.bitwidth, self.mode, self.rtype)()
        self.sg_n5_c = SourceGen(self.n5_c, self.bitwidth, self.mode, self.rtype)()
        self.bs_n2_c = BSGen(self.sg_n2_c, self.rng_2, self.stype)
        self.bs_n3_c = BSGen(self.sg_n3_c, self.rng_3, self.stype)
        self.bs_n4_c = BSGen(self.sg_n4_c, self.rng_4, self.stype)
        self.bs_n5_c = BSGen(self.sg_n5_c, self.rng_5, self.stype)
        # 4 dff in series
        self.input_d1 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.input_d2 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.input_d3 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.input_d4 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.input_d5 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.input_d6 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.input_d7 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.input_d8 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.n_1_d1 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.n_1_d2 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.n_1_d3 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.bs_idx = torch.nn.Parameter(torch.zeros(1).type(torch.long), requires_grad=False)
    def tanh_comb_forward(self, input):
        n_1 = (input.type(torch.int8) & self.input_d4.type(torch.int8))
        # Operating units
        n_2_c = self.bs_n2_c(self.bs_idx)
        n_3_c = self.bs_n3_c(self.bs_idx)
        n_4_c = self.bs_n4_c(self.bs_idx)
        n_5_c = self.bs_n5_c(self.bs_idx)
        n_2 = 1 - (n_1 & n_2_c.type(torch.int8))
        n_3 = 1 - (n_2 & n_3_c.type(torch.int8) & self.n_1_d1.type(torch.int8))
        n_4 = 1 - (n_3 & n_4_c.type(torch.int8) & self.n_1_d2.type(torch.int8))
        n_5 = 1 - (n_4 & n_5_c.type(torch.int8) & self.n_1_d3.type(torch.int8))
        output = (n_5 & self.input_d8.type(torch.int8))
        # Update buffers and idx
        self.n_1_d3.data = self.n_1_d2
        self.n_1_d2.data = self.n_1_d1
        self.n_1_d1.data = n_1
        self.input_d8.data = self.input_d7
        self.input_d7.data = self.input_d6
        self.input_d6.data = self.input_d5
        self.input_d5.data = self.input_d4
        self.input_d4.data = self.input_d3
        self.input_d3.data = self.input_d2
        self.input_d2.data = self.input_d1
        self.input_d1.data = input
        self.bs_idx.data = self.bs_idx + 1
        return output
    def forward(self, input_x):
        return self.tanh_comb_forward(input_x).type(self.stype)
class tanhPN(torch.nn.Module):
    """FSM-based stochastic tanh(Nx/2) for positive N/2.

    Bipolar input bit stream in, bipolar output bit stream out.
    Reference: "Stochastic neural computation I: Computational elements".
    """
    def __init__(self,
                 mode="bipolar",
                 depth=5,
                 rtype=torch.float,
                 stype=torch.float,
                 btype=torch.float):
        super(tanhPN, self).__init__()
        self.depth = depth
        self.mode = mode
        self.rtype = rtype
        self.stype = stype
        self.btype = btype
        assert mode == "bipolar", "FSM tanhPNhalf needs bipolar mode."
        # The FSM has 2**depth states; the output bit flips in the upper half.
        num_states = 2 ** depth
        self.max = torch.nn.Parameter(torch.tensor([num_states - 1]).type(self.btype), requires_grad=False)
        self.thd = torch.nn.Parameter(torch.tensor([num_states // 2]).type(self.btype), requires_grad=False)
        self.cnt = torch.nn.Parameter(torch.tensor([num_states // 2]).type(self.btype), requires_grad=False)
    def tanh_fsm_forward(self, input):
        # Emit 1 wherever the counter sits in the upper half of the state space.
        out_bit = torch.ge(self.cnt, self.thd.item()).type(self.stype)
        output = torch.zeros_like(input) + out_bit
        # Input bit 1 moves the FSM up, 0 moves it down; saturate at [0, max].
        step = input.type(self.btype)
        self.cnt.data = step * (self.cnt + 1) + (1 - step) * (self.cnt - 1)
        self.cnt.data = self.cnt.clamp(0, self.max.item())
        return output
    def forward(self, input):
        return self.tanh_fsm_forward(input)
from collections import namedtuple
import struct
import sys
from natsort import natsorted, ns
import re
import os
import pandas as pd
from . import firmware_gen as lf
import json
import aditofpython as tof
import tof_calib.device as device
import logging
import logging.config
import numpy as np
def setup_logging():
    """Configure the logging subsystem from the ../logger.json dictConfig file."""
    with open('./../logger.json', 'r') as config_file:
        logging.config.dictConfig(json.load(config_file))
'''
Predefined Hashmap key Defination
---------------------------------
'''
#Dictionary for modes
mode_dict = {'near' : 0, 'mid' : 1, 'far' : 2}
#Hashmap key for packet type
HEADER = 0
CAMERA_INTRINSIC = 1
NEAR_CAL = 2
NEAR_LF = 3
MID_CAL = 4
MID_LF = 5
FAR_CAL = 6
FAR_LF = 7
#Hashmap key for common parameters
EEPROM_VERSION = 1
CAL_SER_NUM = 2
CAL_DATE = 3
CHECKSUM = 4
#Hashmap key for Header Parameters
TOTAL_SIZE = 5
NUMBER_OF_MODES = 6
MODULE_TYPE = 11 #Value 2: BroadMarket/1: PICO/0 : ADI EVAL
AFE_TYPE = 13 #Value 0: ADDI9033 / 1:ADDI9043/ 2: ADDI9050
SENSOR_TYPE = 14 #Value 0 : Panasonic VGA / 1 : Panasonic QVGA
LASER_TYPE = 16 #Value 0 : Princeton VCSEL/ 1 : Heptagon VCSEL...
#Hashmap key for Camera Intrinsic
INTRINSIC = 5
DISTORTION_COEFFICIENTS = 6
#Hashmap for linear correct
ISATG_PROJECT_VERSION = 5
CALIBRATION_SOFTWARE_VERSION = 6
CALIBRATION_TYPE = 7 #Value 0 Sweep, 1: Rail, 2: Faceplant
CALIBRATION_MODE = 8 #Value 0:Near 1, 1 : Mid, 2 :Far
PULSE_COUNT = 11
NO_OF_LASERS = 12
LINEAR_CORRECT_OFFSET = 22
LINEAR_CORRECT_XPWR = 23
#Hashmap for load files
ADDR_DATA_LIST = 5
#Indices for PARAM STRUCT
SIZE = 0
VALUE = 1
'''
Class for managing the calibration map
Consist functions to:
generate calibration map
store calibration map binary to file
read calibration map from binary file
parse binary back to calibration map
display calibration map
---------------------------------
'''
class cal_map(object):
    """Builder/serializer for the EEPROM calibration map.

    Map layout: {packet_key: [packet_size, {param_key: [param_size, [floats]]}]},
    serialized to/from a flat stream of little-endian float32 ('<f') values.
    """
    def __init__(self):
        # Start with a header packet carrying placeholder size and checksum;
        # update_map_header() fills in the real values later.
        self.calibration_map = {}
        header_packet = {
            TOTAL_SIZE : self.param_struct([8]),
            CHECKSUM : self.param_struct([8])
        }
        self.calibration_map = {
            HEADER : [self.get_packet_size(header_packet), header_packet],
        }
#calculates size of value and returns list[size, value]
def param_struct(self, param_value):
size = len(param_value) * 4 # len * 4(each element is float)
param_value = [int(size), [float(i) for i in param_value]]
return param_value
#calculates and returns size of packet
def get_packet_size(self, packet):
packet_size = 0
for nested_key,nested_value in packet.items():
param_size, param_value = nested_value
packet_size = packet_size + param_size + 8 # added size 8 for key and size of each parameter
return int(packet_size)
#calculates and returns size of map
def get_map_size(self):
map_size = 0
for key, list_params in self.calibration_map.items():
size, nested_dict = list_params
map_size = map_size + size
map_size = map_size + 8 #Size of each key(4) and packet size(4) is added(4+4=8)
return map_size
def update_packet_checksum(self, packet):
checksum = 0
for nested_key,nested_value in packet.items():
param_size, param_value = nested_value
for i in range (int(param_size/4)):
checksum = int(checksum) ^ int(param_value[i])
packet[CHECKSUM] = self.param_struct([checksum])
def update_map_header(self):
#Update Header Total Size
total_size = self.get_map_size()
self.calibration_map[HEADER][VALUE][TOTAL_SIZE] = self.param_struct([total_size])
#Update Header Checksum
self.update_packet_checksum(self.calibration_map[HEADER][VALUE])
#Generates Default Dictionary
def init_default_cal_map(self):
header_packet = {
EEPROM_VERSION : self.param_struct([0]),
TOTAL_SIZE : self.param_struct([1000]),
NUMBER_OF_MODES : self.param_struct([3]),
}
self.update_packet_checksum(header_packet)
camera_intrinsic_packet = {
EEPROM_VERSION : self.param_struct([0]),
CAL_SER_NUM : self.param_struct([0]),
CAL_DATE : self.param_struct([12042019]),
INTRINSIC : self.param_struct([0, 0, 0, 0, 0, 0, 0, 0, 0])
}
self.update_packet_checksum(camera_intrinsic_packet)
self.calibration_map = {
HEADER : [self.get_packet_size(header_packet), header_packet],
CAMERA_INTRINSIC : [self.get_packet_size(camera_intrinsic_packet), camera_intrinsic_packet]
}
#Update Header
self.update_map_header()
    #Parses through dictionary and prints the key and value
    def display_cal_map(self):
        """Pretty-print every packet and parameter of the calibration map to stdout."""
        #Printing just the value of Calibration Dictionary
        for key, list_params in self.calibration_map.items():
            print ("Packet Key: ", (key),end="") # print the primary key (for Packet Type)
            size, nested_dict = list_params
            print ("\tPacket Size: ", size) #print the size of pimary packet
            #print ("Packet Key: ", (key),"\tPacket Size: ", size, file=open("output.txt", "a"))
            for nested_key,nested_value in nested_dict.items():
                print("\tParam Key: ", nested_key,end="") #print the nested key (Parameter key)
                param_size, param_value = nested_value
                print("\tParam Size: ",param_size,end="") #print the size of Param
                value = []
                # collect the param_size/4 float elements before printing them
                for i in range (int(param_size/4)):
                    value.append(param_value[i])
                print("\tParam Value: ",value) #print the value of Param
                #print("\tParam Key: ", nested_key,"\tParam Size: ",param_size,"\tParam Value: ",value, file=open("output.txt", "a")) #print the Param to file
#Generates the binary file for writing to EEPROM
def save_cal_map(self, filename):
#writing float values
f = open(filename,"wb")
for key, list_params in self.calibration_map.items():
f.write(struct.pack('<f', key) ) #write the primary key (for Packet Type)
struct.pack('<f', key)
size, nested_dict = list_params
f.write(struct.pack('<f', size) ) #write the size of pimary packet size
for nested_key,nested_value in nested_dict.items():
f.write(struct.pack('<f',nested_key)) #write the nested key (Parameter key)
param_size, param_value = nested_value
f.write(struct.pack('<f',param_size)) #write the size of Param
for i in range (int(param_size/4)):
f.write(struct.pack('<f',param_value[i])) #write the value of Param
f.close()
'''Reads the binary file and parses it back to map,
replaces the value if already exist'''
def read_cal_map(self, filename):
#open the file
with open(filename,"rb") as f:
while True:
key = f.read(4)
if not key:
break
key = struct.unpack('<f', key)
key = int(key[0])
sub_packet_size = struct.unpack('<f', f.read(4))
sub_packet_size = int(sub_packet_size[0])
sub_packet_map = {}
i = 0
while i<(sub_packet_size/4): #4:size of float
sub_packet_value = struct.unpack('<f', f.read(4))
sub_packet_value = int(sub_packet_value[0])
i = i + 1
parameter_key = sub_packet_value
sub_packet_value = struct.unpack('<f', f.read(4))
sub_packet_value = int(sub_packet_value[0])
i = i + 1
parameter_size = sub_packet_value
number_of_elements = int(parameter_size/4) #4:size of float
value=[]
for j in range (number_of_elements):
sub_packet_value = struct.unpack('<f', f.read(4))
#sub_packet_value = int(sub_packet_value[0])
value.append(sub_packet_value[0])
i = i + 1
sub_packet_map.update({parameter_key: [parameter_size, value]})
self.calibration_map[key] = [sub_packet_size,sub_packet_map]
f.close()
    #Add Load files to map, if existing map consist load files, it overwrites it, otherwise adds it
    def add_load_files_to_map(self, packet_type, lf_path):
        """Pack address/data words from the .lf files under *lf_path* into *packet_type*."""
        lf_map = {}
        lf_list =[]
        # natsorted yields firmware files in natural (human) order.
        # NOTE(review): the [:13] cap is a magic number — confirm it matches the
        # expected firmware file count.
        file_list = natsorted(os.listdir("./"+lf_path+"/"), alg=ns.IGNORECASE)[:13]
        #print(file_list)
        for file_name in file_list:
            if file_name.endswith(".lf"):
                addr, data, mode_locations = lf.extract_code_block("./"+lf_path+"/"+file_name)
                # interleave address/data pairs into one flat list
                for i in range(len(addr)) :
                    lf_list.append(addr[i])
                    lf_list.append(data[i])
                #print("Parsed File", file_name, " ", file_num, "\n", lf_list)
                #input("Press Enter to continue...")
        lf_map[ADDR_DATA_LIST] = self.param_struct(lf_list)
        #print(lf_map)
        self.update_packet_checksum(lf_map)
        self.calibration_map[packet_type] = [self.get_packet_size(lf_map), lf_map]
        #Update Header
        self.update_map_header()
def add_linear_offset_csv_to_map(self, packet_type, linear_offset_csv_file):
linear_df = pd.read_csv(linear_offset_csv_file)
linear_correct_offset_list = (linear_df.to_dict(orient='list')["reg_offset_value_hex"])
linear_correct_xpwr_list = (linear_df.to_dict(orient='list')["xcorr"][1:])
linear_map = {}
linear_map[LINEAR_CORRECT_OFFSET] = self.param_struct([int(i, 16) for i in linear_correct_offset_list])
linear_map[LINEAR_CORRECT_XPWR] = self.param_struct(linear_correct_xpwr_list)
self.calibration_map[packet_type] = [self.get_packet_size(linear_map), linear_map]
#Update Header
self. update_map_header()
def add_json_to_map(self, packet_type, json_file):
with open(json_file, 'r') as f:
json_read = json.load(f)
json_map = {}
for key,value in json_read.items():
for sub_key,sub_value in json_read[key].items():
if(type(sub_value) is list):
json_map[int(sub_key)] = self.param_struct(sub_value)
else:
json_map[int(sub_key)] = self.param_struct([sub_value])
self.update_packet_checksum(json_map)
self.calibration_map[packet_type] = [self.get_packet_size(json_map), json_map]
self.update_map_header()
#Function to replace calibration mode block
def replace_eeprom_mode(self, mode, linear_cal_json_file, load_file_path):
self.add_json_to_map((mode_dict[mode]*2+2), linear_cal_json_file)
self.add_load_files_to_map((mode_dict[mode]*2+3), load_file_path)
def write_eeprom_cal_map(self, eeprom):
    """Serialize self.calibration_map and write it to *eeprom*.

    Layout: a 4-byte little-endian float holding the payload size is written
    at address 0, followed by the payload itself at address 4.  Every field
    (packet keys, sizes, parameter keys and values) is a 4-byte
    little-endian float.

    Fixes vs. original: removed a dead duplicate ``struct.pack('<f', key)``
    statement and an unused ``get_map_size()`` result; byte list is built
    directly instead of an index loop.
    """
    payload = bytearray()
    for packet_key, (packet_size, packet) in self.calibration_map.items():
        payload += struct.pack('<f', packet_key)
        payload += struct.pack('<f', packet_size)
        for param_key, (param_size, param_value) in packet.items():
            payload += struct.pack('<f', param_key)
            payload += struct.pack('<f', param_size)
            for i in range(int(param_size / 4)):  # 4 bytes per float element
                payload += struct.pack('<f', param_value[i])
    total = len(payload)
    # Header word at address 0: payload size, itself stored as a float.
    eeprom.write(int(0), np.array(list(struct.pack('<f', total)), dtype='uint8'), 4)
    eeprom.write(int(4), np.array(list(payload), dtype='uint8'), total)
def read_eeprom_cal_map(self, eeprom):
    """Read the serialized calibration map back from *eeprom*.

    Inverse of write_eeprom_cal_map: a float-encoded payload size at
    address 0, then the payload at address 4.  Parsed packets are merged
    into self.calibration_map, overwriting existing keys.

    Fix vs. original: the first read of each packet key had been corrupted
    to ``key = <KEY>`` (a syntax error); restored to the 4-byte slice,
    mirroring ``read_cal_map``'s ``f.read(4)``.
    """
    header = np.zeros(4, dtype='uint8')
    eeprom.read(int(0), header, 4)
    payload_size = int(struct.unpack('<f', header)[0])
    raw = np.zeros(payload_size, dtype='uint8')
    eeprom.read(int(4), raw, payload_size)
    r_b = raw.tobytes()
    j = 0
    while j < len(r_b):
        raw_key = r_b[j:j + 4]
        j += 4
        if not raw_key:
            break  # clean end of payload
        key = int(struct.unpack('<f', raw_key)[0])
        sub_packet_size = int(struct.unpack('<f', r_b[j:j + 4])[0])
        j += 4
        sub_packet_map = {}
        i = 0  # counts 4-byte fields consumed from this packet
        while i < (sub_packet_size / 4):
            parameter_key = int(struct.unpack('<f', r_b[j:j + 4])[0])
            j += 4
            i += 1
            parameter_size = int(struct.unpack('<f', r_b[j:j + 4])[0])
            j += 4
            i += 1
            value = []
            for _ in range(int(parameter_size / 4)):
                value.append(struct.unpack('<f', r_b[j:j + 4])[0])
                j += 4
                i += 1
            sub_packet_map[parameter_key] = [parameter_size, value]
        self.calibration_map[key] = [sub_packet_size, sub_packet_map]
def flatten_cal_map(cal):
    """Flatten a calibration map into [packet_key, packet_size, param_size, values...].

    NOTE(review): parameter keys are deliberately not emitted, matching the
    historical layout consumed by compare_map.
    """
    flat = []
    for key, (size, packet) in cal.items():
        flat.append(key)
        flat.append(size)
        for param_size, param_value in packet.values():
            flat.append(param_size)
            flat.extend(param_value[i] for i in range(int(param_size / 4)))
    return flat

def compare_map(cal1, cal2):
    """Return True when the two calibration maps match within a 0.2 tolerance.

    Fixes vs. original: the difference is compared symmetrically (absolute
    value; the old ``lst1[i]-lst2[i] > 0.2`` missed values where cal2 was
    larger), and maps with different flattened lengths compare unequal
    instead of raising IndexError.
    """
    lst1 = flatten_cal_map(cal1)
    lst2 = flatten_cal_map(cal2)
    if len(lst1) != len(lst2):
        return False
    return all(abs(a - b) <= 0.2 for a, b in zip(lst1, lst2))
'''
Test function to:
Generate default map
Write map to binary file
Read map back from binary file
Add load files to map
Display the map
---------------------------------
'''
def test_cal_eeprom():
    """Exercise the cal_map workflow end to end (requires an ADI TOF camera).

    Builds a default map, round-trips it through a binary file, augments it
    with JSON / load-file packets, then writes and re-reads the camera EEPROM.

    Fixes vs. original: the augmented map was saved to a misspelled
    "caibration_map.bin" while printing 'calibration_map.bin'; and the final
    "modified" dump wrote the untouched cal1 map instead of cal3.
    """
    cal1 = cal_map()
    cal1.init_default_cal_map()
    print("Calibration map written to Bin File")
    cal1.display_cal_map()
    cal1.save_cal_map("calibration_map.bin")

    cal2 = cal_map()
    cal2.read_cal_map("calibration_map.bin")
    print("\n\nCalibration map read back from Bin File")
    cal2.display_cal_map()
    input("Press Enter to continue...")

    cal2.add_json_to_map(NEAR_CAL, "./linear_cal.json")
    print("\n\nCalibration map after adding linear_cal.json to map")
    cal2.display_cal_map()
    input("Press Enter to continue...")

    cal2.add_load_files_to_map(NEAR_LF, "../config/ADDI9043/")
    print("\n\nCalibration map after adding load files to map")
    cal2.display_cal_map()
    print("\n\nSaving to 'calibration_map.bin'")
    cal2.save_cal_map("calibration_map.bin")  # was misspelled "caibration_map.bin"
    input("Press Enter to continue...")

    # Open the ADI TOF camera and grab its first EEPROM.
    system = tof.System()
    status = system.initialize()
    print("system.initialize()", status)
    cam_handle = device.open_device2(system)
    eeproms = []
    cam_handle.getEeproms(eeproms)
    eeprom = eeproms[0]

    print("\n\nWriting to EEPROM")
    cal2.write_eeprom_cal_map(eeprom)

    print("\n\nReading from EEPROM")
    cal3 = cal_map()
    cal3.read_eeprom_cal_map(eeprom)
    cal3.save_cal_map("eeprom_read_map.bin")
    with open("eeprom_read_map.json", 'w') as f:
        f.write(str(cal3.calibration_map))
    cal3.display_cal_map()
    input("Press Enter to continue...")

    cal3.replace_eeprom_mode('near', "./config/BM_Kit/Near/linear_cal.json", "./config/BM_Kit/Near/")
    # Dump the modified map (cal3) -- the original wrote cal1 by mistake.
    with open("eeprom_read_map_modified.json", 'w') as f:
        f.write(str(cal3.calibration_map))
'''
Start point of program
---------------------------------
'''
if __name__ == "__main__":
    # Configure logging from ../logger.json before running the demo.
    setup_logging()
    logger = logging.getLogger(__name__)
    test_cal_eeprom()
import struct
import sys
from natsort import natsorted, ns
import re
import os
import pandas as pd
from . import firmware_gen as lf
import json
import aditofpython as tof
import tof_calib.device as device
import logging
import logging.config
import numpy as np
def setup_logging():
    """Configure the logging package from the JSON config one directory up."""
    with open('./../logger.json', 'r') as config_file:
        logging.config.dictConfig(json.load(config_file))
'''
Predefined Hashmap key Defination
---------------------------------
'''
#Dictionary for modes
mode_dict = {'near' : 0, 'mid' : 1, 'far' : 2}
#Hashmap key for packet type
HEADER = 0
CAMERA_INTRINSIC = 1
NEAR_CAL = 2
NEAR_LF = 3
MID_CAL = 4
MID_LF = 5
FAR_CAL = 6
FAR_LF = 7
#Hashmap key for common parameters
EEPROM_VERSION = 1
CAL_SER_NUM = 2
CAL_DATE = 3
CHECKSUM = 4
#Hashmap key for Header Parameters
TOTAL_SIZE = 5
NUMBER_OF_MODES = 6
MODULE_TYPE = 11 #Value 2: BroadMarket/1: PICO/0 : ADI EVAL
AFE_TYPE = 13 #Value 0: ADDI9033 / 1:ADDI9043/ 2: ADDI9050
SENSOR_TYPE = 14 #Value 0 : Panasonic VGA / 1 : Panasonic QVGA
LASER_TYPE = 16 #Value 0 : Princeton VCSEL/ 1 : Heptagon VCSEL...
#Hashmap key for Camera Intrinsic
INTRINSIC = 5
DISTORTION_COEFFICIENTS = 6
#Hashmap for linear correct
ISATG_PROJECT_VERSION = 5
CALIBRATION_SOFTWARE_VERSION = 6
CALIBRATION_TYPE = 7 #Value 0 Sweep, 1: Rail, 2: Faceplant
CALIBRATION_MODE = 8 #Value 0:Near 1, 1 : Mid, 2 :Far
PULSE_COUNT = 11
NO_OF_LASERS = 12
LINEAR_CORRECT_OFFSET = 22
LINEAR_CORRECT_XPWR = 23
#Hashmap for load files
ADDR_DATA_LIST = 5
#Indices for PARAM STRUCT
SIZE = 0
VALUE = 1
'''
Class for managing the calibration map
Consist functions to:
generate calibration map
store calibration map binary to file
read calibration map from binary file
parse binary back to calibration map
display calibration map
---------------------------------
'''
class cal_map(object):
    """In-memory model of the camera EEPROM calibration map.

    The map is ``{packet_key: [packet_size, packet]}`` where each packet is
    ``{param_key: [param_size, [float values]]}``.  All sizes are byte
    counts and every serialized field is a 4-byte little-endian float
    (hence the module-level SIZE/VALUE indices for the [size, value] pairs).

    Fixes vs. original: restored the corrupted ``key = <KEY>`` read in
    read_eeprom_cal_map, removed dead ``struct.pack`` statements and an
    unused ``get_map_size()`` result in the writers, fixed the stray space
    in ``self. update_map_header()``, and dropped a redundant ``f.close()``
    inside a ``with`` block.
    """

    def __init__(self):
        # Minimal map: a header packet whose TOTAL_SIZE/CHECKSUM fields are
        # refreshed by update_map_header() as packets are added.
        header_packet = {
            TOTAL_SIZE: self.param_struct([8]),
            CHECKSUM: self.param_struct([8]),
        }
        self.calibration_map = {
            HEADER: [self.get_packet_size(header_packet), header_packet],
        }

    def param_struct(self, param_value):
        """Return [byte_size, [floats]] for a list of numbers (4 bytes/element)."""
        size = len(param_value) * 4
        return [int(size), [float(v) for v in param_value]]

    def get_packet_size(self, packet):
        """Serialized byte size of *packet*; +8 per param for its key and size fields."""
        packet_size = 0
        for param_size, _value in packet.values():
            packet_size += param_size + 8
        return int(packet_size)

    def get_map_size(self):
        """Serialized byte size of the whole map; +8 per packet for key and size fields."""
        map_size = 0
        for packet_size, _packet in self.calibration_map.values():
            map_size += packet_size + 8
        return map_size

    def update_packet_checksum(self, packet):
        """Store the XOR of all int-truncated values into the packet's CHECKSUM param."""
        checksum = 0
        for param_size, param_value in packet.values():
            for i in range(int(param_size / 4)):
                checksum = int(checksum) ^ int(param_value[i])
        packet[CHECKSUM] = self.param_struct([checksum])

    def update_map_header(self):
        """Refresh the header packet's TOTAL_SIZE and CHECKSUM after map changes."""
        total_size = self.get_map_size()
        header_packet = self.calibration_map[HEADER][VALUE]
        header_packet[TOTAL_SIZE] = self.param_struct([total_size])
        self.update_packet_checksum(header_packet)

    def init_default_cal_map(self):
        """Reset the map to a default header plus a zeroed camera-intrinsic packet."""
        header_packet = {
            EEPROM_VERSION: self.param_struct([0]),
            TOTAL_SIZE: self.param_struct([1000]),
            NUMBER_OF_MODES: self.param_struct([3]),
        }
        self.update_packet_checksum(header_packet)
        camera_intrinsic_packet = {
            EEPROM_VERSION: self.param_struct([0]),
            CAL_SER_NUM: self.param_struct([0]),
            CAL_DATE: self.param_struct([12042019]),
            INTRINSIC: self.param_struct([0, 0, 0, 0, 0, 0, 0, 0, 0]),
        }
        self.update_packet_checksum(camera_intrinsic_packet)
        self.calibration_map = {
            HEADER: [self.get_packet_size(header_packet), header_packet],
            CAMERA_INTRINSIC: [self.get_packet_size(camera_intrinsic_packet), camera_intrinsic_packet],
        }
        self.update_map_header()

    def display_cal_map(self):
        """Pretty-print every packet key, size, parameter key, size and value."""
        for key, (size, packet) in self.calibration_map.items():
            print("Packet Key: ", (key), end="")
            print("\tPacket Size: ", size)
            for param_key, (param_size, param_value) in packet.items():
                print("\tParam Key: ", param_key, end="")
                print("\tParam Size: ", param_size, end="")
                value = [param_value[i] for i in range(int(param_size / 4))]
                print("\tParam Value: ", value)

    def save_cal_map(self, filename):
        """Serialize the map to *filename*; every field is a little-endian float."""
        with open(filename, "wb") as f:
            for key, (size, packet) in self.calibration_map.items():
                f.write(struct.pack('<f', key))   # packet key
                f.write(struct.pack('<f', size))  # packet byte size
                for param_key, (param_size, param_value) in packet.items():
                    f.write(struct.pack('<f', param_key))
                    f.write(struct.pack('<f', param_size))
                    for i in range(int(param_size / 4)):
                        f.write(struct.pack('<f', param_value[i]))

    def read_cal_map(self, filename):
        """Parse a file written by save_cal_map into the map, overwriting duplicate keys."""
        with open(filename, "rb") as f:
            while True:
                raw_key = f.read(4)
                if not raw_key:
                    break  # clean EOF between packets
                key = int(struct.unpack('<f', raw_key)[0])
                sub_packet_size = int(struct.unpack('<f', f.read(4))[0])
                sub_packet_map = {}
                i = 0  # counts 4-byte fields consumed from this packet
                while i < (sub_packet_size / 4):
                    parameter_key = int(struct.unpack('<f', f.read(4))[0])
                    i += 1
                    parameter_size = int(struct.unpack('<f', f.read(4))[0])
                    i += 1
                    value = []
                    for _ in range(int(parameter_size / 4)):
                        value.append(struct.unpack('<f', f.read(4))[0])
                        i += 1
                    sub_packet_map[parameter_key] = [parameter_size, value]
                self.calibration_map[key] = [sub_packet_size, sub_packet_map]

    def add_load_files_to_map(self, packet_type, lf_path):
        """Pack the (up to 13) firmware .lf files in *lf_path* into one packet.

        Addresses and data words from each file are interleaved into a single
        list stored under the ADDR_DATA_LIST key.
        """
        lf_list = []
        # Natural sort so file10.lf comes after file9.lf; only the first 13 entries are used.
        file_list = natsorted(os.listdir("./" + lf_path + "/"), alg=ns.IGNORECASE)[:13]
        for file_name in file_list:
            if file_name.endswith(".lf"):
                addr, data, mode_locations = lf.extract_code_block("./" + lf_path + "/" + file_name)
                for i in range(len(addr)):
                    lf_list.append(addr[i])
                    lf_list.append(data[i])
        lf_map = {ADDR_DATA_LIST: self.param_struct(lf_list)}
        self.update_packet_checksum(lf_map)
        self.calibration_map[packet_type] = [self.get_packet_size(lf_map), lf_map]
        self.update_map_header()

    def add_linear_offset_csv_to_map(self, packet_type, linear_offset_csv_file):
        """Build a linear-correction packet from a CSV and store it under *packet_type*.

        Uses the ``reg_offset_value_hex`` column (hex strings) and the
        ``xcorr`` column (first row skipped).  NOTE(review): unlike the other
        add_* helpers this does not call update_packet_checksum -- confirm
        whether that is intentional.
        """
        columns = pd.read_csv(linear_offset_csv_file).to_dict(orient='list')
        offsets = columns["reg_offset_value_hex"]
        xpwr = columns["xcorr"][1:]
        linear_map = {
            LINEAR_CORRECT_OFFSET: self.param_struct([int(v, 16) for v in offsets]),
            LINEAR_CORRECT_XPWR: self.param_struct(xpwr),
        }
        self.calibration_map[packet_type] = [self.get_packet_size(linear_map), linear_map]
        self.update_map_header()

    def add_json_to_map(self, packet_type, json_file):
        """Load {group: {param_key: value-or-list}} JSON into a packet under *packet_type*."""
        with open(json_file, 'r') as f:
            parsed = json.load(f)
        json_map = {}
        for group in parsed.values():
            for param_key, param_value in group.items():
                # Scalars are wrapped so every parameter is stored as a list.
                values = param_value if isinstance(param_value, list) else [param_value]
                json_map[int(param_key)] = self.param_struct(values)
        self.update_packet_checksum(json_map)
        self.calibration_map[packet_type] = [self.get_packet_size(json_map), json_map]
        self.update_map_header()

    def replace_eeprom_mode(self, mode, linear_cal_json_file, load_file_path):
        """Replace one mode's CAL and LF packets ('near'/'mid'/'far')."""
        base = mode_dict[mode] * 2  # packet keys: CAL = base+2, LF = base+3
        self.add_json_to_map(base + 2, linear_cal_json_file)
        self.add_load_files_to_map(base + 3, load_file_path)

    def write_eeprom_cal_map(self, eeprom):
        """Write the serialized map to *eeprom*.

        A float-encoded payload size goes to address 0 (4 bytes), the
        payload itself to address 4.
        """
        payload = bytearray()
        for packet_key, (packet_size, packet) in self.calibration_map.items():
            payload += struct.pack('<f', packet_key)
            payload += struct.pack('<f', packet_size)
            for param_key, (param_size, param_value) in packet.items():
                payload += struct.pack('<f', param_key)
                payload += struct.pack('<f', param_size)
                for i in range(int(param_size / 4)):
                    payload += struct.pack('<f', param_value[i])
        total = len(payload)
        eeprom.write(int(0), np.array(list(struct.pack('<f', total)), dtype='uint8'), 4)
        eeprom.write(int(4), np.array(list(payload), dtype='uint8'), total)

    def read_eeprom_cal_map(self, eeprom):
        """Read the serialized map back from *eeprom* (inverse of write_eeprom_cal_map)."""
        header = np.zeros(4, dtype='uint8')
        eeprom.read(int(0), header, 4)
        payload_size = int(struct.unpack('<f', header)[0])
        raw = np.zeros(payload_size, dtype='uint8')
        eeprom.read(int(4), raw, payload_size)
        r_b = raw.tobytes()
        j = 0
        while j < len(r_b):
            # Restored: this read was corrupted to "key = <KEY>" in the source;
            # the 4-byte slice mirrors read_cal_map's f.read(4).
            raw_key = r_b[j:j + 4]
            j += 4
            if not raw_key:
                break
            key = int(struct.unpack('<f', raw_key)[0])
            sub_packet_size = int(struct.unpack('<f', r_b[j:j + 4])[0])
            j += 4
            sub_packet_map = {}
            i = 0  # counts 4-byte fields consumed from this packet
            while i < (sub_packet_size / 4):
                parameter_key = int(struct.unpack('<f', r_b[j:j + 4])[0])
                j += 4
                i += 1
                parameter_size = int(struct.unpack('<f', r_b[j:j + 4])[0])
                j += 4
                i += 1
                value = []
                for _ in range(int(parameter_size / 4)):
                    value.append(struct.unpack('<f', r_b[j:j + 4])[0])
                    j += 4
                    i += 1
                sub_packet_map[parameter_key] = [parameter_size, value]
            self.calibration_map[key] = [sub_packet_size, sub_packet_map]
def flatten_cal_map(cal):
    """Flatten a calibration map into [packet_key, packet_size, param_size, values...].

    NOTE(review): parameter keys are deliberately not emitted, matching the
    historical layout consumed by compare_map.
    """
    flat = []
    for key, (size, packet) in cal.items():
        flat.append(key)
        flat.append(size)
        for param_size, param_value in packet.values():
            flat.append(param_size)
            flat.extend(param_value[i] for i in range(int(param_size / 4)))
    return flat

def compare_map(cal1, cal2):
    """Return True when the two calibration maps match within a 0.2 tolerance.

    Fixes vs. original: the difference is compared symmetrically (absolute
    value; the old ``lst1[i]-lst2[i] > 0.2`` missed values where cal2 was
    larger), and maps with different flattened lengths compare unequal
    instead of raising IndexError.
    """
    lst1 = flatten_cal_map(cal1)
    lst2 = flatten_cal_map(cal2)
    if len(lst1) != len(lst2):
        return False
    return all(abs(a - b) <= 0.2 for a, b in zip(lst1, lst2))
'''
Test function to:
Generate default map
Write map to binary file
Read map back from binary file
Add load files to map
Display the map
---------------------------------
'''
def test_cal_eeprom():
    """Exercise the cal_map workflow end to end (requires an ADI TOF camera).

    Builds a default map, round-trips it through a binary file, augments it
    with JSON / load-file packets, then writes and re-reads the camera EEPROM.

    Fixes vs. original: the augmented map was saved to a misspelled
    "caibration_map.bin" while printing 'calibration_map.bin'; and the final
    "modified" dump wrote the untouched cal1 map instead of cal3.
    """
    cal1 = cal_map()
    cal1.init_default_cal_map()
    print("Calibration map written to Bin File")
    cal1.display_cal_map()
    cal1.save_cal_map("calibration_map.bin")

    cal2 = cal_map()
    cal2.read_cal_map("calibration_map.bin")
    print("\n\nCalibration map read back from Bin File")
    cal2.display_cal_map()
    input("Press Enter to continue...")

    cal2.add_json_to_map(NEAR_CAL, "./linear_cal.json")
    print("\n\nCalibration map after adding linear_cal.json to map")
    cal2.display_cal_map()
    input("Press Enter to continue...")

    cal2.add_load_files_to_map(NEAR_LF, "../config/ADDI9043/")
    print("\n\nCalibration map after adding load files to map")
    cal2.display_cal_map()
    print("\n\nSaving to 'calibration_map.bin'")
    cal2.save_cal_map("calibration_map.bin")  # was misspelled "caibration_map.bin"
    input("Press Enter to continue...")

    # Open the ADI TOF camera and grab its first EEPROM.
    system = tof.System()
    status = system.initialize()
    print("system.initialize()", status)
    cam_handle = device.open_device2(system)
    eeproms = []
    cam_handle.getEeproms(eeproms)
    eeprom = eeproms[0]

    print("\n\nWriting to EEPROM")
    cal2.write_eeprom_cal_map(eeprom)

    print("\n\nReading from EEPROM")
    cal3 = cal_map()
    cal3.read_eeprom_cal_map(eeprom)
    cal3.save_cal_map("eeprom_read_map.bin")
    with open("eeprom_read_map.json", 'w') as f:
        f.write(str(cal3.calibration_map))
    cal3.display_cal_map()
    input("Press Enter to continue...")

    cal3.replace_eeprom_mode('near', "./config/BM_Kit/Near/linear_cal.json", "./config/BM_Kit/Near/")
    # Dump the modified map (cal3) -- the original wrote cal1 by mistake.
    with open("eeprom_read_map_modified.json", 'w') as f:
        f.write(str(cal3.calibration_map))
'''
Start point of program
---------------------------------
'''
if __name__ == "__main__":
    # Configure logging from ../logger.json before running the demo.
    setup_logging()
    logger = logging.getLogger(__name__)
    test_cal_eeprom()