code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
# Generated by Django 2.0.4 on 2018-06-23 02:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_adminprofile_businessprofile_eventprofile_participantprofile'),
]
operations = [
migrations.AlterField(
model_name='user',
name='user_type',
field=models.PositiveSmallIntegerField(blank=True, choices=[(1, 'paticipant'), (2, 'business'), (3, 'event'), (4, 'admin')], null=True),
),
]
|
[
"django.db.models.PositiveSmallIntegerField"
] |
[((380, 513), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'blank': '(True)', 'choices': "[(1, 'paticipant'), (2, 'business'), (3, 'event'), (4, 'admin')]", 'null': '(True)'}), "(blank=True, choices=[(1, 'paticipant'), (2,\n 'business'), (3, 'event'), (4, 'admin')], null=True)\n", (412, 513), False, 'from django.db import migrations, models\n')]
|
import sys
from argparse import ArgumentParser
if __name__ == "__main__":
argv = sys.argv[1:]
parser = ArgumentParser()
if len(argv) == 0:
parser.print_help()
parser.exit(1)
parser.add_argument("dataset_path", type=str,
help="Path to the directory containing the kit-mld dataset")
parser.add_argument("--gold_format", "-g", choices=["original", "seb", "csv", "answer"], type=str, nargs=1,
dest="gold_format",
help="Format of the gold annotations. original: path to the original dataset directory to use."
"seb: a file that contains the paths of the original dataset files included in a split. "
"csv: a file that contains the dataset as a dataframe", required=False, default=["csv"])
|
[
"argparse.ArgumentParser"
] |
[((112, 128), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (126, 128), False, 'from argparse import ArgumentParser\n')]
|
"""Tests related to embargoes of registrations"""
import datetime
import json
import mock
from nose.tools import * #noqa
from tests.base import fake, OsfTestCase
from tests.factories import (
AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory,
RegistrationFactory, UserFactory, UnconfirmedUserFactory
)
from framework.exceptions import PermissionsError
from modularodm.exceptions import ValidationValueError
from website.exceptions import (
InvalidEmbargoDisapprovalToken, InvalidEmbargoApprovalToken, NodeStateError,
)
from website.models import Embargo, Node
from website.project.model import ensure_schemas
class RegistrationEmbargoModelsTestCase(OsfTestCase):
def setUp(self):
super(RegistrationEmbargoModelsTestCase, self).setUp()
self.user = UserFactory()
self.project = ProjectFactory(creator=self.user)
self.registration = RegistrationFactory(project=self.project)
self.embargo = EmbargoFactory(user=self.user)
self.valid_embargo_end_date = datetime.datetime.utcnow() + datetime.timedelta(days=3)
# Validator tests
def test_invalid_state_raises_ValidationValueError(self):
with assert_raises(ValidationValueError):
self.embargo.state = 'not a valid state'
self.embargo.save()
# Node#_initiate_embargo tests
def test__initiate_embargo_does_not_save_embargo(self):
initial_count = Embargo.find().count()
self.registration._initiate_embargo(
self.user,
self.valid_embargo_end_date,
for_existing_registration=True
)
self.assertEqual(Embargo.find().count(), initial_count)
def test__initiate_embargo_does_not_create_tokens_for_unregistered_admin(self):
unconfirmed_user = UnconfirmedUserFactory()
self.registration.contributors.append(unconfirmed_user)
self.registration.add_permission(unconfirmed_user, 'admin', save=True)
assert_true(self.registration.has_permission(unconfirmed_user, 'admin'))
embargo = self.registration._initiate_embargo(
self.user,
self.valid_embargo_end_date,
for_existing_registration=True
)
assert_true(self.user._id in embargo.approval_state)
assert_false(unconfirmed_user._id in embargo.approval_state)
def test__initiate_embargo_with_save_does_save_embargo(self):
initial_count = Embargo.find().count()
self.registration._initiate_embargo(
self.user,
self.valid_embargo_end_date,
for_existing_registration=True,
save=True
)
self.assertEqual(Embargo.find().count(), initial_count + 1)
# Backref tests
def test_embargo_initiator_has_backref(self):
self.registration.embargo_registration(
self.user,
self.valid_embargo_end_date
)
self.registration.save()
self.registration.reload()
assert_equal(len(self.user.embargo__embargoed), 1)
# Node#embargo_registration tests
def test_embargo_from_non_admin_raises_PermissionsError(self):
self.registration.remove_permission(self.user, 'admin')
self.registration.save()
self.registration.reload()
with assert_raises(PermissionsError):
self.registration.embargo_registration(self.user, self.valid_embargo_end_date)
def test_embargo_end_date_in_past_raises_ValidationValueError(self):
with assert_raises(ValidationValueError):
self.registration.embargo_registration(
self.user,
datetime.datetime(1999, 1, 1)
)
def test_embargo_end_date_today_raises_ValidationValueError(self):
with assert_raises(ValidationValueError):
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow()
)
def test_embargo_end_date_in_far_future_raises_ValidationValueError(self):
with assert_raises(ValidationValueError):
self.registration.embargo_registration(
self.user,
datetime.datetime(2099, 1, 1)
)
def test_embargo_with_valid_end_date_starts_pending_embargo(self):
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
assert_true(self.registration.pending_embargo)
def test_embargo_public_project_makes_private_pending_embargo(self):
self.registration.is_public = True
assert_true(self.registration.is_public)
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
assert_true(self.registration.pending_embargo)
assert_false(self.registration.is_public)
def test_embargo_non_registration_raises_NodeStateError(self):
self.registration.is_registration = False
self.registration.save()
with assert_raises(NodeStateError):
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
assert_false(self.registration.pending_embargo)
# Embargo#approve_embargo tests
def test_invalid_approval_token_raises_InvalidEmbargoApprovalToken(self):
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
assert_true(self.registration.pending_embargo)
invalid_approval_token = 'not a real token'
with assert_raises(InvalidEmbargoApprovalToken):
self.registration.embargo.approve_embargo(self.user, invalid_approval_token)
assert_true(self.registration.pending_embargo)
assert_false(self.registration.embargo_end_date)
def test_non_admin_approval_token_raises_PermissionsError(self):
non_admin = UserFactory()
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
assert_true(self.registration.pending_embargo)
approval_token = self.registration.embargo.approval_state[self.user._id]['approval_token']
with assert_raises(PermissionsError):
self.registration.embargo.approve_embargo(non_admin, approval_token)
assert_true(self.registration.pending_embargo)
def test_one_approval_with_one_admin_embargoes(self):
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
assert_true(self.registration.pending_embargo)
approval_token = self.registration.embargo.approval_state[self.user._id]['approval_token']
self.registration.embargo.approve_embargo(self.user, approval_token)
assert_true(self.registration.embargo_end_date)
assert_false(self.registration.pending_embargo)
def test_approval_adds_to_parent_projects_log(self):
initial_project_logs = len(self.registration.registered_from.logs)
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
approval_token = self.registration.embargo.approval_state[self.user._id]['approval_token']
self.registration.embargo.approve_embargo(self.user, approval_token)
# Logs: Created, registered, embargo initiated, embargo approved
assert_equal(len(self.registration.registered_from.logs), initial_project_logs + 2)
def test_one_approval_with_two_admins_stays_pending(self):
admin2 = UserFactory()
self.registration.contributors.append(admin2)
self.registration.add_permission(admin2, 'admin', save=True)
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
# First admin approves
approval_token = self.registration.embargo.approval_state[self.user._id]['approval_token']
self.registration.embargo.approve_embargo(self.user, approval_token)
assert_true(self.registration.pending_embargo)
num_of_approvals = sum([val['has_approved'] for val in self.registration.embargo.approval_state.values()])
assert_equal(num_of_approvals, 1)
# Second admin approves
approval_token = self.registration.embargo.approval_state[admin2._id]['approval_token']
self.registration.embargo.approve_embargo(admin2, approval_token)
assert_true(self.registration.embargo_end_date)
assert_false(self.registration.pending_embargo)
num_of_approvals = sum([val['has_approved'] for val in self.registration.embargo.approval_state.values()])
assert_equal(num_of_approvals, 2)
# Embargo#disapprove_embargo tests
def test_invalid_disapproval_token_raises_InvalidEmbargoDisapprovalToken(self):
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
assert_true(self.registration.pending_embargo)
with assert_raises(InvalidEmbargoDisapprovalToken):
self.registration.embargo.disapprove_embargo(self.user, fake.sentence())
assert_true(self.registration.pending_embargo)
assert_false(self.registration.embargo_end_date)
def test_non_admin_disapproval_token_raises_PermissionsError(self):
non_admin = UserFactory()
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
assert_true(self.registration.pending_embargo)
disapproval_token = self.registration.embargo.approval_state[self.user._id]['disapproval_token']
with assert_raises(PermissionsError):
self.registration.embargo.disapprove_embargo(non_admin, disapproval_token)
assert_true(self.registration.pending_embargo)
def test_one_disapproval_cancels_embargo(self):
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
assert_true(self.registration.pending_embargo)
disapproval_token = self.registration.embargo.approval_state[self.user._id]['disapproval_token']
self.registration.embargo.disapprove_embargo(self.user, disapproval_token)
assert_equal(self.registration.embargo.state, Embargo.CANCELLED)
assert_false(self.registration.pending_embargo)
assert_false(self.registration.embargo_end_date)
def test_disapproval_adds_to_parent_projects_log(self):
initial_project_logs = len(self.registration.registered_from.logs)
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
disapproval_token = self.registration.embargo.approval_state[self.user._id]['disapproval_token']
registered_from = self.registration.registered_from
self.registration.embargo.disapprove_embargo(self.user, disapproval_token)
# Logs: Created, registered, embargo initiated, embargo cancelled
assert_equal(len(registered_from.logs), initial_project_logs + 2)
def test_cancelling_embargo_deletes_parent_registration(self):
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
disapproval_token = self.registration.embargo.approval_state[self.user._id]['disapproval_token']
self.registration.embargo.disapprove_embargo(self.user, disapproval_token)
assert_equal(self.registration.embargo.state, Embargo.CANCELLED)
assert_true(self.registration.is_deleted)
def test_cancelling_embargo_for_existing_registration_does_not_delete_registration(self):
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10),
for_existing_registration=True
)
self.registration.save()
disapproval_token = self.registration.embargo.approval_state[self.user._id]['disapproval_token']
self.registration.embargo.disapprove_embargo(self.user, disapproval_token)
assert_equal(self.registration.embargo.state, Embargo.CANCELLED)
assert_false(self.registration.is_deleted)
# Embargo property tests
def test_new_registration_is_pending_registration(self):
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
assert_true(self.registration.pending_registration)
def test_existing_registration_is_not_pending_registration(self):
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10),
for_existing_registration=True
)
self.registration.save()
assert_false(self.registration.pending_registration)
class RegistrationWithChildNodesEmbargoModelTestCase(OsfTestCase):
def setUp(self):
super(RegistrationWithChildNodesEmbargoModelTestCase, self).setUp()
self.user = AuthUserFactory()
self.auth = self.user.auth
self.valid_embargo_end_date = datetime.datetime.utcnow() + datetime.timedelta(days=3)
self.project = ProjectFactory(title='Root', is_public=False, creator=self.user)
self.component = NodeFactory(
creator=self.user,
parent=self.project,
title='Component'
)
self.subproject = ProjectFactory(
creator=self.user,
parent=self.project,
title='Subproject'
)
self.subproject_component = NodeFactory(
creator=self.user,
parent=self.subproject,
title='Subcomponent'
)
self.registration = RegistrationFactory(project=self.project)
# Reload the registration; else tests won't catch failures to save
self.registration.reload()
def test_approval_embargoes_descendant_nodes(self):
# Initiate embargo for parent registration
self.registration.embargo_registration(
self.user,
self.valid_embargo_end_date
)
self.registration.save()
assert_true(self.registration.pending_embargo)
# Ensure descendant nodes are pending embargo
descendants = self.registration.get_descendants_recursive()
for node in descendants:
assert_true(node.pending_embargo)
# Approve parent registration's embargo
approval_token = self.registration.embargo.approval_state[self.user._id]['approval_token']
self.registration.embargo.approve_embargo(self.user, approval_token)
assert_true(self.registration.embargo.embargo_end_date)
# Ensure descendant nodes are in embargo
descendants = self.registration.get_descendants_recursive()
for node in descendants:
assert_true(node.embargo_end_date)
def test_disapproval_cancels_embargo_on_descendant_nodes(self):
# Initiate embargo on parent registration
self.registration.embargo_registration(
self.user,
self.valid_embargo_end_date
)
self.registration.save()
assert_true(self.registration.pending_embargo)
# Ensure descendant nodes are pending embargo
descendants = self.registration.get_descendants_recursive()
for node in descendants:
assert_true(node.pending_embargo)
# Disapprove parent registration's embargo
disapproval_token = self.registration.embargo.approval_state[self.user._id]['disapproval_token']
self.registration.embargo.disapprove_embargo(self.user, disapproval_token)
assert_false(self.registration.pending_embargo)
assert_false(self.registration.embargo_end_date)
assert_equal(self.registration.embargo.state, Embargo.CANCELLED)
# Ensure descendant nodes' embargoes are cancelled
descendants = self.registration.get_descendants_recursive()
for node in descendants:
assert_false(node.pending_embargo)
assert_false(node.embargo_end_date)
class RegistrationEmbargoApprovalDisapprovalViewsTestCase(OsfTestCase):
def setUp(self):
super(RegistrationEmbargoApprovalDisapprovalViewsTestCase, self).setUp()
self.user = AuthUserFactory()
self.registration = RegistrationFactory(creator=self.user)
# node_registration_embargo_approve tests
def test_GET_from_unauthorized_user_raises_HTTPForbidden(self):
unauthorized_user = AuthUserFactory()
res = self.app.get(
self.registration.web_url_for('node_registration_embargo_approve', token=fake.sentence()),
auth=unauthorized_user.auth,
expect_errors=True
)
assert_equal(res.status_code, 403)
def test_GET_approve_registration_without_embargo_raises_HTTPBad_Request(self):
assert_false(self.registration.pending_embargo)
res = self.app.get(
self.registration.web_url_for('node_registration_embargo_approve', token=fake.sentence()),
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
def test_GET_approve_with_invalid_token_returns_HTTPBad_Request(self):
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
assert_true(self.registration.pending_embargo)
res = self.app.get(
self.registration.web_url_for('node_registration_embargo_approve', token=fake.sentence()),
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
def test_GET_approve_with_wrong_token_returns_HTTPBad_Request(self):
admin2 = UserFactory()
self.registration.contributors.append(admin2)
self.registration.add_permission(admin2, 'admin', save=True)
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
assert_true(self.registration.pending_embargo)
wrong_approval_token = self.registration.embargo.approval_state[admin2._id]['approval_token']
res = self.app.get(
self.registration.web_url_for('node_registration_embargo_approve', token=wrong_approval_token),
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
def test_GET_approve_with_wrong_admins_token_returns_HTTPBad_Request(self):
admin2 = UserFactory()
self.registration.contributors.append(admin2)
self.registration.add_permission(admin2, 'admin', save=True)
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
assert_true(self.registration.pending_embargo)
wrong_approval_token = self.registration.embargo.approval_state[admin2._id]['approval_token']
res = self.app.get(
self.registration.web_url_for('node_registration_embargo_approve', token=wrong_approval_token),
auth=self.user.auth,
expect_errors=True
)
assert_true(self.registration.pending_embargo)
assert_equal(res.status_code, 400)
def test_GET_approve_with_valid_token_returns_redirect(self):
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
assert_true(self.registration.pending_embargo)
approval_token = self.registration.embargo.approval_state[self.user._id]['approval_token']
res = self.app.get(
self.registration.web_url_for('node_registration_embargo_approve', token=approval_token),
auth=self.user.auth,
)
self.registration.embargo.reload()
assert_true(self.registration.embargo_end_date)
assert_false(self.registration.pending_embargo)
assert_equal(res.status_code, 302)
# node_registration_embargo_disapprove tests
def test_GET_from_unauthorized_user_returns_HTTPForbidden(self):
unauthorized_user = AuthUserFactory()
res = self.app.get(
self.registration.web_url_for('node_registration_embargo_disapprove', token=fake.sentence()),
auth=unauthorized_user.auth,
expect_errors=True
)
assert_equal(res.status_code, 403)
def test_GET_disapprove_registration_without_embargo_HTTPBad_Request(self):
assert_false(self.registration.pending_embargo)
res = self.app.get(
self.registration.web_url_for('node_registration_embargo_disapprove', token=fake.sentence()),
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
def test_GET_disapprove_with_invalid_token_returns_HTTPBad_Request(self):
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
assert_true(self.registration.pending_embargo)
res = self.app.get(
self.registration.web_url_for('node_registration_embargo_disapprove', token=fake.sentence()),
auth=self.user.auth,
expect_errors=True
)
self.registration.embargo.reload()
assert_true(self.registration.pending_embargo)
assert_equal(res.status_code, 400)
def test_GET_disapprove_with_wrong_admins_token_returns_HTTPBad_Request(self):
admin2 = UserFactory()
self.registration.contributors.append(admin2)
self.registration.add_permission(admin2, 'admin', save=True)
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
assert_true(self.registration.pending_embargo)
wrong_disapproval_token = self.registration.embargo.approval_state[admin2._id]['disapproval_token']
res = self.app.get(
self.registration.web_url_for('node_registration_embargo_disapprove', token=wrong_disapproval_token),
auth=self.user.auth,
expect_errors=True
)
assert_true(self.registration.pending_embargo)
assert_equal(res.status_code, 400)
def test_GET_disapprove_with_valid_token_returns_redirect_to_parent(self):
project = ProjectFactory(creator=self.user)
registration = RegistrationFactory(project=project)
registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
registration.save()
assert_true(registration.pending_embargo)
disapproval_token = registration.embargo.approval_state[self.user._id]['disapproval_token']
res = self.app.get(
registration.web_url_for('node_registration_embargo_disapprove', token=disapproval_token),
auth=self.user.auth,
)
registration.embargo.reload()
assert_equal(registration.embargo.state, Embargo.CANCELLED)
assert_false(registration.embargo_end_date)
assert_false(registration.pending_embargo)
assert_equal(res.status_code, 302)
assert_true(project._id in res.location)
def test_GET_disapprove_for_existing_registration_with_valid_token_returns_redirect_to_registration(self):
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10),
for_existing_registration=True
)
self.registration.save()
assert_true(self.registration.pending_embargo)
disapproval_token = self.registration.embargo.approval_state[self.user._id]['disapproval_token']
res = self.app.get(
self.registration.web_url_for('node_registration_embargo_disapprove', token=disapproval_token),
auth=self.user.auth,
)
self.registration.embargo.reload()
assert_equal(self.registration.embargo.state, Embargo.CANCELLED)
assert_false(self.registration.embargo_end_date)
assert_false(self.registration.pending_embargo)
assert_equal(res.status_code, 302)
assert_true(self.registration._id in res.location)
class RegistrationEmbargoViewsTestCase(OsfTestCase):
def setUp(self):
super(RegistrationEmbargoViewsTestCase, self).setUp()
ensure_schemas()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
self.registration = RegistrationFactory(project=self.project, creator=self.user)
current_month = datetime.datetime.now().strftime("%B")
current_year = datetime.datetime.now().strftime("%Y")
self.valid_make_public_payload = json.dumps({
u'embargoEndDate': u'Fri, 01, {month} {year} 00:00:00 GMT'.format(
month=current_month,
year=current_year
),
u'registrationChoice': 'immediate',
u'summary': unicode(fake.sentence())
})
valid_date = datetime.datetime.now() + datetime.timedelta(days=180)
self.valid_embargo_payload = json.dumps({
u'embargoEndDate': unicode(valid_date.strftime('%a, %d, %B %Y %H:%M:%S')) + u' GMT',
u'registrationChoice': 'embargo',
u'summary': unicode(fake.sentence())
})
self.invalid_embargo_date_payload = json.dumps({
u'embargoEndDate': u"Thu, 01 {month} {year} 05:00:00 GMT".format(
month=current_month,
year=str(int(current_year)-1)
),
u'registrationChoice': 'embargo',
u'summary': unicode(fake.sentence())
})
@mock.patch('framework.tasks.handlers.enqueue_task')
def test_POST_register_make_public_immediately_creates_public_registration(self, mock_enqueue):
res = self.app.post(
self.project.api_url_for('node_register_template_page_post', template=u'Open-Ended_Registration'),
self.valid_make_public_payload,
content_type='application/json',
auth=self.user.auth
)
assert_equal(res.status_code, 201)
registration = Node.find().sort('-registered_date')[0]
assert_true(registration.is_registration)
assert_true(registration.is_public)
@mock.patch('framework.tasks.handlers.enqueue_task')
def test_POST_register_make_public_immediately_makes_children_public(self, mock_enqueue):
component = NodeFactory(
creator=self.user,
parent=self.project,
title='Component'
)
subproject = ProjectFactory(
creator=self.user,
parent=self.project,
title='Subproject'
)
subproject_component = NodeFactory(
creator=self.user,
parent=subproject,
title='Subcomponent'
)
res = self.app.post(
self.project.api_url_for('node_register_template_page_post', template=u'Open-Ended_Registration'),
self.valid_make_public_payload,
content_type='application/json',
auth=self.user.auth
)
self.project.reload()
# Last node directly registered from self.project
registration = Node.load(self.project.node__registrations[-1])
assert_true(registration.is_public)
for node in registration.get_descendants_recursive():
assert_true(node.is_registration)
assert_true(node.is_public)
@mock.patch('framework.tasks.handlers.enqueue_task')
def test_POST_register_embargo_is_not_public(self, mock_enqueue):
res = self.app.post(
self.project.api_url_for('node_register_template_page_post', template=u'Open-Ended_Registration'),
self.valid_embargo_payload,
content_type='application/json',
auth=self.user.auth
)
assert_equal(res.status_code, 201)
registration = Node.find().sort('-registered_date')[0]
assert_true(registration.is_registration)
assert_false(registration.is_public)
assert_true(registration.pending_registration)
assert_is_not_none(registration.embargo)
@mock.patch('framework.tasks.handlers.enqueue_task')
def test_POST_invalid_embargo_end_date_returns_HTTPBad_Request(self, mock_enqueue):
res = self.app.post(
self.project.api_url_for('node_register_template_page_post', template=u'Open-Ended_Registration'),
self.invalid_embargo_date_payload,
content_type='application/json',
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
@mock.patch('framework.tasks.handlers.enqueue_task')
def test_valid_POST_embargo_adds_to_parent_projects_log(self, mock_enquque):
initial_project_logs = len(self.project.logs)
res = self.app.post(
self.project.api_url_for('node_register_template_page_post', template=u'Open-Ended_Registration'),
self.valid_embargo_payload,
content_type='application/json',
auth=self.user.auth
)
self.project.reload()
# Logs: Created, registered, embargo initiated
assert_equal(len(self.project.logs), initial_project_logs + 1)
|
[
"tests.factories.NodeFactory",
"tests.factories.UnconfirmedUserFactory",
"tests.factories.UserFactory",
"website.models.Embargo.find",
"tests.factories.RegistrationFactory",
"tests.factories.ProjectFactory",
"mock.patch",
"datetime.datetime.now",
"website.project.model.ensure_schemas",
"datetime.datetime.utcnow",
"datetime.datetime",
"website.models.Node.find",
"datetime.timedelta",
"tests.base.fake.sentence",
"website.models.Node.load",
"tests.factories.AuthUserFactory",
"tests.factories.EmbargoFactory"
] |
[((26978, 27029), 'mock.patch', 'mock.patch', (['"""framework.tasks.handlers.enqueue_task"""'], {}), "('framework.tasks.handlers.enqueue_task')\n", (26988, 27029), False, 'import mock\n'), ((27609, 27660), 'mock.patch', 'mock.patch', (['"""framework.tasks.handlers.enqueue_task"""'], {}), "('framework.tasks.handlers.enqueue_task')\n", (27619, 27660), False, 'import mock\n'), ((28812, 28863), 'mock.patch', 'mock.patch', (['"""framework.tasks.handlers.enqueue_task"""'], {}), "('framework.tasks.handlers.enqueue_task')\n", (28822, 28863), False, 'import mock\n'), ((29515, 29566), 'mock.patch', 'mock.patch', (['"""framework.tasks.handlers.enqueue_task"""'], {}), "('framework.tasks.handlers.enqueue_task')\n", (29525, 29566), False, 'import mock\n'), ((30011, 30062), 'mock.patch', 'mock.patch', (['"""framework.tasks.handlers.enqueue_task"""'], {}), "('framework.tasks.handlers.enqueue_task')\n", (30021, 30062), False, 'import mock\n'), ((795, 808), 'tests.factories.UserFactory', 'UserFactory', ([], {}), '()\n', (806, 808), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((832, 865), 'tests.factories.ProjectFactory', 'ProjectFactory', ([], {'creator': 'self.user'}), '(creator=self.user)\n', (846, 865), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((894, 935), 'tests.factories.RegistrationFactory', 'RegistrationFactory', ([], {'project': 'self.project'}), '(project=self.project)\n', (913, 935), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((959, 989), 'tests.factories.EmbargoFactory', 'EmbargoFactory', ([], {'user': 'self.user'}), '(user=self.user)\n', (973, 989), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((1785, 1809), 'tests.factories.UnconfirmedUserFactory', 'UnconfirmedUserFactory', ([], {}), '()\n', (1807, 1809), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((6123, 6136), 'tests.factories.UserFactory', 'UserFactory', ([], {}), '()\n', (6134, 6136), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((7982, 7995), 'tests.factories.UserFactory', 'UserFactory', ([], {}), '()\n', (7993, 7995), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((9906, 9919), 'tests.factories.UserFactory', 'UserFactory', ([], {}), '()\n', (9917, 9919), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((13911, 13928), 'tests.factories.AuthUserFactory', 'AuthUserFactory', ([], {}), '()\n', (13926, 13928), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((14081, 14145), 'tests.factories.ProjectFactory', 'ProjectFactory', ([], {'title': '"""Root"""', 'is_public': '(False)', 'creator': 'self.user'}), "(title='Root', is_public=False, creator=self.user)\n", (14095, 14145), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((14171, 14241), 'tests.factories.NodeFactory', 'NodeFactory', ([], {'creator': 'self.user', 'parent': 'self.project', 'title': '"""Component"""'}), "(creator=self.user, parent=self.project, title='Component')\n", (14182, 14241), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((14314, 14388), 'tests.factories.ProjectFactory', 'ProjectFactory', ([], {'creator': 'self.user', 'parent': 'self.project', 'title': '"""Subproject"""'}), "(creator=self.user, parent=self.project, title='Subproject')\n", (14328, 14388), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((14471, 14547), 'tests.factories.NodeFactory', 'NodeFactory', ([], {'creator': 'self.user', 'parent': 'self.subproject', 'title': '"""Subcomponent"""'}), "(creator=self.user, parent=self.subproject, title='Subcomponent')\n", (14482, 14547), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((14622, 14663), 'tests.factories.RegistrationFactory', 'RegistrationFactory', ([], {'project': 'self.project'}), '(project=self.project)\n', (14641, 14663), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((17188, 17205), 'tests.factories.AuthUserFactory', 'AuthUserFactory', ([], {}), '()\n', (17203, 17205), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((17234, 17272), 'tests.factories.RegistrationFactory', 'RegistrationFactory', ([], {'creator': 'self.user'}), '(creator=self.user)\n', (17253, 17272), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((17416, 17433), 'tests.factories.AuthUserFactory', 'AuthUserFactory', ([], {}), '()\n', (17431, 17433), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((18733, 18746), 'tests.factories.UserFactory', 'UserFactory', ([], {}), '()\n', (18744, 18746), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((19562, 19575), 'tests.factories.UserFactory', 'UserFactory', ([], {}), '()\n', (19573, 19575), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((21271, 21288), 'tests.factories.AuthUserFactory', 'AuthUserFactory', ([], {}), '()\n', (21286, 21288), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((22704, 22717), 'tests.factories.UserFactory', 'UserFactory', ([], {}), '()\n', (22715, 22717), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((23600, 23633), 'tests.factories.ProjectFactory', 'ProjectFactory', ([], {'creator': 'self.user'}), '(creator=self.user)\n', (23614, 23633), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((23657, 23693), 'tests.factories.RegistrationFactory', 'RegistrationFactory', ([], {'project': 'project'}), '(project=project)\n', (23676, 23693), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((25649, 25665), 'website.project.model.ensure_schemas', 'ensure_schemas', ([], {}), '()\n', (25663, 25665), False, 'from website.project.model import ensure_schemas\n'), ((25686, 25703), 'tests.factories.AuthUserFactory', 'AuthUserFactory', ([], {}), '()\n', (25701, 25703), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((25727, 25760), 'tests.factories.ProjectFactory', 'ProjectFactory', ([], {'creator': 'self.user'}), '(creator=self.user)\n', (25741, 25760), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((25789, 25849), 'tests.factories.RegistrationFactory', 'RegistrationFactory', ([], {'project': 'self.project', 'creator': 'self.user'}), '(project=self.project, creator=self.user)\n', (25808, 25849), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((27775, 27845), 'tests.factories.NodeFactory', 'NodeFactory', ([], {'creator': 'self.user', 'parent': 'self.project', 'title': '"""Component"""'}), "(creator=self.user, parent=self.project, title='Component')\n", (27786, 27845), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((27913, 27987), 'tests.factories.ProjectFactory', 'ProjectFactory', ([], {'creator': 'self.user', 'parent': 'self.project', 'title': '"""Subproject"""'}), "(creator=self.user, parent=self.project, title='Subproject')\n", (27927, 27987), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((28065, 28136), 'tests.factories.NodeFactory', 'NodeFactory', ([], {'creator': 'self.user', 'parent': 'subproject', 'title': '"""Subcomponent"""'}), "(creator=self.user, parent=subproject, title='Subcomponent')\n", (28076, 28136), False, 'from tests.factories import AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory, RegistrationFactory, UserFactory, UnconfirmedUserFactory\n'), ((28566, 28613), 'website.models.Node.load', 'Node.load', (['self.project.node__registrations[-1]'], {}), '(self.project.node__registrations[-1])\n', (28575, 28613), False, 'from website.models import Embargo, Node\n'), ((1028, 1054), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (1052, 1054), False, 'import datetime\n'), ((1057, 1083), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(3)'}), '(days=3)\n', (1075, 1083), False, 'import datetime\n'), ((14002, 14028), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (14026, 14028), False, 'import datetime\n'), ((14031, 14057), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(3)'}), '(days=3)\n', (14049, 14057), False, 'import datetime\n'), ((26325, 26348), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (26346, 26348), False, 'import datetime\n'), ((26351, 26379), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(180)'}), '(days=180)\n', (26369, 26379), False, 'import datetime\n'), ((1424, 1438), 'website.models.Embargo.find', 'Embargo.find', ([], {}), '()\n', (1436, 1438), False, 'from website.models import Embargo, Node\n'), ((2428, 2442), 'website.models.Embargo.find', 'Embargo.find', ([], {}), '()\n', (2440, 2442), False, 'from website.models import Embargo, Node\n'), ((3617, 3646), 'datetime.datetime', 'datetime.datetime', (['(1999)', '(1)', '(1)'], {}), '(1999, 1, 1)\n', (3634, 3646), False, 'import datetime\n'), ((3878, 3904), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (3902, 3904), False, 'import datetime\n'), ((4144, 4173), 'datetime.datetime', 'datetime.datetime', (['(2099)', '(1)', '(1)'], {}), '(2099, 1, 1)\n', (4161, 4173), False, 'import datetime\n'), ((4343, 4369), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (4367, 4369), False, 'import datetime\n'), ((4372, 4399), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (4390, 4399), False, 'import datetime\n'), ((4747, 4773), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (4771, 4773), False, 'import datetime\n'), ((4776, 4803), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (4794, 4803), False, 'import datetime\n'), ((5567, 5593), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (5591, 5593), False, 'import datetime\n'), ((5596, 5623), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (5614, 5623), False, 'import datetime\n'), ((6220, 6246), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (6244, 6246), False, 'import datetime\n'), ((6249, 6276), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (6267, 6276), False, 'import datetime\n'), ((6799, 6825), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (6823, 6825), False, 'import datetime\n'), ((6828, 6855), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (6846, 6855), False, 'import datetime\n'), ((7459, 7485), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (7483, 7485), False, 'import datetime\n'), ((7488, 7515), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (7506, 7515), False, 'import datetime\n'), ((8202, 8228), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (8226, 8228), False, 'import datetime\n'), ((8231, 8258), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (8249, 8258), False, 'import datetime\n'), ((9401, 9427), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (9425, 9427), False, 'import datetime\n'), ((9430, 9457), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (9448, 9457), False, 'import datetime\n'), ((9684, 9699), 'tests.base.fake.sentence', 'fake.sentence', ([], {}), '()\n', (9697, 9699), False, 'from tests.base import fake, OsfTestCase\n'), ((10003, 10029), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (10027, 10029), False, 'import datetime\n'), ((10032, 10059), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (10050, 10059), False, 'import datetime\n'), ((10588, 10614), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (10612, 10614), False, 'import datetime\n'), ((10617, 10644), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (10635, 10644), False, 'import datetime\n'), ((11337, 11363), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (11361, 11363), False, 'import datetime\n'), ((11366, 11393), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (11384, 11393), False, 'import datetime\n'), ((11985, 12011), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (12009, 12011), False, 'import datetime\n'), ((12014, 12041), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (12032, 12041), False, 'import datetime\n'), ((12575, 12601), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (12599, 12601), False, 'import datetime\n'), ((12604, 12631), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (12622, 12631), False, 'import datetime\n'), ((13206, 13232), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (13230, 13232), False, 'import datetime\n'), ((13235, 13262), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (13253, 13262), False, 'import datetime\n'), ((13520, 13546), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (13544, 13546), False, 'import datetime\n'), ((13549, 13576), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (13567, 13576), False, 'import datetime\n'), ((18238, 18264), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (18262, 18264), False, 'import datetime\n'), ((18267, 18294), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (18285, 18294), False, 'import datetime\n'), ((18953, 18979), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (18977, 18979), False, 'import datetime\n'), ((18982, 19009), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (19000, 19009), False, 'import datetime\n'), ((19782, 19808), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (19806, 19808), False, 'import datetime\n'), ((19811, 19838), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (19829, 19838), False, 'import datetime\n'), ((20498, 20524), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (20522, 20524), False, 'import datetime\n'), ((20527, 20554), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (20545, 20554), False, 'import datetime\n'), ((22098, 22124), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (22122, 22124), False, 'import datetime\n'), ((22127, 22154), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (22145, 22154), False, 'import datetime\n'), ((22924, 22950), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (22948, 22950), False, 'import datetime\n'), ((22953, 22980), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (22971, 22980), False, 'import datetime\n'), ((23772, 23798), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (23796, 23798), False, 'import datetime\n'), ((23801, 23828), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (23819, 23828), False, 'import datetime\n'), ((24688, 24714), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (24712, 24714), False, 'import datetime\n'), ((24717, 24744), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (24735, 24744), False, 'import datetime\n'), ((25875, 25898), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (25896, 25898), False, 'import datetime\n'), ((25937, 25960), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (25958, 25960), False, 'import datetime\n'), ((1634, 1648), 'website.models.Embargo.find', 'Embargo.find', ([], {}), '()\n', (1646, 1648), False, 'from website.models import Embargo, Node\n'), ((2661, 2675), 'website.models.Embargo.find', 'Embargo.find', ([], {}), '()\n', (2673, 2675), False, 'from website.models import Embargo, Node\n'), ((5242, 5268), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (5266, 5268), False, 'import datetime\n'), ((5271, 5298), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (5289, 5298), False, 'import datetime\n'), ((17547, 17562), 'tests.base.fake.sentence', 'fake.sentence', ([], {}), '()\n', (17560, 17562), False, 'from tests.base import fake, OsfTestCase\n'), ((17944, 17959), 'tests.base.fake.sentence', 'fake.sentence', ([], {}), '()\n', (17957, 17959), False, 'from tests.base import fake, OsfTestCase\n'), ((18507, 18522), 'tests.base.fake.sentence', 'fake.sentence', ([], {}), '()\n', (18520, 18522), False, 'from tests.base import fake, OsfTestCase\n'), ((21405, 21420), 'tests.base.fake.sentence', 'fake.sentence', ([], {}), '()\n', (21418, 21420), False, 'from tests.base import fake, OsfTestCase\n'), ((21801, 21816), 'tests.base.fake.sentence', 'fake.sentence', ([], {}), '()\n', (21814, 21816), False, 'from tests.base import fake, OsfTestCase\n'), ((22370, 22385), 'tests.base.fake.sentence', 'fake.sentence', ([], {}), '()\n', (22383, 22385), False, 'from tests.base import fake, OsfTestCase\n'), ((26276, 26291), 'tests.base.fake.sentence', 'fake.sentence', ([], {}), '()\n', (26289, 26291), False, 'from tests.base import fake, OsfTestCase\n'), ((26605, 26620), 'tests.base.fake.sentence', 'fake.sentence', ([], {}), '()\n', (26618, 26620), False, 'from tests.base import fake, OsfTestCase\n'), ((26944, 26959), 'tests.base.fake.sentence', 'fake.sentence', ([], {}), '()\n', (26957, 26959), False, 'from tests.base import fake, OsfTestCase\n'), ((27468, 27479), 'website.models.Node.find', 'Node.find', ([], {}), '()\n', (27477, 27479), False, 'from website.models import Embargo, Node\n'), ((29269, 29280), 'website.models.Node.find', 'Node.find', ([], {}), '()\n', (29278, 29280), False, 'from website.models import Embargo, Node\n')]
|
import falcon
import simplejson as json
import mysql.connector
import config
import uuid
from core.useractivity import user_logger, access_control
class CombinedEquipmentCollection:
@staticmethod
def __init__():
""" Initializes CombinedEquipmentCollection"""
pass
@staticmethod
def on_options(req, resp):
resp.status = falcon.HTTP_200
@staticmethod
def on_get(req, resp):
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor(dictionary=True)
query = (" SELECT id, name, uuid "
" FROM tbl_cost_centers ")
cursor.execute(query)
rows_cost_centers = cursor.fetchall()
cost_center_dict = dict()
if rows_cost_centers is not None and len(rows_cost_centers) > 0:
for row in rows_cost_centers:
cost_center_dict[row['id']] = {"id": row['id'],
"name": row['name'],
"uuid": row['uuid']}
query = (" SELECT id, name, uuid, "
" is_input_counted, is_output_counted, "
" cost_center_id, description "
" FROM tbl_combined_equipments "
" ORDER BY id ")
cursor.execute(query)
rows_combined_equipments = cursor.fetchall()
result = list()
if rows_combined_equipments is not None and len(rows_combined_equipments) > 0:
for row in rows_combined_equipments:
cost_center = cost_center_dict.get(row['cost_center_id'], None)
meta_result = {"id": row['id'],
"name": row['name'],
"uuid": row['uuid'],
"is_input_counted": bool(row['is_input_counted']),
"is_output_counted": bool(row['is_output_counted']),
"cost_center": cost_center,
"description": row['description'],
"qrcode": 'combinedequipment:' + row['uuid']}
result.append(meta_result)
cursor.close()
cnx.disconnect()
resp.text = json.dumps(result)
@staticmethod
@user_logger
def on_post(req, resp):
"""Handles POST requests"""
access_control(req)
try:
raw_json = req.stream.read().decode('utf-8')
except Exception as ex:
raise falcon.HTTPError(falcon.HTTP_400, title='API.ERROR', description=ex)
new_values = json.loads(raw_json)
if 'name' not in new_values['data'].keys() or \
not isinstance(new_values['data']['name'], str) or \
len(str.strip(new_values['data']['name'])) == 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_NAME')
name = str.strip(new_values['data']['name'])
if 'is_input_counted' not in new_values['data'].keys() or \
not isinstance(new_values['data']['is_input_counted'], bool):
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_IS_INPUT_COUNTED_VALUE')
is_input_counted = new_values['data']['is_input_counted']
if 'is_output_counted' not in new_values['data'].keys() or \
not isinstance(new_values['data']['is_output_counted'], bool):
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_IS_OUTPUT_COUNTED_VALUE')
is_output_counted = new_values['data']['is_output_counted']
if 'cost_center_id' not in new_values['data'].keys() or \
not isinstance(new_values['data']['cost_center_id'], int) or \
new_values['data']['cost_center_id'] <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COST_CENTER_ID')
cost_center_id = new_values['data']['cost_center_id']
if 'description' in new_values['data'].keys() and \
new_values['data']['description'] is not None and \
len(str(new_values['data']['description'])) > 0:
description = str.strip(new_values['data']['description'])
else:
description = None
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor()
cursor.execute(" SELECT name "
" FROM tbl_combined_equipments "
" WHERE name = %s ", (name,))
if cursor.fetchone() is not None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.BAD_REQUEST',
description='API.COMBINED_EQUIPMENT_NAME_IS_ALREADY_IN_USE')
if cost_center_id is not None:
cursor.execute(" SELECT name "
" FROM tbl_cost_centers "
" WHERE id = %s ",
(new_values['data']['cost_center_id'],))
row = cursor.fetchone()
if row is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.COST_CENTER_NOT_FOUND')
add_values = (" INSERT INTO tbl_combined_equipments "
" (name, uuid, is_input_counted, is_output_counted, "
" cost_center_id, description) "
" VALUES (%s, %s, %s, %s, %s, %s) ")
cursor.execute(add_values, (name,
str(uuid.uuid4()),
is_input_counted,
is_output_counted,
cost_center_id,
description))
new_id = cursor.lastrowid
cnx.commit()
cursor.close()
cnx.disconnect()
resp.status = falcon.HTTP_201
resp.location = '/combinedequipments/' + str(new_id)
class CombinedEquipmentItem:
@staticmethod
def __init__():
"""Initializes CombinedEquipmentItem"""
pass
@staticmethod
def on_options(req, resp, id_):
resp.status = falcon.HTTP_200
@staticmethod
def on_get(req, resp, id_):
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_ID')
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor(dictionary=True)
query = (" SELECT id, name, uuid "
" FROM tbl_cost_centers ")
cursor.execute(query)
rows_cost_centers = cursor.fetchall()
cost_center_dict = dict()
if rows_cost_centers is not None and len(rows_cost_centers) > 0:
for row in rows_cost_centers:
cost_center_dict[row['id']] = {"id": row['id'],
"name": row['name'],
"uuid": row['uuid']}
query = (" SELECT id, name, uuid, "
" is_input_counted, is_output_counted, "
" cost_center_id, description "
" FROM tbl_combined_equipments "
" WHERE id = %s ")
cursor.execute(query, (id_,))
row = cursor.fetchone()
cursor.close()
cnx.disconnect()
if row is None:
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.COMBINED_EQUIPMENT_NOT_FOUND')
else:
cost_center = cost_center_dict.get(row['cost_center_id'], None)
meta_result = {"id": row['id'],
"name": row['name'],
"uuid": row['uuid'],
"is_input_counted": bool(row['is_input_counted']),
"is_output_counted": bool(row['is_output_counted']),
"cost_center": cost_center,
"description": row['description'],
"qrcode": 'combinedequipment:' + row['uuid']}
resp.text = json.dumps(meta_result)
@staticmethod
@user_logger
def on_delete(req, resp, id_):
access_control(req)
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_ID')
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor()
# check relation with space
cursor.execute(" SELECT space_id "
" FROM tbl_spaces_combined_equipments "
" WHERE combined_equipment_id = %s ",
(id_,))
rows_combined_equipments = cursor.fetchall()
if rows_combined_equipments is not None and len(rows_combined_equipments) > 0:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_400,
title='API.BAD_REQUEST',
description='API.THERE_IS_RELATION_WITH_SPACES')
# check relation with meter
cursor.execute(" SELECT meter_id "
" FROM tbl_combined_equipments_meters "
" WHERE combined_equipment_id = %s ",
(id_,))
rows_meters = cursor.fetchall()
if rows_meters is not None and len(rows_meters) > 0:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_400,
title='API.BAD_REQUEST',
description='API.THERE_IS_RELATION_WITH_METER')
# check relation with offline meter
cursor.execute(" SELECT offline_meter_id "
" FROM tbl_combined_equipments_offline_meters "
" WHERE combined_equipment_id = %s ",
(id_,))
rows_offline_meters = cursor.fetchall()
if rows_offline_meters is not None and len(rows_offline_meters) > 0:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_400,
title='API.BAD_REQUEST',
description='API.THERE_IS_RELATION_WITH_OFFLINE_METER')
# check relation with virtual meter
cursor.execute(" SELECT virtual_meter_id "
" FROM tbl_combined_equipments_virtual_meters "
" WHERE combined_equipment_id = %s ",
(id_,))
rows_virtual_meters = cursor.fetchall()
if rows_virtual_meters is not None and len(rows_virtual_meters) > 0:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_400,
title='API.BAD_REQUEST',
description='API.THERE_IS_RELATION_WITH_VIRTUAL_METER')
# delete all associated parameters
cursor.execute(" DELETE FROM tbl_combined_equipments_parameters WHERE combined_equipment_id = %s ", (id_,))
cnx.commit()
cursor.execute(" DELETE FROM tbl_combined_equipments WHERE id = %s ", (id_,))
cnx.commit()
cursor.close()
cnx.disconnect()
resp.status = falcon.HTTP_204
@staticmethod
@user_logger
def on_put(req, resp, id_):
"""Handles PUT requests"""
access_control(req)
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_ID')
try:
raw_json = req.stream.read().decode('utf-8')
except Exception as ex:
raise falcon.HTTPError(falcon.HTTP_400, title='API.EXCEPTION', description=ex)
new_values = json.loads(raw_json)
if 'name' not in new_values['data'].keys() or \
not isinstance(new_values['data']['name'], str) or \
len(str.strip(new_values['data']['name'])) == 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_NAME')
name = str.strip(new_values['data']['name'])
if 'is_input_counted' not in new_values['data'].keys() or \
not isinstance(new_values['data']['is_input_counted'], bool):
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_IS_INPUT_COUNTED_VALUE')
is_input_counted = new_values['data']['is_input_counted']
if 'is_output_counted' not in new_values['data'].keys() or \
not isinstance(new_values['data']['is_output_counted'], bool):
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_IS_OUTPUT_COUNTED_VALUE')
is_output_counted = new_values['data']['is_output_counted']
if 'cost_center_id' not in new_values['data'].keys() or \
not isinstance(new_values['data']['cost_center_id'], int) or \
new_values['data']['cost_center_id'] <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COST_CENTER_ID')
cost_center_id = new_values['data']['cost_center_id']
if 'description' in new_values['data'].keys() and \
new_values['data']['description'] is not None and \
len(str(new_values['data']['description'])) > 0:
description = str.strip(new_values['data']['description'])
else:
description = None
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor()
cursor.execute(" SELECT name "
" FROM tbl_combined_equipments "
" WHERE id = %s ", (id_,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.COMBINED_EQUIPMENT_NOT_FOUND')
cursor.execute(" SELECT name "
" FROM tbl_combined_equipments "
" WHERE name = %s AND id != %s ", (name, id_))
if cursor.fetchone() is not None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.BAD_REQUEST',
description='API.COMBINED_EQUIPMENT_NAME_IS_ALREADY_IN_USE')
cursor.execute(" SELECT name "
" FROM tbl_cost_centers "
" WHERE id = %s ",
(new_values['data']['cost_center_id'],))
row = cursor.fetchone()
if row is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.COST_CENTER_NOT_FOUND')
update_row = (" UPDATE tbl_combined_equipments "
" SET name = %s, is_input_counted = %s, is_output_counted = %s, "
" cost_center_id = %s, description = %s "
" WHERE id = %s ")
cursor.execute(update_row, (name,
is_input_counted,
is_output_counted,
cost_center_id,
description,
id_))
cnx.commit()
cursor.close()
cnx.disconnect()
resp.status = falcon.HTTP_200
# Clone a Combined Equipment
@staticmethod
@user_logger
def on_post(req, resp, id_):
"""Handles PUT requests"""
access_control(req)
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_ID')
try:
raw_json = req.stream.read().decode('utf-8')
except Exception as ex:
raise falcon.HTTPError(falcon.HTTP_400, title='API.EXCEPTION', description=ex)
new_values = json.loads(raw_json)
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor(dictionary=True)
cursor.execute(" SELECT name "
" FROM tbl_combined_equipments "
" WHERE id = %s ", (id_,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.COMBINED_EQUIPMENT_NOT_FOUND')
query = (" SELECT name, is_input_counted, is_output_counted, "
" cost_center_id, description "
" FROM tbl_combined_equipments "
" WHERE id = %s ")
cursor.execute(query, (id_,))
row = cursor.fetchone()
if row is None:
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.COMBINED_EQUIPMENT_NOT_FOUND')
else:
add_values = (" INSERT INTO tbl_combined_equipments "
" (name, uuid, is_input_counted, is_output_counted, "
" cost_center_id, description) "
" VALUES (%s, %s, %s, %s, %s, %s) ")
cursor.execute(add_values, (row['name'] + ' Copy',
str(uuid.uuid4()),
row['is_input_counted'],
row['is_output_counted'],
row['cost_center_id'],
row['description']))
new_id = cursor.lastrowid
cnx.commit()
# clone relation with meter
cursor.execute(" SELECT meter_id, is_output "
" FROM tbl_combined_equipments_meters "
" WHERE combined_equipment_id = %s ",
(id_,))
rows_meters = cursor.fetchall()
if rows_meters is not None and len(rows_meters) > 0:
add_values = (" INSERT INTO tbl_combined_equipments_meters (combined_equipment_id, meter_id, is_output) "
" VALUES ")
for row in rows_meters:
add_values += " (" + str(new_id) + ","
add_values += str(row['meter_id']) + ","
add_values += str(bool(row['is_output'])) + "), "
# trim ", " at the end of string and then execute
cursor.execute(add_values[:-2])
cnx.commit()
# clone relation with offline meter
cursor.execute(" SELECT offline_meter_id, is_output "
" FROM tbl_combined_equipments_offline_meters "
" WHERE combined_equipment_id = %s ",
(id_,))
rows_offline_meters = cursor.fetchall()
if rows_offline_meters is not None and len(rows_offline_meters) > 0:
add_values = (" INSERT INTO tbl_combined_equipments_offline_meters "
" (combined_equipment_id, offline_meter_id, is_output) "
" VALUES ")
for row in rows_offline_meters:
add_values += " (" + str(new_id) + ","
add_values += "'" + str(row['offline_meter_id']) + "',"
add_values += str(bool(row['is_output'])) + "), "
# trim ", " at the end of string and then execute
cursor.execute(add_values[:-2])
cnx.commit()
# clone relation with virtual meter
cursor.execute(" SELECT virtual_meter_id, is_output "
" FROM tbl_combined_equipments_virtual_meters "
" WHERE combined_equipment_id = %s ",
(id_,))
rows_virtual_meters = cursor.fetchall()
if rows_virtual_meters is not None and len(rows_virtual_meters) > 0:
add_values = (" INSERT INTO tbl_combined_equipments_virtual_meters "
" (combined_equipment_id, virtual_meter_id, is_output) "
" VALUES ")
for row in rows_virtual_meters:
add_values += " (" + str(new_id) + ","
add_values += str(row['virtual_meter_id']) + ","
add_values += str(bool(row['is_output'])) + "), "
# trim ", " at the end of string and then execute
cursor.execute(add_values[:-2])
cnx.commit()
# clone parameters
cursor.execute(" SELECT name, parameter_type, constant, point_id, numerator_meter_uuid, denominator_meter_uuid "
" FROM tbl_combined_equipments_parameters "
" WHERE combined_equipment_id = %s ",
(id_,))
rows_parameters = cursor.fetchall()
if rows_parameters is not None and len(rows_parameters) > 0:
add_values = (" INSERT INTO tbl_combined_equipments_parameters"
" (combined_equipment_id, name, parameter_type, constant, point_id, "
" numerator_meter_uuid, denominator_meter_uuid) "
" VALUES ")
for row in rows_parameters:
add_values += " (" + str(new_id) + ","
add_values += "'" + str(row['name']) + "',"
add_values += "'" + str(row['parameter_type']) + "',"
if row['constant'] is not None:
add_values += "'" + str(row['constant']) + "',"
else:
add_values += "null, "
if row['point_id'] is not None:
add_values += str(row['point_id']) + ","
else:
add_values += "null, "
if row['numerator_meter_uuid'] is not None:
add_values += "'" + row['numerator_meter_uuid'] + "',"
else:
add_values += "null, "
if row['denominator_meter_uuid'] is not None:
add_values += "'" + row['denominator_meter_uuid'] + "'), "
else:
add_values += "null), "
# trim ", " at the end of string and then execute
cursor.execute(add_values[:-2])
cnx.commit()
cursor.close()
cnx.disconnect()
resp.status = falcon.HTTP_201
resp.location = '/combinedequipments/' + str(new_id)
class CombinedEquipmentEquipmentCollection:
@staticmethod
def __init__():
"""Initializes CombinedEquipmentEquipmentCollection"""
pass
@staticmethod
def on_options(req, resp, id_):
resp.status = falcon.HTTP_200
@staticmethod
def on_get(req, resp, id_):
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_ID')
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor()
cursor.execute(" SELECT name "
" FROM tbl_combined_equipments "
" WHERE id = %s ", (id_,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.COMBINED_EQUIPMENT_NOT_FOUND')
query = (" SELECT e.id, e.name, e.uuid "
" FROM tbl_combined_equipments c, tbl_combined_equipments_equipments ce, tbl_equipments e "
" WHERE ce.combined_equipment_id = c.id AND e.id = ce.equipment_id AND c.id = %s "
" ORDER BY e.id ")
cursor.execute(query, (id_,))
rows = cursor.fetchall()
result = list()
if rows is not None and len(rows) > 0:
for row in rows:
meta_result = {"id": row[0], "name": row[1], "uuid": row[2]}
result.append(meta_result)
resp.text = json.dumps(result)
@staticmethod
@user_logger
def on_post(req, resp, id_):
"""Handles POST requests"""
access_control(req)
try:
raw_json = req.stream.read().decode('utf-8')
except Exception as ex:
raise falcon.HTTPError(falcon.HTTP_400, title='API.EXCEPTION', description=ex)
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_ID')
new_values = json.loads(raw_json)
if 'equipment_id' not in new_values['data'].keys() or \
not isinstance(new_values['data']['equipment_id'], int) or \
new_values['data']['equipment_id'] <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_EQUIPMENT_ID')
equipment_id = new_values['data']['equipment_id']
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor()
cursor.execute(" SELECT name "
" from tbl_combined_equipments "
" WHERE id = %s ", (id_,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.COMBINED_EQUIPMENT_NOT_FOUND')
cursor.execute(" SELECT name "
" FROM tbl_equipments "
" WHERE id = %s ", (equipment_id,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.EQUIPMENT_NOT_FOUND')
query = (" SELECT id "
" FROM tbl_combined_equipments_equipments "
" WHERE combined_equipment_id = %s AND equipment_id = %s")
cursor.execute(query, (id_, equipment_id,))
if cursor.fetchone() is not None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_400, title='API.ERROR',
description='API.COMBINED_EQUIPMENT_EQUIPMENT_RELATION_EXISTS')
add_row = (" INSERT INTO tbl_combined_equipments_equipments (combined_equipment_id, equipment_id) "
" VALUES (%s, %s) ")
cursor.execute(add_row, (id_, equipment_id,))
new_id = cursor.lastrowid
cnx.commit()
cursor.close()
cnx.disconnect()
resp.status = falcon.HTTP_201
resp.location = '/combinedequipments/' + str(id_) + '/equipments/' + str(equipment_id)
class CombinedEquipmentEquipmentItem:
@staticmethod
def __init__():
"""Initializes CombinedEquipmentEquipmentItem"""
pass
@staticmethod
def on_options(req, resp, id_, eid):
resp.status = falcon.HTTP_200
@staticmethod
@user_logger
def on_delete(req, resp, id_, eid):
access_control(req)
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_ID')
if not eid.isdigit() or int(eid) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_EQUIPMENT_ID')
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor()
cursor.execute(" SELECT name "
" FROM tbl_combined_equipments "
" WHERE id = %s ", (id_,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.COMBINED_EQUIPMENT_NOT_FOUND')
cursor.execute(" SELECT name "
" FROM tbl_equipments "
" WHERE id = %s ", (eid,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.EQUIPMENT_NOT_FOUND')
cursor.execute(" SELECT id "
" FROM tbl_combined_equipments_equipments "
" WHERE combined_equipment_id = %s AND equipment_id = %s ", (id_, eid))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.COMBINED_EQUIPMENT_EQUIPMENT_RELATION_NOT_FOUND')
cursor.execute(" DELETE FROM tbl_combined_equipments_equipments "
" WHERE combined_equipment_id = %s AND equipment_id = %s ", (id_, eid))
cnx.commit()
cursor.close()
cnx.disconnect()
resp.status = falcon.HTTP_204
class CombinedEquipmentParameterCollection:
@staticmethod
def __init__():
"""Initializes CombinedEquipmentParameterCollection"""
pass
@staticmethod
def on_options(req, resp, id_):
resp.status = falcon.HTTP_200
@staticmethod
def on_get(req, resp, id_):
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_ID')
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor(dictionary=True)
cursor.execute(" SELECT name "
" FROM tbl_combined_equipments "
" WHERE id = %s ", (id_,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.COMBINED_EQUIPMENT_NOT_FOUND')
query = (" SELECT id, name "
" FROM tbl_points ")
cursor.execute(query)
rows_points = cursor.fetchall()
point_dict = dict()
if rows_points is not None and len(rows_points) > 0:
for row in rows_points:
point_dict[row['id']] = {"id": row['id'],
"name": row['name']}
query = (" SELECT id, name, uuid "
" FROM tbl_meters ")
cursor.execute(query)
rows_meters = cursor.fetchall()
meter_dict = dict()
if rows_meters is not None and len(rows_meters) > 0:
for row in rows_meters:
meter_dict[row['uuid']] = {"type": 'meter',
"id": row['id'],
"name": row['name'],
"uuid": row['uuid']}
query = (" SELECT id, name, uuid "
" FROM tbl_offline_meters ")
cursor.execute(query)
rows_offline_meters = cursor.fetchall()
offline_meter_dict = dict()
if rows_offline_meters is not None and len(rows_offline_meters) > 0:
for row in rows_offline_meters:
offline_meter_dict[row['uuid']] = {"type": 'offline_meter',
"id": row['id'],
"name": row['name'],
"uuid": row['uuid']}
query = (" SELECT id, name, uuid "
" FROM tbl_virtual_meters ")
cursor.execute(query)
rows_virtual_meters = cursor.fetchall()
virtual_meter_dict = dict()
if rows_virtual_meters is not None and len(rows_virtual_meters) > 0:
for row in rows_virtual_meters:
virtual_meter_dict[row['uuid']] = {"type": 'virtual_meter',
"id": row['id'],
"name": row['name'],
"uuid": row['uuid']}
query = (" SELECT id, name, parameter_type, "
" constant, point_id, numerator_meter_uuid, denominator_meter_uuid "
" FROM tbl_combined_equipments_parameters "
" WHERE combined_equipment_id = %s "
" ORDER BY id ")
cursor.execute(query, (id_, ))
rows_parameters = cursor.fetchall()
result = list()
if rows_parameters is not None and len(rows_parameters) > 0:
for row in rows_parameters:
constant = None
point = None
numerator_meter = None
denominator_meter = None
if row['parameter_type'] == 'point':
point = point_dict.get(row['point_id'], None)
constant = None
numerator_meter = None
denominator_meter = None
elif row['parameter_type'] == 'constant':
constant = row['constant']
point = None
numerator_meter = None
denominator_meter = None
elif row['parameter_type'] == 'fraction':
constant = None
point = None
# find numerator meter by uuid
numerator_meter = meter_dict.get(row['numerator_meter_uuid'], None)
if numerator_meter is None:
numerator_meter = virtual_meter_dict.get(row['numerator_meter_uuid'], None)
if numerator_meter is None:
numerator_meter = offline_meter_dict.get(row['numerator_meter_uuid'], None)
# find denominator meter by uuid
denominator_meter = meter_dict.get(row['denominator_meter_uuid'], None)
if denominator_meter is None:
denominator_meter = virtual_meter_dict.get(row['denominator_meter_uuid'], None)
if denominator_meter is None:
denominator_meter = offline_meter_dict.get(row['denominator_meter_uuid'], None)
meta_result = {"id": row['id'],
"name": row['name'],
"parameter_type": row['parameter_type'],
"constant": constant,
"point": point,
"numerator_meter": numerator_meter,
"denominator_meter": denominator_meter}
result.append(meta_result)
cursor.close()
cnx.disconnect()
resp.text = json.dumps(result)
@staticmethod
@user_logger
def on_post(req, resp, id_):
"""Handles POST requests"""
access_control(req)
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_ID')
try:
raw_json = req.stream.read().decode('utf-8')
except Exception as ex:
raise falcon.HTTPError(falcon.HTTP_400, title='API.ERROR', description=ex)
new_values = json.loads(raw_json)
if 'name' not in new_values['data'].keys() or \
not isinstance(new_values['data']['name'], str) or \
len(str.strip(new_values['data']['name'])) == 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_PARAMETER_NAME')
name = str.strip(new_values['data']['name'])
if 'parameter_type' not in new_values['data'].keys() or \
not isinstance(new_values['data']['parameter_type'], str) or \
len(str.strip(new_values['data']['parameter_type'])) == 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_PARAMETER_TYPE')
parameter_type = str.strip(new_values['data']['parameter_type'])
if parameter_type not in ('constant', 'point', 'fraction'):
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_PARAMETER_TYPE')
constant = None
if 'constant' in new_values['data'].keys():
if new_values['data']['constant'] is not None and \
isinstance(new_values['data']['constant'], str) and \
len(str.strip(new_values['data']['constant'])) > 0:
constant = str.strip(new_values['data']['constant'])
point_id = None
if 'point_id' in new_values['data'].keys():
if new_values['data']['point_id'] is not None and \
new_values['data']['point_id'] <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_POINT_ID')
point_id = new_values['data']['point_id']
numerator_meter_uuid = None
if 'numerator_meter_uuid' in new_values['data'].keys():
if new_values['data']['numerator_meter_uuid'] is not None and \
isinstance(new_values['data']['numerator_meter_uuid'], str) and \
len(str.strip(new_values['data']['numerator_meter_uuid'])) > 0:
numerator_meter_uuid = str.strip(new_values['data']['numerator_meter_uuid'])
denominator_meter_uuid = None
if 'denominator_meter_uuid' in new_values['data'].keys():
if new_values['data']['denominator_meter_uuid'] is not None and \
isinstance(new_values['data']['denominator_meter_uuid'], str) and \
len(str.strip(new_values['data']['denominator_meter_uuid'])) > 0:
denominator_meter_uuid = str.strip(new_values['data']['denominator_meter_uuid'])
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor(dictionary=True)
cursor.execute(" SELECT name "
" FROM tbl_combined_equipments "
" WHERE id = %s ", (id_,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_400, title='API.NOT_FOUND',
description='API.COMBINED_EQUIPMENT_NOT_FOUND')
cursor.execute(" SELECT name "
" FROM tbl_combined_equipments_parameters "
" WHERE name = %s AND combined_equipment_id = %s ", (name, id_))
if cursor.fetchone() is not None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.COMBINED_EQUIPMENT_PARAMETER_NAME_IS_ALREADY_IN_USE')
# validate by parameter type
if parameter_type == 'point':
if point_id is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_POINT_ID')
query = (" SELECT id, name "
" FROM tbl_points "
" WHERE id = %s ")
cursor.execute(query, (point_id, ))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.POINT_NOT_FOUND')
elif parameter_type == 'constant':
if constant is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_CONSTANT_VALUE')
elif parameter_type == 'fraction':
query = (" SELECT id, name, uuid "
" FROM tbl_meters ")
cursor.execute(query)
rows_meters = cursor.fetchall()
meter_dict = dict()
if rows_meters is not None and len(rows_meters) > 0:
for row in rows_meters:
meter_dict[row['uuid']] = {"type": 'meter',
"id": row['id'],
"name": row['name'],
"uuid": row['uuid']}
query = (" SELECT id, name, uuid "
" FROM tbl_offline_meters ")
cursor.execute(query)
rows_offline_meters = cursor.fetchall()
offline_meter_dict = dict()
if rows_offline_meters is not None and len(rows_offline_meters) > 0:
for row in rows_offline_meters:
offline_meter_dict[row['uuid']] = {"type": 'offline_meter',
"id": row['id'],
"name": row['name'],
"uuid": row['uuid']}
query = (" SELECT id, name, uuid "
" FROM tbl_virtual_meters ")
cursor.execute(query)
rows_virtual_meters = cursor.fetchall()
virtual_meter_dict = dict()
if rows_virtual_meters is not None and len(rows_virtual_meters) > 0:
for row in rows_virtual_meters:
virtual_meter_dict[row['uuid']] = {"type": 'virtual_meter',
"id": row['id'],
"name": row['name'],
"uuid": row['uuid']}
# validate numerator meter uuid
if meter_dict.get(numerator_meter_uuid) is None and \
virtual_meter_dict.get(numerator_meter_uuid) is None and \
offline_meter_dict.get(numerator_meter_uuid) is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_NUMERATOR_METER_UUID')
# validate denominator meter uuid
if denominator_meter_uuid == numerator_meter_uuid:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_DENOMINATOR_METER_UUID')
if denominator_meter_uuid not in meter_dict and \
denominator_meter_uuid not in virtual_meter_dict and \
denominator_meter_uuid not in offline_meter_dict:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_DENOMINATOR_METER_UUID')
add_values = (" INSERT INTO tbl_combined_equipments_parameters "
" (combined_equipment_id, name, parameter_type, constant, "
" point_id, numerator_meter_uuid, denominator_meter_uuid) "
" VALUES (%s, %s, %s, %s, %s, %s, %s) ")
cursor.execute(add_values, (id_,
name,
parameter_type,
constant,
point_id,
numerator_meter_uuid,
denominator_meter_uuid))
new_id = cursor.lastrowid
cnx.commit()
cursor.close()
cnx.disconnect()
resp.status = falcon.HTTP_201
resp.location = '/combinedequipments/' + str(id_) + 'parameters/' + str(new_id)
class CombinedEquipmentParameterItem:
@staticmethod
def __init__():
""""Initializes CombinedEquipmentParameterItem"""
pass
@staticmethod
def on_options(req, resp, id_, pid):
resp.status = falcon.HTTP_200
@staticmethod
def on_get(req, resp, id_, pid):
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_ID')
if not pid.isdigit() or int(pid) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_PARAMETER_ID')
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor(dictionary=True)
query = (" SELECT id, name "
" FROM tbl_points ")
cursor.execute(query)
rows_points = cursor.fetchall()
point_dict = dict()
if rows_points is not None and len(rows_points) > 0:
for row in rows_points:
point_dict[row['id']] = {"id": row['id'],
"name": row['name']}
query = (" SELECT id, name, uuid "
" FROM tbl_meters ")
cursor.execute(query)
rows_meters = cursor.fetchall()
meter_dict = dict()
if rows_meters is not None and len(rows_meters) > 0:
for row in rows_meters:
meter_dict[row['uuid']] = {"type": 'meter',
"id": row['id'],
"name": row['name'],
"uuid": row['uuid']}
query = (" SELECT id, name, uuid "
" FROM tbl_offline_meters ")
cursor.execute(query)
rows_offline_meters = cursor.fetchall()
offline_meter_dict = dict()
if rows_offline_meters is not None and len(rows_offline_meters) > 0:
for row in rows_offline_meters:
offline_meter_dict[row['uuid']] = {"type": 'offline_meter',
"id": row['id'],
"name": row['name'],
"uuid": row['uuid']}
query = (" SELECT id, name, uuid "
" FROM tbl_virtual_meters ")
cursor.execute(query)
rows_virtual_meters = cursor.fetchall()
virtual_meter_dict = dict()
if rows_virtual_meters is not None and len(rows_virtual_meters) > 0:
for row in rows_virtual_meters:
virtual_meter_dict[row['uuid']] = {"type": 'virtual_meter',
"id": row['id'],
"name": row['name'],
"uuid": row['uuid']}
query = (" SELECT id, name, parameter_type, "
" constant, point_id, numerator_meter_uuid, denominator_meter_uuid "
" FROM tbl_combined_equipments_parameters "
" WHERE combined_equipment_id = %s AND id = %s ")
cursor.execute(query, (id_, pid))
row = cursor.fetchone()
cursor.close()
cnx.disconnect()
if row is None:
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.COMBINED_EQUIPMENT_PARAMETER_NOT_FOUND_OR_NOT_MATCH')
else:
constant = None
point = None
numerator_meter = None
denominator_meter = None
if row['parameter_type'] == 'point':
point = point_dict.get(row['point_id'], None)
constant = None
numerator_meter = None
denominator_meter = None
elif row['parameter_type'] == 'constant':
constant = row['constant']
point = None
numerator_meter = None
denominator_meter = None
elif row['parameter_type'] == 'fraction':
constant = None
point = None
# find numerator meter by uuid
numerator_meter = meter_dict.get(row['numerator_meter_uuid'], None)
if numerator_meter is None:
numerator_meter = virtual_meter_dict.get(row['numerator_meter_uuid'], None)
if numerator_meter is None:
numerator_meter = offline_meter_dict.get(row['numerator_meter_uuid'], None)
# find denominator meter by uuid
denominator_meter = meter_dict.get(row['denominator_meter_uuid'], None)
if denominator_meter is None:
denominator_meter = virtual_meter_dict.get(row['denominator_meter_uuid'], None)
if denominator_meter is None:
denominator_meter = offline_meter_dict.get(row['denominator_meter_uuid'], None)
meta_result = {"id": row['id'],
"name": row['name'],
"parameter_type": row['parameter_type'],
"constant": constant,
"point": point,
"numerator_meter": numerator_meter,
"denominator_meter": denominator_meter}
resp.text = json.dumps(meta_result)
@staticmethod
@user_logger
def on_delete(req, resp, id_, pid):
access_control(req)
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_ID')
if not pid.isdigit() or int(pid) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_PARAMETER_ID')
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor()
cursor.execute(" SELECT name "
" FROM tbl_combined_equipments "
" WHERE id = %s ",
(id_,))
row = cursor.fetchone()
if row is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_400,
title='API.NOT_FOUND',
description='API.COMBINED_EQUIPMENT_NOT_FOUND')
cursor.execute(" SELECT name "
" FROM tbl_combined_equipments_parameters "
" WHERE combined_equipment_id = %s AND id = %s ",
(id_, pid,))
row = cursor.fetchone()
if row is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_400,
title='API.NOT_FOUND',
description='API.COMBINED_EQUIPMENT_PARAMETER_NOT_FOUND_OR_NOT_MATCH')
cursor.execute(" DELETE FROM tbl_combined_equipments_parameters "
" WHERE id = %s ", (pid, ))
cnx.commit()
cursor.close()
cnx.disconnect()
resp.status = falcon.HTTP_204
@staticmethod
@user_logger
def on_put(req, resp, id_, pid):
"""Handles PUT requests"""
access_control(req)
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_ID')
if not pid.isdigit() or int(pid) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_PARAMETER_ID')
try:
raw_json = req.stream.read().decode('utf-8')
except Exception as ex:
raise falcon.HTTPError(falcon.HTTP_400, title='API.ERROR', description=ex)
new_values = json.loads(raw_json)
if 'name' not in new_values['data'].keys() or \
not isinstance(new_values['data']['name'], str) or \
len(str.strip(new_values['data']['name'])) == 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_PARAMETER_NAME')
name = str.strip(new_values['data']['name'])
if 'parameter_type' not in new_values['data'].keys() or \
not isinstance(new_values['data']['parameter_type'], str) or \
len(str.strip(new_values['data']['parameter_type'])) == 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_PARAMETER_TYPE')
parameter_type = str.strip(new_values['data']['parameter_type'])
if parameter_type not in ('constant', 'point', 'fraction'):
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_PARAMETER_TYPE')
constant = None
if 'constant' in new_values['data'].keys():
if new_values['data']['constant'] is not None and \
isinstance(new_values['data']['constant'], str) and \
len(str.strip(new_values['data']['constant'])) > 0:
constant = str.strip(new_values['data']['constant'])
point_id = None
if 'point_id' in new_values['data'].keys():
if new_values['data']['point_id'] is not None and \
new_values['data']['point_id'] <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_POINT_ID')
point_id = new_values['data']['point_id']
numerator_meter_uuid = None
if 'numerator_meter_uuid' in new_values['data'].keys():
if new_values['data']['numerator_meter_uuid'] is not None and \
isinstance(new_values['data']['numerator_meter_uuid'], str) and \
len(str.strip(new_values['data']['numerator_meter_uuid'])) > 0:
numerator_meter_uuid = str.strip(new_values['data']['numerator_meter_uuid'])
denominator_meter_uuid = None
if 'denominator_meter_uuid' in new_values['data'].keys():
if new_values['data']['denominator_meter_uuid'] is not None and \
isinstance(new_values['data']['denominator_meter_uuid'], str) and \
len(str.strip(new_values['data']['denominator_meter_uuid'])) > 0:
denominator_meter_uuid = str.strip(new_values['data']['denominator_meter_uuid'])
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor(dictionary=True)
cursor.execute(" SELECT name "
" FROM tbl_combined_equipments "
" WHERE id = %s ", (id_,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_400, title='API.NOT_FOUND',
description='API.COMBINED_EQUIPMENT_NOT_FOUND')
cursor.execute(" SELECT name "
" FROM tbl_combined_equipments_parameters "
" WHERE combined_equipment_id = %s AND id = %s ",
(id_, pid,))
row = cursor.fetchone()
if row is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_400,
title='API.NOT_FOUND',
description='API.COMBINED_EQUIPMENT_PARAMETER_NOT_FOUND_OR_NOT_MATCH')
cursor.execute(" SELECT name "
" FROM tbl_combined_equipments_parameters "
" WHERE name = %s AND combined_equipment_id = %s AND id != %s ", (name, id_, pid))
row = cursor.fetchone()
if row is not None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.COMBINED_EQUIPMENT_PARAMETER_NAME_IS_ALREADY_IN_USE')
# validate by parameter type
if parameter_type == 'point':
if point_id is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_POINT_ID')
query = (" SELECT id, name "
" FROM tbl_points "
" WHERE id = %s ")
cursor.execute(query, (point_id, ))
row = cursor.fetchone()
if row is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.POINT_NOT_FOUND')
elif parameter_type == 'constant':
if constant is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_CONSTANT_VALUE')
elif parameter_type == 'fraction':
query = (" SELECT id, name, uuid "
" FROM tbl_meters ")
cursor.execute(query)
rows_meters = cursor.fetchall()
meter_dict = dict()
if rows_meters is not None and len(rows_meters) > 0:
for row in rows_meters:
meter_dict[row['uuid']] = {"type": 'meter',
"id": row['id'],
"name": row['name'],
"uuid": row['uuid']}
query = (" SELECT id, name, uuid "
" FROM tbl_offline_meters ")
cursor.execute(query)
rows_offline_meters = cursor.fetchall()
offline_meter_dict = dict()
if rows_offline_meters is not None and len(rows_offline_meters) > 0:
for row in rows_offline_meters:
offline_meter_dict[row['uuid']] = {"type": 'offline_meter',
"id": row['id'],
"name": row['name'],
"uuid": row['uuid']}
query = (" SELECT id, name, uuid "
" FROM tbl_virtual_meters ")
cursor.execute(query)
rows_virtual_meters = cursor.fetchall()
virtual_meter_dict = dict()
if rows_virtual_meters is not None and len(rows_virtual_meters) > 0:
for row in rows_virtual_meters:
virtual_meter_dict[row['uuid']] = {"type": 'virtual_meter',
"id": row['id'],
"name": row['name'],
"uuid": row['uuid']}
# validate numerator meter uuid
if meter_dict.get(numerator_meter_uuid) is None and \
virtual_meter_dict.get(numerator_meter_uuid) is None and \
offline_meter_dict.get(numerator_meter_uuid) is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_NUMERATOR_METER_UUID')
# validate denominator meter uuid
if denominator_meter_uuid == numerator_meter_uuid:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_DENOMINATOR_METER_UUID')
if denominator_meter_uuid not in meter_dict and \
denominator_meter_uuid not in virtual_meter_dict and \
denominator_meter_uuid not in offline_meter_dict:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_DENOMINATOR_METER_UUID')
add_values = (" UPDATE tbl_combined_equipments_parameters "
" SET name = %s , parameter_type = %s, constant = %s, "
" point_id = %s, numerator_meter_uuid = %s, denominator_meter_uuid =%s "
" WHERE id = %s ")
cursor.execute(add_values, (name,
parameter_type,
constant,
point_id,
numerator_meter_uuid,
denominator_meter_uuid,
pid))
cnx.commit()
cursor.close()
cnx.disconnect()
resp.status = falcon.HTTP_200
class CombinedEquipmentMeterCollection:
@staticmethod
def __init__():
""""Initializes CombinedEquipmentMeterCollection"""
pass
@staticmethod
def on_options(req, resp, id_):
resp.status = falcon.HTTP_200
@staticmethod
def on_get(req, resp, id_):
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_ID')
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor(dictionary=True)
cursor.execute(" SELECT name "
" FROM tbl_combined_equipments "
" WHERE id = %s ", (id_,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.COMBINED_EQUIPMENT_NOT_FOUND')
query = (" SELECT id, name, uuid "
" FROM tbl_energy_categories ")
cursor.execute(query)
rows_energy_categories = cursor.fetchall()
energy_category_dict = dict()
if rows_energy_categories is not None and len(rows_energy_categories) > 0:
for row in rows_energy_categories:
energy_category_dict[row['id']] = {"id": row['id'],
"name": row['name'],
"uuid": row['uuid']}
query = (" SELECT m.id, m.name, m.uuid, m.energy_category_id, em.is_output "
" FROM tbl_combined_equipments e, tbl_combined_equipments_meters em, tbl_meters m "
" WHERE em.combined_equipment_id = e.id AND m.id = em.meter_id AND e.id = %s "
" ORDER BY m.id ")
cursor.execute(query, (id_,))
rows = cursor.fetchall()
result = list()
if rows is not None and len(rows) > 0:
for row in rows:
energy_category = energy_category_dict.get(row['energy_category_id'], None)
meta_result = {"id": row['id'], "name": row['name'], "uuid": row['uuid'],
"energy_category": energy_category,
"is_output": bool(row['is_output'])}
result.append(meta_result)
resp.text = json.dumps(result)
@staticmethod
@user_logger
def on_post(req, resp, id_):
"""Handles POST requests"""
access_control(req)
try:
raw_json = req.stream.read().decode('utf-8')
except Exception as ex:
raise falcon.HTTPError(falcon.HTTP_400, title='API.EXCEPTION', description=ex)
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_ID')
new_values = json.loads(raw_json)
if 'meter_id' not in new_values['data'].keys() or \
not isinstance(new_values['data']['meter_id'], int) or \
new_values['data']['meter_id'] <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_METER_ID')
meter_id = new_values['data']['meter_id']
if 'is_output' not in new_values['data'].keys() or \
not isinstance(new_values['data']['is_output'], bool):
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_IS_OUTPUT_VALUE')
is_output = new_values['data']['is_output']
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor()
cursor.execute(" SELECT name "
" from tbl_combined_equipments "
" WHERE id = %s ", (id_,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.COMBINED_EQUIPMENT_NOT_FOUND')
cursor.execute(" SELECT name "
" FROM tbl_meters "
" WHERE id = %s ", (meter_id,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.METER_NOT_FOUND')
query = (" SELECT id "
" FROM tbl_combined_equipments_meters "
" WHERE combined_equipment_id = %s AND meter_id = %s")
cursor.execute(query, (id_, meter_id,))
if cursor.fetchone() is not None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_400, title='API.ERROR',
description='API.COMBINED_EQUIPMENT_METER_RELATION_EXISTS')
add_row = (" INSERT INTO tbl_combined_equipments_meters (combined_equipment_id, meter_id, is_output ) "
" VALUES (%s, %s, %s) ")
cursor.execute(add_row, (id_, meter_id, is_output))
new_id = cursor.lastrowid
cnx.commit()
cursor.close()
cnx.disconnect()
resp.status = falcon.HTTP_201
resp.location = '/combinedequipments/' + str(id_) + '/meters/' + str(meter_id)
class CombinedEquipmentMeterItem:
@staticmethod
def __init__():
""""Initializes CombinedEquipmentMeterItem"""
pass
@staticmethod
def on_options(req, resp, id_, mid):
resp.status = falcon.HTTP_200
@staticmethod
@user_logger
def on_delete(req, resp, id_, mid):
access_control(req)
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_ID')
if not mid.isdigit() or int(mid) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_METER_ID')
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor()
cursor.execute(" SELECT name "
" FROM tbl_combined_equipments "
" WHERE id = %s ", (id_,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.COMBINED_EQUIPMENT_NOT_FOUND')
cursor.execute(" SELECT name "
" FROM tbl_meters "
" WHERE id = %s ", (mid,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.METER_NOT_FOUND')
cursor.execute(" SELECT id "
" FROM tbl_combined_equipments_meters "
" WHERE combined_equipment_id = %s AND meter_id = %s ", (id_, mid))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.COMBINED_EQUIPMENT_METER_RELATION_NOT_FOUND')
cursor.execute(" DELETE FROM tbl_combined_equipments_meters "
" WHERE combined_equipment_id = %s AND meter_id = %s ", (id_, mid))
cnx.commit()
cursor.close()
cnx.disconnect()
resp.status = falcon.HTTP_204
class CombinedEquipmentOfflineMeterCollection:
@staticmethod
def __init__():
""""Initializes CombinedEquipmentOfflineMeterCollection"""
pass
@staticmethod
def on_options(req, resp, id_):
resp.status = falcon.HTTP_200
@staticmethod
def on_get(req, resp, id_):
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_ID')
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor(dictionary=True)
cursor.execute(" SELECT name "
" FROM tbl_combined_equipments "
" WHERE id = %s ", (id_,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.COMBINED_EQUIPMENT_NOT_FOUND')
query = (" SELECT id, name, uuid "
" FROM tbl_energy_categories ")
cursor.execute(query)
rows_energy_categories = cursor.fetchall()
energy_category_dict = dict()
if rows_energy_categories is not None and len(rows_energy_categories) > 0:
for row in rows_energy_categories:
energy_category_dict[row['id']] = {"id": row['id'],
"name": row['name'],
"uuid": row['uuid']}
query = (" SELECT m.id, m.name, m.uuid, m.energy_category_id, em.is_output "
" FROM tbl_combined_equipments e, tbl_combined_equipments_offline_meters em, tbl_offline_meters m "
" WHERE em.combined_equipment_id = e.id AND m.id = em.offline_meter_id AND e.id = %s "
" ORDER BY m.id ")
cursor.execute(query, (id_,))
rows = cursor.fetchall()
result = list()
if rows is not None and len(rows) > 0:
for row in rows:
energy_category = energy_category_dict.get(row['energy_category_id'], None)
meta_result = {"id": row['id'], "name": row['name'], "uuid": row['uuid'],
"energy_category": energy_category,
"is_output": bool(row['is_output'])}
result.append(meta_result)
resp.text = json.dumps(result)
@staticmethod
@user_logger
def on_post(req, resp, id_):
"""Handles POST requests"""
access_control(req)
try:
raw_json = req.stream.read().decode('utf-8')
except Exception as ex:
raise falcon.HTTPError(falcon.HTTP_400, title='API.EXCEPTION', description=ex)
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_ID')
new_values = json.loads(raw_json)
if 'offline_meter_id' not in new_values['data'].keys() or \
not isinstance(new_values['data']['offline_meter_id'], int) or \
new_values['data']['offline_meter_id'] <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_OFFLINE_METER_ID')
offline_meter_id = new_values['data']['offline_meter_id']
if 'is_output' not in new_values['data'].keys() or \
not isinstance(new_values['data']['is_output'], bool):
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_IS_OUTPUT_VALUE')
is_output = new_values['data']['is_output']
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor()
cursor.execute(" SELECT name "
" from tbl_combined_equipments "
" WHERE id = %s ", (id_,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.COMBINED_EQUIPMENT_NOT_FOUND')
cursor.execute(" SELECT name "
" FROM tbl_offline_meters "
" WHERE id = %s ", (offline_meter_id,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.OFFLINE_METER_NOT_FOUND')
query = (" SELECT id "
" FROM tbl_combined_equipments_offline_meters "
" WHERE combined_equipment_id = %s AND offline_meter_id = %s")
cursor.execute(query, (id_, offline_meter_id,))
if cursor.fetchone() is not None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_400, title='API.ERROR',
description='API.COMBINED_EQUIPMENT_OFFLINE_METER_RELATION_EXISTS')
add_row = (" INSERT INTO tbl_combined_equipments_offline_meters "
" (combined_equipment_id, offline_meter_id, is_output ) "
" VALUES (%s, %s, %s) ")
cursor.execute(add_row, (id_, offline_meter_id, is_output))
new_id = cursor.lastrowid
cnx.commit()
cursor.close()
cnx.disconnect()
resp.status = falcon.HTTP_201
resp.location = '/combinedequipments/' + str(id_) + '/offlinemeters/' + str(offline_meter_id)
class CombinedEquipmentOfflineMeterItem:
@staticmethod
def __init__():
""""Initializes CombinedEquipmentOfflineMeterItem"""
pass
@staticmethod
def on_options(req, resp, id_, mid):
resp.status = falcon.HTTP_200
@staticmethod
@user_logger
def on_delete(req, resp, id_, mid):
access_control(req)
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_ID')
if not mid.isdigit() or int(mid) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_OFFLINE_METER_ID')
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor()
cursor.execute(" SELECT name "
" FROM tbl_combined_equipments "
" WHERE id = %s ", (id_,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.COMBINED_EQUIPMENT_NOT_FOUND')
cursor.execute(" SELECT name "
" FROM tbl_offline_meters "
" WHERE id = %s ", (mid,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.OFFLINE_METER_NOT_FOUND')
cursor.execute(" SELECT id "
" FROM tbl_combined_equipments_offline_meters "
" WHERE combined_equipment_id = %s AND offline_meter_id = %s ", (id_, mid))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.COMBINED_EQUIPMENT_OFFLINE_METER_RELATION_NOT_FOUND')
cursor.execute(" DELETE FROM tbl_combined_equipments_offline_meters "
" WHERE combined_equipment_id = %s AND offline_meter_id = %s ", (id_, mid))
cnx.commit()
cursor.close()
cnx.disconnect()
resp.status = falcon.HTTP_204
class CombinedEquipmentVirtualMeterCollection:
@staticmethod
def __init__():
""""Initializes CombinedEquipmentVirtualMeterCollection"""
pass
@staticmethod
def on_options(req, resp, id_):
resp.status = falcon.HTTP_200
@staticmethod
def on_get(req, resp, id_):
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_ID')
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor(dictionary=True)
cursor.execute(" SELECT name "
" FROM tbl_combined_equipments "
" WHERE id = %s ", (id_,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.COMBINED_EQUIPMENT_NOT_FOUND')
query = (" SELECT id, name, uuid "
" FROM tbl_energy_categories ")
cursor.execute(query)
rows_energy_categories = cursor.fetchall()
energy_category_dict = dict()
if rows_energy_categories is not None and len(rows_energy_categories) > 0:
for row in rows_energy_categories:
energy_category_dict[row['id']] = {"id": row['id'],
"name": row['name'],
"uuid": row['uuid']}
query = (" SELECT m.id, m.name, m.uuid, m.energy_category_id, em.is_output "
" FROM tbl_combined_equipments e, tbl_combined_equipments_virtual_meters em, tbl_virtual_meters m "
" WHERE em.combined_equipment_id = e.id AND m.id = em.virtual_meter_id AND e.id = %s "
" ORDER BY m.id ")
cursor.execute(query, (id_,))
rows = cursor.fetchall()
result = list()
if rows is not None and len(rows) > 0:
for row in rows:
energy_category = energy_category_dict.get(row['energy_category_id'], None)
meta_result = {"id": row['id'], "name": row['name'], "uuid": row['uuid'],
"energy_category": energy_category,
"is_output": bool(row['is_output'])}
result.append(meta_result)
resp.text = json.dumps(result)
@staticmethod
@user_logger
def on_post(req, resp, id_):
"""Handles POST requests"""
access_control(req)
try:
raw_json = req.stream.read().decode('utf-8')
except Exception as ex:
raise falcon.HTTPError(falcon.HTTP_400, title='API.EXCEPTION', description=ex)
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_ID')
new_values = json.loads(raw_json)
if 'virtual_meter_id' not in new_values['data'].keys() or \
not isinstance(new_values['data']['virtual_meter_id'], int) or \
new_values['data']['virtual_meter_id'] <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_VIRTUAL_METER_ID')
virtual_meter_id = new_values['data']['virtual_meter_id']
if 'is_output' not in new_values['data'].keys() or \
not isinstance(new_values['data']['is_output'], bool):
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_IS_OUTPUT_VALUE')
is_output = new_values['data']['is_output']
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor()
cursor.execute(" SELECT name "
" from tbl_combined_equipments "
" WHERE id = %s ", (id_,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.COMBINED_EQUIPMENT_NOT_FOUND')
cursor.execute(" SELECT name "
" FROM tbl_virtual_meters "
" WHERE id = %s ", (virtual_meter_id,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.VIRTUAL_METER_NOT_FOUND')
query = (" SELECT id "
" FROM tbl_combined_equipments_virtual_meters "
" WHERE combined_equipment_id = %s AND virtual_meter_id = %s")
cursor.execute(query, (id_, virtual_meter_id,))
if cursor.fetchone() is not None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_400, title='API.ERROR',
description='API.COMBINED_EQUIPMENT_VIRTUAL_METER_RELATION_EXISTS')
add_row = (" INSERT INTO tbl_combined_equipments_virtual_meters "
" (combined_equipment_id, virtual_meter_id, is_output ) "
" VALUES (%s, %s, %s) ")
cursor.execute(add_row, (id_, virtual_meter_id, is_output))
new_id = cursor.lastrowid
cnx.commit()
cursor.close()
cnx.disconnect()
resp.status = falcon.HTTP_201
resp.location = '/combinedequipments/' + str(id_) + '/virtualmeters/' + str(virtual_meter_id)
class CombinedEquipmentVirtualMeterItem:
@staticmethod
def __init__():
""""Initializes CombinedEquipmentVirtualMeterItem"""
pass
@staticmethod
def on_options(req, resp, id_, mid):
resp.status = falcon.HTTP_200
@staticmethod
@user_logger
def on_delete(req, resp, id_, mid):
access_control(req)
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_COMBINED_EQUIPMENT_ID')
if not mid.isdigit() or int(mid) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_VIRTUAL_METER_ID')
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor()
cursor.execute(" SELECT name "
" FROM tbl_combined_equipments "
" WHERE id = %s ", (id_,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.COMBINED_EQUIPMENT_NOT_FOUND')
cursor.execute(" SELECT name "
" FROM tbl_virtual_meters "
" WHERE id = %s ", (mid,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.VIRTUAL_METER_NOT_FOUND')
cursor.execute(" SELECT id "
" FROM tbl_combined_equipments_virtual_meters "
" WHERE combined_equipment_id = %s AND virtual_meter_id = %s ", (id_, mid))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.COMBINED_EQUIPMENT_VIRTUAL_METER_RELATION_NOT_FOUND')
cursor.execute(" DELETE FROM tbl_combined_equipments_virtual_meters "
" WHERE combined_equipment_id = %s AND virtual_meter_id = %s ", (id_, mid))
cnx.commit()
cursor.close()
cnx.disconnect()
resp.status = falcon.HTTP_204
|
[
"uuid.uuid4",
"falcon.HTTPError",
"simplejson.dumps",
"core.useractivity.access_control",
"simplejson.loads"
] |
[((2253, 2271), 'simplejson.dumps', 'json.dumps', (['result'], {}), '(result)\n', (2263, 2271), True, 'import simplejson as json\n'), ((2380, 2399), 'core.useractivity.access_control', 'access_control', (['req'], {}), '(req)\n', (2394, 2399), False, 'from core.useractivity import user_logger, access_control\n'), ((2611, 2631), 'simplejson.loads', 'json.loads', (['raw_json'], {}), '(raw_json)\n', (2621, 2631), True, 'import simplejson as json\n'), ((8606, 8629), 'simplejson.dumps', 'json.dumps', (['meta_result'], {}), '(meta_result)\n', (8616, 8629), True, 'import simplejson as json\n'), ((8709, 8728), 'core.useractivity.access_control', 'access_control', (['req'], {}), '(req)\n', (8723, 8728), False, 'from core.useractivity import user_logger, access_control\n'), ((12020, 12039), 'core.useractivity.access_control', 'access_control', (['req'], {}), '(req)\n', (12034, 12039), False, 'from core.useractivity import user_logger, access_control\n'), ((12463, 12483), 'simplejson.loads', 'json.loads', (['raw_json'], {}), '(raw_json)\n', (12473, 12483), True, 'import simplejson as json\n'), ((16568, 16587), 'core.useractivity.access_control', 'access_control', (['req'], {}), '(req)\n', (16582, 16587), False, 'from core.useractivity import user_logger, access_control\n'), ((17011, 17031), 'simplejson.loads', 'json.loads', (['raw_json'], {}), '(raw_json)\n', (17021, 17031), True, 'import simplejson as json\n'), ((25109, 25127), 'simplejson.dumps', 'json.dumps', (['result'], {}), '(result)\n', (25119, 25127), True, 'import simplejson as json\n'), ((25241, 25260), 'core.useractivity.access_control', 'access_control', (['req'], {}), '(req)\n', (25255, 25260), False, 'from core.useractivity import user_logger, access_control\n'), ((25685, 25705), 'simplejson.loads', 'json.loads', (['raw_json'], {}), '(raw_json)\n', (25695, 25705), True, 'import simplejson as json\n'), ((28257, 28276), 'core.useractivity.access_control', 'access_control', (['req'], {}), '(req)\n', (28271, 28276), False, 'from core.useractivity import user_logger, access_control\n'), ((36136, 36154), 'simplejson.dumps', 'json.dumps', (['result'], {}), '(result)\n', (36146, 36154), True, 'import simplejson as json\n'), ((36268, 36287), 'core.useractivity.access_control', 'access_control', (['req'], {}), '(req)\n', (36282, 36287), False, 'from core.useractivity import user_logger, access_control\n'), ((36707, 36727), 'simplejson.loads', 'json.loads', (['raw_json'], {}), '(raw_json)\n', (36717, 36727), True, 'import simplejson as json\n'), ((50854, 50877), 'simplejson.dumps', 'json.dumps', (['meta_result'], {}), '(meta_result)\n', (50864, 50877), True, 'import simplejson as json\n'), ((50962, 50981), 'core.useractivity.access_control', 'access_control', (['req'], {}), '(req)\n', (50976, 50981), False, 'from core.useractivity import user_logger, access_control\n'), ((52873, 52892), 'core.useractivity.access_control', 'access_control', (['req'], {}), '(req)\n', (52887, 52892), False, 'from core.useractivity import user_logger, access_control\n'), ((53532, 53552), 'simplejson.loads', 'json.loads', (['raw_json'], {}), '(raw_json)\n', (53542, 53552), True, 'import simplejson as json\n'), ((65063, 65081), 'simplejson.dumps', 'json.dumps', (['result'], {}), '(result)\n', (65073, 65081), True, 'import simplejson as json\n'), ((65195, 65214), 'core.useractivity.access_control', 'access_control', (['req'], {}), '(req)\n', (65209, 65214), False, 'from core.useractivity import user_logger, access_control\n'), ((65639, 65659), 'simplejson.loads', 'json.loads', (['raw_json'], {}), '(raw_json)\n', (65649, 65659), True, 'import simplejson as json\n'), ((68498, 68517), 'core.useractivity.access_control', 'access_control', (['req'], {}), '(req)\n', (68512, 68517), False, 'from core.useractivity import user_logger, access_control\n'), ((72995, 73013), 'simplejson.dumps', 'json.dumps', (['result'], {}), '(result)\n', (73005, 73013), True, 'import simplejson as json\n'), ((73127, 73146), 'core.useractivity.access_control', 'access_control', (['req'], {}), '(req)\n', (73141, 73146), False, 'from core.useractivity import user_logger, access_control\n'), ((73571, 73591), 'simplejson.loads', 'json.loads', (['raw_json'], {}), '(raw_json)\n', (73581, 73591), True, 'import simplejson as json\n'), ((76610, 76629), 'core.useractivity.access_control', 'access_control', (['req'], {}), '(req)\n', (76624, 76629), False, 'from core.useractivity import user_logger, access_control\n'), ((81171, 81189), 'simplejson.dumps', 'json.dumps', (['result'], {}), '(result)\n', (81181, 81189), True, 'import simplejson as json\n'), ((81303, 81322), 'core.useractivity.access_control', 'access_control', (['req'], {}), '(req)\n', (81317, 81322), False, 'from core.useractivity import user_logger, access_control\n'), ((81747, 81767), 'simplejson.loads', 'json.loads', (['raw_json'], {}), '(raw_json)\n', (81757, 81767), True, 'import simplejson as json\n'), ((84786, 84805), 'core.useractivity.access_control', 'access_control', (['req'], {}), '(req)\n', (84800, 84805), False, 'from core.useractivity import user_logger, access_control\n'), ((2841, 2955), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_NAME"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_NAME')\n", (2857, 2955), False, 'import falcon\n'), ((3204, 3317), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_IS_INPUT_COUNTED_VALUE"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_IS_INPUT_COUNTED_VALUE')\n", (3220, 3317), False, 'import falcon\n'), ((3581, 3695), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_IS_OUTPUT_COUNTED_VALUE"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_IS_OUTPUT_COUNTED_VALUE')\n", (3597, 3695), False, 'import falcon\n'), ((4017, 4122), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COST_CENTER_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COST_CENTER_ID')\n", (4033, 4122), False, 'import falcon\n'), ((4885, 5009), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.COMBINED_EQUIPMENT_NAME_IS_ALREADY_IN_USE"""'}), "(falcon.HTTP_404, title='API.BAD_REQUEST', description=\n 'API.COMBINED_EQUIPMENT_NAME_IS_ALREADY_IN_USE')\n", (4901, 5009), False, 'import falcon\n'), ((6687, 6799), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_ID')\n", (6703, 6799), False, 'import falcon\n'), ((7867, 7976), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COMBINED_EQUIPMENT_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.COMBINED_EQUIPMENT_NOT_FOUND')\n", (7883, 7976), False, 'import falcon\n'), ((8794, 8906), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_ID')\n", (8810, 8906), False, 'import falcon\n'), ((9481, 9593), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.THERE_IS_RELATION_WITH_SPACES"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.THERE_IS_RELATION_WITH_SPACES')\n", (9497, 9593), False, 'import falcon\n'), ((10069, 10180), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.THERE_IS_RELATION_WITH_METER"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.THERE_IS_RELATION_WITH_METER')\n", (10085, 10180), False, 'import falcon\n'), ((10704, 10823), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.THERE_IS_RELATION_WITH_OFFLINE_METER"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.THERE_IS_RELATION_WITH_OFFLINE_METER')\n", (10720, 10823), False, 'import falcon\n'), ((11347, 11466), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.THERE_IS_RELATION_WITH_VIRTUAL_METER"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.THERE_IS_RELATION_WITH_VIRTUAL_METER')\n", (11363, 11466), False, 'import falcon\n'), ((12105, 12217), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_ID')\n", (12121, 12217), False, 'import falcon\n'), ((12693, 12807), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_NAME"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_NAME')\n", (12709, 12807), False, 'import falcon\n'), ((13056, 13169), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_IS_INPUT_COUNTED_VALUE"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_IS_INPUT_COUNTED_VALUE')\n", (13072, 13169), False, 'import falcon\n'), ((13433, 13547), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_IS_OUTPUT_COUNTED_VALUE"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_IS_OUTPUT_COUNTED_VALUE')\n", (13449, 13547), False, 'import falcon\n'), ((13869, 13974), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COST_CENTER_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COST_CENTER_ID')\n", (13885, 13974), False, 'import falcon\n'), ((14730, 14839), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COMBINED_EQUIPMENT_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.COMBINED_EQUIPMENT_NOT_FOUND')\n", (14746, 14839), False, 'import falcon\n'), ((15152, 15276), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.COMBINED_EQUIPMENT_NAME_IS_ALREADY_IN_USE"""'}), "(falcon.HTTP_404, title='API.BAD_REQUEST', description=\n 'API.COMBINED_EQUIPMENT_NAME_IS_ALREADY_IN_USE')\n", (15168, 15276), False, 'import falcon\n'), ((15632, 15734), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COST_CENTER_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.COST_CENTER_NOT_FOUND')\n", (15648, 15734), False, 'import falcon\n'), ((16653, 16765), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_ID')\n", (16669, 16765), False, 'import falcon\n'), ((17399, 17508), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COMBINED_EQUIPMENT_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.COMBINED_EQUIPMENT_NOT_FOUND')\n", (17415, 17508), False, 'import falcon\n'), ((17866, 17975), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COMBINED_EQUIPMENT_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.COMBINED_EQUIPMENT_NOT_FOUND')\n", (17882, 17975), False, 'import falcon\n'), ((23865, 23977), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_ID')\n", (23881, 23977), False, 'import falcon\n'), ((24361, 24470), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COMBINED_EQUIPMENT_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.COMBINED_EQUIPMENT_NOT_FOUND')\n", (24377, 24470), False, 'import falcon\n'), ((25520, 25632), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_ID')\n", (25536, 25632), False, 'import falcon\n'), ((25923, 26026), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_EQUIPMENT_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_EQUIPMENT_ID')\n", (25939, 26026), False, 'import falcon\n'), ((26468, 26577), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COMBINED_EQUIPMENT_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.COMBINED_EQUIPMENT_NOT_FOUND')\n", (26484, 26577), False, 'import falcon\n'), ((26866, 26966), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.EQUIPMENT_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.EQUIPMENT_NOT_FOUND')\n", (26882, 26966), False, 'import falcon\n'), ((27335, 27456), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.ERROR"""', 'description': '"""API.COMBINED_EQUIPMENT_EQUIPMENT_RELATION_EXISTS"""'}), "(falcon.HTTP_400, title='API.ERROR', description=\n 'API.COMBINED_EQUIPMENT_EQUIPMENT_RELATION_EXISTS')\n", (27351, 27456), False, 'import falcon\n'), ((28342, 28454), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_ID')\n", (28358, 28454), False, 'import falcon\n'), ((28551, 28654), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_EQUIPMENT_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_EQUIPMENT_ID')\n", (28567, 28654), False, 'import falcon\n'), ((29038, 29147), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COMBINED_EQUIPMENT_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.COMBINED_EQUIPMENT_NOT_FOUND')\n", (29054, 29147), False, 'import falcon\n'), ((29427, 29527), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.EQUIPMENT_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.EQUIPMENT_NOT_FOUND')\n", (29443, 29527), False, 'import falcon\n'), ((29870, 29998), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COMBINED_EQUIPMENT_EQUIPMENT_RELATION_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.COMBINED_EQUIPMENT_EQUIPMENT_RELATION_NOT_FOUND')\n", (29886, 29998), False, 'import falcon\n'), ((30677, 30789), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_ID')\n", (30693, 30789), False, 'import falcon\n'), ((31188, 31297), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COMBINED_EQUIPMENT_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.COMBINED_EQUIPMENT_NOT_FOUND')\n", (31204, 31297), False, 'import falcon\n'), ((36353, 36465), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_ID')\n", (36369, 36465), False, 'import falcon\n'), ((36937, 37061), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_PARAMETER_NAME"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_PARAMETER_NAME')\n", (36953, 37061), False, 'import falcon\n'), ((37384, 37508), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_PARAMETER_TYPE"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_PARAMETER_TYPE')\n", (37400, 37508), False, 'import falcon\n'), ((37700, 37824), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_PARAMETER_TYPE"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_PARAMETER_TYPE')\n", (37716, 37824), False, 'import falcon\n'), ((39881, 39990), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COMBINED_EQUIPMENT_NOT_FOUND"""'}), "(falcon.HTTP_400, title='API.NOT_FOUND', description=\n 'API.COMBINED_EQUIPMENT_NOT_FOUND')\n", (39897, 39990), False, 'import falcon\n'), ((40332, 40466), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.COMBINED_EQUIPMENT_PARAMETER_NAME_IS_ALREADY_IN_USE"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.COMBINED_EQUIPMENT_PARAMETER_NAME_IS_ALREADY_IN_USE')\n", (40348, 40466), False, 'import falcon\n'), ((45705, 45817), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_ID')\n", (45721, 45817), False, 'import falcon\n'), ((45914, 46036), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_PARAMETER_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_PARAMETER_ID')\n", (45930, 46036), False, 'import falcon\n'), ((48764, 48896), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COMBINED_EQUIPMENT_PARAMETER_NOT_FOUND_OR_NOT_MATCH"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.COMBINED_EQUIPMENT_PARAMETER_NOT_FOUND_OR_NOT_MATCH')\n", (48780, 48896), False, 'import falcon\n'), ((51047, 51159), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_ID')\n", (51063, 51159), False, 'import falcon\n'), ((51256, 51378), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_PARAMETER_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_PARAMETER_ID')\n", (51272, 51378), False, 'import falcon\n'), ((51803, 51912), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COMBINED_EQUIPMENT_NOT_FOUND"""'}), "(falcon.HTTP_400, title='API.NOT_FOUND', description=\n 'API.COMBINED_EQUIPMENT_NOT_FOUND')\n", (51819, 51912), False, 'import falcon\n'), ((52324, 52456), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COMBINED_EQUIPMENT_PARAMETER_NOT_FOUND_OR_NOT_MATCH"""'}), "(falcon.HTTP_400, title='API.NOT_FOUND', description=\n 'API.COMBINED_EQUIPMENT_PARAMETER_NOT_FOUND_OR_NOT_MATCH')\n", (52340, 52456), False, 'import falcon\n'), ((52958, 53070), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_ID')\n", (52974, 53070), False, 'import falcon\n'), ((53167, 53289), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_PARAMETER_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_PARAMETER_ID')\n", (53183, 53289), False, 'import falcon\n'), ((53762, 53886), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_PARAMETER_NAME"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_PARAMETER_NAME')\n", (53778, 53886), False, 'import falcon\n'), ((54209, 54333), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_PARAMETER_TYPE"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_PARAMETER_TYPE')\n", (54225, 54333), False, 'import falcon\n'), ((54525, 54649), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_PARAMETER_TYPE"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_PARAMETER_TYPE')\n", (54541, 54649), False, 'import falcon\n'), ((56706, 56815), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COMBINED_EQUIPMENT_NOT_FOUND"""'}), "(falcon.HTTP_400, title='API.NOT_FOUND', description=\n 'API.COMBINED_EQUIPMENT_NOT_FOUND')\n", (56722, 56815), False, 'import falcon\n'), ((57192, 57324), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COMBINED_EQUIPMENT_PARAMETER_NOT_FOUND_OR_NOT_MATCH"""'}), "(falcon.HTTP_400, title='API.NOT_FOUND', description=\n 'API.COMBINED_EQUIPMENT_PARAMETER_NOT_FOUND_OR_NOT_MATCH')\n", (57208, 57324), False, 'import falcon\n'), ((57738, 57872), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.COMBINED_EQUIPMENT_PARAMETER_NAME_IS_ALREADY_IN_USE"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.COMBINED_EQUIPMENT_PARAMETER_NAME_IS_ALREADY_IN_USE')\n", (57754, 57872), False, 'import falcon\n'), ((62985, 63097), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_ID')\n", (63001, 63097), False, 'import falcon\n'), ((63496, 63605), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COMBINED_EQUIPMENT_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.COMBINED_EQUIPMENT_NOT_FOUND')\n", (63512, 63605), False, 'import falcon\n'), ((65474, 65586), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_ID')\n", (65490, 65586), False, 'import falcon\n'), ((65865, 65964), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_METER_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_METER_ID')\n", (65881, 65964), False, 'import falcon\n'), ((66196, 66302), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_IS_OUTPUT_VALUE"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_IS_OUTPUT_VALUE')\n", (66212, 66302), False, 'import falcon\n'), ((66738, 66847), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COMBINED_EQUIPMENT_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.COMBINED_EQUIPMENT_NOT_FOUND')\n", (66754, 66847), False, 'import falcon\n'), ((67128, 67224), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.METER_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.METER_NOT_FOUND')\n", (67144, 67224), False, 'import falcon\n'), ((67581, 67698), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.ERROR"""', 'description': '"""API.COMBINED_EQUIPMENT_METER_RELATION_EXISTS"""'}), "(falcon.HTTP_400, title='API.ERROR', description=\n 'API.COMBINED_EQUIPMENT_METER_RELATION_EXISTS')\n", (67597, 67698), False, 'import falcon\n'), ((68583, 68695), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_ID')\n", (68599, 68695), False, 'import falcon\n'), ((68792, 68891), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_METER_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_METER_ID')\n", (68808, 68891), False, 'import falcon\n'), ((69275, 69384), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COMBINED_EQUIPMENT_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.COMBINED_EQUIPMENT_NOT_FOUND')\n", (69291, 69384), False, 'import falcon\n'), ((69660, 69756), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.METER_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.METER_NOT_FOUND')\n", (69676, 69756), False, 'import falcon\n'), ((70091, 70215), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COMBINED_EQUIPMENT_METER_RELATION_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.COMBINED_EQUIPMENT_METER_RELATION_NOT_FOUND')\n", (70107, 70215), False, 'import falcon\n'), ((70893, 71005), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_ID')\n", (70909, 71005), False, 'import falcon\n'), ((71404, 71513), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COMBINED_EQUIPMENT_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.COMBINED_EQUIPMENT_NOT_FOUND')\n", (71420, 71513), False, 'import falcon\n'), ((73406, 73518), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_ID')\n", (73422, 73518), False, 'import falcon\n'), ((73821, 73928), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_OFFLINE_METER_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_OFFLINE_METER_ID')\n", (73837, 73928), False, 'import falcon\n'), ((74176, 74282), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_IS_OUTPUT_VALUE"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_IS_OUTPUT_VALUE')\n", (74192, 74282), False, 'import falcon\n'), ((74718, 74827), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COMBINED_EQUIPMENT_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.COMBINED_EQUIPMENT_NOT_FOUND')\n", (74734, 74827), False, 'import falcon\n'), ((75124, 75228), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.OFFLINE_METER_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.OFFLINE_METER_NOT_FOUND')\n", (75140, 75228), False, 'import falcon\n'), ((75609, 75734), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.ERROR"""', 'description': '"""API.COMBINED_EQUIPMENT_OFFLINE_METER_RELATION_EXISTS"""'}), "(falcon.HTTP_400, title='API.ERROR', description=\n 'API.COMBINED_EQUIPMENT_OFFLINE_METER_RELATION_EXISTS')\n", (75625, 75734), False, 'import falcon\n'), ((76695, 76807), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_ID')\n", (76711, 76807), False, 'import falcon\n'), ((76904, 77011), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_OFFLINE_METER_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_OFFLINE_METER_ID')\n", (76920, 77011), False, 'import falcon\n'), ((77395, 77504), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COMBINED_EQUIPMENT_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.COMBINED_EQUIPMENT_NOT_FOUND')\n", (77411, 77504), False, 'import falcon\n'), ((77788, 77892), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.OFFLINE_METER_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.OFFLINE_METER_NOT_FOUND')\n", (77804, 77892), False, 'import falcon\n'), ((78243, 78375), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COMBINED_EQUIPMENT_OFFLINE_METER_RELATION_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.COMBINED_EQUIPMENT_OFFLINE_METER_RELATION_NOT_FOUND')\n", (78259, 78375), False, 'import falcon\n'), ((79069, 79181), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_ID')\n", (79085, 79181), False, 'import falcon\n'), ((79580, 79689), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COMBINED_EQUIPMENT_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.COMBINED_EQUIPMENT_NOT_FOUND')\n", (79596, 79689), False, 'import falcon\n'), ((81582, 81694), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_ID')\n", (81598, 81694), False, 'import falcon\n'), ((81997, 82104), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_VIRTUAL_METER_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_VIRTUAL_METER_ID')\n", (82013, 82104), False, 'import falcon\n'), ((82352, 82458), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_IS_OUTPUT_VALUE"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_IS_OUTPUT_VALUE')\n", (82368, 82458), False, 'import falcon\n'), ((82894, 83003), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COMBINED_EQUIPMENT_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.COMBINED_EQUIPMENT_NOT_FOUND')\n", (82910, 83003), False, 'import falcon\n'), ((83300, 83404), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.VIRTUAL_METER_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.VIRTUAL_METER_NOT_FOUND')\n", (83316, 83404), False, 'import falcon\n'), ((83785, 83910), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.ERROR"""', 'description': '"""API.COMBINED_EQUIPMENT_VIRTUAL_METER_RELATION_EXISTS"""'}), "(falcon.HTTP_400, title='API.ERROR', description=\n 'API.COMBINED_EQUIPMENT_VIRTUAL_METER_RELATION_EXISTS')\n", (83801, 83910), False, 'import falcon\n'), ((84871, 84983), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_COMBINED_EQUIPMENT_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_COMBINED_EQUIPMENT_ID')\n", (84887, 84983), False, 'import falcon\n'), ((85080, 85187), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_VIRTUAL_METER_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_VIRTUAL_METER_ID')\n", (85096, 85187), False, 'import falcon\n'), ((85571, 85680), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COMBINED_EQUIPMENT_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.COMBINED_EQUIPMENT_NOT_FOUND')\n", (85587, 85680), False, 'import falcon\n'), ((85964, 86068), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.VIRTUAL_METER_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.VIRTUAL_METER_NOT_FOUND')\n", (85980, 86068), False, 'import falcon\n'), ((86419, 86551), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COMBINED_EQUIPMENT_VIRTUAL_METER_RELATION_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.COMBINED_EQUIPMENT_VIRTUAL_METER_RELATION_NOT_FOUND')\n", (86435, 86551), False, 'import falcon\n'), ((2520, 2588), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.ERROR"""', 'description': 'ex'}), "(falcon.HTTP_400, title='API.ERROR', description=ex)\n", (2536, 2588), False, 'import falcon\n'), ((5440, 5542), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_404'], {'title': '"""API.NOT_FOUND"""', 'description': '"""API.COST_CENTER_NOT_FOUND"""'}), "(falcon.HTTP_404, title='API.NOT_FOUND', description=\n 'API.COST_CENTER_NOT_FOUND')\n", (5456, 5542), False, 'import falcon\n'), ((12368, 12440), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.EXCEPTION"""', 'description': 'ex'}), "(falcon.HTTP_400, title='API.EXCEPTION', description=ex)\n", (12384, 12440), False, 'import falcon\n'), ((16916, 16988), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.EXCEPTION"""', 'description': 'ex'}), "(falcon.HTTP_400, title='API.EXCEPTION', description=ex)\n", (16932, 16988), False, 'import falcon\n'), ((25381, 25453), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.EXCEPTION"""', 'description': 'ex'}), "(falcon.HTTP_400, title='API.EXCEPTION', description=ex)\n", (25397, 25453), False, 'import falcon\n'), ((36616, 36684), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.ERROR"""', 'description': 'ex'}), "(falcon.HTTP_400, title='API.ERROR', description=ex)\n", (36632, 36684), False, 'import falcon\n'), ((38431, 38530), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_POINT_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_POINT_ID')\n", (38447, 38530), False, 'import falcon\n'), ((40628, 40727), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_POINT_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_POINT_ID')\n", (40644, 40727), False, 'import falcon\n'), ((41061, 41159), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.POINT_NOT_FOUND"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.POINT_NOT_FOUND')\n", (41077, 41159), False, 'import falcon\n'), ((53441, 53509), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.ERROR"""', 'description': 'ex'}), "(falcon.HTTP_400, title='API.ERROR', description=ex)\n", (53457, 53509), False, 'import falcon\n'), ((55256, 55355), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_POINT_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_POINT_ID')\n", (55272, 55355), False, 'import falcon\n'), ((58034, 58133), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_POINT_ID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_POINT_ID')\n", (58050, 58133), False, 'import falcon\n'), ((58489, 58587), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.POINT_NOT_FOUND"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.POINT_NOT_FOUND')\n", (58505, 58587), False, 'import falcon\n'), ((65335, 65407), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.EXCEPTION"""', 'description': 'ex'}), "(falcon.HTTP_400, title='API.EXCEPTION', description=ex)\n", (65351, 65407), False, 'import falcon\n'), ((73267, 73339), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.EXCEPTION"""', 'description': 'ex'}), "(falcon.HTTP_400, title='API.EXCEPTION', description=ex)\n", (73283, 73339), False, 'import falcon\n'), ((81443, 81515), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.EXCEPTION"""', 'description': 'ex'}), "(falcon.HTTP_400, title='API.EXCEPTION', description=ex)\n", (81459, 81515), False, 'import falcon\n'), ((5919, 5931), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5929, 5931), False, 'import uuid\n'), ((41293, 41398), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_CONSTANT_VALUE"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_CONSTANT_VALUE')\n", (41309, 41398), False, 'import falcon\n'), ((58721, 58826), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_CONSTANT_VALUE"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_CONSTANT_VALUE')\n", (58737, 58826), False, 'import falcon\n'), ((18403, 18415), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (18413, 18415), False, 'import uuid\n'), ((43649, 43760), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_NUMERATOR_METER_UUID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_NUMERATOR_METER_UUID')\n", (43665, 43760), False, 'import falcon\n'), ((43927, 44040), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_DENOMINATOR_METER_UUID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_DENOMINATOR_METER_UUID')\n", (43943, 44040), False, 'import falcon\n'), ((44305, 44418), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_DENOMINATOR_METER_UUID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_DENOMINATOR_METER_UUID')\n", (44321, 44418), False, 'import falcon\n'), ((61077, 61188), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_NUMERATOR_METER_UUID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_NUMERATOR_METER_UUID')\n", (61093, 61188), False, 'import falcon\n'), ((61355, 61468), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_DENOMINATOR_METER_UUID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_DENOMINATOR_METER_UUID')\n", (61371, 61468), False, 'import falcon\n'), ((61733, 61846), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_400'], {'title': '"""API.BAD_REQUEST"""', 'description': '"""API.INVALID_DENOMINATOR_METER_UUID"""'}), "(falcon.HTTP_400, title='API.BAD_REQUEST', description=\n 'API.INVALID_DENOMINATOR_METER_UUID')\n", (61749, 61846), False, 'import falcon\n')]
|
from .serializers import CategorySerializer, TaskSerializer, MemberSerializer, ProjectSerializer
from .models import Categories, Tasks, Members, Projects
from rest_framework import status
from rest_framework.parsers import JSONParser
from django.http.response import JsonResponse
from django.views.decorators.csrf import csrf_exempt
# Create your views here.
@csrf_exempt
def ProjectAPI(request, id=0):
if request.method == 'GET':
project = Projects.objects.all()
serializer = ProjectSerializer(project, many=True)
return JsonResponse(serializer.data, safe=False, status=status.HTTP_200_OK)
elif request.method == 'POST':
project_data = JSONParser().parse(request)
serializer = ProjectSerializer(data=project_data)
if serializer.is_valid():
serializer.save()
return JsonResponse('Project Added Successfully!', safe=False, status=status.HTTP_201_CREATED)
return JsonResponse('Failed To Add Project', safe=False, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'PUT':
project_data = JSONParser().parse(request)
project = Projects.objects.get(ProjectID = project_data['ProjectID'])
serializer = ProjectSerializer(instance=project, data=project_data)
if serializer.is_valid():
serializer.save()
return JsonResponse('Project Updated Successfully!', safe=False, status=status.HTTP_201_CREATED)
return JsonResponse('Failed To Update Project', safe=False, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
project = Projects.objects.get(ProjectID=id)
project.delete()
return JsonResponse('Project Successfully Deleted!', safe=False, status=status.HTTP_200_OK)
@csrf_exempt
def TaskAPI(request, id=0):
if request.method == 'GET':
task = Tasks.objects.all()
serializer = TaskSerializer(task, many=True)
return JsonResponse(serializer.data, safe=False, status=status.HTTP_200_OK)
elif request.method == 'POST':
task_data = JSONParser().parse(request)
serializer = TaskSerializer(data=task_data)
if serializer.is_valid():
serializer.save()
return JsonResponse('Task Added Successfully!', safe=False, status=status.HTTP_201_CREATED)
return JsonResponse('Failed To Add Task', safe=False, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'PUT':
task_data = JSONParser().parse(request)
task = Tasks.objects.get(TaskID = task_data['TaskID'])
serializer = TaskSerializer(instance=task, data=task_data)
if serializer.is_valid():
serializer.save()
return JsonResponse('Task Updated Successfully!', safe=False, status=status.HTTP_201_CREATED)
return JsonResponse('Failed To Update Task', safe=False, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
task = Tasks.objects.get(TaskID=id)
task.delete()
return JsonResponse('Task Successfully Deleted!', safe=False, status=status.HTTP_200_OK)
@csrf_exempt
def CategoryAPI(request, id=0):
if request.method == 'GET':
category = Categories.objects.all()
serializer = CategorySerializer(category, many=True)
return JsonResponse(serializer.data, safe=False, status=status.HTTP_200_OK)
elif request.method == 'POST':
category_data = JSONParser().parse(request)
serializer = CategorySerializer(data=category_data)
if serializer.is_valid():
serializer.save()
return JsonResponse('Category Added Successfully!', safe=False, status=status.HTTP_201_CREATED)
return JsonResponse('Failed To Add Category', safe=False, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'PUT':
category_data = JSONParser().parse(request)
category = Categories.objects.get(CategoryID = category_data['CategoryID'])
serializer = CategorySerializer(instance=category, data=category_data)
if serializer.is_valid():
serializer.save()
return JsonResponse('Category Updated Successfully!', safe=False, status=status.HTTP_201_CREATED)
return JsonResponse('Failed To Update Category', safe=False, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
category = Categories.objects.get(CategoryID=id)
category.delete()
return JsonResponse('Category Successfully Deleted!', safe=False, status=status.HTTP_200_OK)
@csrf_exempt
def MemberAPI(request, id=0):
if request.method == 'GET':
member = Members.objects.all()
serializer = MemberSerializer(member, many=True)
return JsonResponse(serializer.data, safe=False, status=status.HTTP_200_OK)
elif request.method == 'POST':
member_data = JSONParser().parse(request)
serializer = MemberSerializer(data=member_data)
if serializer.is_valid():
serializer.save()
return JsonResponse('Member Added Successfully!', safe=False, status=status.HTTP_201_CREATED)
return JsonResponse('Failed To Add Member', safe=False, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'PUT':
member_data = JSONParser().parse(request)
member = Members.objects.get(MemberID = member_data['MemberID'])
serializer = MemberSerializer(instance=member, data=member_data)
if serializer.is_valid():
serializer.save()
return JsonResponse('Member Updated Successfully!', safe=False, status=status.HTTP_201_CREATED)
return JsonResponse('Failed To Update Member', safe=False, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
member = Members.objects.get(MemberID=id)
member.delete()
return JsonResponse('Member Successfully Deleted!', safe=False, status=status.HTTP_200_OK)
|
[
"django.http.response.JsonResponse",
"rest_framework.parsers.JSONParser"
] |
[((551, 619), 'django.http.response.JsonResponse', 'JsonResponse', (['serializer.data'], {'safe': '(False)', 'status': 'status.HTTP_200_OK'}), '(serializer.data, safe=False, status=status.HTTP_200_OK)\n', (563, 619), False, 'from django.http.response import JsonResponse\n'), ((1951, 2019), 'django.http.response.JsonResponse', 'JsonResponse', (['serializer.data'], {'safe': '(False)', 'status': 'status.HTTP_200_OK'}), '(serializer.data, safe=False, status=status.HTTP_200_OK)\n', (1963, 2019), False, 'from django.http.response import JsonResponse\n'), ((3309, 3377), 'django.http.response.JsonResponse', 'JsonResponse', (['serializer.data'], {'safe': '(False)', 'status': 'status.HTTP_200_OK'}), '(serializer.data, safe=False, status=status.HTTP_200_OK)\n', (3321, 3377), False, 'from django.http.response import JsonResponse\n'), ((4742, 4810), 'django.http.response.JsonResponse', 'JsonResponse', (['serializer.data'], {'safe': '(False)', 'status': 'status.HTTP_200_OK'}), '(serializer.data, safe=False, status=status.HTTP_200_OK)\n', (4754, 4810), False, 'from django.http.response import JsonResponse\n'), ((955, 1045), 'django.http.response.JsonResponse', 'JsonResponse', (['"""Failed To Add Project"""'], {'safe': '(False)', 'status': 'status.HTTP_400_BAD_REQUEST'}), "('Failed To Add Project', safe=False, status=status.\n HTTP_400_BAD_REQUEST)\n", (967, 1045), False, 'from django.http.response import JsonResponse\n'), ((2343, 2430), 'django.http.response.JsonResponse', 'JsonResponse', (['"""Failed To Add Task"""'], {'safe': '(False)', 'status': 'status.HTTP_400_BAD_REQUEST'}), "('Failed To Add Task', safe=False, status=status.\n HTTP_400_BAD_REQUEST)\n", (2355, 2430), False, 'from django.http.response import JsonResponse\n'), ((3717, 3808), 'django.http.response.JsonResponse', 'JsonResponse', (['"""Failed To Add Category"""'], {'safe': '(False)', 'status': 'status.HTTP_400_BAD_REQUEST'}), "('Failed To Add Category', safe=False, status=status.\n HTTP_400_BAD_REQUEST)\n", (3729, 3808), False, 'from django.http.response import JsonResponse\n'), ((5142, 5231), 'django.http.response.JsonResponse', 'JsonResponse', (['"""Failed To Add Member"""'], {'safe': '(False)', 'status': 'status.HTTP_400_BAD_REQUEST'}), "('Failed To Add Member', safe=False, status=status.\n HTTP_400_BAD_REQUEST)\n", (5154, 5231), False, 'from django.http.response import JsonResponse\n'), ((852, 944), 'django.http.response.JsonResponse', 'JsonResponse', (['"""Project Added Successfully!"""'], {'safe': '(False)', 'status': 'status.HTTP_201_CREATED'}), "('Project Added Successfully!', safe=False, status=status.\n HTTP_201_CREATED)\n", (864, 944), False, 'from django.http.response import JsonResponse\n'), ((1469, 1562), 'django.http.response.JsonResponse', 'JsonResponse', (['"""Failed To Update Project"""'], {'safe': '(False)', 'status': 'status.HTTP_400_BAD_REQUEST'}), "('Failed To Update Project', safe=False, status=status.\n HTTP_400_BAD_REQUEST)\n", (1481, 1562), False, 'from django.http.response import JsonResponse\n'), ((2243, 2332), 'django.http.response.JsonResponse', 'JsonResponse', (['"""Task Added Successfully!"""'], {'safe': '(False)', 'status': 'status.HTTP_201_CREATED'}), "('Task Added Successfully!', safe=False, status=status.\n HTTP_201_CREATED)\n", (2255, 2332), False, 'from django.http.response import JsonResponse\n'), ((2824, 2914), 'django.http.response.JsonResponse', 'JsonResponse', (['"""Failed To Update Task"""'], {'safe': '(False)', 'status': 'status.HTTP_400_BAD_REQUEST'}), "('Failed To Update Task', safe=False, status=status.\n HTTP_400_BAD_REQUEST)\n", (2836, 2914), False, 'from django.http.response import JsonResponse\n'), ((3613, 3706), 'django.http.response.JsonResponse', 'JsonResponse', (['"""Category Added Successfully!"""'], {'safe': '(False)', 'status': 'status.HTTP_201_CREATED'}), "('Category Added Successfully!', safe=False, status=status.\n HTTP_201_CREATED)\n", (3625, 3706), False, 'from django.http.response import JsonResponse\n'), ((4243, 4337), 'django.http.response.JsonResponse', 'JsonResponse', (['"""Failed To Update Category"""'], {'safe': '(False)', 'status': 'status.HTTP_400_BAD_REQUEST'}), "('Failed To Update Category', safe=False, status=status.\n HTTP_400_BAD_REQUEST)\n", (4255, 4337), False, 'from django.http.response import JsonResponse\n'), ((5040, 5131), 'django.http.response.JsonResponse', 'JsonResponse', (['"""Member Added Successfully!"""'], {'safe': '(False)', 'status': 'status.HTTP_201_CREATED'}), "('Member Added Successfully!', safe=False, status=status.\n HTTP_201_CREATED)\n", (5052, 5131), False, 'from django.http.response import JsonResponse\n'), ((5645, 5737), 'django.http.response.JsonResponse', 'JsonResponse', (['"""Failed To Update Member"""'], {'safe': '(False)', 'status': 'status.HTTP_400_BAD_REQUEST'}), "('Failed To Update Member', safe=False, status=status.\n HTTP_400_BAD_REQUEST)\n", (5657, 5737), False, 'from django.http.response import JsonResponse\n'), ((683, 695), 'rest_framework.parsers.JSONParser', 'JSONParser', ([], {}), '()\n', (693, 695), False, 'from rest_framework.parsers import JSONParser\n'), ((1364, 1458), 'django.http.response.JsonResponse', 'JsonResponse', (['"""Project Updated Successfully!"""'], {'safe': '(False)', 'status': 'status.HTTP_201_CREATED'}), "('Project Updated Successfully!', safe=False, status=status.\n HTTP_201_CREATED)\n", (1376, 1458), False, 'from django.http.response import JsonResponse\n'), ((1689, 1778), 'django.http.response.JsonResponse', 'JsonResponse', (['"""Project Successfully Deleted!"""'], {'safe': '(False)', 'status': 'status.HTTP_200_OK'}), "('Project Successfully Deleted!', safe=False, status=status.\n HTTP_200_OK)\n", (1701, 1778), False, 'from django.http.response import JsonResponse\n'), ((2080, 2092), 'rest_framework.parsers.JSONParser', 'JSONParser', ([], {}), '()\n', (2090, 2092), False, 'from rest_framework.parsers import JSONParser\n'), ((2722, 2813), 'django.http.response.JsonResponse', 'JsonResponse', (['"""Task Updated Successfully!"""'], {'safe': '(False)', 'status': 'status.HTTP_201_CREATED'}), "('Task Updated Successfully!', safe=False, status=status.\n HTTP_201_CREATED)\n", (2734, 2813), False, 'from django.http.response import JsonResponse\n'), ((3029, 3115), 'django.http.response.JsonResponse', 'JsonResponse', (['"""Task Successfully Deleted!"""'], {'safe': '(False)', 'status': 'status.HTTP_200_OK'}), "('Task Successfully Deleted!', safe=False, status=status.\n HTTP_200_OK)\n", (3041, 3115), False, 'from django.http.response import JsonResponse\n'), ((3442, 3454), 'rest_framework.parsers.JSONParser', 'JSONParser', ([], {}), '()\n', (3452, 3454), False, 'from rest_framework.parsers import JSONParser\n'), ((4137, 4232), 'django.http.response.JsonResponse', 'JsonResponse', (['"""Category Updated Successfully!"""'], {'safe': '(False)', 'status': 'status.HTTP_201_CREATED'}), "('Category Updated Successfully!', safe=False, status=status.\n HTTP_201_CREATED)\n", (4149, 4232), False, 'from django.http.response import JsonResponse\n'), ((4469, 4559), 'django.http.response.JsonResponse', 'JsonResponse', (['"""Category Successfully Deleted!"""'], {'safe': '(False)', 'status': 'status.HTTP_200_OK'}), "('Category Successfully Deleted!', safe=False, status=status.\n HTTP_200_OK)\n", (4481, 4559), False, 'from django.http.response import JsonResponse\n'), ((4873, 4885), 'rest_framework.parsers.JSONParser', 'JSONParser', ([], {}), '()\n', (4883, 4885), False, 'from rest_framework.parsers import JSONParser\n'), ((5541, 5634), 'django.http.response.JsonResponse', 'JsonResponse', (['"""Member Updated Successfully!"""'], {'safe': '(False)', 'status': 'status.HTTP_201_CREATED'}), "('Member Updated Successfully!', safe=False, status=status.\n HTTP_201_CREATED)\n", (5553, 5634), False, 'from django.http.response import JsonResponse\n'), ((5860, 5948), 'django.http.response.JsonResponse', 'JsonResponse', (['"""Member Successfully Deleted!"""'], {'safe': '(False)', 'status': 'status.HTTP_200_OK'}), "('Member Successfully Deleted!', safe=False, status=status.\n HTTP_200_OK)\n", (5872, 5948), False, 'from django.http.response import JsonResponse\n'), ((1099, 1111), 'rest_framework.parsers.JSONParser', 'JSONParser', ([], {}), '()\n', (1109, 1111), False, 'from rest_framework.parsers import JSONParser\n'), ((2481, 2493), 'rest_framework.parsers.JSONParser', 'JSONParser', ([], {}), '()\n', (2491, 2493), False, 'from rest_framework.parsers import JSONParser\n'), ((3863, 3875), 'rest_framework.parsers.JSONParser', 'JSONParser', ([], {}), '()\n', (3873, 3875), False, 'from rest_framework.parsers import JSONParser\n'), ((5284, 5296), 'rest_framework.parsers.JSONParser', 'JSONParser', ([], {}), '()\n', (5294, 5296), False, 'from rest_framework.parsers import JSONParser\n')]
|
import kubernetes.config
import logging
import logging.config
from pengrixio.config import KUBECONFIG
logging.config.fileConfig('logging.conf')
log = logging.getLogger('pengrixio')
# load kubernetes config file.
try:
kubernetes.config.load_kube_config(KUBECONFIG)
except:
log.warn('kubernetes cluster config file is invalid.')
|
[
"logging.config.fileConfig",
"logging.getLogger"
] |
[((105, 146), 'logging.config.fileConfig', 'logging.config.fileConfig', (['"""logging.conf"""'], {}), "('logging.conf')\n", (130, 146), False, 'import logging\n'), ((153, 183), 'logging.getLogger', 'logging.getLogger', (['"""pengrixio"""'], {}), "('pengrixio')\n", (170, 183), False, 'import logging\n')]
|
import param
from panel import panel
from panel.reactive import ReactiveHTML
from panel.widgets import FileDownload
try:
# Backward compatibility for panel 0.12.6
import bokeh.core.properties as bp
from panel.links import PARAM_MAPPING
# The Bokeh Color property has `_default_help` set which causes
# an error to be raise when Nullable is called on it. This converter
# overrides the Bokeh _help to set it to None and avoid the error.
# See https://github.com/holoviz/panel/issues/3058
def color_param_to_ppt(p, kwargs):
ppt = bp.Color(**kwargs)
ppt._help = None
return ppt
PARAM_MAPPING[param.Color] = color_param_to_ppt
except Exception:
pass
class DownloadButton(ReactiveHTML):
callback = param.Callable(precedence=-1)
color = param.Color(default='grey', allow_None=True)
data = param.String()
filename = param.String()
hide = param.Boolean(default=False)
size = param.Integer(default=20)
_template = """
<style>
.download-button {
position: absolute;
top: 0px;
right: 0px;
width: {{ size }}px;
height: {{ size }}px;
z-index: 10000;
opacity: {% if hide %}0{% else %}1{% endif %};
transition-delay: 0.5s;
transition: 0.5s;
cursor: pointer;
font-size: {{ size }}px;
{% if color %}color: {{ color }};{% endif %}
}
.download-button:hover {
transition: 0.5s;
opacity: 1;
}
.download-button:focus {
opacity: 1;
}
</style>
<span id="download-button" onclick="${_on_click}" class="download-button">
<i class="fas fa-download"></i>
</span>
"""
_scripts = {
'data': """
if (data.data == null || !data.data.length)
return
const byteString = atob(data.data.split(',')[1]);
// separate out the mime component
const mimeString = data.data.split(',')[0].split(':')[1].split(';')[0];
// Reset data
data.data = '';
// write the bytes of the string to an ArrayBuffer
const ab = new ArrayBuffer(byteString.length);
const ia = new Uint8Array(ab);
for (let i = 0; i < byteString.length; i++) {
ia[i] = byteString.charCodeAt(i);
}
// write the ArrayBuffer to a blob, and you're done
var bb = new Blob([ab], { type: mimeString });
var link = document.createElement('a');
link.href = URL.createObjectURL(bb)
link.download = data.filename
link.click()
"""
}
def __init__(self, object=None, **params):
params['sizing_mode'] = 'stretch_width'
if object is not None:
object = panel(object)
params['object'] = object
super().__init__(**params)
def _on_click(self, event=None):
file_input = FileDownload(callback=self.callback, filename=self.filename)
file_input._transfer()
self.data = file_input.data
class IconButton(ReactiveHTML):
disabled = param.Boolean(default=False)
color = param.Color(default=None)
icon = param.String(default=None, doc="""
The FontAwesome icon to use.""")
size = param.Integer(default=12, bounds=(0, None))
_template = """
<i id="icon-button" class="fas ${icon}" style="font-size: ${size}px; color: ${color}" onclick=${script('clicked')}></i>
"""
_scripts = {
'clicked': """
if (data.disabled)
return
data.disabled = true;
view._send_event('button', 'click', {target: {value: null}, type: 'icon_click'})
""",
'disabled': """
icon_button.style.cursor = data.disabled ? "not-allowed": "inherit";
"""
}
_event = 'dom_event'
def __init__(self, **params):
super().__init__(**params)
self._callbacks = []
@param.depends('size', watch=True, on_init=True)
def _update_height(self):
self.height = self.size
def on_click(self, callback):
self._callbacks.append(callback)
def js_on_click(self, args={}, code=""):
from panel.links import Callback
return Callback(self, code={'event:'+self._event: code}, args=args)
def _button_click(self, event=None):
try:
for cb in self._callbacks:
cb(event)
finally:
self.disabled = False
|
[
"param.Integer",
"param.depends",
"panel.links.Callback",
"param.Color",
"param.Boolean",
"bokeh.core.properties.Color",
"panel.panel",
"param.Callable",
"panel.widgets.FileDownload",
"param.String"
] |
[((767, 796), 'param.Callable', 'param.Callable', ([], {'precedence': '(-1)'}), '(precedence=-1)\n', (781, 796), False, 'import param\n'), ((810, 854), 'param.Color', 'param.Color', ([], {'default': '"""grey"""', 'allow_None': '(True)'}), "(default='grey', allow_None=True)\n", (821, 854), False, 'import param\n'), ((867, 881), 'param.String', 'param.String', ([], {}), '()\n', (879, 881), False, 'import param\n'), ((898, 912), 'param.String', 'param.String', ([], {}), '()\n', (910, 912), False, 'import param\n'), ((925, 953), 'param.Boolean', 'param.Boolean', ([], {'default': '(False)'}), '(default=False)\n', (938, 953), False, 'import param\n'), ((966, 991), 'param.Integer', 'param.Integer', ([], {'default': '(20)'}), '(default=20)\n', (979, 991), False, 'import param\n'), ((3081, 3109), 'param.Boolean', 'param.Boolean', ([], {'default': '(False)'}), '(default=False)\n', (3094, 3109), False, 'import param\n'), ((3123, 3148), 'param.Color', 'param.Color', ([], {'default': 'None'}), '(default=None)\n', (3134, 3148), False, 'import param\n'), ((3161, 3234), 'param.String', 'param.String', ([], {'default': 'None', 'doc': '"""\n The FontAwesome icon to use."""'}), '(default=None, doc="""\n The FontAwesome icon to use.""")\n', (3173, 3234), False, 'import param\n'), ((3247, 3290), 'param.Integer', 'param.Integer', ([], {'default': '(12)', 'bounds': '(0, None)'}), '(default=12, bounds=(0, None))\n', (3260, 3290), False, 'import param\n'), ((3923, 3970), 'param.depends', 'param.depends', (['"""size"""'], {'watch': '(True)', 'on_init': '(True)'}), "('size', watch=True, on_init=True)\n", (3936, 3970), False, 'import param\n'), ((570, 588), 'bokeh.core.properties.Color', 'bp.Color', ([], {}), '(**kwargs)\n', (578, 588), True, 'import bokeh.core.properties as bp\n'), ((2903, 2963), 'panel.widgets.FileDownload', 'FileDownload', ([], {'callback': 'self.callback', 'filename': 'self.filename'}), '(callback=self.callback, filename=self.filename)\n', (2915, 2963), False, 'from panel.widgets import FileDownload\n'), ((4211, 4275), 'panel.links.Callback', 'Callback', (['self'], {'code': "{('event:' + self._event): code}", 'args': 'args'}), "(self, code={('event:' + self._event): code}, args=args)\n", (4219, 4275), False, 'from panel.links import Callback\n'), ((2757, 2770), 'panel.panel', 'panel', (['object'], {}), '(object)\n', (2762, 2770), False, 'from panel import panel\n')]
|
"""
This file contains the necessary to reconstruct the intermediary featuress from
a save of the models an inputs
Author Hugues
"""
import torch
from pathlib import Path
if __name__ == '__main__':
import sys
sys.path.append("..")
from param import data_path
file_location = Path(data_path) / Path('models')
from models.store_model_SHL import create_filename, Diagnostic_CNN
from models.store_model_CIFAR import Diagnostic_ResNet
# Diagnostic_ResNet and Diagnostic_CNN will be used for class loading
datasets = ["CIFAR_10", "SHL_2018"]
sensors = {"CIFAR_10":["CIFAR_10"],
"SHL_2018":["Gyr_y", "Acc_norm", "Mag_norm"]}
n_trials = 3 *2
#%%
def load_data(file_location, dataset, sanity_check=False):
"""
Loads the data and performs some verificaions on the ordering and performance
Parameters
----------
file_location (Path object or str): the absolute or reltive path to the
.pickle objects
dataset (str): either 'SHL_2018' or 'CIFAR_10'
sanity_check (bool): if True, also loads the raw data an makes sure that we can
recreate the predictions.
Defaults to False
Returns
-------
data: dict
keys = sensor (ex "Acc_norm" or "CIFAR_10")
values = dict
keys = split ('train' or 'val')
values = list of numpy arrays (n_samples, ...)
one array per initialization (3*2 = 6 by default)
models: dict
keys = sensor (ex "Acc_norm" or "CIFAR_10")
values = list of PyTorch nn.Module objects
ground_truth: dict
keys = split ('train' or 'val')
values = np array of ints, containing the class between 0 and n-1
"""
sensors_list = sensors[dataset]
data = {sensor:
{split:
[]
for split in ["train", "val"]}
for sensor in sensors_list}
models = {sensor:
[]
for sensor in sensors_list}
ground_truth = {split:
[]
for split in ["train", "val"]}
if sanity_check: previous_GT = {"train":None, "val":None} # we will check that
# the dataloader does not shuffle the position of the samples
# basic sensors
for sensor in sensors_list:
if sanity_check:
train_dataloader, val_dataloader = torch.load(Path(data_path) / Path("models") / Path("dataloaders-"+dataset+"-"+sensor+'.pt'))
dataloaders = {'train':train_dataloader,
'val': val_dataloader}
for trial_index in range(n_trials):
filename = create_filename(dataset, sensor, trial_index)
features_filepath = Path(data_path) / Path("models") / Path('features-' + filename)
model_filepath = Path(data_path) / Path("models") / Path('model-' + filename)
print(f"loading '{features_filepath}'...", end='')
features_pred_GT_train, features_pred_GT_val = torch.load(features_filepath)
model = torch.load(model_filepath)
features_pred_GT = {"train":features_pred_GT_train,
"val" :features_pred_GT_val
}
print(' ... done')
for i_split, split in enumerate(["train", "val"]):
features, prediction, this_gt = features_pred_GT[split]
ground_truth[split] = this_gt # the value is replaced every time, which is not
# a problem because all GT should be equal
if sanity_check:
score_name, score_value = model.validate(dataloaders[split])
print(f" {dataset:5s} {score_name} {100*score_value:.2f} %")
if previous_GT[split] is None:
previous_GT[split] = this_gt
else :
assert (previous_GT[split] == this_gt).all(), "the order of the samples changed between runs"
data[sensor][split].append(features)
model.cpu() # we dont need the model to be on GPU anymore
models[sensor].append(model)
return data, models, ground_truth
#%%
if __name__ == "__main__":
load_data(file_location, dataset="SHL_2018", sanity_check=True)
|
[
"sys.path.append",
"pathlib.Path",
"torch.load",
"models.store_model_SHL.create_filename"
] |
[((220, 241), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (235, 241), False, 'import sys\n'), ((287, 302), 'pathlib.Path', 'Path', (['data_path'], {}), '(data_path)\n', (291, 302), False, 'from pathlib import Path\n'), ((305, 319), 'pathlib.Path', 'Path', (['"""models"""'], {}), "('models')\n", (309, 319), False, 'from pathlib import Path\n'), ((2586, 2631), 'models.store_model_SHL.create_filename', 'create_filename', (['dataset', 'sensor', 'trial_index'], {}), '(dataset, sensor, trial_index)\n', (2601, 2631), False, 'from models.store_model_SHL import create_filename, Diagnostic_CNN\n'), ((2943, 2972), 'torch.load', 'torch.load', (['features_filepath'], {}), '(features_filepath)\n', (2953, 2972), False, 'import torch\n'), ((2993, 3019), 'torch.load', 'torch.load', (['model_filepath'], {}), '(model_filepath)\n', (3003, 3019), False, 'import torch\n'), ((2699, 2727), 'pathlib.Path', 'Path', (["('features-' + filename)"], {}), "('features-' + filename)\n", (2703, 2727), False, 'from pathlib import Path\n'), ((2795, 2820), 'pathlib.Path', 'Path', (["('model-' + filename)"], {}), "('model-' + filename)\n", (2799, 2820), False, 'from pathlib import Path\n'), ((2367, 2420), 'pathlib.Path', 'Path', (["('dataloaders-' + dataset + '-' + sensor + '.pt')"], {}), "('dataloaders-' + dataset + '-' + sensor + '.pt')\n", (2371, 2420), False, 'from pathlib import Path\n'), ((2664, 2679), 'pathlib.Path', 'Path', (['data_path'], {}), '(data_path)\n', (2668, 2679), False, 'from pathlib import Path\n'), ((2682, 2696), 'pathlib.Path', 'Path', (['"""models"""'], {}), "('models')\n", (2686, 2696), False, 'from pathlib import Path\n'), ((2760, 2775), 'pathlib.Path', 'Path', (['data_path'], {}), '(data_path)\n', (2764, 2775), False, 'from pathlib import Path\n'), ((2778, 2792), 'pathlib.Path', 'Path', (['"""models"""'], {}), "('models')\n", (2782, 2792), False, 'from pathlib import Path\n'), ((2332, 2347), 'pathlib.Path', 'Path', (['data_path'], {}), '(data_path)\n', (2336, 2347), False, 'from pathlib import Path\n'), ((2350, 2364), 'pathlib.Path', 'Path', (['"""models"""'], {}), "('models')\n", (2354, 2364), False, 'from pathlib import Path\n')]
|
import uos
from flashbdev import bdev
def check_bootsec():
buf = bytearray(bdev.ioctl(5, 0)) # 5 is SEC_SIZE
bdev.readblocks(0, buf)
empty = True
for b in buf:
if b != 0xFF:
empty = False
break
if empty:
return True
fs_corrupted()
def fs_corrupted():
import time
while 1:
print(
"""\
The filesystem appears to be corrupted. If you had important data there, you
may want to make a flash snapshot to try to recover it. Otherwise, perform
factory reprogramming of MicroPython firmware (completely erase flash, followed
by firmware programming).
"""
)
time.sleep(3)
def setup():
check_bootsec()
print("Performing initial setup")
uos.VfsLfs2.mkfs(bdev)
vfs = uos.VfsLfs2(bdev)
uos.mount(vfs, "/")
with open("webrepl_cfg.py", "w") as webrepl_cfg_file:
webrepl_cfg_file.write(
"""\
PASS = '<PASSWORD>'
"""
)
with open("boot.py", "w") as boot_file:
boot_file.write(
"""\
# This file is executed on every boot (including wake-boot from deepsleep)
import esp
import machine
import time
import sys
import traceback
print("Welcome to AidonMeterLogger console!")
print("------------------------------------")
"""
)
with open("main.py", "w") as main_file:
main_file.write(
"""\
from neo import Neo
from meter_reader import MeterReader
neo=Neo(13)
neo.blue()
handler=MeterReader(16)
handler.run()
"""
)
return vfs
|
[
"flashbdev.bdev.readblocks",
"uos.mount",
"flashbdev.bdev.ioctl",
"time.sleep",
"uos.VfsLfs2",
"uos.VfsLfs2.mkfs"
] |
[((120, 143), 'flashbdev.bdev.readblocks', 'bdev.readblocks', (['(0)', 'buf'], {}), '(0, buf)\n', (135, 143), False, 'from flashbdev import bdev\n'), ((753, 775), 'uos.VfsLfs2.mkfs', 'uos.VfsLfs2.mkfs', (['bdev'], {}), '(bdev)\n', (769, 775), False, 'import uos\n'), ((786, 803), 'uos.VfsLfs2', 'uos.VfsLfs2', (['bdev'], {}), '(bdev)\n', (797, 803), False, 'import uos\n'), ((808, 827), 'uos.mount', 'uos.mount', (['vfs', '"""/"""'], {}), "(vfs, '/')\n", (817, 827), False, 'import uos\n'), ((81, 97), 'flashbdev.bdev.ioctl', 'bdev.ioctl', (['(5)', '(0)'], {}), '(5, 0)\n', (91, 97), False, 'from flashbdev import bdev\n'), ((662, 675), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (672, 675), False, 'import time\n')]
|
# The MIT License (MIT)
#
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module provides an AST rewriter that takes and Read/Write operations on
global variables and rewrites them to retrieve the variable by a function
instead. Non-global variables are left untouched.
Example:
import os
from os import path
parent_dir = path.dirname(__file__)
def main():
filename = path.join(parent_dir, 'foo.py')
print(filename)
Will be converted to:
import os; __dict__['os'] = os
from os import path; __dict__['path'] = path
__dict__['parent_dir'] = __dict__['path'].dirname(__dict__['__file__'])
def main():
filename = __dict__['path'].join(__dict__['parent_dir'], 'foo.py')
__dict__['print'](filename)
"""
import ast
import collections
import textwrap
import sys
from ..compat import builtins, exec_, string_types
def get_argname(arg):
if isinstance(arg, ast.Name):
return arg.id
elif isinstance(arg, str):
return arg
elif isinstance(arg, ast.arg):
# Python 3 where annotations are supported
return arg.arg
else:
raise RuntimeError(ast.dump(arg))
class NameRewriter(ast.NodeTransformer):
# This code snippet is inserted when using the `from X import *` syntax.
IMPORT_FROM_ALL_ASSIGN = textwrap.dedent('''
# We can not use __import__(module, fromlist=[None]) as some modules seem
# to break with it (see for example nose-devs/nose#1075).
import importlib as __importlib
__module = __importlib.import_module({module!r})
try:
__vars = __module.__all__
except AttributeError:
__vars = [x for x in dir(__module) if not x.startswith('_')]
for __key in __vars:
{data_var}[__key] = getattr(__module, __key)
del __importlib, __module, __vars, __key
''')
def __init__(self, data_var):
self.data_var = data_var
self.stack = []
def __push_stack(self):
self.stack.append({'external': set(), 'vars': set()})
def __pop_stack(self):
self.stack.pop()
def __is_local(self, name):
if not self.stack:
return False
for frame in reversed(self.stack):
if name in frame['external']:
return False
if name in frame['vars']:
return True
return False
def __add_variable(self, name):
assert isinstance(name, string_types), name
if self.stack and name not in self.stack[-1]['external']:
self.stack[-1]['vars'].add(name)
def __add_external(self, name):
if self.stack:
self.stack[-1]['external'].add(name)
def __get_subscript(self, name, ctx=None):
"""
Returns `<data_var>["<name>"]`
"""
assert isinstance(name, string_types), name
return ast.Subscript(
value=ast.Name(id=self.data_var, ctx=ast.Load()),
slice=ast.Index(value=ast.Str(s=name)),
ctx=ctx)
def __get_subscript_assign(self, name):
"""
Returns `<data_var>["<name>"] = <name>`.
"""
return ast.Assign(
targets=[self.__get_subscript(name, ast.Store())],
value=ast.Name(id=name, ctx=ast.Load()))
def __get_subscript_delete(self, name):
"""
Returns `del <data_var>["<name>"]`.
"""
return ast.Delete(targets=[self.__get_subscript(name, ast.Del())])
def __visit_target(self, node):
"""
Call this method to visit assignment targets and to add local variables
to the current stack frame. Used in #visit_Assign() and
#__visit_comprehension().
"""
if isinstance(node, ast.Name) and isinstance(node.ctx, ast.Store):
self.__add_variable(node.id)
elif isinstance(node, (ast.Tuple, ast.List)):
[self.__visit_target(x) for x in node.elts]
def __visit_suite(self, node):
result = node
if isinstance(node, (ast.FunctionDef, ast.ClassDef)):
self.__add_variable(node.name)
if not self.__is_local(node.name):
assign = self.__get_subscript_assign(node.name)
result = [node, ast.copy_location(assign, node)]
self.__push_stack()
if sys.version_info[0] > 2 and isinstance(node, ast.ClassDef):
# TODO: This is a bit of a dirty hack to make sure that super and
# __class__ are considered as local variables in functions.
self.__add_variable('super')
self.__add_variable('__class__')
if isinstance(node, (ast.FunctionDef, ast.Lambda)): # Also used for ClassDef
for arg in node.args.args + getattr(node.args, 'kwonlyargs', []): # Python 2
self.__add_variable(get_argname(arg))
if node.args.vararg:
self.__add_variable(get_argname(node.args.vararg))
if node.args.kwarg:
self.__add_variable(get_argname(node.args.kwarg.arg))
self.generic_visit(node)
self.__pop_stack()
return result
def __visit_comprehension(self, node):
# In Python 3, comprehensions have their own scope.
has_own_scope = (sys.version_info[0] > 2)
if has_own_scope:
self.__push_stack()
for comp in node.generators:
self.__visit_target(comp.target)
self.generic_visit(node)
if has_own_scope:
self.__pop_stack()
return node
def visit_Name(self, node):
if not self.__is_local(node.id):
node = ast.copy_location(self.__get_subscript(node.id, node.ctx), node)
return node
def visit_Assign(self, node):
for target in node.targets:
self.__visit_target(target)
self.generic_visit(node)
return node
def visit_Import(self, node):
assignments = []
for alias in node.names:
name = (alias.asname or alias.name).split('.')[0]
assignments.append(self.__get_subscript_assign(name))
return [node] + [ast.copy_location(x, node) for x in assignments]
def visit_ImportFrom(self, node):
assignments = []
for alias in node.names:
name = alias.asname or alias.name
if name == '*':
code = self.IMPORT_FROM_ALL_ASSIGN.format(module=node.module, data_var=self.data_var)
module = ast.parse(code)
assignments += module.body
else:
assignments.append(self.__get_subscript_assign(name))
return [node] + [ast.copy_location(x, node) for x in assignments]
def visit_ExceptHandler(self, node):
if node.name:
self.__add_variable(get_argname(node.name)) # Python 2 has an ast.Name here, Python 3 just a string
self.generic_visit(node)
if not self.stack and node.name and sys.version_info[0] > 2:
# In Python 2, the node.name will already be replaced with a subscript
# by #visit_Name().
node.body.insert(0, ast.copy_location(self.__get_subscript_assign(node.name), node))
if sys.version_info[0] == 3:
node.body.append(ast.copy_location(self.__get_subscript_delete(node.name), node))
return node
def visit_With(self, node):
if hasattr(node, 'items'):
optional_vars = [x.optional_vars for x in node.items]
else:
# Python 2
optional_vars = [node.optional_vars]
[self.__visit_target(x) for x in optional_vars if x]
self.generic_visit(node)
return node
def visit_For(self, node):
self.__visit_target(node.target)
self.generic_visit(node)
return node
visit_FunctionDef = __visit_suite
visit_Lambda = __visit_suite
visit_ClassDef = __visit_suite
visit_ListComp = __visit_comprehension
visit_SetComp = __visit_comprehension
visit_GeneratorExp = __visit_comprehension
visit_DictComp = __visit_comprehension
def visit_Global(self, node):
for name in node.names:
self.__add_external(name)
def transform(ast_node, data_var='__dict__'):
ast_node = NameRewriter('__dict__').visit(ast_node)
ast_node = ast.fix_missing_locations(ast_node)
return ast_node
def dynamic_exec(code, resolve, assign=None, delete=None, automatic_builtins=True,
filename=None, module_name=None, _type='exec'):
"""
Transforms the Python source code *code* and evaluates it so that the
*resolve* and *assign* functions are called respectively for when a global
variable is access or assigned.
If *resolve* is a mapping, *assign* must be omitted. #KeyError#s raised by
the mapping are automatically converted to #NameError#s.
Otherwise, *resolve* and *assign* must be callables that have the same
interface as `__getitem__()`, and `__setitem__()`. If *assign* is omitted
in that case, assignments will be redirected to a separate dictionary and
keys in that dictionary will be checked before continuing with the *resolve*
callback.
"""
parse_filename = filename or '<string>'
ast_node = transform(ast.parse(code, parse_filename, mode=_type))
code = compile(ast_node, parse_filename, _type)
if hasattr(resolve, '__getitem__'):
if assign is not None:
raise TypeError('"assign" parameter specified where "resolve" is a mapping')
if delete is not None:
raise TypeError('"delete" parameter specified where "resolve" is a mapping')
input_mapping = resolve
def resolve(x):
try:
return input_mapping[x]
except KeyError:
raise NameError(x)
assign = input_mapping.__setitem__
delete = input_mapping.__delitem__
else:
input_mapping = False
class DynamicMapping(object):
_data = {}
_deleted = set()
def __repr__(self):
if input_mapping:
return 'DynamicMapping({!r})'.format(input_mapping)
else:
return 'DynamicMapping(resolve={!r}, assign={!r})'.format(resolve, assign)
def __getitem__(self, key):
if key in self._deleted:
raise NameError(key)
if assign is None:
try:
return self._data[key]
except KeyError:
pass # Continue with resolve()
try:
return resolve(key)
except NameError as exc:
if automatic_builtins and not key.startswith('_'):
try:
return getattr(builtins, key)
except AttributeError:
pass
raise exc
def __setitem__(self, key, value):
self._deleted.discard(key)
if assign is None:
self._data[key] = value
else:
assign(key, value)
def __delitem__(self, key):
if delete is None:
self._deleted.add(key)
else:
delete(key)
def get(self, key, default=None):
try:
return self[key]
except NameError:
return default
mapping = DynamicMapping()
globals_ = {'__dict__': mapping}
if filename:
mapping['__file__'] = filename
globals_['__file__'] = filename
if module_name:
mapping['__name__'] = module_name
globals_['__name__'] = module_name
return (exec_ if _type == 'exec' else eval)(code, globals_)
def dynamic_eval(*args, **kwargs):
return dynamic_exec(*args, _type='eval', **kwargs)
|
[
"textwrap.dedent",
"ast.Del",
"ast.Load",
"ast.fix_missing_locations",
"ast.dump",
"ast.copy_location",
"ast.parse",
"ast.Store",
"ast.Str"
] |
[((2324, 2845), 'textwrap.dedent', 'textwrap.dedent', (['"""\n # We can not use __import__(module, fromlist=[None]) as some modules seem\n # to break with it (see for example nose-devs/nose#1075).\n import importlib as __importlib\n __module = __importlib.import_module({module!r})\n try:\n __vars = __module.__all__\n except AttributeError:\n __vars = [x for x in dir(__module) if not x.startswith(\'_\')]\n for __key in __vars:\n {data_var}[__key] = getattr(__module, __key)\n del __importlib, __module, __vars, __key\n """'], {}), '(\n """\n # We can not use __import__(module, fromlist=[None]) as some modules seem\n # to break with it (see for example nose-devs/nose#1075).\n import importlib as __importlib\n __module = __importlib.import_module({module!r})\n try:\n __vars = __module.__all__\n except AttributeError:\n __vars = [x for x in dir(__module) if not x.startswith(\'_\')]\n for __key in __vars:\n {data_var}[__key] = getattr(__module, __key)\n del __importlib, __module, __vars, __key\n """\n )\n', (2339, 2845), False, 'import textwrap\n'), ((8619, 8654), 'ast.fix_missing_locations', 'ast.fix_missing_locations', (['ast_node'], {}), '(ast_node)\n', (8644, 8654), False, 'import ast\n'), ((9538, 9581), 'ast.parse', 'ast.parse', (['code', 'parse_filename'], {'mode': '_type'}), '(code, parse_filename, mode=_type)\n', (9547, 9581), False, 'import ast\n'), ((6638, 6664), 'ast.copy_location', 'ast.copy_location', (['x', 'node'], {}), '(x, node)\n', (6655, 6664), False, 'import ast\n'), ((6947, 6962), 'ast.parse', 'ast.parse', (['code'], {}), '(code)\n', (6956, 6962), False, 'import ast\n'), ((7093, 7119), 'ast.copy_location', 'ast.copy_location', (['x', 'node'], {}), '(x, node)\n', (7110, 7119), False, 'import ast\n'), ((2163, 2176), 'ast.dump', 'ast.dump', (['arg'], {}), '(arg)\n', (2171, 2176), False, 'import ast\n'), ((4953, 4984), 'ast.copy_location', 'ast.copy_location', (['assign', 'node'], {}), '(assign, node)\n', (4970, 4984), False, 'import ast\n'), ((3784, 3794), 'ast.Load', 'ast.Load', ([], {}), '()\n', (3792, 3794), False, 'import ast\n'), ((3825, 3840), 'ast.Str', 'ast.Str', ([], {'s': 'name'}), '(s=name)\n', (3832, 3840), False, 'import ast\n'), ((4028, 4039), 'ast.Store', 'ast.Store', ([], {}), '()\n', (4037, 4039), False, 'import ast\n'), ((4077, 4087), 'ast.Load', 'ast.Load', ([], {}), '()\n', (4085, 4087), False, 'import ast\n'), ((4248, 4257), 'ast.Del', 'ast.Del', ([], {}), '()\n', (4255, 4257), False, 'import ast\n')]
|
#!/use/bin/python
import sys
from sense_hat import SenseHat
import variables.colors as c
import variables.mode as m
from libs.set_color import *
def set_color_terminal():
sense = SenseHat()
try:
color = input("Type an rgb color: ")
except (KeyboardInterrupt, SystemExit):
sys.exit()
except:
print("Changed mode...")
try:
if color == "close" or color == "next":
m.mode_index += 1
else:
c.color = color
set_color()
except (KeyboardInterrupt, SystemExit):
sys.exit()
except:
print("Not a valid input, use it as following: <number>,<number>,<number> -> 255,255,255")
|
[
"sense_hat.SenseHat",
"sys.exit"
] |
[((185, 195), 'sense_hat.SenseHat', 'SenseHat', ([], {}), '()\n', (193, 195), False, 'from sense_hat import SenseHat\n'), ((303, 313), 'sys.exit', 'sys.exit', ([], {}), '()\n', (311, 313), False, 'import sys\n'), ((565, 575), 'sys.exit', 'sys.exit', ([], {}), '()\n', (573, 575), False, 'import sys\n')]
|
#
# ovirt-engine-setup -- ovirt engine setup
#
# Copyright oVirt Authors
# SPDX-License-Identifier: Apache-2.0
#
#
"""Utils."""
import gettext
import grp
import pwd
import re
from otopi import constants as otopicons
from otopi import plugin
from otopi import util
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
def editConfigContent(
content,
params,
keep_existing=False,
changed_lines=None,
comment_re='[#]*\\s*',
param_re='\\w+',
new_comment_tpl='{spaces}# {original}',
separator_re='\\s*=\\s*',
new_line_tpl='{spaces}{param} = {value}',
added_params=None,
):
"""Return edited content of a config file.
Keyword arguments:
content - a list of strings, the content prior to calling us
params - a dict of params/values that should be in the output
If the value for a param is None, param is deleted
keep_existing - if True, existing params are not changed, only missing
ones are added.
changed_lines - an output parameter, a list of dictionaries with
added and removed lines.
comment_re - a regular expression that a comment marker prefixed
to param should match. If a commented param line is found,
a new line will be added after it.
param_re - a regular expression that should match params
new_comment_tpl - a template for a comment. {original} will be replaced
with this template, {spaces} will be replaced with
original whitespace prefix.
separator_re - a regular expression that the separator between
param and value should match
new_line_tpl - a template for a new line. {param} will be replaced
with param, {value} with value.
added_params - an output parameter, a list of params that were added
in the end because they were not found in content.
Params that appear uncommented in the input, are commented, and new
values are added after the commented lines. Params that appear only
commented in the input, the comments are copied as-is, and new lines
are added after the comments. Params that do not appear in the input
are added in the end.
"""
params = params.copy()
pattern = r"""
^
(?P<spaces>\s*)
(?P<comment>{comment_re})
(?P<original>
(?P<param>{param_re})
(?P<separator>{separator_re})
(?P<value>.*)
)
$
""".format(
comment_re=comment_re,
param_re=param_re,
separator_re=separator_re,
)
re_obj = re.compile(flags=re.VERBOSE, pattern=pattern)
# Find params which are uncommented in the input.
uncommented = set()
for line in content:
f = re_obj.match(line)
if (
f is not None and
f.group('param') in params and
not f.group('comment')
):
uncommented.add(f.group('param'))
if changed_lines is None:
changed_lines = []
if added_params is None:
added_params = []
newcontent = []
processed = set()
for line in content:
f = re_obj.match(line)
if (
f is not None and
f.group('param') in params and
not (
f.group('param') in uncommented and
f.group('comment')
)
# If param in uncommented and current line is comment,
# we do not need to process it - we process the uncommented
# line when we see it
):
if (
not f.group('comment') and
(
str(f.group('value')) == str(params[f.group('param')]) or
keep_existing
)
):
# value is not changed, or we do not care. do nothing
processed.add(f.group('param'))
else:
if (
f.group('param') in uncommented and
not f.group('comment')
):
# Add current line, commented, before new line
currentline = new_comment_tpl.format(
spaces=f.group('spaces'),
original=f.group('original'),
)
changed_lines.append(
{
'added': currentline,
'removed': line,
}
)
newcontent.append(currentline)
else:
# Only possible option here is that current line is
# a comment and param is not in uncommented. Keep it.
# Other two options are in "if"s above.
# The last option - param is not in uncommented
# and current line is not a comment - is not possible.
newcontent.append(line)
newline = new_line_tpl.format(
spaces=f.group('spaces'),
param=f.group('param'),
value=params[f.group('param')],
)
changed_lines.append(
{
'added': newline,
}
)
processed.add(f.group('param'))
line = newline
newcontent.append(line)
# Add remaining params at the end
for param, value in params.items():
if param not in processed:
newline = new_line_tpl.format(
spaces='',
param=param,
value=value,
)
newcontent.append(newline)
changed_lines.append(
{
'added': newline,
}
)
added_params.append(param)
return newcontent
@util.export
def getUid(user):
return pwd.getpwnam(user)[2]
@util.export
def getGid(group):
return grp.getgrnam(group)[2]
@util.export
def parsePort(port):
try:
port = int(port)
except ValueError:
raise ValueError(
_('Invalid port {number}').format(
number=port,
)
)
if port < 0 or port > 0xffff:
raise ValueError(
_('Invalid number {number}').format(
number=port,
)
)
return port
@util.export
def getPortTester():
def test_port(port):
res = ''
try:
parsePort(port)
except ValueError as e:
res = e
return res
return test_port
@util.export
def addExitCode(environment, code, priority=plugin.Stages.PRIORITY_DEFAULT):
environment[
otopicons.BaseEnv.EXIT_CODE
].append(
{
'code': code,
'priority': priority,
}
)
@util.export
def getPackageManager(logger=None):
"""Return a tuple with the package manager printable name string, the mini
implementation class and the sink base class, for the preferred package
manager available in the system.
The only parameter accepted by this function is a logger instance, that
can be ommited (or None) if the user don't wants logs.
"""
try:
from otopi import minidnf
minidnf.MiniDNF()
if logger is not None:
logger.debug('Using DNF as package manager')
return 'DNF', minidnf.MiniDNF, minidnf.MiniDNFSinkBase
except (ImportError, RuntimeError):
try:
from otopi import miniyum
# yum does not raises validation exceptions in constructor,
# then its not worth instantiating it to test.
if logger is not None:
logger.debug('Using Yum as package manager')
return 'Yum', miniyum.MiniYum, miniyum.MiniYumSinkBase
except ImportError:
raise RuntimeError(
_(
'No supported package manager found in your system'
)
)
# vim: expandtab tabstop=4 shiftwidth=4
|
[
"gettext.dgettext",
"otopi.minidnf.MiniDNF",
"pwd.getpwnam",
"grp.getgrnam",
"re.compile"
] |
[((292, 348), 'gettext.dgettext', 'gettext.dgettext', ([], {'message': 'm', 'domain': '"""ovirt-engine-setup"""'}), "(message=m, domain='ovirt-engine-setup')\n", (308, 348), False, 'import gettext\n'), ((2749, 2794), 're.compile', 're.compile', ([], {'flags': 're.VERBOSE', 'pattern': 'pattern'}), '(flags=re.VERBOSE, pattern=pattern)\n', (2759, 2794), False, 'import re\n'), ((6118, 6136), 'pwd.getpwnam', 'pwd.getpwnam', (['user'], {}), '(user)\n', (6130, 6136), False, 'import pwd\n'), ((6185, 6204), 'grp.getgrnam', 'grp.getgrnam', (['group'], {}), '(group)\n', (6197, 6204), False, 'import grp\n'), ((7499, 7516), 'otopi.minidnf.MiniDNF', 'minidnf.MiniDNF', ([], {}), '()\n', (7514, 7516), False, 'from otopi import minidnf\n')]
|
"""Support for SUPLA MQTT sensors."""
from datetime import timedelta
import logging
import homeassistant.components.mqtt as hass_mqtt
from homeassistant.core import callback
from homeassistant.helpers.entity import Entity
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(minutes=2)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Perform the setup for SUPLA MQTT status sensor."""
_LOGGER.debug("SUPLA MQTT sensor, async_setup_entry")
config_mqtt_settings = hass.data[DOMAIN][config_entry.entry_id]
mqtt_settings = config_mqtt_settings.data
async_add_entities([SuplaMqttSoftBridge(hass, mqtt_settings)], True)
class SuplaMqttSoftBridge(Entity):
"""Supla Mqtt Soft Bridge representation."""
def __init__(self, hass, mqtt_settings):
"""Sensor initialization."""
self._username = mqtt_settings["username"]
self._qos = 0
self._manufacturer = "SUPLA.ORG"
self._model = "MQTT Bridge"
self._os_version = "v3"
self._supla_in = 0
self._supla_out = 0
self._sub_state = None
@callback
async def hass_message_out(self, msg):
"""Handle new MQTT messages."""
self._supla_out = self._supla_out + 1
@callback
async def supla_message_in(self, msg):
"""Handle new MQTT messages."""
self._supla_in = self._supla_in + 1
async def async_added_to_hass(self):
"""Subscribe to MQTT events."""
await self._subscribe_topics()
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
self._sub_state = await hass_mqtt.subscription.async_subscribe_topics(
self.hass,
self._sub_state,
{
"execute": {
"topic": "supla/+/devices/+/channels/+/execute_action",
"msg_callback": self.hass_message_out,
"qos": self._qos,
},
"set": {
"topic": "supla/+/devices/+/channels/+/set/+",
"msg_callback": self.hass_message_out,
"qos": self._qos,
},
"set": {
"topic": "supla/#",
"msg_callback": self.supla_message_in,
"qos": self._qos,
},
"set": {
"topic": "homeassistant/#",
"msg_callback": self.supla_message_in,
"qos": self._qos,
},
},
)
async def async_will_remove_from_hass(self):
"""Unsubscribe when removed."""
self._sub_state = await hass_mqtt.subscription.async_unsubscribe_topics(
self.hass, self._sub_state
)
@property
def device_info(self):
"""Device info."""
return {
"identifiers": {(DOMAIN, self._username)},
"name": f"MQTT Bridge",
"manufacturer": self._manufacturer,
"model": self._model,
"sw_version": self._os_version,
"via_device": None,
}
@property
def unique_id(self) -> str:
"""Return a unique, friendly identifier for this entity."""
return self._username
@property
def name(self):
"""Return the name of the sensor."""
return f"SUPLA connection status"
@property
def state(self):
"""Return the status of the sensor."""
# connection result codes
return "mqtt bridge connection"
@property
def unit_of_measurement(self) -> str:
"""Return the unit of measurement of this entity."""
return ""
@property
def device_state_attributes(self):
"""Return the attributes of the device."""
return {
"MQTT packets OUT": self._supla_out,
"MQTT packets IN": self._supla_in,
}
@property
def icon(self):
"""Return the icon to use in the frontend."""
return "mdi:bridge"
async def async_update(self):
"""Sensor update."""
pass
|
[
"homeassistant.components.mqtt.subscription.async_subscribe_topics",
"homeassistant.components.mqtt.subscription.async_unsubscribe_topics",
"datetime.timedelta",
"logging.getLogger"
] |
[((261, 288), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (278, 288), False, 'import logging\n'), ((305, 325), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(2)'}), '(minutes=2)\n', (314, 325), False, 'from datetime import timedelta\n'), ((1656, 2182), 'homeassistant.components.mqtt.subscription.async_subscribe_topics', 'hass_mqtt.subscription.async_subscribe_topics', (['self.hass', 'self._sub_state', "{'execute': {'topic': 'supla/+/devices/+/channels/+/execute_action',\n 'msg_callback': self.hass_message_out, 'qos': self._qos}, 'set': {\n 'topic': 'supla/+/devices/+/channels/+/set/+', 'msg_callback': self.\n hass_message_out, 'qos': self._qos}, 'set': {'topic': 'supla/#',\n 'msg_callback': self.supla_message_in, 'qos': self._qos}, 'set': {\n 'topic': 'homeassistant/#', 'msg_callback': self.supla_message_in,\n 'qos': self._qos}}"], {}), "(self.hass, self._sub_state, {\n 'execute': {'topic': 'supla/+/devices/+/channels/+/execute_action',\n 'msg_callback': self.hass_message_out, 'qos': self._qos}, 'set': {\n 'topic': 'supla/+/devices/+/channels/+/set/+', 'msg_callback': self.\n hass_message_out, 'qos': self._qos}, 'set': {'topic': 'supla/#',\n 'msg_callback': self.supla_message_in, 'qos': self._qos}, 'set': {\n 'topic': 'homeassistant/#', 'msg_callback': self.supla_message_in,\n 'qos': self._qos}})\n", (1701, 2182), True, 'import homeassistant.components.mqtt as hass_mqtt\n'), ((2715, 2790), 'homeassistant.components.mqtt.subscription.async_unsubscribe_topics', 'hass_mqtt.subscription.async_unsubscribe_topics', (['self.hass', 'self._sub_state'], {}), '(self.hass, self._sub_state)\n', (2762, 2790), True, 'import homeassistant.components.mqtt as hass_mqtt\n')]
|
import scipy.sparse as sp
import pandas as pd
import numpy as np
import torch
import h5py
def get_adj(num_rows, num_cols, row_idx, col_idx, device):
adj = torch.zeros((num_rows, num_cols), dtype=torch.float32, device=device)
adj[row_idx, col_idx] = 1.
adj = adj / adj.sum(dim=1, keepdim=True)
adj.masked_fill_(torch.isnan(adj), 0)
return adj
def load_matlab_file(path_file, name_field):
db = h5py.File(path_file, 'r')
ds = db[name_field]
try:
if 'ir' in ds.keys():
data = np.asarray(ds['data'])
ir = np.asarray(ds['ir'])
jc = np.asarray(ds['jc'])
out = sp.csc_matrix((data, ir, jc))
except AttributeError:
out = np.asarray(ds).T
db.close()
return out.astype(np.int)
def matrix2data(matrix, rating):
idx = np.argwhere(matrix > 0)
rows = idx[:, 0]
columns = idx[:, 1]
ratings = rating[rows, columns].reshape(-1, 1)
data = np.concatenate([idx, ratings], axis=1)
data = pd.DataFrame(data, columns=('user', 'movie', 'rating'))
return data
|
[
"pandas.DataFrame",
"h5py.File",
"numpy.asarray",
"numpy.argwhere",
"scipy.sparse.csc_matrix",
"torch.zeros",
"torch.isnan",
"numpy.concatenate"
] |
[((162, 231), 'torch.zeros', 'torch.zeros', (['(num_rows, num_cols)'], {'dtype': 'torch.float32', 'device': 'device'}), '((num_rows, num_cols), dtype=torch.float32, device=device)\n', (173, 231), False, 'import torch\n'), ((424, 449), 'h5py.File', 'h5py.File', (['path_file', '"""r"""'], {}), "(path_file, 'r')\n", (433, 449), False, 'import h5py\n'), ((831, 854), 'numpy.argwhere', 'np.argwhere', (['(matrix > 0)'], {}), '(matrix > 0)\n', (842, 854), True, 'import numpy as np\n'), ((964, 1002), 'numpy.concatenate', 'np.concatenate', (['[idx, ratings]'], {'axis': '(1)'}), '([idx, ratings], axis=1)\n', (978, 1002), True, 'import numpy as np\n'), ((1014, 1069), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "('user', 'movie', 'rating')"}), "(data, columns=('user', 'movie', 'rating'))\n", (1026, 1069), True, 'import pandas as pd\n'), ((330, 346), 'torch.isnan', 'torch.isnan', (['adj'], {}), '(adj)\n', (341, 346), False, 'import torch\n'), ((533, 555), 'numpy.asarray', 'np.asarray', (["ds['data']"], {}), "(ds['data'])\n", (543, 555), True, 'import numpy as np\n'), ((573, 593), 'numpy.asarray', 'np.asarray', (["ds['ir']"], {}), "(ds['ir'])\n", (583, 593), True, 'import numpy as np\n'), ((611, 631), 'numpy.asarray', 'np.asarray', (["ds['jc']"], {}), "(ds['jc'])\n", (621, 631), True, 'import numpy as np\n'), ((650, 679), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ir, jc)'], {}), '((data, ir, jc))\n', (663, 679), True, 'import scipy.sparse as sp\n'), ((721, 735), 'numpy.asarray', 'np.asarray', (['ds'], {}), '(ds)\n', (731, 735), True, 'import numpy as np\n')]
|
import numpy as np
from ...dimensions.dim_linear import DimLinear
from ...dimensions.dim_angular import DimAngular
from ...dimensions import DimRadian
from ..cross_sect_base import CrossSectBase, CrossSectToken
__all__ = ['CrossSectParallelogram']
class CrossSectParallelogram(CrossSectBase):
def __init__(self, **kwargs: any) -> None:
'''
Intialization function for Parallelogram class. This function takes in
arguments and saves the information passed to private variable to make
them read-only
Parameters
----------
**kwargs : any
DESCRIPTION. Keyword arguments provided to the initialization function.
The following argument names have to be included in order for the code
to execute: name, dim_l, dim_t, dim_theta, location.
Returns
-------
None
'''
self._create_attr(kwargs)
super()._validate_attr()
self._validate_attr()
@property
def dim_l(self):
return self._dim_l
@property
def dim_t(self):
return self._dim_t
@property
def dim_theta(self):
return self._dim_theta
def draw(self, drawer):
l = self.dim_l # height of the parallelogram
t = self.dim_t # width of the parallelogram
theta = DimRadian(self.dim_theta) # angle of the parallelogram
x = [0, l * np.cos(theta), l * np.cos(theta) + t / np.sin(theta), t / np.sin(theta)]
y = [0, l * np.sin(theta), l * np.sin(theta), 0];
z = np.array([x, y])
coords = np.transpose(z)
points = self.location.transform_coords(coords)
# draw parallelogram
side_1 = drawer.draw_line(points[0], points[1])
side_2 = drawer.draw_line(points[1], points[2])
side_3 = drawer.draw_line(points[2], points[3])
side_4 = drawer.draw_line(points[3], points[0])
x_coord = (l * np.cos(theta) + t / np.sin(theta)) / 2
y_coord = l * np.sin(theta) / 2
ic = np.array([[x_coord, y_coord]])
inner_coord = self.location.transform_coords(ic)
segments = [side_1, side_2, side_3, side_4]
cs_token = CrossSectToken(inner_coord[0], segments)
return cs_token
def _validate_attr(self):
if not isinstance(self._dim_l, DimLinear):
raise TypeError('dim_l is not of DimLinear')
if not isinstance(self._dim_t, DimLinear):
raise TypeError('dim_t is not of DimLinear')
if not isinstance(self._dim_theta, DimAngular):
raise TypeError('dim_theta is not of DimAngular')
|
[
"numpy.transpose",
"numpy.sin",
"numpy.array",
"numpy.cos"
] |
[((1556, 1572), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (1564, 1572), True, 'import numpy as np\n'), ((1591, 1606), 'numpy.transpose', 'np.transpose', (['z'], {}), '(z)\n', (1603, 1606), True, 'import numpy as np\n'), ((2036, 2066), 'numpy.array', 'np.array', (['[[x_coord, y_coord]]'], {}), '([[x_coord, y_coord]])\n', (2044, 2066), True, 'import numpy as np\n'), ((1411, 1424), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1417, 1424), True, 'import numpy as np\n'), ((1469, 1482), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1475, 1482), True, 'import numpy as np\n'), ((1505, 1518), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1511, 1518), True, 'import numpy as np\n'), ((1524, 1537), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1530, 1537), True, 'import numpy as np\n'), ((2004, 2017), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2010, 2017), True, 'import numpy as np\n'), ((1430, 1443), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1436, 1443), True, 'import numpy as np\n'), ((1450, 1463), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1456, 1463), True, 'import numpy as np\n'), ((1943, 1956), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1949, 1956), True, 'import numpy as np\n'), ((1963, 1976), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1969, 1976), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2016 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import unicode_literals
import sys
import os
import wxgigo
from wxgigo.management.cmdparser import CommandParser, CommandError, SystemCheckError
#from wxgigo.utils.version import get_version
class BaseCommand(object):
"""
Base Command
"""
_called_from_command_line = False
# Metadata about this command.
option_list = ()
help = ''
args = ''
def __init__(self):
pass
def get_version(self):
"""
Return the wxgigo version, which should be correct for all
built-in Django commands. User-supplied commands should
override this method.
"""
return wxgigo.get_version()
@property
def use_argparse(self):
return not bool(self.option_list)
def usage(self, subcommand):
"""
Return a brief description of how to use this command, by
default from the attribute ``self.help``.
"""
return 'asdfojasodfi'
def create_parser(self, prog_name, subcommand):
"""
Create and return the ``ArgumentParser`` which will be used to
parse the arguments to this command.
"""
parser = CommandParser(self, prog="%s %s" % (os.path.basename(prog_name), subcommand),
description=self.help or None)
parser.add_argument('--version', action='version', version=self.get_version())
if self.args:
# Keep compatibility and always accept positional arguments, like optparse when args is set
parser.add_argument('args', nargs='*')
self.add_arguments(parser)
return parser
def add_arguments(self, parser):
"""
Entry point for subclassed commands to add custom arguments.
"""
pass
def print_help(self, prog_name, subcommand):
"""
Print the help message for this command, derived from
``self.usage()``.
"""
parser = self.create_parser(prog_name, subcommand)
parser.print_help()
def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command. If the
command raises a ``CommandError``, intercept it and print it sensibly
to stderr. If the ``--traceback`` option is present or the raised
``Exception`` is not ``CommandError``, raise it.
"""
self._called_from_command_line = True
parser = self.create_parser(argv[0], argv[1])
options = parser.parse_args(argv[2:])
cmd_options = vars(options)
# Move positional args out of options to mimic legacy optparse
args = cmd_options.pop('args', ())
try:
self.handle(*args, **cmd_options)
except Exception as e:
if not isinstance(e, CommandError):
raise
self.stderr.write('%s: %s' % (e.__class__.__name__, e))
sys.exit(1)
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement this method.
"""
raise NotImplementedError('subclasses of BaseCommand must provide a handle() method')
|
[
"wxgigo.get_version",
"sys.exit",
"os.path.basename"
] |
[((1762, 1782), 'wxgigo.get_version', 'wxgigo.get_version', ([], {}), '()\n', (1780, 1782), False, 'import wxgigo\n'), ((4055, 4066), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4063, 4066), False, 'import sys\n'), ((2320, 2347), 'os.path.basename', 'os.path.basename', (['prog_name'], {}), '(prog_name)\n', (2336, 2347), False, 'import os\n')]
|
import unittest
from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note
from MuseParse.tests.testLilyMethods.lily import Lily
from MuseParse.classes.ObjectHierarchy.TreeClasses.NoteNode import NoteNode
from MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode import MeasureNode
from MuseParse.classes.ObjectHierarchy.TreeClasses.StaffNode import StaffNode
class MeasureTests(Lily):
def testValue(self):
if hasattr(self, "lilystring"):
if hasattr(self, "item"):
self.assertEqual(self.lilystring, self.item.toLily())
class testMeasure(MeasureTests):
def setUp(self):
self.item = MeasureNode()
self.lilystring = " | "
class testMeasureNote(MeasureTests):
def setUp(self):
self.item = MeasureNode()
note = Note.Note()
note.pitch = Note.Pitch()
self.item.addNote(note)
self.lilystring = "c' | "
self.compile = True
self.wrappers = ["\\new Staff {", "}"]
Lily.setUp(self)
self.name = "measurenote"
class testMeasureChord(MeasureTests):
def setUp(self):
self.item = MeasureNode()
note = Note.Note()
note.pitch = Note.Pitch()
self.item.addNote(note)
note2 = Note.Note(chord=True)
note2.pitch = Note.Pitch()
self.item.addNote(note2, chord=True)
self.lilystring = "<c' c'> | "
self.compile = True
self.wrappers = ["\\new Staff {", "}"]
Lily.setUp(self)
self.name = "measurenotechord"
class testMeasureNoteWithGrace(MeasureTests):
def setUp(self):
self.item = MeasureNode()
note = Note.Note(type="quarter")
note.pitch = Note.Pitch()
grace = Note.GraceNote(first=True)
grace.last = True
note.addNotation(grace)
self.item.addNote(note)
self.item.RunVoiceChecks()
self.lilystring = "\grace { c'4 } | "
self.compile = True
self.wrappers = ["\\new Staff {", "}"]
Lily.setUp(self)
self.name = "measurenotegrace"
class testMeasureTempo(MeasureTests):
def setUp(self):
self.item = MeasureNode()
self.item.addDirection(Directions.Metronome(beat="quarter", min=60))
self.item.addNote(NoteNode())
self.lilystring = " \\tempo 4=60 | "
self.compile = True
self.wrappers = ["\\new Staff {", "}"]
Lily.setUp(self)
self.name = "measuretempo"
class testMeasureTwoDirections(MeasureTests):
def setUp(self):
self.item = MeasureNode()
self.item.addDirection(
Directions.Direction(
text="hello world",
placement="above"))
self.item.addDirection(Directions.Metronome(beat="quarter", min=60))
self.item.addNote(NoteNode())
self.lilystring = " ^\\markup { \"hello world\" } \\tempo 4=60 | "
self.compile = True
self.wrappers = ["\\new Staff {", "}"]
Lily.setUp(self)
self.name = "measuretwodirections"
class testMeasureTwoNotes(MeasureTests):
def setUp(self):
self.item = MeasureNode()
note = Note.Note()
note.pitch = Note.Pitch()
self.item.addNote(note)
note2 = Note.Note()
note2.pitch = Note.Pitch()
self.item.addNote(note2)
self.lilystring = "c' c' | "
self.compile = True
self.wrappers = ["\\new Staff {", "}"]
Lily.setUp(self)
self.name = "measuretwonotes"
class testMeasureOneNoteOneDirection(MeasureTests):
def setUp(self):
self.item = MeasureNode()
note = Note.Note()
note.pitch = Note.Pitch()
self.item.addDirection(
Directions.Direction(
text="hello",
placement="below"))
self.item.addNote(note)
self.lilystring = "c' _\\markup { \"hello\" } | "
self.compile = True
self.wrappers = ["\\new Staff {", "}"]
Lily.setUp(self)
self.name = "measurenotedirection"
class testPartialMeasure(MeasureTests):
def setUp(self):
self.item = MeasureNode()
self.item.partial = True
self.item.meter = Meter.Meter(beats=4, type=4)
note = Note.Note(type="quarter")
note.pitch = Note.Pitch()
self.item.addNote(note)
self.lilystring = "\\time 4/4 \partial 4 c'4 | "
self.compile = True
self.wrappers = ["\\new Staff {", "}"]
Lily.setUp(self)
self.name = "measurePartial"
class testPartialMeasureTwoNotes(Lily):
def setUp(self):
self.item = MeasureNode(partial=True)
self.item.meter = Meter.Meter(type=4, beats=4)
note = Note.Note()
note.SetType("quarter")
note.pitch = Note.Pitch(octave=4)
note2 = Note.Note()
note2.SetType("quarter")
note2.pitch = Note.Pitch(octave=4)
self.item.addNote(note)
self.item.addNote(note2)
Lily.setUp(self)
self.lilystring = "\\time 4/4 \partial 2 c'4 c'4 | "
class testPartialMeasureTwoNotesDifferentTypes(Lily):
def setUp(self):
self.item = MeasureNode(partial=True)
self.item.meter = Meter.Meter(type=4, beats=4)
note = Note.Note()
note.SetType("quarter")
note.pitch = Note.Pitch(octave=4)
note2 = Note.Note()
note2.SetType("half")
note2.pitch = Note.Pitch(octave=4)
self.item.addNote(note)
self.item.addNote(note2)
Lily.setUp(self)
self.lilystring = "\\time 4/4 \partial 2. c'4 c'2 | "
class testPartialMeasureThreeNotesDifferentTypes(Lily):
def setUp(self):
self.item = MeasureNode(partial=True)
self.item.meter = Meter.Meter(type=4, beats=4)
note = Note.Note(type="quarter")
note.pitch = Note.Pitch(octave=4)
note2 = Note.Note(type="half")
note2.pitch = Note.Pitch(octave=4)
note3 = Note.Note(type="eighth")
note3.pitch = Note.Pitch(octave=4)
self.item.addNote(note)
self.item.addNote(note2)
self.item.addNote(note3)
Lily.setUp(self)
self.lilystring = "\\time 4/4 \partial 2.. c'4 c'2 c'8 | "
class testPartialMeasureThreeNotesSameTypes(Lily):
def setUp(self):
self.item = MeasureNode(partial=True)
self.item.meter = Meter.Meter(type=4, beats=4)
note = Note.Note(type="quarter")
note.pitch = Note.Pitch(octave=4)
note2 = Note.Note(type="quarter")
note2.pitch = Note.Pitch(octave=4)
note3 = Note.Note(type="quarter")
note3.pitch = Note.Pitch(octave=4)
self.item.addNote(note)
self.item.addNote(note2)
self.item.addNote(note3)
Lily.setUp(self)
self.lilystring = "\\time 4/4 \partial 2. c'4 c'4 c'4 | "
class testMeasureOrder(Lily):
def setUp(self):
self.item = StaffNode()
measure1 = MeasureNode()
self.item.AddChild(measure1, index=1)
measure2 = MeasureNode()
measure3 = MeasureNode()
self.item.AddChild(measure2, index="X1")
self.item.AddChild(measure3, index=2)
self.lilystring = " % measure 1\n | \n\n % measure X1\n | \n\n % measure 2\n | \n\n"
class testMeasureTranspositionCalc(unittest.TestCase):
def setUp(self):
self.item = MeasureNode()
def testCalcUpWithChromatic(self):
self.item.transpose = BarlinesAndMarkers.Transposition(chromatic=2)
expected = "\\transpose c' d' {"
self.assertEqual(self.item.CalculateTransposition(), expected)
def testCalcUpWithDiatonic(self):
self.item.transpose = BarlinesAndMarkers.Transposition(diatonic=1)
expected = "\\transpose c' d' {"
self.assertEqual(self.item.CalculateTransposition(), expected)
def testCalcOctaveShift(self):
self.item.transpose = BarlinesAndMarkers.Transposition(octave=1)
expected = "\\transpose c' c'' {"
self.assertEqual(self.item.CalculateTransposition(), expected)
class testMeasureNoteWithShifter(Lily):
def setUp(self):
self.item = MeasureNode()
node = NoteNode()
node.GetItem().pitch = Note.Pitch(octave=4)
self.item.addNote(node)
dirnode = Directions.OctaveShift(amount=8, type="up")
self.item.addDirection(dirnode)
node2 = NoteNode()
node2.GetItem().pitch = Note.Pitch(octave=4)
self.item.addNote(node2)
Lily.setUp(self)
self.compile = True
self.wrappers = ["\\new Staff{a8 ", "c'8]}"]
self.lilystring = "c' \n\\ottava #-1\n c' | "
self.name = "noteOctaveShift"
class testShiftBeforeNote(unittest.TestCase):
def setUp(self):
self.item = MeasureNode()
dirnode = Directions.OctaveShift(amount=8, type="up")
self.item.addDirection(dirnode)
self.node = NoteNode()
self.node.GetItem().pitch = Note.Pitch(octave=2)
self.item.addNote(self.node)
def testLilystring(self):
value = "\n\\ottava #-1\n c, | "
self.assertEqual(value, self.item.toLily())
class testGraceAtStartOfMeasure(unittest.TestCase):
def setUp(self):
self.item = MeasureNode()
node = NoteNode()
self.note = Note.Note(type="quarter")
self.note.addNotation(Note.GraceNote())
self.note.pitch = Note.Pitch()
node.SetItem(self.note)
self.item.addNote(node)
self.item.RunVoiceChecks()
def testIsFirstGraceNote(self):
result = self.note.Search(Note.GraceNote)
self.assertTrue(result.first)
def testLilystring(self):
value = "\grace { c'4 } | "
self.assertEqual(value, self.item.toLily())
class testTwoVoicesMeasureNotePosition(Lily):
def setUp(self):
self.item = MeasureNode()
node = Note.Note(type="quarter")
node.pitch = Note.Pitch(octave=4)
self.item.addNote(node, voice=1)
self.item.addNote(node, voice=1)
self.item.Backup(1)
node2 = Note.Note(type="quarter")
node2.pitch = Note.Pitch(octave=4)
self.item.addNote(node2, voice=2)
Lily.setUp(self)
self.compile = True
self.wrappers = ["\\new Staff{a8 ", "c'8]}"]
self.lilystring = "<< % voice 1\n\\new Voice = \"one\"\n{\\voiceOne c'4 c'4 } % voice 2\n\\new Voice = \"two\"\n{\\voiceTwo r4 c'4 }>> | "
self.name = "noteOctaveShift"
def tearDown(self):
self.item = None
|
[
"MuseParse.classes.ObjectHierarchy.ItemClasses.Note.GraceNote",
"MuseParse.classes.ObjectHierarchy.ItemClasses.Meter.Meter",
"MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Pitch",
"MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Note",
"MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode.MeasureNode",
"MuseParse.classes.ObjectHierarchy.ItemClasses.Directions.Metronome",
"MuseParse.classes.ObjectHierarchy.ItemClasses.Directions.OctaveShift",
"MuseParse.classes.ObjectHierarchy.ItemClasses.BarlinesAndMarkers.Transposition",
"MuseParse.tests.testLilyMethods.lily.Lily.setUp",
"MuseParse.classes.ObjectHierarchy.TreeClasses.NoteNode.NoteNode",
"MuseParse.classes.ObjectHierarchy.ItemClasses.Directions.Direction",
"MuseParse.classes.ObjectHierarchy.TreeClasses.StaffNode.StaffNode"
] |
[((688, 701), 'MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode.MeasureNode', 'MeasureNode', ([], {}), '()\n', (699, 701), False, 'from MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode import MeasureNode\n'), ((815, 828), 'MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode.MeasureNode', 'MeasureNode', ([], {}), '()\n', (826, 828), False, 'from MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode import MeasureNode\n'), ((844, 855), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Note', 'Note.Note', ([], {}), '()\n', (853, 855), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((877, 889), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Pitch', 'Note.Pitch', ([], {}), '()\n', (887, 889), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((1040, 1056), 'MuseParse.tests.testLilyMethods.lily.Lily.setUp', 'Lily.setUp', (['self'], {}), '(self)\n', (1050, 1056), False, 'from MuseParse.tests.testLilyMethods.lily import Lily\n'), ((1173, 1186), 'MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode.MeasureNode', 'MeasureNode', ([], {}), '()\n', (1184, 1186), False, 'from MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode import MeasureNode\n'), ((1202, 1213), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Note', 'Note.Note', ([], {}), '()\n', (1211, 1213), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((1235, 1247), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Pitch', 'Note.Pitch', ([], {}), '()\n', (1245, 1247), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((1296, 1317), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Note', 'Note.Note', ([], {'chord': '(True)'}), '(chord=True)\n', (1305, 1317), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((1340, 1352), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Pitch', 'Note.Pitch', ([], {}), '()\n', (1350, 1352), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((1521, 1537), 'MuseParse.tests.testLilyMethods.lily.Lily.setUp', 'Lily.setUp', (['self'], {}), '(self)\n', (1531, 1537), False, 'from MuseParse.tests.testLilyMethods.lily import Lily\n'), ((1667, 1680), 'MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode.MeasureNode', 'MeasureNode', ([], {}), '()\n', (1678, 1680), False, 'from MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode import MeasureNode\n'), ((1696, 1721), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Note', 'Note.Note', ([], {'type': '"""quarter"""'}), "(type='quarter')\n", (1705, 1721), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((1743, 1755), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Pitch', 'Note.Pitch', ([], {}), '()\n', (1753, 1755), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((1772, 1798), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.GraceNote', 'Note.GraceNote', ([], {'first': '(True)'}), '(first=True)\n', (1786, 1798), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((2054, 2070), 'MuseParse.tests.testLilyMethods.lily.Lily.setUp', 'Lily.setUp', (['self'], {}), '(self)\n', (2064, 2070), False, 'from MuseParse.tests.testLilyMethods.lily import Lily\n'), ((2192, 2205), 'MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode.MeasureNode', 'MeasureNode', ([], {}), '()\n', (2203, 2205), False, 'from MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode import MeasureNode\n'), ((2450, 2466), 'MuseParse.tests.testLilyMethods.lily.Lily.setUp', 'Lily.setUp', (['self'], {}), '(self)\n', (2460, 2466), False, 'from MuseParse.tests.testLilyMethods.lily import Lily\n'), ((2592, 2605), 'MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode.MeasureNode', 'MeasureNode', ([], {}), '()\n', (2603, 2605), False, 'from MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode import MeasureNode\n'), ((3019, 3035), 'MuseParse.tests.testLilyMethods.lily.Lily.setUp', 'Lily.setUp', (['self'], {}), '(self)\n', (3029, 3035), False, 'from MuseParse.tests.testLilyMethods.lily import Lily\n'), ((3164, 3177), 'MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode.MeasureNode', 'MeasureNode', ([], {}), '()\n', (3175, 3177), False, 'from MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode import MeasureNode\n'), ((3193, 3204), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Note', 'Note.Note', ([], {}), '()\n', (3202, 3204), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((3226, 3238), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Pitch', 'Note.Pitch', ([], {}), '()\n', (3236, 3238), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((3287, 3298), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Note', 'Note.Note', ([], {}), '()\n', (3296, 3298), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((3321, 3333), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Pitch', 'Note.Pitch', ([], {}), '()\n', (3331, 3333), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((3488, 3504), 'MuseParse.tests.testLilyMethods.lily.Lily.setUp', 'Lily.setUp', (['self'], {}), '(self)\n', (3498, 3504), False, 'from MuseParse.tests.testLilyMethods.lily import Lily\n'), ((3639, 3652), 'MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode.MeasureNode', 'MeasureNode', ([], {}), '()\n', (3650, 3652), False, 'from MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode import MeasureNode\n'), ((3668, 3679), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Note', 'Note.Note', ([], {}), '()\n', (3677, 3679), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((3701, 3713), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Pitch', 'Note.Pitch', ([], {}), '()\n', (3711, 3713), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((4021, 4037), 'MuseParse.tests.testLilyMethods.lily.Lily.setUp', 'Lily.setUp', (['self'], {}), '(self)\n', (4031, 4037), False, 'from MuseParse.tests.testLilyMethods.lily import Lily\n'), ((4165, 4178), 'MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode.MeasureNode', 'MeasureNode', ([], {}), '()\n', (4176, 4178), False, 'from MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode import MeasureNode\n'), ((4238, 4266), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Meter.Meter', 'Meter.Meter', ([], {'beats': '(4)', 'type': '(4)'}), '(beats=4, type=4)\n', (4249, 4266), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((4282, 4307), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Note', 'Note.Note', ([], {'type': '"""quarter"""'}), "(type='quarter')\n", (4291, 4307), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((4329, 4341), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Pitch', 'Note.Pitch', ([], {}), '()\n', (4339, 4341), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((4515, 4531), 'MuseParse.tests.testLilyMethods.lily.Lily.setUp', 'Lily.setUp', (['self'], {}), '(self)\n', (4525, 4531), False, 'from MuseParse.tests.testLilyMethods.lily import Lily\n'), ((4653, 4678), 'MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode.MeasureNode', 'MeasureNode', ([], {'partial': '(True)'}), '(partial=True)\n', (4664, 4678), False, 'from MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode import MeasureNode\n'), ((4705, 4733), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Meter.Meter', 'Meter.Meter', ([], {'type': '(4)', 'beats': '(4)'}), '(type=4, beats=4)\n', (4716, 4733), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((4749, 4760), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Note', 'Note.Note', ([], {}), '()\n', (4758, 4760), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((4814, 4834), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Pitch', 'Note.Pitch', ([], {'octave': '(4)'}), '(octave=4)\n', (4824, 4834), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((4851, 4862), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Note', 'Note.Note', ([], {}), '()\n', (4860, 4862), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((4918, 4938), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Pitch', 'Note.Pitch', ([], {'octave': '(4)'}), '(octave=4)\n', (4928, 4938), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((5012, 5028), 'MuseParse.tests.testLilyMethods.lily.Lily.setUp', 'Lily.setUp', (['self'], {}), '(self)\n', (5022, 5028), False, 'from MuseParse.tests.testLilyMethods.lily import Lily\n'), ((5189, 5214), 'MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode.MeasureNode', 'MeasureNode', ([], {'partial': '(True)'}), '(partial=True)\n', (5200, 5214), False, 'from MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode import MeasureNode\n'), ((5241, 5269), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Meter.Meter', 'Meter.Meter', ([], {'type': '(4)', 'beats': '(4)'}), '(type=4, beats=4)\n', (5252, 5269), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((5285, 5296), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Note', 'Note.Note', ([], {}), '()\n', (5294, 5296), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((5350, 5370), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Pitch', 'Note.Pitch', ([], {'octave': '(4)'}), '(octave=4)\n', (5360, 5370), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((5387, 5398), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Note', 'Note.Note', ([], {}), '()\n', (5396, 5398), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((5451, 5471), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Pitch', 'Note.Pitch', ([], {'octave': '(4)'}), '(octave=4)\n', (5461, 5471), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((5545, 5561), 'MuseParse.tests.testLilyMethods.lily.Lily.setUp', 'Lily.setUp', (['self'], {}), '(self)\n', (5555, 5561), False, 'from MuseParse.tests.testLilyMethods.lily import Lily\n'), ((5725, 5750), 'MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode.MeasureNode', 'MeasureNode', ([], {'partial': '(True)'}), '(partial=True)\n', (5736, 5750), False, 'from MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode import MeasureNode\n'), ((5777, 5805), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Meter.Meter', 'Meter.Meter', ([], {'type': '(4)', 'beats': '(4)'}), '(type=4, beats=4)\n', (5788, 5805), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((5821, 5846), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Note', 'Note.Note', ([], {'type': '"""quarter"""'}), "(type='quarter')\n", (5830, 5846), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((5868, 5888), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Pitch', 'Note.Pitch', ([], {'octave': '(4)'}), '(octave=4)\n', (5878, 5888), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((5905, 5927), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Note', 'Note.Note', ([], {'type': '"""half"""'}), "(type='half')\n", (5914, 5927), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((5950, 5970), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Pitch', 'Note.Pitch', ([], {'octave': '(4)'}), '(octave=4)\n', (5960, 5970), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((5987, 6011), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Note', 'Note.Note', ([], {'type': '"""eighth"""'}), "(type='eighth')\n", (5996, 6011), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((6034, 6054), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Pitch', 'Note.Pitch', ([], {'octave': '(4)'}), '(octave=4)\n', (6044, 6054), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((6161, 6177), 'MuseParse.tests.testLilyMethods.lily.Lily.setUp', 'Lily.setUp', (['self'], {}), '(self)\n', (6171, 6177), False, 'from MuseParse.tests.testLilyMethods.lily import Lily\n'), ((6341, 6366), 'MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode.MeasureNode', 'MeasureNode', ([], {'partial': '(True)'}), '(partial=True)\n', (6352, 6366), False, 'from MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode import MeasureNode\n'), ((6393, 6421), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Meter.Meter', 'Meter.Meter', ([], {'type': '(4)', 'beats': '(4)'}), '(type=4, beats=4)\n', (6404, 6421), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((6437, 6462), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Note', 'Note.Note', ([], {'type': '"""quarter"""'}), "(type='quarter')\n", (6446, 6462), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((6484, 6504), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Pitch', 'Note.Pitch', ([], {'octave': '(4)'}), '(octave=4)\n', (6494, 6504), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((6521, 6546), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Note', 'Note.Note', ([], {'type': '"""quarter"""'}), "(type='quarter')\n", (6530, 6546), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((6569, 6589), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Pitch', 'Note.Pitch', ([], {'octave': '(4)'}), '(octave=4)\n', (6579, 6589), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((6606, 6631), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Note', 'Note.Note', ([], {'type': '"""quarter"""'}), "(type='quarter')\n", (6615, 6631), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((6654, 6674), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Pitch', 'Note.Pitch', ([], {'octave': '(4)'}), '(octave=4)\n', (6664, 6674), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((6781, 6797), 'MuseParse.tests.testLilyMethods.lily.Lily.setUp', 'Lily.setUp', (['self'], {}), '(self)\n', (6791, 6797), False, 'from MuseParse.tests.testLilyMethods.lily import Lily\n'), ((6939, 6950), 'MuseParse.classes.ObjectHierarchy.TreeClasses.StaffNode.StaffNode', 'StaffNode', ([], {}), '()\n', (6948, 6950), False, 'from MuseParse.classes.ObjectHierarchy.TreeClasses.StaffNode import StaffNode\n'), ((6970, 6983), 'MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode.MeasureNode', 'MeasureNode', ([], {}), '()\n', (6981, 6983), False, 'from MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode import MeasureNode\n'), ((7049, 7062), 'MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode.MeasureNode', 'MeasureNode', ([], {}), '()\n', (7060, 7062), False, 'from MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode import MeasureNode\n'), ((7082, 7095), 'MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode.MeasureNode', 'MeasureNode', ([], {}), '()\n', (7093, 7095), False, 'from MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode import MeasureNode\n'), ((7383, 7396), 'MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode.MeasureNode', 'MeasureNode', ([], {}), '()\n', (7394, 7396), False, 'from MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode import MeasureNode\n'), ((7467, 7512), 'MuseParse.classes.ObjectHierarchy.ItemClasses.BarlinesAndMarkers.Transposition', 'BarlinesAndMarkers.Transposition', ([], {'chromatic': '(2)'}), '(chromatic=2)\n', (7499, 7512), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((7694, 7738), 'MuseParse.classes.ObjectHierarchy.ItemClasses.BarlinesAndMarkers.Transposition', 'BarlinesAndMarkers.Transposition', ([], {'diatonic': '(1)'}), '(diatonic=1)\n', (7726, 7738), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((7917, 7959), 'MuseParse.classes.ObjectHierarchy.ItemClasses.BarlinesAndMarkers.Transposition', 'BarlinesAndMarkers.Transposition', ([], {'octave': '(1)'}), '(octave=1)\n', (7949, 7959), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((8157, 8170), 'MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode.MeasureNode', 'MeasureNode', ([], {}), '()\n', (8168, 8170), False, 'from MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode import MeasureNode\n'), ((8186, 8196), 'MuseParse.classes.ObjectHierarchy.TreeClasses.NoteNode.NoteNode', 'NoteNode', ([], {}), '()\n', (8194, 8196), False, 'from MuseParse.classes.ObjectHierarchy.TreeClasses.NoteNode import NoteNode\n'), ((8228, 8248), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Pitch', 'Note.Pitch', ([], {'octave': '(4)'}), '(octave=4)\n', (8238, 8248), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((8299, 8342), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Directions.OctaveShift', 'Directions.OctaveShift', ([], {'amount': '(8)', 'type': '"""up"""'}), "(amount=8, type='up')\n", (8321, 8342), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((8399, 8409), 'MuseParse.classes.ObjectHierarchy.TreeClasses.NoteNode.NoteNode', 'NoteNode', ([], {}), '()\n', (8407, 8409), False, 'from MuseParse.classes.ObjectHierarchy.TreeClasses.NoteNode import NoteNode\n'), ((8442, 8462), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Pitch', 'Note.Pitch', ([], {'octave': '(4)'}), '(octave=4)\n', (8452, 8462), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((8504, 8520), 'MuseParse.tests.testLilyMethods.lily.Lily.setUp', 'Lily.setUp', (['self'], {}), '(self)\n', (8514, 8520), False, 'from MuseParse.tests.testLilyMethods.lily import Lily\n'), ((8785, 8798), 'MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode.MeasureNode', 'MeasureNode', ([], {}), '()\n', (8796, 8798), False, 'from MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode import MeasureNode\n'), ((8817, 8860), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Directions.OctaveShift', 'Directions.OctaveShift', ([], {'amount': '(8)', 'type': '"""up"""'}), "(amount=8, type='up')\n", (8839, 8860), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((8921, 8931), 'MuseParse.classes.ObjectHierarchy.TreeClasses.NoteNode.NoteNode', 'NoteNode', ([], {}), '()\n', (8929, 8931), False, 'from MuseParse.classes.ObjectHierarchy.TreeClasses.NoteNode import NoteNode\n'), ((8968, 8988), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Pitch', 'Note.Pitch', ([], {'octave': '(2)'}), '(octave=2)\n', (8978, 8988), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((9247, 9260), 'MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode.MeasureNode', 'MeasureNode', ([], {}), '()\n', (9258, 9260), False, 'from MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode import MeasureNode\n'), ((9276, 9286), 'MuseParse.classes.ObjectHierarchy.TreeClasses.NoteNode.NoteNode', 'NoteNode', ([], {}), '()\n', (9284, 9286), False, 'from MuseParse.classes.ObjectHierarchy.TreeClasses.NoteNode import NoteNode\n'), ((9307, 9332), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Note', 'Note.Note', ([], {'type': '"""quarter"""'}), "(type='quarter')\n", (9316, 9332), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((9407, 9419), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Pitch', 'Note.Pitch', ([], {}), '()\n', (9417, 9419), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((9854, 9867), 'MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode.MeasureNode', 'MeasureNode', ([], {}), '()\n', (9865, 9867), False, 'from MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode import MeasureNode\n'), ((9883, 9908), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Note', 'Note.Note', ([], {'type': '"""quarter"""'}), "(type='quarter')\n", (9892, 9908), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((9930, 9950), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Pitch', 'Note.Pitch', ([], {'octave': '(4)'}), '(octave=4)\n', (9940, 9950), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((10077, 10102), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Note', 'Note.Note', ([], {'type': '"""quarter"""'}), "(type='quarter')\n", (10086, 10102), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((10125, 10145), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.Pitch', 'Note.Pitch', ([], {'octave': '(4)'}), '(octave=4)\n', (10135, 10145), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((10196, 10212), 'MuseParse.tests.testLilyMethods.lily.Lily.setUp', 'Lily.setUp', (['self'], {}), '(self)\n', (10206, 10212), False, 'from MuseParse.tests.testLilyMethods.lily import Lily\n'), ((2237, 2281), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Directions.Metronome', 'Directions.Metronome', ([], {'beat': '"""quarter"""', 'min': '(60)'}), "(beat='quarter', min=60)\n", (2257, 2281), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((2309, 2319), 'MuseParse.classes.ObjectHierarchy.TreeClasses.NoteNode.NoteNode', 'NoteNode', ([], {}), '()\n', (2317, 2319), False, 'from MuseParse.classes.ObjectHierarchy.TreeClasses.NoteNode import NoteNode\n'), ((2650, 2709), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Directions.Direction', 'Directions.Direction', ([], {'text': '"""hello world"""', 'placement': '"""above"""'}), "(text='hello world', placement='above')\n", (2670, 2709), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((2775, 2819), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Directions.Metronome', 'Directions.Metronome', ([], {'beat': '"""quarter"""', 'min': '(60)'}), "(beat='quarter', min=60)\n", (2795, 2819), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((2847, 2857), 'MuseParse.classes.ObjectHierarchy.TreeClasses.NoteNode.NoteNode', 'NoteNode', ([], {}), '()\n', (2855, 2857), False, 'from MuseParse.classes.ObjectHierarchy.TreeClasses.NoteNode import NoteNode\n'), ((3758, 3811), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Directions.Direction', 'Directions.Direction', ([], {'text': '"""hello"""', 'placement': '"""below"""'}), "(text='hello', placement='below')\n", (3778, 3811), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n'), ((9363, 9379), 'MuseParse.classes.ObjectHierarchy.ItemClasses.Note.GraceNote', 'Note.GraceNote', ([], {}), '()\n', (9377, 9379), False, 'from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions, BarlinesAndMarkers, Meter, Note\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 22 17:00:38 2020
@author: <NAME>
production rules for Colada
Output of parsers will generally be an Etok.
Parser rules ending in _ produce a list of Etoks rather than one.
Inner functions f(acc) are treatments.
Inner functions f(item)->item are item transformers.
The parsers are generally implemented as function calls
rather than values. This gives uniform style and helps
to prevent infinite recursive expansion of parsers.
DEBUG. Not implemented: namespace, record, this_exists,
DEBUG. Currently inconsistent in when LexToken is converted to Etok. Duplicated effort
It is always performed by the Etok constructor.
It is always performed by Etok.rawupdate.
We may need it earlier when we apply v.update(), which raises an error on LexToken
"""
#import copy
#import msg
#import traceback
from exception import (ParseError, ParseNoCatch, ErrorItem)
import lib, word_lists
import tokenlib
import lexer
from tokenlib import Etok
#from ply.lex import LexToken #import ply.lex.LexToken as LexToken
import parser_combinator as c
from parser_combinator import (Parse,
next_word, next_any_word, next_value,
first_word,
first_phrase,
next_phrase,
pstream)
import sample
def first(*args):
return Parse.first(args)
def memo(f):
m = {}
def wrapper():
if f not in m:
m[f]= f()
return m[f]
return wrapper # f to run tests.
#def strip_delim_deprecated(acc):
# """treatment to remove outer delimiters
# Deprecated, because we assume inner material is an Etok.
# """
# (_,b,_) = acc
# return Etok.rawupdate(b,acc)
lookup_parse = {} # dictionary of parses, used for forward refs
def add_lookup_parse(nonterminal,value):
"""Add a new production rule to the lookup_parse dictionary.
The key is the nonterminal string.
The value is the parser associated with it.
value.nonterminal should
"""
v = value.name(nonterminal)
if nonterminal in lookup_parse:
lookup_parse[nonterminal].append(v)
else:
lookup_parse[nonterminal]= [v]
#get_lookup_parse_history = {} #for debugging.
#get_lookup_parse_history is here to keep tract of
# parsers that have been called but not implemented.
# Keys are all none. It functions as a set.
def get_lookup_parse(nonterminal):
"""The grammar is highly mutually recursive.
To make the implementation simpler, some of the recursion
has been relegated to a dictionary of parses: lookup_parse,
with keys given as nonterminal strings.
The parsers should return an Etok.
As new production rules are implemented, they are
added to the dictionary. The key is the nonterminal.
This function looks up the production rules
for a given key and forms them
into a parser according to the first successful rule.
There is a last resort production rule of letting each nonterminal
string represent a parser that parses the nonterminal as a literal
word string. This last resort rule might give strange behavior but it
is probably quite harmless. The last resort helps with debugging.
# DEBUG. Should not this fail, requiring a string in caps?
>>> pstream(get_lookup_parse('hello'),'hello and')
Etok(WORD,hello,'hello')
>>> pstream(get_lookup_parse('STATEMENT'),'[STATEMENT x > y]')
Etok(STATEMENT,backdoor2,'[ STATEMENT x > y ]')
"""
def f(acc):
return Etok.etok(acc).update({'name':nonterminal,'rule':'backdoor1'})
def f2(acc):
(_,(_,b),_)=acc
return Etok(name=nonterminal,etoks=b,raw=acc,rule='backdoor2')
backdoor1=Parse.next_token().if_rawvalue(nonterminal.upper()).treat(f,nonterminal)
backdoor2= c.bracket(Parse.next_token().if_rawvalue(nonterminal.upper()) + c.balanced()).treat(f2,nonterminal)
ps = [backdoor1,backdoor2] +lookup_parse.get(nonterminal,[])
return Parse.first(ps).name(nonterminal,production='lookup')
def build_word_net(phrase_list):
"""build a word net (discrimination net) from a list of phrases.
No normalization of words is performed, except case.
End of phrase is marked with {'':{}}. Eventually, we stop at key ''.
>>> build_word_net(['this and','this or','that and','this or that'])
{'this': {'and': {'': {}}, 'or': {'': {}, 'that': {'': {}}}}, 'that': {'and': {'': {}}}}
"""
def to_dict(ls):
if not(ls):
return {}
return {ls[0] : to_dict(ls[1:])}
def one_dict(phrase):
od = to_dict(phrase.lower().split()+[''])
return od
def add_dict(d1,od):
if not(od):
return d1
key = [*od][0] #first key
if key in d1:
# N.B. avoid equality to preserve scoping
d1.__setitem__(key,add_dict(d1[key],od[key]))
else:
d1.setdefault(key,od[key])
return d1
acc = {}
for phrase in phrase_list:
acc = add_dict(acc,one_dict(phrase))
return acc
def next_word_net(wn):
"""construct a parser for a word net.
Take the longest match.
>>> pstream(next_word_net(build_word_net(['aa bb cc','bb cc','aa bb cc dd'])),'aa bb cc dd ee')
[LexToken(WORD,'aa',1,0), LexToken(WORD,'bb',1,3), LexToken(WORD,'cc',1,6), LexToken(WORD,'dd',1,9)]
"""
def f(item):
try:
item1 = next_any_word().process(item)
if not(item1.acc.value in wn):
raise ParseError(ErrorItem(item,'next_word_net'))
except (StopIteration, ParseError) as pe:
if '' in wn:
return tokenlib.update([],item)
raise pe
acc1 = item1.acc
wn1 = wn[acc1.value]
item2 = next_word_net(wn1).process(tokenlib.update(None,item1))
return tokenlib.update([acc1]+item2.acc,item2)
return Parse(f,'next_word_net',sample=sample.word_net(wn))
#print(build_word_net(word_lists.transition))
def phrase_list_transition():
"""parser for transition phrases
>>> pstream(phrase_list_transition(),'therefore')
([LexToken(WORD,'therefore',1,0)], None)
"""
return (next_word_net(build_word_net(word_lists.transition)) + next_word('that').possibly())
def phrase_list_filler():
"""parser for filler words.
Examples:
'We know that'
'We see'
'See that'
"""
return (next_word('we').possibly() + first_word('put write have know see') +
next_word('that').possibly())
# case sensitive words
rawtype = next_any_word().if_rawvalue('Type')
rawsort = next_any_word().if_rawvalue('Sort')
rawprop = next_any_word().if_rawvalue('Prop')
rawtrue = next_any_word().if_rawvalue('True')
rawfalse = next_any_word().if_rawvalue('False')
period = next_value('.')
comma = next_value(',')
semicolon = next_value(';')
# :as means to 'coerce as'
colon_as = next_value(':')+next_word('as').possibly()
colon = next_value(':')
lit_dict = {
'a' : first_word('a an'), #indefinite
'article' : first_word('a an the'),
'assume': first_word('assume suppose'),
'axiom': first_word('axiom conjecture hypothesis equation formula'),
'choose': first_word('take choose pick'),
'contradiction' : first_word('contradiction contrary'),
'declare_mutual_inductive_decl': next_phrase('mutual inductive'),
'declare_mutual_inductive_def': next_phrase('mutual inductive def'),
'def': first_word('def definition'),
'defined_as' : first_phrase(['said to be','defined as','defined to be']),
'denote': first_phrase(['denote','stand for']),
'do': first_word('do does'),
'done': first_word('done quit'),
'equal': next_phrase('equal to'),
'exist': (next_word('there').possibly() + next_word('exist')).treat(lib.snd,'lit_exist'),
'false': first_word('off false no'),
'fix': first_word('fix let'),
'forall': (next_word('forall') | next_phrase('for all')),
'has': first_word('has have had'),
'iff': (first_phrase(['iff','if and only if']) |
(first_phrase(['is','are','be','to be']) + next_word('the').possibly() + next_word('predicate'))),
'is' : first_phrase(['is','are','be','to be']),
'lets': first_phrase(['let us','let','we can','we']),
'param': next_phrase('with parameter'),
'prove': first_word('prove show'),
'qed': first_word('end qed obvious literal'),
'satisfy' : first_phrase(['satisfy','give rise to','determine']),
'say': first_word('say write'),
'then': first_word('then therefore hence'),
'theorem': first_word('proposition theorem lemma corollary'),
'true': first_word('on true yes'),
'we-record': next_phrase('we record'),
'we-say': (next_word('we').possibly() +
first_word('say write') +
next_word('that').possibly()
),
'with': first_word('with of having'),
'with_property': next_phrase('with property'),
'wrong': next_phrase('it is wrong that'),
}
def lit(s):
"""parser generator for 's'-like words or phrases
canned phrases that have small variants
lit[w] gives parser for w-like words or phrases
Output Etok(name='lit', rule=s, value=None)
>>> pstream(lit('qed'),'obvious')
Etok(LIT,qed,'obvious')
"""
def f(acc):
return Etok('LIT',[],[acc],s)
if s =='record':
return (Parse.word('we').possibly() +
first_word('record register') +
Parse.word('identification').possibly() +
Parse.word('that').possibly()).treat(f,'that')
else:
return lit_dict[s].treat(f,s)
def read_keyword(s): #was lit_read
"""parser generator for s-like word.
Must be a single word.
Output is an etok with name = s, rule = response,
>>> pstream(read_keyword('assoc'),'right')
Etok(ASSOC,right,'right')
"""
def f(acc):
return Etok(name=s.upper(),etoks=[],raw=acc,rule=acc.value)
local_lit_dict = {
'sort': (rawtype | rawsort),
'assoc': first_word('left right no'),
'field_key': first_word('coercion notationless notation parameter type call'), #renamed map -> call
'document': first_word('document article section subsection subsubsection subdivision division'),
'end_document': first_word('endsection endsubsection endsubsubsection enddivision endsubdivision')
}
if s == 'doc':
return (local_lit_dict['document'] | local_lit_dict['end_document']).treat(f,'doc')
if s == 'location':
return Parse.first([local_lit_dict['document'],lit_dict['theorem'],lit_dict['axiom']]).treat(f,'location')
return local_lit_dict[s].treat(f,s)
def lit_any():
def f(acc):
return Etok(name='any',etoks=acc,raw=acc)
return (first_phrase(['each and every','some and every']) | first_word('every each all any some no')).treat(f,'any')
def cs_brace(cs_parse):
"""control sequence parser including arguments in braces.
Etok cs_parse is used to parse cs and
Etok expr to parse each braced arg.
Output: Etok(name='cs_brace') etoks=(cs_brace,braces).
>>> pstream(cs_brace(next_any_word()),'cs {TERM} {TERM} c')
Etok(cs_brace,word,'cs { TERM } { TERM }')
"""
def f(acc):
(cs,bs)=acc
bs = [b for (_,b,_) in bs]
return Etok(name='cs_brace',etoks=(cs,bs),raw=acc,rule=cs_parse.nonterminal)
return (cs_parse + c.brace(expr()).many()).treat(f,'cs_brace')
# case_sensitive_word -> use next_value(s)
@memo
def atomic():
"""parser for atomic identifiers,
converting words and integers as needed
Atomic identifiers cannot be a single letter (a short var)
wordlike atomic identifiers are modulo case-sing-syn.
but hierarchical identifiers are always case sensitive.
Integers are included for section numbers in labels.
output Etok
>>> pstream(atomic(),'HELLO')
Etok(ATOMIC,HELLO,'HELLO')
>>> pstream(atomic(),'the')
Etok(ATOMIC,the,'the')
"""
def f(acc):
if acc.type == 'WORD':
rule = c.synonymize(acc.value)
else:
rule = acc.value
return Etok(name='ATOMIC',etoks=[],raw=[acc],rule=rule)
return Parse.next_token().if_types(['INTEGER','WORD','ATOMIC_IDENTIFIER']).name('atomic').treat(f,'atomic')
@memo
def label():
return atomic()
# no memo, parameter
def primitive(primitive_nonterminal):
def f(item):
if not(primitive_nonterminal in word_lists.prim_list):
raise(ParseNoCatch(ErrorItem(item,primitive_nonterminal,'undeclared primitive')))
return get_lookup_parse(primitive_nonterminal).process(item)
return Parse(f,primitive_nonterminal,'!')
@memo
def section_label():
"""Section label.
Output Etok.etoks = [section,label?]
>>> pstream(section_label(),'Section 3.')
Etok(section_label,'section 3 .')
"""
def f(acc):
(e,_) = acc
return Etok(name='section_label',etoks=e,raw=acc)
def section_tag():
return (read_keyword('doc'))
return (section_tag() + label().possibly() + period).name('section_label').treat(f,'section_label')
class Instruction:
"""Construct a parser that creates an Etok for a given instruction.
There are misc data types: synonym, string, bool, int.
dictionary keys:
name : 'instruction'
rule : instruction keyword
misc : None, synlist, str, bool, int depending on the type
production : 'instruction'
rawvalue : input tokens,
keyword : string indicating instruction,
value : None, synlist, str, bool, int depending on the type.
"""
def _param_misc(tok):
if not(tok):
return None
if tok.type == 'INTEGER':
return int(tok.value)
if tok.value.lower() in ['yes','true','on']:
return True
if tok.value.lower() in ['no','false','off']:
return False
return tok.value
def _expand_slashdash(vs):
"""expanding synonyms
e.g. word/-ing is short for word/wording
>>> Instruction._expand_slashdash('work /- ing effort / workaround'.split())
['work', 'working', 'effort', 'workaround']
"""
for i in range(len(vs)-1):
if vs[i]== '/-':
vs[i]= '/'
vs[i+1]= vs[i-1]+vs[i+1]
return [v for v in vs if v != '/']
def _syn():
"""parsing synonyms
>>> pstream(Instruction._syn(),'aa/bb,cc/-dd,ee/ff')
[[LexToken(WORD,'aa',1,0), LexToken(SYMBOL,'/',1,2), LexToken(WORD,'bb',1,3)], ...
"""
def f(acc):
return acc[0::2]
def p(tok):
return tok.value in ['/','/-'] or c.can_wordify(tok)
synlist = Parse.next_token().if_test(p).plus()
return c.plus_andcomma(synlist).treat(f,'_syn')
def _treat_syn(acc):
"""build dict for synonyms.
input acc should contain the syn lists in the form
output by _syn.
This function will expand the slashes.
Output Etok(instruction,synonym,...)
"""
#acc11 = acc[1][1]
tt=[Instruction._expand_slashdash([t.value for t in ac]) for ac in acc]
return Etok(name='instruction',etoks=[],raw=acc,rule='synonym',misc=tt)
#d = {'production':'instruction',
# 'raw':lib.flatten(acc),
# 'keyword':'synonym'
# }
#acc11 = acc[1][1]
#d['value']=[Instruction._expand_slashdash([t.value for t in ac]) for ac in acc11]
#return d
def syn():
"""Synonym parser,
output is a fully treated Etok(instruction,synonym,...)
"""
return Instruction._syn().treat(Instruction._treat_syn)
def _treat_instruct(acc):
(_,(keyword,ls),_) = acc
return Etok(name='instruction',etoks=[],raw=acc,rule=keyword.value,misc=Instruction._param_misc(ls))
#{'production':'instruction',
# 'raw':lib.flatten(acc),
# 'keyword':keyword.value,
# 'value':Instruction._param_misc(ls)}
_keywords = """exit timelimit printgoal dump
ontored read library error warning"""
_keyword_instruct = (first_word(_keywords) +
Parse.next_token().possibly())
def instruction():
"""parsing instructions
>>> pstream(Instruction.instruction(),'[exit 1]')
Etok(instruction,exit,'[ exit 1 ]')
>>> pstream(Instruction.instruction(),'[read filename]')
Etok(instruction,read,'[ read filename ]')
>>> pstream(Instruction.instruction(),'[synonym another/extras, yet/-s]')
Etok(instruction,synonym,'[ synonym another / extra yet /- s ]')
"""
def f(acc):
(_,(_,s),_)=acc
return Etok.rawupdate(s, acc)
return (c.bracket(next_word('synonym') + Instruction.syn()).treat(f) |
c.bracket(Instruction._keyword_instruct).treat(Instruction._treat_instruct))
@memo
def expr():
"""parse for expression (term, type, or prop).
Output Etok(expr,...)
>>> pstream(expr(),'TERM')
Etok(expr,term,'TERM')
"""
def f1(nonterminal): #currying
def f(acc):
return Etok('expr',etoks=acc.etoks,raw=acc.raw,rule=nonterminal,misc=acc.misc,altrepr=acc.altrepr)
return f
def get(nonterminal):
return get_lookup_parse(nonterminal).treat(f1(nonterminal),nonterminal)
return first(
get('general_type'),
get('term'),
get('prop'),
get('proof_expr'),
get('sort_expr')
)
@memo
def colon_sort():
def f(acc):
((_,a),e) = acc
if not a:
return Etok.rawupdate(e,acc)
return Etok('coerce_as',etoks=[e],raw=acc)
return (colon_as + get_lookup_parse('sort_expr')).treat(f,'colon_sort')
@memo
def opt_colon_sort():
return colon_sort().possibly()
@memo
def colon_type():
"""Parse a colon then a post_colon_type.
output Etok
>>> pstream(colon_type(),':POST_COLON_TYPE')
Etok(post_colon_type,backdoor1,': POST_COLON_TYPE')
"""
def f(acc):
((_,a),e) = acc
if not a:
return Etok.rawupdate(e,acc)
return Etok('coerce_as',etoks=[e],raw=acc)
return (colon_as + get_lookup_parse('post_colon_type')).treat(f,'colon_type')
@memo
def opt_colon_type():
return colon_type().possibly()
@memo
def opt_colon_sort_or_type():
return (colon_sort() | colon_type()).possibly()
@memo
def var():
"""parser for a single variable.
Accepts a single token that is a variable.
>>> pstream(var(),'x')
Etok(VAR,x,'x')
"""
return c.next_type('VAR').name('VAR').treat(Etok.etok,'VAR')
def annotated(p):
"""
Parser for annotated p in parentheses.
p must output an Etok.
Input is wrapped in parentheses.
Annotation is colon_type or None
Parser output Etok('annotated'...)
etoks:(p,colon_type)
Sample input to parser:
(x : A)
>>> pstream(annotated(var()),'(x:POST_COLON_TYPE)')
Etok(annotated,...,'( x : POST_COLON_TYPE )')
"""
def f(acc):
(_,vs,_) = acc
#if not ann:
# return Etok.rawupdate(v,acc) # cannot guarantee that v is a single Etok.
return Etok('annotated',etoks=vs,raw=acc,rule=p.nonterminal)
return c.paren(p + opt_colon_sort_or_type()).treat(f,'annotated')
@memo
def annotated_var():
return annotated(var())
def annotateds(p):
"""
Parser for annotated list
p must output a list of Etoks.
Input is wrapped in parentheses.
Output Etok.etoks:([p],post_colon_type or None)
Sample input:
(x y z : A)
(u v)
>>> pstream(annotateds(var().plus()),'(x y:POST_COLON_TYPE)')
Etok(annotateds,...,'( x y : POST_COLON_TYPE )')
"""
def f(acc):
(_,(vs,ann),_) = acc
return Etok('annotateds',etoks=(vs,ann),raw=acc,rule=p.nonterminal)
return c.paren(p + opt_colon_type()).treat(f,'annotateds')
@memo
def annotated_vars():
return annotated(var().plus())
@memo
def tvar():
"""
>>> pstream(tvar(),'x')
Etok(VAR,x,'x')
>>> pstream(tvar(),'(x : POST_COLON_TYPE)')
Etok(annotated,VAR,'( x : POST_COLON_TYPE )')
"""
return var() | annotated_var()
@memo
def assign_expr():
"""parser for := followed by an expression
The output is the expression at Etok
>>> pstream(assign_expr(),':= GENERAL_TYPE')
Etok(expr,general_type,':= GENERAL_TYPE')
"""
def f(acc):
(_,e) = acc
return Etok.rawupdate(e,acc)
return (next_value(':=') + expr()).name('assign_expr').treat(f,'assign_expr')
def var_or_atomic(omit=[]):
"""parser for a var or atomic identifier.
The value is not allowed to lie in omit.
Output of parser is a single Etok of one of those types."""
def p(tok):
return not(tok.value in omit)
return (var() | atomic()).if_test(p).name('var_or_atomic')
def var_or_atomics_(omit=[]):
"""parser for a sequence of one or more var or atomics
>>> pstream(var_or_atomics_(),'x uu vv THE RUN.TO')
[Etok(VAR,x,'x'), ... Etok(ATOMIC,THE,'THE')]
"""
return var_or_atomic(omit=[]).plus()
@memo
def var_or_atomic_or_blank():
"""parser for var or atomic or _.
The parser output is a single token that is one of those types."""
return var_or_atomic() | next_value('_').treat(Etok.etok)
@memo
def brace_assign():
"""
input semi-separated list of assignments within braces.
output is a Etok(brace_assign)
Etok.etoks: list of (lhs,type annotation,assigned expr)
the last two can be None.
>>> pstream(brace_assign(),'{ x := TERM ; y : POST_COLON_TYPE := TERM }')
Etok(brace_assign,'{ x := TERM ; y : POST_COLON_TYPE := TERM }')
"""
#def f_item(acc):
# ((v,o),p) = acc
# return (v,o,p)
#n_acc = []
#def f_brace(acc):
# nonlocal n_acc
# (_,b,_) = acc
# n_acc = acc # keep full list of tokens
# return b[0::2]
#def f_final(acc):
# return Etok(name='brace_assign',etoks=acc,raw=n_acc)
#def brace_assign_item():
# return (var_or_atomic_or_blank()+ opt_colon_type() + assign_expr().possibly()).name('brace_assign_item').treat(f_item)
#return c.brace_semif().treat(f_brace,'f_brace').reparse_list(brace_assign_item()).treat(f_final,'brace_assign')
def f(acc):
(_,ps,_)=acc
ps = [(v,o,p) for ((v,o),p) in ps[0::2]]
return Etok(name='brace_assign',etoks=ps,raw=acc)
p = (var_or_atomic_or_blank()+ opt_colon_type() + assign_expr().possibly()).name('brace_assign_item')
return c.brace(p.plus(semicolon)).treat(f,'brace_assign_item')
@memo
def brace_noassign():
"""
input semi-separated list of var_or_atomics with possible typing
output is an Etok(brace_noassign)
Etok.etoks list of (lhs,typ annotation or None)
>>> pstream(brace_noassign(),'{x:POST_COLON_TYPE;y}')
Etok(brace_noassign,'{ x : POST_COLON_TYPE ; y }')
"""
#n_acc = []
#def f_brace(acc):
# nonlocal n_acc
# (_,b,_) = acc
# n_acc = acc
# return b[0::2] #remove semi
#def f_final(acc):
# return Etok(name='brace_noassign',etoks=acc,raw=n_acc)
#def brace_noassign_item():
# return (var_or_atomics_() + opt_colon_type())
#return c.brace_semif().treat(f_brace,'f_brace').reparse_list(brace_noassign_item()).treat(f_final,'brace_noassign')
def f(acc):
(_,ps,_)=acc
ps = ps[0::2]
return Etok(name='brace_noassign',etoks=ps,raw=acc)
p = (var_or_atomics_() + opt_colon_type()).name('brace_noassign_item')
return c.brace(p.plus(semicolon)).treat(f,'brace_noassign_item')
@memo
def app_args():
"""
parses the arguments of a function application.
output Etok.toks (brace_assign?,[expr])
>>> pstream(app_args(),'{ x:= TERM } TIGHTEST_EXPR TIGHTEST_EXPR ...')
Etok(app_args,'{ x := TERM } TIGHTEST_EXPR TIGHTEST_EXPR')
"""
def f(acc):
return Etok(name='app_args',etoks=acc,raw=acc)
return (brace_assign().possibly() + get_lookup_parse('tightest_expr').many()).treat(f,'app_args')
def casemark(s):
"""Used to mark different cases in parsing, for later
case-based treatment"""
def f(acc):
return (s,acc)
return f
def annotated_args(omit=[]):
"""
parse formal parameters of a function
input variables
omit = list of banned names for variables and atomics
output Etok(annotated_args) , etoks = man
>>> pstream(annotated_args(),'x vv tt')
Etok(annotated_args,'x vv tt')
>>> pstream(annotated_args(),'x (uu v w : POST_COLON_TYPE) y')
Etok(annotated_args,'x ( uu v w : POST_COLON_TYPE ) y')
"""
def f(acc):
return Etok(name='annotated_args',etoks=acc,raw=acc)
return (var_or_atomic(omit) | annotateds(var_or_atomics_(omit))).many().treat(f,'annotated_args')
def args_template(omit=[]):
"""
input parse braced, annotated arguments of
formal function args
output Etok(args_template), can be devoid of data
>>> pstream(args_template(),'{ x ; y ; z} r (s t) v')
Etok(args_template,'{ x ; y ; z } r ( s t ) v')
>>> pstream(args_template(),'')
Etok(args_template,'')
"""
def f(acc):
return Etok(name='args_template',etoks=acc,raw=acc)
return (brace_noassign().possibly() + annotated_args(omit)).treat(f,'args_template')
def nonempty_args_template(omit=[]):
"""This is the same as args_template, except it must
contain data.
>>> pstream(nonempty_args_template(),'{ x ; y ; z} r (s t) v')
Etok(args_template,'{ x ; y ; z } r ( s t ) v')
>>> try:
... pstream(nonempty_args_template(),'')
... except:
... 'invalid'
'invalid'
>>> pstream(nonempty_args_template(omit=['y']),'x y')
Etok(args_template,'x')
"""
def p(etok):
return (etok.rawstring())
return args_template(omit).if_test(p)
@memo
def tightest_arg():
"""
This allows too much. We should restrict to admissible patterns.
>>> pstream(tightest_arg(),'TIGHTEST_EXPR')
Etok(tightest_expr,backdoor1,'TIGHTEST_EXPR')
>>> pstream(tightest_arg(),'(x uu : sort_expr)')
Etok(tightest_arg,'( x uu : sort_expr )')
"""
def f(acc):
(_,(vs,o),_)=acc
return Etok(name='tightest_arg',etoks=(vs,o),raw=acc)
return (get_lookup_parse('tightest_expr') |
c.paren(var_or_atomic().atleast(2) +
opt_colon_sort_or_type()).treat(f,'tightest_arg'))
@memo
def tightest_args():
return brace_noassign().possibly() + tightest_arg().many()
@memo
def holding_vars():
""" input
This is experimental, used to indicate unbound (free) variables in
a sum or list comprehensive.
This is inspired by Harrison's {a | b | c} set comprehension notation.
>>> pstream(holding_vars(),', holding x,y,z')
Etok(holding_vars,', holding x , y , z')
"""
def f(acc):
((_,_),cs) = acc
return Etok(name='holding_vars',etoks=cs[0::2],raw=acc)
return (comma + next_word('holding') + c.plus_comma(var())).treat(f,'holding_vars')
@memo
def proof_expr():
r"""parser for the QED symbol
>>> pstream(proof_expr(),r'\qed')
Etok(SYMBOL_QED,\qed,'\qed')
"""
return c.next_type('SYMBOL_QED').treat(Etok.etok,'proof_expr')
@memo
def tightest_expr():
"""
Parser for expressions in which the boundaries are clear.
"""
return first(get_lookup_parse('tightest_term'),
get_lookup_parse('tightest_prop'),
get_lookup_parse('tightest_type'),
proof_expr())
@memo
def sort_expr():
"""Parser for arrows ending in rawvalue Sort or Type
>>> pstream(sort_expr(),'BINDER_TYPE -> Type')
Etok(sort_expr,'BINDER_TYPE -> type')
"""
def f(acc):
(m,s) = acc
m1 = [a for (a,_) in m]
return Etok(name='sort_expr',etoks=(m1,s),raw=acc)
return c.LazyParse((lambda s:((get_lookup_parse(s) + c.next_type('ARROW')).many() + read_keyword('sort')).treat(f,'sort_expr')),'binder_type')
# colon_sort above
# opt_colon_sort above
@memo
def paren_type():
"""Parser for a type wrapped in parentheses
>>> pstream(paren_type(),'(GENERAL_TYPE)')
Etok(general_type,backdoor1,'GENERAL_TYPE')
"""
def f(acc):
(_,a,_) = acc
return a
return c.paren(get_lookup_parse('general_type')).treat(f,'paren_type')
@memo
def annotated_type():
"""Parser for an annotated type
>>> pstream(annotated_type(),'(GENERAL_TYPE : Type)')
Etok(general_type,backdoor1,'GENERAL_TYPE')
"""
def f(acc):
(_,((a,_),_),_)=acc
return a
return c.paren(get_lookup_parse('general_type') + colon + rawtype).treat(f,'annotated_type')
@memo
def controlseq_type():
"""Parser for a control sequence type
>>> pstream(controlseq_type(),'PRIM_TYPE_CONTROLSEQ { TERM }')
Etok(cs_brace,prim_type_controlseq,'PRIM_TYPE_CONTROLSEQ { TERM }')
"""
return cs_brace(get_lookup_parse('prim_type_controlseq'))
@memo
def const_type():
"""Parser for an identifier representing a type"""
return get_lookup_parse('prim_identifier_type')
@memo
def field_type():
"""Parser for a field of a structure"""
def f(acc):
return Etok('field_type',etoks=acc,raw=acc)
return (get_lookup_parse('tighteset_term') + get_lookup_parse('prim_field_type_accessor')).treat(f,'field_type')
@memo
def over_args():
"""Parser for the experimental feature of using keyword over to
unbundle structures
input (there are three different forms, shown in examples)
output Etok(over_args,1 2 or 3)
>>> pstream(over_args(),'over { a := TERM ; b := TERM }')
Etok(over_args,1,'over { a := TERM ; b := TERM }')
>>> pstream(over_args(),'over TIGHTEST_TERM')
Etok(over_args,2,'over TIGHTEST_TERM')
>>> pstream(over_args(),'(over TIGHTEST_TERM,TIGHTEST_TERM)')
Etok(over_args,3,'( over TIGHTEST_TERM , TIGHTEST_TERM )')
"""
over = next_word('over')
def over_args1():
#n_acc = []
#def f_brace(acc):
# nonlocal n_acc
# (_,(_,b,_)) = acc
# n_acc = acc
# return b[0::2] #remove semi
#def f1(acc):
# return Etok(name='over_args',etoks=acc[0::2],raw=n_acc,rule='1')
#return ((over + c.brace_semif()).treat(f_brace).reparse_list(var_or_atomic() + assign_expr())).treat(f1,'over_args1')
def f(acc):
(_,(_,b,_))=acc
return Etok(name='over_args',etoks=b[0::2],raw=acc,rule='1')
p = (var_or_atomic() + assign_expr())
return (over + c.brace(p.plus(semicolon))).treat(f,'over_args1')
def over_args2():
def f2(acc):
(_,b)=acc
return Etok(name='over_args',etoks=b,raw=acc,rule='2')
return (over + get_lookup_parse('tightest_term')).treat(f2,'over_args2')
def over_args3():
def f3(acc):
(_,(_,b),_)=acc
return Etok(name='over_args',etoks=b[0::2],raw=acc,rule='3')
return (c.paren(over + c.plus_comma(tightest_expr()))).treat(f3,'over_args3')
return first(over_args1() , over_args2() , over_args3())
@memo
def overstructure_type():
"""Parser for overstructure.
The structure name must be a primitive identitifer.
>>> pstream(overstructure_type(),'PRIM_STRUCTURE { x:= TERM } TIGHTEST_EXPR over TIGHTEST_TERM')
Etok(overstructure_type,'PRIM_STRUCTURE { x := TERM } TIGHTEST_EXPR over TIGHTEST_TERM')
"""
def f(acc):
return Etok(name='overstructure_type',etoks=acc,raw=acc)
return (get_lookup_parse('prim_structure') + app_args() + over_args().possibly()).treat(f,'overstructure_type')
@memo
def var_type():
"""
Parser for a type variable.
If not annotated, the var should be
previously annotated (v : Type) in the context.
Output: Etok(VAR_TYPE,v,'v') (in LexToken format)
>>> pstream(var_type(),'(v:Type)')
Etok(VAR_TYPE,v,'( v : type )')
"""
def f(acc):
return acc.update({'name':'VAR_TYPE'})
def f2(acc):
(_,((v,_),_),_) = acc
return Etok.rawupdate(v,acc)
return (var() | c.paren(var() + colon + rawtype).treat(f2)).treat(f,'var_type')
@memo
def subtype():
r"""
Parser for a subtype comprehension { x // P(x)}
>>> pstream(subtype(),r'{ TERM, holding x \tmid STATEMENT }')
Etok(subtype,'{ TERM , holding x \tmid STATEMENT }')
"""
def f(acc):
(_,(((t,h),_),s),_)=acc
return Etok(name='subtype',etoks=(t,h,s),raw=acc)
return c.brace(get_lookup_parse('term') + holding_vars().possibly() + c.next_type('TMID') + get_lookup_parse('statement')).treat(f,'subtype')
@memo
def app_type():
"""Parser for the application of a type to its arguments
>>> pstream(app_type(),'TIGHTEST_TYPE TIGHTEST_EXPR')
Etok(app_type,tightest_type,'TIGHTEST_TYPE TIGHTEST_EXPR')
"""
def f(acc):
return Etok(name='app_type',etoks=acc,raw=acc,rule='tightest_type')
return ((get_lookup_parse('tightest_type') + app_args()).treat(f,'app_type') |
overstructure_type())
@memo
def binder_comma():
"""Parser for a comma in a binder expression"""
def f(acc):
return Etok(name='binder_comma',etoks=[Etok.etok(acc)],raw=[acc])
return comma.treat(f,'binder_comma')
@memo
def binder_type():
"""Recursive parser for type binders (Pi-types, etc.)
>>> pstream(binder_type(),'PRIM_PI_BINDER TIGHTEST_EXPR, TIGHTEST_TYPE')
Etok(binder_type,'PRIM_PI_BINDER TIGHTEST_EXPR , TIGHTEST_TYPE')
"""
def f(acc):
(((p,a),_),b)=acc
return Etok(name='binder_type',etoks=(p,a,b),raw=acc)
return (app_type() |
(get_lookup_parse('prim_pi_binder') + tightest_args() + binder_comma() + c.lazy_call(binder_type)).treat(f,'binder_type')
)
@memo
def agda_vars():
"""
Agda style dependent type variables (a : A ) -> B(a)
>>> pstream(agda_vars(),'(x : POST_COLON_TYPE) (z u : POST_COLON_TYPE)')
Etok(agda_vars,'( x : POST_COLON_TYPE ) ( z u : POST_COLON_TYPE )')
"""
def f(acc):
return Etok(name='agda_vars',etoks=acc,raw=acc)
return annotated_vars().plus().treat(f,'agda_vars')
@memo
def _type_operand():
"""
Parser for argument of a binary type operation.
"""
return binder_type() | agda_vars()
@memo
def _type_op():
"""Parser for a binary type operator
>>> pstream(_type_op(),'PRIM_TYPE_OP')
Etok(prim_type_op,backdoor1,'PRIM_TYPE_OP')
>>> pstream(_type_op(),'PRIM_TYPE_OP_CONTROLSEQ { TERM }')
Etok(cs_brace,prim_type_op_controlseq,'PRIM_TYPE_OP_CONTROLSEQ { TERM }')
"""
return (get_lookup_parse('prim_type_op') |
cs_brace(get_lookup_parse('prim_type_op_controlseq')))
@memo
def binop_type():
"""Parser for binary operation on types.
for product types A * B, sum types A + B,
including arrows A -> B,
including Agda style dependent arrows (x:A) -> B x.
all type operators are right assoc with the same precedence
N.B. binder_types is tighter than binop_type, which might be non-intuitive.
Operators appear in etoks[1] odd positions.
>>> pstream(binop_type(),'TIGHTEST_TYPE PRIM_TYPE_OP TIGHTEST_TYPE')
Etok(binop_type,'TIGHTEST_TYPE PRIM_TYPE_OP TIGHTEST_TYPE')
"""
def f(acc):
((p,m),b) = acc
return Etok(name='binop_type',etoks=(p,m+[b]),raw=acc)
return (brace_noassign().possibly() + (_type_operand() + _type_op()).many() + binder_type()).treat(f,'binop_type')
@memo
def quotient_type():
"""parser for quotient types
>>> pstream(quotient_type(),'quotient of GENERAL_TYPE by TERM')
Etok(quotient_type,'quotient of GENERAL_TYPE by TERM')
"""
def f(acc):
((((_,_),g),_),t) = acc
return Etok(name='quotient_type',etoks=(g,t),raw=acc)
return (next_word('quotient') + next_word('of').possibly() +
get_lookup_parse('general_type') + next_word('by') +
get_lookup_parse('term')).treat(f,'quotient_type')
@memo
def coercion_type():
r"""parser for coercion of a term to type
>>> pstream(coercion_type(),r'\^TERM')
Etok(coercion_type,'\^ TERM')
"""
def f(acc):
(_,t)=acc
return Etok(name='coercion_type',etoks=[t],raw=acc)
return (c.next_type('COERCION') + get_lookup_parse('term')).treat(f,'coercion_type')
@memo
def coerced_type():
"""parser for (possibly implicit) coercion from term to type
>>> pstream(coerced_type(),'TERM')
Etok(coercion_type,'TERM')
"""
def f(acc):
return Etok(name='coercion_type',etoks=[acc],raw=acc)
return (coercion_type() | get_lookup_parse('term').treat(f,'coerced_type'))
@memo
def opentail_type():
"""Parser for binop, quotient, or coercion type"""
return first(binop_type() , quotient_type() , coercion_type())
@memo
def post_colon_type():
"""parser for type appearing after a colon
>>> pstream(post_colon_type(),'PRIM_RELATION')
Etok(post_colon_type,2,'PRIM_RELATION')
"""
def f2(acc):
return Etok(name='post_colon_type',etoks=acc,raw=acc,rule='2')
return first(get_lookup_parse('general_type') ,
(get_lookup_parse('prim_relation') + app_args()).treat(f2,'post_colon_type-2') ,
coerced_type())
# general_type - implement after attribute
@memo
def hierarchical_identifier():
"""
Parser for hierarchical identifiers.
Output is a Etok.
"""
return c.next_type('HIERARCHICAL_IDENTIFIER').treat(Etok.etok,'hierarchical_identifier')
@memo
def identifier():
"""parser for hierarchical or atomic identifier.
Output is a single Etok"""
return (atomic() | hierarchical_identifier()).name('identifier')
@memo
def _opt_alt_constructor():
"""Parser for a single constructor in an inductive type declaration.
>>> pstream(_opt_alt_constructor(),'| id : POST_COLON_TYPE')
Etok(alt_constructor,'| id : POST_COLON_TYPE')
"""
def f(acc):
(((_,i),a),t)=acc
return Etok(name='alt_constructor',etoks=(i,a,t),raw=acc)
return (c.next_type('ALT') + identifier() + args_template() + opt_colon_type()).treat(f,'_opt_alt_constructor')
def not_period(tok):
"""boolean token test for non-period."""
return not(tok.type == 'PERIOD')
#no memo parameter
def not_end(tok):
"""boolean token test for not keyword 'end'"""
return not(tok.value == 'end') and not_period(tok)
@memo
def field_prefix():
"""
parser for field prefixes:
coercion notationless notation parameter type call
These are decorators or attributes.
coercion - structure coerces to this field
parameter - field can float to unbundled position
type - objects can coerce to this type.
call - objects can be used as a function.
notation - field is for notational type classing.
notationless - ??
>>> pstream(field_prefix(),' random ')
Etok(field_prefix,'')
>>> pstream(field_prefix(),'a type,call,notation')
Etok(field_prefix,'a type , call , notation')
"""
def f(acc):
keys = []
if acc:
(_,keys) = acc
keys = keys[0::2]
return Etok(name='field_prefix',etoks=keys,raw=acc)
return (lit('a').possibly() +
c.plus_comma(read_keyword('field_key'))).possibly().treat(f,'field_prefix')
@memo
def field_identifier():
"""Parser for identifier in one field of structure declaration
The word 'proof' or '_' can be used as
anonymous field identifiers for props.
>>> pstream(field_identifier(),'x : POST_COLON_TYPE')
Etok(field_identifier,'x : POST_COLON_TYPE')
>>> pstream(field_identifier(),'proof')
Etok(PROOF,proof,'proof')
"""
def fp(acc):
return Etok(name='PROOF',etoks=[],raw=acc,rule='proof')
def f(acc):
return Etok(name='field_identifier',etoks=acc,raw=acc)
return first(get_lookup_parse('prim_structure') ,
(next_word('proof')|c.next_value('_')).treat(fp) ,
(var_or_atomic() +
opt_colon_sort_or_type()
).treat(f,'field_identifier')
)
@memo
def field():
"""Parser for one field of a structure
>>> pstream(field(),'a call,type,parameter x := TERM')
Etok(field,'a call , type , parameter x := TERM')
"""
def f(acc):
((a,b),c)=acc
return Etok(name='field',etoks=(a,b,c),raw=acc)
return (field_prefix() + field_identifier() + assign_expr().possibly()).treat(f,'field')
@memo
def structure():
"""Parser for a structure declaration
>>> pstream(structure(),'notational structure with parameters { x : POST_COLON_TYPE } with { parameter y := TERM }')
Etok(structure,'notational structure with parameter { x : POST_COLON_TYPE } with { parameter y := TERM }')
"""
def f(acc):
((((n,_),t),_),(_,b,_))=acc
if t:
(_,t)=t
return Etok(name='structure',etoks=(n,t,b[0::2]),raw=acc)
# Prohibit identifiers named 'with' to avoid grammar ambiguity.
return (next_word('notational').treat(Etok.etok).possibly() +
next_word('structure') +
(lit('param').possibly() + nonempty_args_template(omit=['with'])).possibly() +
next_word('with').possibly() +
c.brace(field().plus(semicolon))).treat(f,'structure')
proof_expr # implemented above
@memo
def controlseq_term():
"""parser for terms expressed as control sequences
>>> pstream(controlseq_term(),'PRIM_TERM_CONTROLSEQ { TERM }')
Etok(cs_brace,prim_term_controlseq,'PRIM_TERM_CONTROLSEQ { TERM }')
"""
return cs_brace(get_lookup_parse('prim_term_controlseq'))
@memo
def tightest_prefix():
"""Parser for very tightly bound terms.
This prefix is the stem of the term, to which suffixes are added.
>>> pstream(tightest_prefix(),'33.456')
Etok(DECIMAL,33.456,'33.456')
>>> pstream(tightest_prefix(),'1799')
Etok(INTEGER,1799,'1799')
"""
return first(Parse.next_token().if_types(['DECIMAL','INTEGER','STRING','BLANK','VAR']).treat(Etok.etok,'tightest_prefix') ,
get_lookup_parse('prim_identifier_term') ,
controlseq_term() ,
get_lookup_parse('delimited_term') , #future reference
get_lookup_parse('alt_term')) #future reference
@memo
def tightest_suffix():
"""Recursive parser for suffix to a tightly bound term.
The suffix can be a .field (field accessor) or subscript
"""
return first(get_lookup_parse('prim_field_term_accessor') ,
(c.lazy_call(tightest_subscript))
)
@memo
def tightest_term():
r"""Parser for a tightly bound term
>>> pstream(tightest_term(),r'33.456 PRIM_FIELD_TERM_ACCESSOR\sub(3)')
Etok(tightest_term,'33.456 PRIM_FIELD_TERM_ACCESSOR \sub ( 3 )')
"""
def f(acc):
return Etok(name='tightest_term',etoks=acc,raw=acc)
return (tightest_prefix() + tightest_suffix().many()).treat(f,'tightest_term')
@memo
def tightest_subscript():
"""Parser for subscript
APPLYSUB handles subscripts coming from a TeX file.
The braces have been converted to ()
In brief,
x_1 is an identifier.
x APPLYSUB (1) is equivalent to x 1 and is the de-TeXed form of x_{1}.
x APPLYSUB (i j) is equivalent to x i j. (This is perhaps a surprise.)
x APPLYSUB ((f j)) is equivalent to x (f j).
"""
def f(acc):
(_,(_,t,_))=acc
return Etok(name='apply_sub',etoks=t,raw=acc)
return (c.next_type('APPLYSUB') + c.paren(tightest_term().plus())).treat(f,'tightest_subscript')
controlseq_term # defined above
var_or_atomic_or_blank # defined above
@memo
def annotated_term():
return annotated(get_lookup_parse('term'))
@memo
def set_enum_term():
"""parser for set enumeration
>>> pstream(set_enum_term(),'{ PLAIN_TERM, PLAIN_TERM, PLAIN_TERM }')
Etok(set_enum_term,'{ PLAIN_TERM , PLAIN_TERM , PLAIN_TERM }')
"""
def f(acc):
(_,t,_)=acc
t = t[0::2]
return Etok(name='set_enum_term',etoks=t,raw=acc)
return c.brace(c.many_comma(get_lookup_parse('plain_term'))).treat(f,'set_enum_term')
@memo
def set_comprehension_term():
"""Parser for set comprehension
>>> pstream(set_comprehension_term(),'{ PLAIN_TERM, holding u,v \mid STATEMENT}')
Etok(set_comprehension_term,'{ PLAIN_TERM , holding u , v \mid STATEMENT }')
"""
def f(acc):
(_,(((p,h),_),s),_)=acc
return Etok(name='set_comprehension_term',etoks=(p,h,s),raw=acc)
return c.brace(get_lookup_parse('plain_term') + holding_vars() + c.next_type('MID') + get_lookup_parse('statement')).treat(f,'set_comprehension_term')
@memo
def tuple_term():
"""Parser for n=tuples.
There must be at least one comma.
(x) is parsed as x in parentheses.
>>> pstream(tuple_term(),'(PLAIN_TERM,PLAIN_TERM,PLAIN_TERM)')
Etok(tuple_term,'( PLAIN_TERM , PLAIN_TERM , PLAIN_TERM )')
"""
def f(acc):
(_,((p,_),ps),_)=acc
ps = [p]+ps[0::2]
return Etok(name='tuple_term',etoks=ps,raw=acc)
return c.paren(get_lookup_parse('plain_term') + comma + c.plus_comma(get_lookup_parse('plain_term'))).treat(f,'tuple_term')
@memo
def list_term():
"""Parser for lists: [a;b;c], possibly empty []
>>> pstream(list_term(),'[PLAIN_TERM;PLAIN_TERM;PLAIN_TERM]')
Etok(list_term,'[ PLAIN_TERM ; PLAIN_TERM ; PLAIN_TERM ]')
"""
def f(acc):
(_,ps,_)=acc
ps = ps[0::2]
return Etok(name='list_term',etoks=ps,raw=acc)
return c.bracket(get_lookup_parse('plain_term').many(semicolon)).treat(f,'list_term')
@memo
def make_term():
"""parser for make statement (structure constructor).
DEBUG: I forget the purpose of the tightest_type.
>>> pstream(make_term(),'make { it : POST_COLON_TYPE := TERM }')
Etok(make_term,'make { it : POST_COLON_TYPE := TERM }')
"""
#def fp(acc):
# ((a,b),c)=acc
# return (a,b,c)
#def f(acc):
# ((_,t),(_,b,_)) = acc
# p = (var_or_atomic_or_blank() + opt_colon_type() +
# assign_expr().possibly()).treat(fp)
# b = c.retreat_list(p,b)
# return Etok('make_term',etoks=(t,b),raw=acc)
#return (next_word('make') + get_lookup_parse('tightest_type').possibly() +
# c.brace_semif()).treat(f,'make_term')
def f(acc):
((_,t),(_,bs,_))=acc
bs = [(a,b,c) for ((a,b),c) in bs[0::2]]
return Etok('make_term',etoks=(t,bs),raw=acc)
p = (var_or_atomic_or_blank() + opt_colon_type() +
assign_expr().possibly())
return (next_word('make') + get_lookup_parse('tightest_type').possibly() +
c.brace(p.plus(semicolon))).treat(f,'make_term')
@memo
def paren_term():
"""parser for term in parentheses
>>> pstream(paren_term(),'(TERM)')
Etok(term,backdoor1,'( TERM )')
"""
def f(acc):
(_,t,_)=acc
return Etok.rawupdate(t,acc)
return c.paren(get_lookup_parse('term')).treat(f,'paren_term')
@memo
def delimited_term():
"""Parser for terms that are delimited:
(x), (x : A), make { x := 3 }, [1;2],
{3,4}, (5,6), {x : f(x)}
>>> pstream(delimited_term(),'(TERM)')
Etok(term,backdoor1,'( TERM )')
"""
return first(paren_term() ,
annotated_term() ,
make_term() ,
list_term() ,
tuple_term() ,
set_enum_term() ,
set_comprehension_term())
@memo
def alt_case():
"""Parser for a single case of a case term
>>> pstream(alt_case(),'| PROP := PLAIN_TERM')
Etok(alt_case,'| PROP := PLAIN_TERM')
"""
def f(acc):
(((_,p),_),t)=acc
return Etok(name='alt_case',etoks=(p,t),raw=acc)
return (c.next_type('ALT') + get_lookup_parse('prop') + c.next_type('ASSIGN') + get_lookup_parse('plain_term')).treat(f,'alt_case')
@memo
def case_term():
"""Parser for a case term
>>> pstream(case_term(),'case | PROP := PLAIN_TERM end')
Etok(case_term,'case | PROP := PLAIN_TERM end')
"""
def f(acc):
((_,a),_)=acc
a= c.retreat_list(alt_case().plus(),[lib.fflatten(a)])
return Etok(name='case_term',etoks=a[0],raw=acc)
return (c.next_word('case')+ c.balanced_condition(not_end) +c.next_word('end')).treat(f,'case_term')
@memo
def app_term():
"""Parser for a function applied to arguments
"""
def f(acc):
return Etok(name='app_term',etoks=acc,raw=acc)
return (tightest_term() + app_args()).treat(f,'app_term')
@memo
def match_pats():
return c.plus_comma(get_lookup_parse('plain_term'))
@memo
def alt_match():
"""Parser for a single alternative in match term"""
def f(acc):
(((_,p),_),p2)=acc
return Etok(name='alt_match',etoks=(p,p2),raw=acc)
return (c.next_type('ALT')+match_pats()+c.next_type('ASSIGN')+get_lookup_parse('plain_term')).treat(f,'alt_match')
@memo
def match_term():
"""Parser for a match term
>>> pstream(match_term(),'match PLAIN_TERM with | PLAIN_TERM := PLAIN_TERM end')
Etok(match_term,'match PLAIN_TERM with | PLAIN_TERM := PLAIN_TERM end')
"""
def f(acc):
((((_,mp),_),b),_)=acc
b = c.retreat_list(alt_match().plus(),[lib.fflatten(b)])
return Etok(name='match_term',etoks=(mp,b[0]),raw=acc)
return (next_word('match') + match_pats() + next_word('with') +
c.balanced_condition(not_end) + next_word('end')
).treat(f,'match_term')
@memo
def match_function():
"""parser for a function with match statement
>>> pstream(match_function(),'function | PLAIN_TERM := PLAIN_TERM end')
Etok(match_function,'function | PLAIN_TERM := PLAIN_TERM end')
"""
def f(acc):
((((_,t),o),b),_)=acc
b = c.retreat_list(alt_match().plus(),[lib.fflatten(b)])
return Etok(name='match_function',etoks=(t,o,b),raw=acc)
return (next_word('function') + args_template() +
opt_colon_type() + c.balanced_condition(not_end) +
next_word('end')).treat(f,'match_function')
@memo
def alt_term():
"""Parser for term following the '| ... end' template"""
return first(case_term() , match_term() , match_function())
# opentail_term - later
@memo
def lambda_term():
"""Parser for lambda abstraction
>>> pstream(lambda_term(),'TDOP_TERM \mapsto OPENTAIL_TERM')
Etok(mapsto,'TDOP_TERM \mapsto OPENTAIL_TERM')
>>> pstream(lambda_term(),'fun TIGHTEST_EXPR := OPENTAIL_TERM')
Etok(fun_term,'fun TIGHTEST_EXPR := OPENTAIL_TERM')
"""
def f1(acc):
((t,_),o)=acc
return Etok(name='mapsto',etoks=(t,o),raw=acc)
def f2(acc):
(((p,a),_),o)=acc
return Etok(name='lambda_term',etoks=(p,a,o),raw=acc)
def f3(acc):
(((_,t),_),o)=acc
return Etok(name='fun_term',etoks=(t,o),raw=acc)
return first((get_lookup_parse('tdop_term') + c.next_type('MAPSTO') + get_lookup_parse('opentail_term')).treat(f1,'mapsto') ,
(get_lookup_parse('prim_lambda_binder') + tightest_args() + binder_comma() + get_lookup_parse('opentail_term')).treat(f2,'lambda_term') ,
(next_word('fun')+ tightest_args() + c.next_type('ASSIGN') + get_lookup_parse('opentail_term')).treat(f3,'fun_term')
)
@memo
def let_term():
"""Parser for let ....
>>> pstream(let_term(),'let x := PLAIN_TERM in OPENTAIL_TERM')
Etok(let,'let x := PLAIN_TERM in OPENTAIL_TERM')
"""
def f(acc):
(((((_,p),_),t),_),o)=acc
return Etok(name='let',etoks=(p,t,o),raw=acc)
return (next_word('let') + tightest_prefix() +
c.next_type('ASSIGN') + get_lookup_parse('plain_term') + next_word('in') + get_lookup_parse('opentail_term')).treat(f,'let_term')
@memo
def if_then_else_term():
"""Parse 'if bool then A else B'
>>> pstream(if_then_else_term(),'if PROP then PLAIN_TERM else OPENTAIL_TERM')
Etok(if_then_else_term,'if PROP then PLAIN_TERM else OPENTAIL_TERM')
"""
def f(acc):
(((((_,p),_),t),_),f)=acc
return Etok(name='if_then_else_term',etoks=(p,t,f),raw=acc)
return (next_word('if') + get_lookup_parse('prop') +
next_word('then') + get_lookup_parse('plain_term') + next_word('else') + get_lookup_parse('opentail_term')).treat(f,'if_then_else_term')
@memo
def opentail_term():
"""Recursive parser for terms with open tails.
These are terms that can be iterated as in
let x := y in let u:= v in tail
if b then t else if b2 then t2 else tail
Specifically, this includes lambdas, let, if_then, tdop
>>> pstream(opentail_term(),'let x := PLAIN_TERM in OPENTAIL_TERM')
Etok(let,'let x := PLAIN_TERM in OPENTAIL_TERM')
"""
return first(c.lazy_call(lambda_term) ,
c.lazy_call(let_term) ,
c.lazy_call(if_then_else_term) ,
get_lookup_parse('tdop_term')
)
@memo
def where_suffix():
"""suffix to Haskell 'where'
>>> pstream(where_suffix(),'where { x : POST_COLON_TYPE := TERM ; y := TERM }')
Etok(where_suffix,'where { x : POST_COLON_TYPE := TERM ; y := TERM }')
"""
#def f_inner(acc):
# ((a,b),c)=acc
# return (a,b,c)
#def f(acc):
# (_,(_,b,_))=acc
# b=c.retreat_list((var()+opt_colon_type()+assign_expr().possibly()).treat(f_inner),b[0::2])
# return Etok(name='where_suffix',etoks=b,raw=acc)
#return (next_word('where') + c.brace_semif()).treat(f,'where_suffix')
def f(acc):
(_,(_,bs,_))=acc
bs = [(a,b,c) for ((a,b),c) in bs[0::2]]
return Etok(name='where_suffix',etoks=bs,raw=acc)
p = (var()+opt_colon_type()+assign_expr().possibly())
return (next_word('where') + c.brace(p.plus(semicolon))).treat(f,'where_suffix')
@memo
def where_term():
"""Parser for term with (possible) Haskell style where suffix
>>> pstream(where_term(),'TDOP_TERM where {x : POST_COLON_TYPE := TERM }')
Etok(where_term,'TDOP_TERM where { x : POST_COLON_TYPE := TERM }')
"""
def f(acc):
return Etok('where_term',etoks=acc,raw=acc)
return (opentail_term() + where_suffix().possibly()).treat(f,'where_term')
@memo
def term_op():
"""Parser for symbolic operators
>>> pstream(term_op(),'PRIM_TERM_OP_CONTROLSEQ { TERM } {TERM }')
Etok(cs_brace,prim_term_op_controlseq,'PRIM_TERM_OP_CONTROLSEQ { TERM } { TERM }')
"""
return first(get_lookup_parse('prim_term_op') ,
cs_brace(get_lookup_parse('prim_term_op_controlseq'))
)
@memo
def term_ops():
return term_op().plus()
@memo
def definite_term():
"""term with a definite article, subsuming where_term
>>> pstream(definite_term(),'the PRIM_DEFINITE_NOUN')
Etok(prim_definite_noun,backdoor1,'the PRIM_DEFINITE_NOUN')
"""
def f(acc):
(_,t)=acc
return Etok.rawupdate(t,acc)
return first(where_term() ,
(next_word('the') + get_lookup_parse('prim_definite_noun')).treat(f,'definite_term')
)
@memo
def any_args():
def f(acc):
b = acc[0::2]
return Etok(name='any_args',etoks=b,raw=acc)
return c.plus_comma(var() | annotated_vars()).treat(f,'any_args')
@memo
def any_name():
"""Parse for terms with forthel
natural language quantification
every x, each x, all x, no x, some x,...
>>> pstream(any_name(),'every x, y, z')
Etok(any_name,'every x , y , z')
"""
def f(acc):
return Etok(name='any_name',etoks=acc,raw=acc)
return (lit_any() +
first(any_args() ,
get_lookup_parse('pseudoterm') ,
get_lookup_parse('general_type'))).treat(f,'any_name')
@memo
def term():
"""parser for terms, subsuming all other terms (through definite_term)"""
def f(acc):
(_,t)=acc
return Etok.rawupdate(t,acc)
return first((get_lookup_parse('prim_classifier').possibly() + definite_term()).treat(f,'term') ,
any_name())
@memo
def terms():
def f(acc):
return Etok(name='terms',etoks=acc[0::2],raw=acc)
return c.plus_andcomma(term()).treat(f,'terms')
def isplains(etoks):
"""Boolean test if a (nested) list of Etok is plain.
All elements must be Etok s (or test to False).
"""
return all(isplain(e) for e in lib.fflatten(etoks) if e)
def isplain(etok):
"""Boolean test if an Etok is plain.
Input must be an Etok.
"""
if tokenlib.is_lex(etok):
return True
if etok.name=='any_name':
return False
return isplains(etok.etoks)
@memo
def plain_term():
"""
Following Forthel 1.3.3,
a plain_term contains no any_name recursively within it.
We implement this with a separate check that the term is plain,
rather than build plain terms as a separate nonterminal.
We require plain terms on the right-hand-side of definitions.
Also, in dependent types, the terms should be plain.
"""
return term().if_test(isplain)
@memo
def tdop_term():
"""Parser for an expression involving symbolic operators.
tdop = top down operator precedence,
which is how such expressions will eventually be handled.
Here we just collect together the tokens in the expression
for later handling.
In the expression, there are no adjacent non-symbolic terms.
That is, f x + y is interpreted as function application of f to x...
There can be adjacent symbols: 3! + 1.
The expression can optionally begin or end with a symbol.
The expression might be a solitary symbol or app_term.
There are three general precedence categories built into
the grammar.
* prop operators; (precedence < 0)
* binary relation operators such as "="; (precedence=0)
* term operators. (precedence > 0) (this case).
This allows us to distinguish terms from props and types.
>>> pstream(tdop_term(),'x PRIM_TERM_OP y')
Etok(tdop_term,'x PRIM_TERM_OP y')
"""
def f(acc):
(((p,o),ao),tp)=acc
r=[o]+ao
if p:
r =[p]+r
if tp:
r=r+[tp]
return Etok('tdop_term',etoks=r,raw=acc)
return first((app_term().possibly() + term_ops() +
(app_term() + term_ops()).many() +
app_term().possibly()).treat(f,'tdop_term') ,
app_term()
)
@memo
def adjective_left_attribute():
def f(acc):
return Etok(name='adjective_left_attribute',etoks=acc,raw=acc)
return (next_word('non').treat(Etok.etok).possibly() + get_lookup_parse('prim_simple_adjective')).treat(f,'adjective_left_attribute')
@memo
def multisubject_left_attribute():
return (get_lookup_parse('prim_simple_adjective_multisubject'))
@memo
def left_attribute():
return first(adjective_left_attribute() ,
multisubject_left_attribute())
@memo
def is_right_attribute():
def f(acc):
return Etok(name='is_right_attribute',etoks=acc[0::2],raw=acc)
return c.plus_andcomma(get_lookup_parse('is_pred')).treat(f,'is_right_attribute')
@memo
def does_right_attribute():
def f(acc):
(_,t)=acc
return Etok(name='does_right_attribute',etoks=t[0::2],raw=acc)
return (next_word('that') + c.plus_andcomma(get_lookup_parse('does_pred'))).treat(f,'does_right_attribute')
@memo
def such_that_right_attribute():
def f(acc):
(_,t)=acc
return Etok.rawupdate(t,acc)
return (c.next_phrase('such that') + get_lookup_parse('statement')).treat(f,'such_that_right_attribute')
@memo
def right_attribute():
return first(is_right_attribute() , does_right_attribute() , such_that_right_attribute())
def attribute(p):
"""Parser for a term with left and right attributes
"""
def f(acc):
return Etok(name='attribute',etoks=acc,raw=acc)
return (left_attribute().many() + p + right_attribute().possibly()).treat(f,'attribute')
@memo
def general_type():
"""parser for a general type.
This is one of the main nonterminals.
It subsumes all specialized type nonterminals.
"""
return attribute(opentail_type())
@memo
def binary_relation_op():
"""binary relation symbols"""
return first(get_lookup_parse('prim_binary_relation_op') ,
cs_brace(get_lookup_parse('prim_binary_relation_controlseq'))
)
# deprecated, now part of tdop_rel_prop
#def tdop_terms():
# def f(acc):
# return Etok(name='tdop_terms',etoks=acc[0::2],raw=acc)
# return c.plus_andcomma(tdop_term).treat(f,'tdop_terms')
@memo
def tdop_rel_prop():
"""Parser for terms chained by binary relation symbols.
All symbols have the same precedence 0.
We allow x,y < z < w. The first arg can be a list of terms.
The chain expands as x < z and y < z and z < w.
output contains the list [x<z,y<z,z<w] (coded as Etoks)
No parentheses allowed in chain.
>>> pstream(tdop_rel_prop(),'x,y,z PRIM_BINARY_RELATION_OP u PRIM_BINARY_RELATION_OP x')
Etok(tdop_rel_prop,'x , y , z PRIM_BINARY_RELATION_OP u PRIM_BINARY_RELATION_OP x')
"""
def f(acc):
(t,ls)=acc
#expand chain
op0 = [(a,r0,t0) for a in t[0::2] for (r0,t0) in ls[0:1]] #chain comma
op1 = [(a,r0,t0) for ((_,a),(r0,t0)) in zip(ls[:-1],ls[1:])]
return Etok(name='tdop_rel_prop',etoks=op0+op1,raw=acc)
return (c.plus_andcomma(tdop_term()) + (binary_relation_op() + tdop_term()).plus()).treat(f,'tdop_rel_prop')
@memo
def prop_op():
"""Parser for propositional connectives
>>> pstream(prop_op(),'PRIM_PROPOSITIONAL_OP')
Etok(prim_propositional_op,backdoor1,'PRIM_PROPOSITIONAL_OP')
"""
return first(
get_lookup_parse('prim_propositional_op') ,
cs_brace(get_lookup_parse('prim_propositional_op_controlseq'))
)
@memo
def tdop_prop():
"""Parser for operators among props, such
as and, or, implies.
precedence is negative.
It must be infix (possibly with multiple ops).
For example, a symbolic negation is not included.
subsumes binder_prop
output etoks: binder_props in even positions, ops in odd positions
>>> pstream(tdop_prop(),'BINDER_PROP PRIM_PROPOSITIONAL_OP BINDER_PROP')
Etok(tdop_prop,'BINDER_PROP PRIM_PROPOSITIONAL_OP BINDER_PROP')
"""
def f(acc):
(b,m)=acc
return Etok(name='tdop_prop',etoks=[b]+lib.flatten(m),raw=acc)
return (get_lookup_parse('binder_prop') +
(prop_op().plus() + get_lookup_parse('binder_prop')).many()).treat(f,'tdop_prop')
@memo
def identifier_prop():
"""Parser for identifiers of type prop"""
return get_lookup_parse('prim_relation')
@memo
def annotated_prop():
"""Parser for prop, annotated as prop
>>> pstream(annotated_prop(),'(PROP : Prop)')
Etok(annotated_prop,'( PROP : prop )')
"""
def f(acc):
(_,((p,_),_),_) =acc
return Etok('annotated_prop',etoks=[p],raw=acc)
return c.paren(get_lookup_parse('prop')+colon + rawprop).treat(f,'annotated_prop')
@memo
def field_prop():
"""
Parser for prop obtained as dotted c.f, where the field f has type prop
Debug: should we add app_args (and move to app_args): c.f (x)?
"""
def f(acc):
return Etok(name='field_prop',etoks=acc,raw=acc)
return (tightest_term() + get_lookup_parse('prim_field_prop_accessor')).treat(f,'field_prop')
@memo
def prop_var():
"""parser for propositional var"""
def f(acc):
return Etok(name='prop_var',etoks=[acc],raw=acc)
return var().treat(f,'prop_var')
@memo
def tightest_prop():
"""Parser for tightly bound propositional statements"""
def f(acc):
(_,s,_)=acc
return Etok.rawupdate(s,acc)
return first(c.paren(get_lookup_parse('statement')).treat(f) ,
identifier_prop() ,
prop_var() ,
annotated_prop() ,
field_prop()
)
@memo
def app_prop():
"""parser for predicate application"""
def f(acc):
return Etok(name='app_prop',etoks=acc,raw=acc)
return (tightest_prop() + app_args()).treat(f,'any_prop')
@memo
def lambda_predicate():
"""parser for lambda term with values in prop
>>> pstream(lambda_predicate(),'fun TIGHTEST_EXPR : Prop := (STATEMENT)')
Etok(lambda_predicate,'fun TIGHTEST_EXPR : prop := ( STATEMENT )')
"""
def f(acc):
#return acc
(((((_,t),_),_),_),p)=acc
return Etok(name='lambda_predicate',etoks=(t,p),raw=acc)
return (next_word('fun')+ tightest_args() + colon + rawprop +
c.next_type('ASSIGN') + tightest_prop()
).treat(f,'lambda_predicate')
@memo
def binder_prop():
"""Recursive parser for props with (optional) binders (universal, etc.)
Subsumes various other kinds of props.
>>> pstream(binder_prop(),'PRIM_BINDER_PROP TIGHTEST_EXPR , A')
Etok(binder_prop,'PRIM_BINDER_PROP TIGHTEST_EXPR , A')
"""
def f(acc):
(((b,a),_),b2)=acc
return Etok(name='binder_prop',etoks=(b,a,b2),raw=acc)
return first(app_prop() ,
tdop_rel_prop() ,
lambda_predicate() ,
( get_lookup_parse('prim_binder_prop') +
args_template() + binder_comma() +
c.lazy_call(binder_prop)
).treat(f,'binder_prop')
)
@memo
def prop():
"""Parser for prop.
This is one of the main nonterminals.
It subsumes all specialized prop nonterminals.
The classifier is a sort of meta sort, which is currently ignored.
It might be a word such as 'predicate'
>>> pstream(prop(),'BINDER_PROP')
Etok(tdop_prop,'BINDER_PROP')
"""
def f(acc):
(_,t)=acc
return t
return (get_lookup_parse('prim_classifier').possibly() + tdop_prop()).treat(f,'prop')
# install binder_prop,prop
# statements...
@memo
def possessed_noun():
return (attribute(get_lookup_parse('prim_possessed_noun')))
@memo
def has_pred():
"""Parser for has_pred or its negation.
Note that commas may appear in both attributes
and the list of has_pred, but the parse should be unambiguous
because of the articles.
>>> pstream(has_pred(),'the PRIM_POSSESSED_NOUN and the PRIM_POSSESSED_NOUN')
Etok(has_pred,'the PRIM_POSSESSED_NOUN and the PRIM_POSSESSED_NOUN')
"""
def f1(acc):
t = [p for (_,p) in acc[0::2]] # drop commas, articles
return Etok(name='has_pred',etoks=t,raw=acc)
def f2(acc):
return Etok(name='no_has_pred',etoks=acc,raw=acc)
return first(c.plus_andcomma(lit('article') + possessed_noun()).treat(f1,'has_pred') ,
(next_word('no') + possessed_noun()).treat(f2,'has_no_pred')
)
enot = next_word('not').treat(Etok.etok,'not')
@memo
def is_aPred():
"""Parser for nominal predicates
>>> pstream(is_aPred(),'not a TIGHTEST_TYPE')
Etok(indefinite_pred,'not a TIGHTEST_TYPE')
"""
def f1(acc):
((n,_),g)=acc
return Etok(name='indefinite_pred',etoks=(n,g),raw=acc)
def f2(acc):
return Etok(name='definite_pred',etoks=acc,raw=acc)
return first((enot.possibly() + lit('a').possibly() + general_type()).treat(f1,'indefinite_pred') ,
(enot.possibly() + definite_term()).treat(f2,'definite_pred')
)
@memo
def is_pred():
"""Parser for adjectival predicates
>>> pstream(is_pred(),'not PRIM_ADJECTIVE')
Etok(is_adjective,'not PRIM_ADJECTIVE')
>>> pstream(is_pred(),'not pairwise PRIM_ADJECTIVE_MULTISUBJECT')
Etok(is_adjective_multisubject,'not pairwise PRIM_ADJECTIVE_MULTISUBJECT')
>>> pstream(is_pred(),'having the PRIM_POSSESSED_NOUN')
Etok(is_with,'having the PRIM_POSSESSED_NOUN')
"""
def f1(acc):
return Etok(name='is_adjective',etoks=acc,raw=acc)
def f2(acc):
((n,p),m)=acc
return Etok(name='is_adjective_multisubject',etoks=(n,p,m),raw=acc)
def f3(acc):
return Etok(name='is_with',etoks=acc[1:],raw=acc) ##
return first(
(enot.possibly() + get_lookup_parse('prim_adjective')).treat(f1,'is_adjective') ,
(enot.possibly() + next_word('pairwise').treat(Etok.etok).possibly() + get_lookup_parse('prim_adjective_multisubject')).treat(f2,'is_adjective_multisubject') ,
(lit('with') + has_pred()).treat(f3,'is_with')
)
@memo
def does_pred():
"""Parser for verbal predicates.
Umbrella for various verbal, adjectival, nominal predicates.
"""
def f1(acc):
((_,n),v)=acc
return Etok(name='do_verb',etoks=(n,v),raw=acc)
def f2(acc):
((_,n),v)=acc
return Etok(name='do_verb_multisubject',etoks=(n,v),raw=acc)
def f3(acc):
(_,h)=acc
return Etok(name='do_has_pred',etoks=[h],raw=acc)
def f4(acc):
(_,ps)=acc
return Etok(name='does_is_adj',etoks=ps,raw=acc)
def f5(acc):
(_,ps)=acc
return Etok(name='is_nominal',etoks=ps,raw=acc)
return first(
(lit('do').possibly() + enot.possibly() + get_lookup_parse('prim_verb')).treat(f1,'do_verb') ,
(lit('do').possibly() + enot.possibly() + get_lookup_parse('prim_verb_multisubject')).treat(f2,'do_verb_multisubject') ,
(lit('has') + has_pred()).treat(f3,'do_has_pred') ,
(lit('is') + c.plus_andcomma(is_pred())).treat(f4,'does_is_adj') ,
(lit('is') + c.plus_andcomma(is_aPred())).treat(f5,'is_nominal')
)
# pseudoterms here
@memo
def plain_pred_pseudoterm():
"""Parser for a pseudoterm.
A pseudoterm is not a term in the grammar.
It is a term-like entity that can be
quantified over by extracting the
free variables from the pseudoterm and
quantifying over them.
For example, 'for all x,y < 5'.
The output is checked to be plain.
>>> pstream(plain_pred_pseudoterm(),'x, y = u, holding x')
Etok(plain_pred_pseudoterm,'x , y = u , holding x')
"""
def f(acc):
(_,t,_)=acc
return Etok(name='plain_pred_pseudoterm',etoks=t,raw=acc)
return c.opt_paren(tdop_rel_prop() + holding_vars().possibly()).if_test(isplains).treat(f,'plain_pred_pseudo_term')
#def predicate_pseudoterm():
# """Parse a plain_pred_pseudoterm with attribute"""
# return attribute(plain_pred_pseudoterm())
#def attribute_pseudoterm():
# """Parser for a pseudoterm with attribute"""
# return attribute(pseudoterm_without_attribute())
@memo
def pseudoterm_without_attribute():
"""Recursive parser for various pseudoterms
>>> pstream(pseudoterm_without_attribute(),'x of type TIGHTEST_TYPE')
Etok(annotated,'x of type TIGHTEST_TYPE')
>>> pstream(attribute(pseudoterm_without_attribute()),'x')
Etok(attribute,'x')
"""
def f2(acc):
(_,t)=acc
return Etok.rawupdate(t,acc)
def f3(acc):
((v,_),ann)=acc
return Etok('annotated',etoks=(v,ann),raw=acc)
def f5(acc):
(_,ps,_)=acc
return Etok.rawupdate(ps,acc)
return first(get_lookup_parse('prim_typed_name') ,
(get_lookup_parse('prim_classifier') + tvar()).treat(f2,'pseudoterm-2') ,
(var() + (lit('with') + next_word('type')) + opentail_type()).treat(f3,'pseudoterm-3') ,
tvar() , #after: var with...
c.paren(c.lazy_call(pseudoterm_without_attribute)).treat(f5,'pseudoterm-5')
)
@memo
def pseudoterm():
"""
Parser for pseudoterm.
This is the principal nonterminal for pseudoterm,
subsuming others.
"""
def f(acc):
return Etok(name='pseudoterm',etoks=acc,raw=acc)
return first(attribute(pseudoterm_without_attribute()) ,
attribute(plain_pred_pseudoterm()) ).treat(f,'pseudoterm')
# statements
comma_and = comma + next_word('and')
comma_or = comma + next_word('or')
filler = phrase_list_filler().possibly()
@memo
def simple_statement():
"""Parser for simple statement"""
def f(acc):
return Etok(name='simple_statement',etoks=acc,raw=acc)
return terms() + does_pred().plus(next_word('and'))
@memo
def there_is_statement():
"""Parser for pseudoterm existence"""
def f(acc):
(((_,_),n),p)=acc
return Etok(name='there_is_statement',etoks=(n,p),raw=acc)
return (next_word('there')+lit('exist')+next_word('no').possibly()+pseudoterm()).treat(f,'there_is_statement')
@memo
def const_statement():
def f(acc):
return Etok(name='const_statement',etoks=acc,raw=acc)
return first((next_word('the').possibly() + next_word('thesis')) ,
(lit('article').possibly() + lit('contradiction')))
@memo
def symbol_statement():
"""Recursive parser for first-order-logic like statements
Debug: should parse blocks of binders in single pass.
>>> pstream(symbol_statement(),'BINDER_PROP')
Etok(tdop_prop,'BINDER_PROP')
>>> pstream(symbol_statement(),'BINDER_PROP')
Etok(tdop_prop,'BINDER_PROP')
>>> pstream(symbol_statement(),'forall x, BINDER_PROP')
Etok(forall_symbol_statement,'forall x , BINDER_PROP')
"""
def f_forall(acc):
(((_,a),_),s)=acc
return Etok(name='forall_symbol_statement',etoks=(a,s),raw=acc)
def f_exist(acc):
(((_,a),_),s)=acc
return Etok(name='exist_symbol_statement',etoks=(a,s),raw=acc)
def f_not(acc):
return Etok(name='not_symbol_statement',etoks=acc[1],raw=acc)
def f(acc):
(_,s,_)=acc
return Etok.rawupdate(s,acc)
return first(
prop() ,
(lit('forall') + pseudoterm() + binder_comma() + c.lazy_call(symbol_statement)).treat(f_forall,'forall_statement') ,
(lit('exist') + pseudoterm() + binder_comma() + c.lazy_call(symbol_statement)).treat(f_exist,'exist') ,
(next_word('not') + c.lazy_call(symbol_statement)).treat(f_not,'not') ,
(c.paren(c.lazy_call(symbol_statement))).treat(f,'symbol_statement')
)
@memo
def primary_statement():
"""Parser for primary statement"""
return first(
simple_statement() ,
there_is_statement() ,
(filler + const_statement()).treat(lib.snd) ,
(filler + symbol_statement()).treat(lib.snd)
)
@memo
def head_primary():
return first(get_lookup_parse('head_statement') ,
primary_statement())
@memo
def or_chain():
"""Parser for chain of or statements"""
def f(acc):
((p,_),h)=acc
return Etok(name='or_chain',etoks=p[0::2]+[h],raw=acc)
return (primary_statement().plus(comma_or) + comma_or + head_primary()).treat(f,'or_chain')
@memo
def and_chain():
"""Parser for chain of and statements"""
def f(acc):
((p,_),h)=acc
return Etok(name='and_chain',etoks=p[0::2]+[h],raw=acc)
return (primary_statement().plus(comma_and) + comma_and + head_primary()).treat(f,'and_chain')
@memo
def andor_chain():
"""Parser for chain of and/or statements"""
return first(and_chain() , or_chain() , primary_statement()
).name('andor_statement')
@memo
def chain_statement():
"""Parser for chain of and/or/iff statements"""
def f(acc):
(((_,ao,_),_),s)=acc
return Etok('iff_statement',etoks=(ao,s),raw=acc)
return first(andor_chain () ,
(c.paren(andor_chain()) + lit('iff') + get_lookup_parse('statement')).treat(f,'iff_statement')
).name('chain_statement')
@memo
def head_statement():
"""Parser for if/then, negation, for ..., statements
We distinguish between if-then statements and if-then terms.
"""
def f_for(acc):
(((_,p),_),s)=acc
return Etok(name='for_statement',etoks=(p,s),raw=acc)
def f_ifthen(acc):
((((_,s),_),_),s2)=acc
return Etok(name='if_then_statement',etoks=(s,s2),raw=acc)
def f_wrong(acc):
return Etok(name='wrong_statement',etoks=acc[1:],raw=acc)
return first(
# DEBUG: use quasiterm instead of any_name?
(next_word('for') + c.plus_andcomma(any_name()) + binder_comma() + get_lookup_parse('statement')).treat(f_for,'for_statement') ,
(next_word('if')+ get_lookup_parse('statement') + comma + next_word('then') + get_lookup_parse('statement')).treat(f_ifthen,'if_then_statement') ,
(lit('wrong') + get_lookup_parse('statement')).treat(f_wrong,'wrong_statement')
).name('head_statement')
@memo
def statement():
"""Parser for statement.
This subsumes other specialized statements."""
return first(head_statement() , chain_statement()).name('statement')
# next texts
@memo
def namespace():
"""Not implemented. Always fails."""
return Parse.fail()
@memo
def synonym_item():
"""Parser for synonym item as text item
>>> pstream(synonym_item(),'we introduce synonyms rough/-en, tilde/ tildilla.')
Etok(instruction,synonym,'we introduce synonym rough /- en , tilde / tildilla .')
"""
pre = next_word('we').possibly() + next_word('introduce').possibly() + next_word('synonyms')
def f(acc):
((_,b),_)=acc
b1 = c.retreat_list(Instruction.syn(),[b])
return Etok.rawupdate(b1[0],acc)
return (pre + c.balanced_condition(not_period) + period).commit(pre).treat(f)
@memo
def inductive_decl():
"""Parser for declaration of induction types.
It terminates with 'end' keyword.
Identifier must be located internally because of recursion.
>>> pstream(inductive_decl(),'inductive integer | id : POST_COLON_TYPE end')
Etok(inductive_decl,'inductive integer | id : POST_COLON_TYPE end')
"""
def f(acc):
(((((_,i),a),s),c1),_)=acc
#print(f'c1={c1}')
c1 = c.retreat_list(_opt_alt_constructor().many(),[lib.fflatten(c1)])
return Etok(name='inductive_decl',etoks=(i,a,s,c1),raw=acc)
return (c.next_word('inductive') + identifier() + args_template() + opt_colon_sort() +
c.balanced_condition(not_end) + c.next_word('end')).treat(f,'inductive_decl')
@memo
def mutual_inductive_decl_item():
"""DEBUG: not tested"""
pre = lit('declare_mutual_inductive_decl')
def f(acc):
(((_,w),pa),_)=acc
if pa:
pa = pa[1]
return Etok('mutual_inductive_decl_item',etoks=(w[0::2],pa),raw=acc)
return (pre + c.plus_comma(atomic()) +
(lit('param') + args_template()).possibly() + period
).commit(pre).treat(f,'mutual_inductive_decl_item')
@memo
def mutual_inductive_def_item():
"""DEBUG: not tested"""
pre = lit('declare_mutual_inductive_def')
def f(acc):
(((_,w),pa),_)=acc
if pa:
pa = pa[1]
return Etok('mutual_inductive_def_item',etoks=(w[0::2],pa),raw=acc)
return (pre + c.plus_comma(atomic()) +
(lit('param') + args_template()).possibly() + period
).commit(pre).treat(f,'mutual_inductive_def_item')
def moreover_implements_deprecated():
"""DEBUG: not tested.
Deprecated. Add predicate satisfaction instead.
Parser for an item that extends a structure or
inductive type with """
def f(acc):
(((((_,_),g),_),b),_)=acc
b = c.reparse_list(field(),b[0::2])
return (g,b)
return (next_word('moreover') + comma + general_type() +
lit('implement') + c.brace_semif() +
period).treat(f,'moreover_implements')
def this_exists_deprecated():
"""parsing of 'this'-directives.
DEBUG: Remove this feature. Deprecated Unfinished.
"""
def adjective(tok):
s1 = tok.value.lower.replace('_','')
return s1 in ['unique','canonical','welldefined','wellpropped','total','exhaustive']
def this_directive_right_attr():
return next_phrase('by recursion')
def this_directive_pred():
# debug, need to slice [0::2]
return c.plus_andcomma(Parse.next_token().if_test(adjective))
return first_phrase(['this exist','this is'])
@memo
def satisfy_item():
"""
Parser for item that extends a given type with
a unique existence statement, used in satisfaction-style
structural typing of structures.
This is used to define coercions say from a
metric space to topological space.
The statement should be exists unique.
DEBUG: A pseudoterm might be too general.
We want expressions like (G:group) or group G ...
>>> pstream(satisfy_item(),'Every (G: POST_COLON_TYPE) satisfies BINDER_PROP.')
Etok(satisfy_item,'every ( G : POST_COLON_TYPE ) satisfy BINDER_PROP .')
"""
def f(acc):
(((p,f),s),_)=acc
if f:
(_,f,_)=f
return Etok('satisfy_item',etoks=(p,f,s),raw=acc)
pre = next_word('every') + pseudoterm() +lit('satisfy')
return (pre + c.opt_paren(field_prefix()).possibly() + statement() + period).commit(pre,'satisfy_item').treat(f,'satisfy_item')
@memo
def then_prefix():
return lit('then').possibly()
# no memo, takes an argument
def decl_label(s:str):
"""
Sample input for decl_label('axiom')
Axiom.
Conjecture Riemann.
Sample input for decl_label('theorem')
Theorem Pappus.
>>> pstream(decl_label('axiom'),'Equation 90.')
Etok(decl_label,'equation 90 .')
"""
def f(acc):
((a,l),_)=acc
return Etok(name='decl_label',etoks=(a,l),raw=acc)
return (lit(s)+label().possibly() + period).treat(f)
@memo
def let_annotation_prefix():
"""Parser for initial segment of a let statement.
>>> pstream(let_annotation_prefix(),'let u,v,w be fixed ...')
Etok(let_annotation_prefix,'let u , v , w be fixed')
"""
def f(acc):
((((l,vs),_),_),f)=acc
if f:
l=f
vs = vs[0::2]
return Etok(name='let_annotation_prefix',etoks=(Etok.etok(l),vs),raw=acc)
return (next_word('let') + c.plus_comma(var()) +
next_word('be') + lit('a').possibly() +
next_word('fixed').possibly()).treat(f)
@memo
def let_annotation():
"""Parser for let_annotations. Terminating punctuation not included.
Sample parser inputs:
Let G be a group
Let G be a fixed group
Let (H G : group)
Fix (x : R)
"""
def f1(acc):
return Etok(name='let_annotation',etoks=acc,raw=acc,rule='1')
def f2(acc):
return Etok(name='let_annotation',etoks=acc,raw=acc,rule='2')
def f3(acc):
return Etok(name='let_annotation',etoks=acc,raw=acc,rule='3')
return first(
(first_word( 'fix let') + annotated_vars()).treat(f1,'let_annotation1') ,
(let_annotation_prefix() + general_type()).treat(f2,'let_annotation2') ,
(let_annotation_prefix() + (rawtype|rawprop)).treat(f3,'let_annotation3')
)
@memo
def assumption_prefix():
"""Parser for prefix of assumption.
>>> pstream(assumption_prefix(),'We assume that')
((Etok(LIT,lets,'we'), Etok(LIT,assume,'assume')), LexToken(WORD,'that',1,10))
"""
def f(acc):
return Etok(name='assumption_prefix',etoks=[],raw=acc)
return (
lit('lets') + lit('assume') + next_word('that').possibly()
)
@memo
def assumption():
"""Parser for assumptions in theorems and axioms.
There are two varieties: 'We assume that' or type annotations.
>>> pstream(assumption(),'We assume that BINDER_PROP.')
Etok(assumption,'we assume that BINDER_PROP .')
"""
def f(acc):
((_,s),_)=acc
return Etok(name='assumption',etoks=[s],raw=acc)
def f2(acc):
(l,_)=acc
return Etok.rawupdate(l,acc)
pre = lit('lets') + lit('assume')
pre2 = first_word('let fix')
return first((assumption_prefix() + statement() + period).commit(pre).treat(f,'assumption') ,
(let_annotation() + period).commit(pre2).treat(f2,'assumption')
)
@memo
def axiom():
"""Parser for axioms and other statements without proof.
We need unambiguous lines between assumptions and claims.
'Then' and 'Moreover' always belong to claims.
Sentences starting with let, fix, assumption_prefix are assumptions.
>>> pstream(axiom(),'Conjecture. We assume that BINDER_PROP. Then BINDER_PROP.')
Etok(axiom,'conjecture . we assume that BINDER_PROP . then BINDER_PROP .')
"""
def f(acc):
(((((_,aa),_),s),_),ms) = acc
ms = [s for ((_,s),_) in ms]
return Etok(name='axiom',etoks=(aa,[s]+ms),raw=acc)
return (
decl_label('axiom') + assumption().many() +
then_prefix() + statement() + period +
(next_word('moreover') + statement() + period).many()
).treat(f,'axiom')
@memo
def theorem():
"""Parser for theorem and proof.
>>> pstream(theorem(),'Theorem 1. AFFIRM_PROOF')
Etok(theorem,'theorem 1 . AFFIRM_PROOF')
"""
def f(acc):
((_,aa),p)=acc
return Etok(name='theorem',etoks=(aa,p),raw=acc)
return (decl_label('theorem') + assumption().many() +
get_lookup_parse('affirm_proof')).treat(f,'theorem')
@memo
def nonkey(): #was not_banned
keyword = [
'is','be','are','denote','define','enter','namespace','stand',
'if','iff','inferring','the','a','an','we','say','write',
'assume','suppose','let','said','defined','or','fix','fixed'
]
def p(token):
return not(c.singularize(token.value) in keyword)
return c.next_type(['VAR','WORD','ATOMIC_IDENTIFIER']).if_test(p)
@memo
def any_controlseq(): #was controlseq
r"""
>>> pstream(any_controlseq(),r'\include')
Etok(CONTROLSEQ,\include,'\include')
"""
return c.next_type('CONTROLSEQ').treat(Etok.etok)
def controlseq(s): #was the_controlseq
"""Parser for a particular control sequence 's'.
s includes the backslash."""
return any_controlseq().if_value(s)
# PROOFS
class Proof_step:
"""Parser constructors for proof steps
Everything in this class is non-recursive in terms of
the lookup 'proof_script' 'affirm_proof'
"""
def kill(acc):
"""Currently proof statements are not being
saved into the AST. Parse then discard."""
return Etok(name='proof-step',etoks=[],raw=acc)
def canned_prefix():
"""
>>> pstream(Proof_step.canned_prefix(),'Of course, it is trivial to see,')
Etok(proof-step,'of course , it is trivial to see ,')
"""
# debug need slice [0::2]
return (c.plus_andcomma(phrase_list_transition()) +
comma.possibly()
).treat(Proof_step.kill,'canned_prefix')
def canned():
"""parser for canned proof statements
>>> pstream(Proof_step.canned(),'The proof is routine')
Etok(proof-step,'the proof is routine')
>>> pstream(Proof_step.canned(),'The corollary follows')
Etok(proof-step,'the corollary follow')
"""
return first(next_phrase("we proceed as follows") ,
(next_word('the') +
first_word('result lemma theorem proposition corollary') +
next_word('now').possibly() +
next_word('follows')) ,
next_phrase('the other cases are similar') ,
(next_phrase('the proof is')+ first_word('obvious trivial easy routine'))).treat(Proof_step.kill,'canned')
def ref_item():
"""
>>> pstream(Proof_step.ref_item(),'theorem 33')
Etok(proof-step,'theorem 33')
"""
return c.plus_andcomma(read_keyword('location').possibly() + atomic()).treat(Proof_step.kill,'ref_item')
def by_ref():
"""
>>> pstream(Proof_step.by_ref(),'(by Theorem 1)')
Etok(proof-step,'( by theorem 1 )')
"""
return c.paren(next_word('by') + Proof_step.ref_item()).possibly().treat(Proof_step.kill,'by_ref')
def by_method():
"""
menhir/ocaml doc describes an ambiguity here.
I'm hoping it goes away now that plain_term is implemented.'
>>> pstream(Proof_step.by_method(),'by induction on TDOP_TERM. ')
Etok(proof-step,'by induction on TDOP_TERM')
"""
return (next_word('by') +
(first_phrase(['contradiction','case analysis']) |
(next_word('induction') +
(next_word('on') + plain_term()).possibly()) +
(next_word('that')| period).probe()).name('post_by_method')
).treat(Proof_step.kill,'by_method')
def choose_prefix():
"""
>>> pstream(Proof_step.choose_prefix(),'We choose')
Etok(proof-step,'we choose')
"""
return (then_prefix() + lit('lets').possibly() + lit('choose')).treat(Proof_step.kill,'choose_prefix')
def opt_proof():
return get_lookup_parse('proof_script').possibly().treat(Proof_step.kill,'opt_proof')
def choose():
"""
>>> pstream(Proof_step.choose(),'We choose x and y.')
Etok(proof-step,'we choose x and y .')
"""
return (Proof_step.choose_prefix() + c.plus_andcomma(pseudoterm()) +
Proof_step.by_ref() + period +
Proof_step.opt_proof()
).treat(Proof_step.kill,'choose')
def proof_preamble():
"""
>>> pstream(Proof_step.proof_preamble(),'Proof by contradiction.')
Etok(proof-step,'proof by contradiction .')
"""
return first(
(next_word('proof') + Proof_step.by_method().possibly() + period) ,
next_word('indeed')
).treat(Proof_step.kill,'proof_preamble')
def goal_prefix():
"""
>>> pstream(Proof_step.goal_prefix(),'We prove that ...')
Etok(proof-step,'we prove that')
"""
return first((lit('lets').possibly() + lit('prove') + next_word('that')) ,
(Proof_step.by_method() + next_word('that')).possibly()
).treat(Proof_step.kill,'goal_prefix')
def goal_proof():
"""
>>> pstream(Proof_step.goal_proof(),'We prove that it is wrong that STATEMENT (by theorem 3). proof_script ...')
Etok(proof-step,'we prove that it is wrong that STATEMENT ( by theorem 3 ) . proof_script')
"""
return (Proof_step.goal_prefix() + statement() + Proof_step.by_ref() + period +
get_lookup_parse('proof_script')
).treat(Proof_step.kill,'goal_proof')
def statement_proof():
return (then_prefix() + statement() + Proof_step.by_ref() + period +
(next_word('moreover') + statement() + Proof_step.by_ref() + period).many()
).treat(Proof_step.kill,'statement_proof')
def case():
"""
>>> pstream(Proof_step.case(),'Case it is wrong that STATEMENT.')
Etok(proof-step,'case it is wrong that STATEMENT .')
"""
return (next_word('case') + statement() + period + Proof_step.opt_proof()
).treat(Proof_step.kill,'case')
def proof_body():
"""Forthel prohibits the last proof-body in a proof
from being an assumption. We do not prohibit this.
"""
return first(
assumption() ,
Proof_step.canned() ,
Proof_step.case() ,
Proof_step.choose() ,
get_lookup_parse('affirm_proof')
).treat(Proof_step.kill,'proof_body')
@memo
def affirm_proof():
return first(c.lazy_call(Proof_step.statement_proof) ,
c.lazy_call(Proof_step.goal_proof))
@memo
def proof_script():
return (Proof_step.proof_preamble() +
c.lazy_call(Proof_step.proof_body).plus() +
lit('qed') + period
)
# patterns
pattern_key = ["is","be","are","denote","define"
"enter","namespace",
"stand","if","iff","inferring","the","a","an",
"we","say","write",
"assume","suppose","let",
"said","defined","or","fix","fixed" # and (need in 'resultant of f and g')
]
class Pattern:
"""Parser generators for patterns"""
def word_nonkey():
"""Parser for any WORD token except for keywords."""
return c.next_any_word_except(pattern_key).treat(Etok.etok).name('word_nonkey')
def word_extended():
"""parser for 'word (or word) (word pattern)'.
words cannote be key words
(or word) gives a synonym as a parenthetical.
(word pattern) is an optional recursive word pattern.
>>> pstream(Pattern.word_extended(),'unsupported (or empty) (WORD_PATTERN)')
Etok(word_extended,'unsupported ( or empty ) ( WORD_PATTERN )')
"""
def f(acc):
((w,o),wp)=acc
if o:
(_,(_,o),_)=o
wp = (w for (_,w,_) in wp)
return Etok('word_extended',etoks=(w,wp,o),raw=acc)
return (Pattern.word_nonkey() +
c.paren(next_word('or') + Pattern.word_nonkey()).possibly() +
c.paren(get_lookup_parse('word_pattern')).many()
).treat(f,'word_extended')
def words_extended():
return Pattern.word_extended().plus()
def _var():
"""parser for a variable appearing in a pattern
>>> pstream(Pattern._var(),'x')
Etok(VAR,x,'x')
"""
return var() | annotated_var()
def word_pattern():
"""Parser for an (extended) word pattern,
starting with an (extended) word.
Extended words appear in even positions and
variables appear in odd positions.
>>> pstream(Pattern.word_pattern(),'integrable with respect to x')
Etok(word_pattern,'integrable with respect to x')
"""
def f(acc):
((w,vws),v)=acc
vws = [w]+vws
if v:
vws = lib.fflatten(vws + [v])
return Etok('word_pattern',etoks=vws,raw=acc)
return (Pattern.words_extended() +
(Pattern._var() + Pattern.words_extended()).many() +
Pattern._var().possibly()).treat(f,'word_pattern')
def type_word_pattern():
def f(acc):
(_,wp)=acc
return wp
return (lit('a').possibly() + Pattern.word_pattern()).treat(f,'type_word_pattern')
def function_word_pattern():
def f(acc):
(_,wp)=acc
return wp
return (next_word('the') + Pattern.word_pattern()).treat(f,'function_word_pattern')
def notion_pattern():
def f(acc):
(((v,_),_),wp)=acc
return Etok('notion_pattern',etoks=(v,wp),raw=acc)
return (Pattern._var() + next_word('is') + lit('a') +
Pattern.word_pattern()
).treat(f,'notion_pattern')
def adjective_pattern():
"""
profile ('adjective_pattern' ('var','word_pattern'))
"""
def f(acc):
(((v,_),_),wp)=acc
return Etok('adjective_pattern',etoks=(v,wp),raw=acc)
return (Pattern._var() + next_word('is') +
next_word('called').possibly() +
Pattern.word_pattern()
).treat(f,'adjective_pattern')
def var_multisubsect_pattern():
"""
Debug: The variables in a multisubject must have the same type.
"""
def f1(acc):
((v1,_),v2)=acc
return Etok(name='var_multisubject_pattern',etoks=(v1,v2,None),raw=acc)
def f2(acc):
(_,(((v1,_),v2),o),_)=acc
return Etok(name='var_multisubject_pattern',etoks=(v1,v2,o),raw=acc)
return first(
(Pattern._var() + comma +
Pattern._var()).treat(f1,'var_multisubject_pattern') ,
c.paren(Pattern._var() + comma + Pattern._var() +
opt_colon_type()).treat(f2,'var_multisubject_pattern')
)
def adjective_multisubject_pattern():
def f(acc):
(((v,_),_),w)=acc
return Etok('adjective_multisubject_pattern',etoks=(v,w),raw=acc)
return (
Pattern.var_multisubsect_pattern() + next_word('are') +
next_word('called').possibly() + Pattern.word_pattern()
).treat(f,'adjective_multisubject_pattern')
def verb_pattern():
def f(acc):
return Etok('verb_pattern',etoks=acc,raw=acc)
return (Pattern._var() + Pattern.word_pattern()).treat(f,'verb_pattern')
def verb_multisubject_pattern():
def f(acc):
return Etok('verb_multisubject_pattern',etoks=acc,raw=acc)
return (Pattern.var_multisubsect_pattern() +
Pattern.word_pattern()).treat(f,'verb_multisubject_pattern')
def predicate_word_pattern():
return first(
Pattern.notion_pattern() ,
Pattern.adjective_pattern() ,
Pattern.adjective_multisubject_pattern() ,
Pattern.verb_pattern() ,
Pattern.verb_multisubject_pattern()
).name('predicate_word_pattern')
def controlseq_pattern():
r"""
>>> pstream(Pattern.controlseq_pattern(),r'\tie {x} {y} [z]')
Etok(controlseq_pattern,'\tie { x } { y }')
"""
def f(acc):
(a,vs)=acc
vs = [v for (_,v,_) in vs]
return Etok('controlseq_pattern',etoks=(a,vs),raw=acc)
return (
any_controlseq() + c.brace(Pattern._var()).many()
).treat(f,'controlseq_pattern')
def binary_controlseq_pattern():
def f(acc):
((v,c),v2)=acc
return Etok('binary_controlseq_pattern',etoks=(v,c,v2),raw=acc)
return (
Pattern._var() + Pattern.controlseq_pattern() + Pattern._var()
).treat(f,'binary_controlseq_pattern')
def identifier_pattern():
def f(acc):
return Etok('identifier_pattern',etoks=acc,raw=acc)
return first(
(identifier() + args_template()) ,
(c.next_type('BLANK').treat(Etok.etok) + args_template())
).treat(f)
def precedence_level(): #was paren_precedence_level
"""parser for the precedence level.
output: (INTEGER,ASSOC), where ASSOC in ['left','right','no'].
integer conversion is not performed.
>>> pstream(Pattern.precedence_level(),'with precedence 10 and left associativity ...')
Etok(precedence_level,'with precedence 10 and left associativity')
"""
def f(acc):
(_,((_,i),a),_) =acc
if a:
((_,a),_)=a
return Etok('precedence_level',etoks=(i,a),raw=acc)
return c.opt_paren(
(next_phrase('with precedence') + c.next_type('INTEGER').treat(Etok.etok)) +
(next_word('and') + read_keyword('assoc').treat(Etok.etok) + next_word('associativity')).possibly()
).treat(f,'precedence_level')
def get_precedence_level(e):
"""helper function that computes the precedence of Etok e
as integer,assoc"""
(i,a)=e.etoks
i = int(i)
if not(a):
return (i,'no')
return (i,c.getvalue(a))
def any_symbol(): # was symbol
return first(
c.next_type('SYMBOL').treat(Etok.etok) ,
Pattern.controlseq_pattern()
)
def symbol(s): # was the_symbol
return c.next_type('SYMBOL').if_value(s).treat(Etok.etok)
def symbol_pattern():
"""Parser for general symbol pattern. Alternating S/V.
At least one symbol appears, but possibly no variables.
V? S (VS)* V? where V=var, S=symbol.
The symbols can occupy either even or add positions.
>>> pstream(Pattern.symbol_pattern(),'x ## y ## z')
Etok(symbol_pattern,'x ## y ## z')
>>> pstream(Pattern.symbol_pattern(),'x ## y ## z with precedence 5')
Etok(symbol_pattern,'x ## y ## z with precedence 5')
"""
def f(acc):
((((v1,s1),vs),v2),p)=acc
vs = [s1]+vs
if v1:
vs = [v1]+vs
if v2:
vs = vs + [v2]
return Etok('symbol_pattern',etoks=(vs,p),raw=acc)
return (
Pattern._var().possibly() + Pattern.any_symbol() +
(Pattern._var() + Pattern.any_symbol()).many() +
Pattern._var().possibly() + Pattern.precedence_level().possibly()
).treat(f,'symbol_pattern')
def binary_symbol_pattern():
"""Parser for binary symbol pattern.
VSV (V=var, S=symbol)
Special case of symbol_pattern.
>>> pstream(Pattern.binary_symbol_pattern(),'x ^^ y with precedence 10 and right associativity')
Etok(binary_symbol_pattern,'x ^^ y with precedence 10 and right associativity')
>>> pstream(Pattern.binary_symbol_pattern(),'x ## y')
Etok(binary_symbol_pattern,'x ## y')
"""
def f(acc):
((v,s),v2)=acc
return Etok('binary_symbol_pattern',etoks=(v,s,v2),raw=acc)
return (
Pattern._var() + Pattern.any_symbol() + Pattern._var() +
Pattern.precedence_level().possibly()
).treat(f,'binary_symbol_pattern')
class Macro:
def in_section():
"""Parser for in-section scoping.
>>> pstream(Macro.in_section(),'In this section,')
Etok(in_section,'in this section ,')
Output:
Etok whose value is the location keyword.
"""
def f(acc):
((_,d),_)=acc
return Etok(name='in_section',etoks=[d],raw=acc)
return (next_phrase('in this') + read_keyword('doc') +
comma.possibly()).treat(f,'in_section')
#def we_record_def_deprecated():
# """Parser for registered facts.
def copula():
"""Parser for copula in macro declarations.
>>> pstream(Macro.copula(),'is defined as')
Etok(copula,'is defined as')
"""
def f(acc):
return Etok('copula',etoks=[],raw=acc)
return first(
(lit('is') + lit('defined_as').possibly()) ,
(next_value(':=')) ,
(lit('denote'))
).treat(f,'copula')
def function_copula():
"""Parser for function_copula with possible type annotation
>>> pstream(Macro.function_copula(),': POST_COLON_TYPE := ...')
Etok(function_copula,': POST_COLON_TYPE :=')
"""
def f2(acc):
(o,_)=acc
return Etok(name='function_copula',etoks=[o],raw=acc)
return first(
Macro.copula() ,
(opt_colon_type() + next_value(':=')).treat(f2,'function_copula')
)
def iff_junction():
return lit('iff')
def opt_say():
"""
>>> pstream(Macro.opt_say(),'We say')
Etok(LIT,we-say,'we say')
"""
return lit('we-say').possibly()
#def opt_record_deprecated():
# return lit('we-record').possibly()
def opt_define():
"""
>>> pstream(Macro.opt_define(),'Let us define ...')
Etok(opt_define,'let us define')
"""
def f(acc):
return Etok('opt_define',etoks=[],raw=acc)
return (
(lit('lets') + next_word('define').possibly()) # |
#Macro.opt_record()
).treat(f,'opt_define')
#def macro_inferring():
def classifier_word_pattern(): # was classifier_words
def f(acc):
return Etok(name='classifier_word_pattern',etoks=acc[0::2],raw=acc)
return (
c.plus_andcomma(c.next_any_word_except(['is','are','be']).treat(Etok.etok))
).treat(f,'classifier_word_pattern')
def classifier_def():
"""Parser for defining classifiers.
profile: ('classifier_word_pattern' 'WORD'*)
>>> pstream(Macro.classifier_def(),'Let function, symbol, object be classifiers')
Etok(classifier_word_pattern,'let function , symbol , object be classifier')
"""
def f(acc):
((((_,w),_),_),_)=acc
return Etok.rawupdate(w,acc)
return (
next_word('let') + Macro.classifier_word_pattern() +
lit('is') + lit('a').possibly() + next_word('classifier')
).treat(f,'classifier_def')
def type_head():
"""
Parser for the LHS pattern of a type def.
The symbol pattern has fixed precedence, right assoc
"""
return first(
Pattern.symbol_pattern() ,
Pattern.type_word_pattern() ,
Pattern.identifier_pattern() ,
Pattern.controlseq_pattern()
)
def type_def():
"""
Parser for a type definition.
>>> pstream(Macro.type_def(),'We define x ## y : Type to be TIGHTEST_TYPE')
Etok(type_def,'we define x ## y : type to be TIGHTEST_TYPE')
>>> pstream(Macro.type_def(),'We define x ## y to be the type TIGHTEST_TYPE')
Etok(type_def,'we define x ## y to be the type TIGHTEST_TYPE')
"""
def f1(acc):
((((((_,h),_),_),_),_),t)=acc
return Etok('type_def',etoks=(h,t),raw=acc)
def f2(acc):
((((_,h),_),_),t)=acc
return Etok('type_def',etoks=(h,t),raw=acc)
"""Parser for a type definition"""
return first(
(Macro.opt_define() + Macro.type_head() + colon + rawtype +
Macro.copula() + lit('a').possibly() + general_type()).treat(f1,'type_def') ,
(Macro.opt_define() + Macro.type_head() + Macro.copula() +
c.next_phrase('the type') + general_type()).treat(f2,'type_def')
)
def function_head():
"""
Parser for the LHS pattern of a function def.
"""
return first(
Pattern.function_word_pattern() ,
Pattern.symbol_pattern() ,
Pattern.identifier_pattern()
)
def function_def():
"""
Parser for function definitions.
>>> pstream(Macro.function_def(),'We define x ## y := the PRIM_DEFINITE_NOUN')
Etok(function_def,'we define x ## y := the PRIM_DEFINITE_NOUN')
"""
def f(acc):
#return acc
(((((_,h),_),_),p))=acc
return Etok('function_def',etoks=(h,p),raw=acc)
return (
Macro.opt_define() + Macro.function_head() +
Macro.function_copula() + lit('equal').possibly() +
#next_word('the').possibly() + N.B. plain_definite_noun includes 'the' already
plain_term()
).treat(f,'function_def')
def predicate_head():
"""Parser for the LHS pattern of a predicate def"""
return first(
Pattern.identifier_pattern() , #before word pattern
Pattern.predicate_word_pattern() ,
Pattern.symbol_pattern()
)
def predicate_def():
"""Parser for predicate definitions
>>> pstream(Macro.predicate_def(),'We write x >> y iff it is wrong that STATEMENT')
Etok(predicate_def,'we write x >> y iff it is wrong that STATEMENT')
"""
def f(acc):
(((_,h),_),s)=acc
return Etok('predicate_def',etoks=(h,s),raw=acc)
return (
Macro.opt_say() + Macro.predicate_head() +
Macro.iff_junction() + statement()
).treat(f,'predicate_def')
def binder_def():
"""Parser for definition of new binders (quantifiers)
>>> pstream(Macro.binder_def(),'Let the binder ## (x : POST_COLON_TYPE), P denote it is wrong that STATEMENT ')
Etok(binder_def,'let the binder ## ( x : POST_COLON_TYPE ) , P denote it is wrong that STATEMENT')
"""
def f(acc):
((((((_,s),(_,(v,t),_)),_),v2),_),p)=acc
return Etok('binder_def',etoks=(s,v,t,v2,p),raw=acc)
return (
next_phrase('let the binder') + Pattern.any_symbol() +
c.paren(var() + opt_colon_type()) +
comma + var() +
lit('denote') + statement()
).treat(f,'binder_def')
def definition_statement():
"""Parser for classifier, type, function, predicate defs
"""
return first(
Macro.classifier_def() ,
Macro.type_def() ,
Macro.function_def() ,
Macro.predicate_def()
)
def definition_affirm():
"""Definition + period"""
def f(acc):
(d,_)=acc
return Etok.rawupdate(d,acc)
return (Macro.definition_statement() + period).treat(f,'definition_affirm')
def definition_label():
"""
>>> pstream (Macro.definition_label(),'Definition XX.')
Etok(definition_label,'definition XX .')
"""
def f(acc):
((_,l),_) = acc
return Etok(name='definition_label',etoks=l,raw=acc)
return (lit('def') + label().possibly() + period).treat(f,'definition_label')
def definition():
"""
Main parser for definitions.
Disambiguation:
predicates use iff_junction.
identifier patterns start with an identifier.
predicate word patterns contains words
binary symbol patterns contain start with tvar then a symbol.
Functions use the copula.
word patterns starts with LIT_THE word ...
symbol patterns contain a symbol or CONTROLSEQ.
identifier patterns start with an identifier.
Types use the copula.
type_def from function_def distinguished by COLON ID_TYPE
before the copula.
>>> pstream(Macro.definition(),'Definition XX. We say x >> y iff it is wrong that STATEMENT.')
Etok(definition,'definition XX . we say x >> y iff it is wrong that STATEMENT .')
"""
def f(acc):
((p,a),d)=acc
return Etok('definition',etoks=(p,a,d),raw=acc)
return (
Macro.definition_label() + assumption().many() +
Macro.definition_affirm()
).treat(f,'definition')
def macro_body():
"""Parser for macro classifier, type, function def,
let annotation, etc."""
return first(
Macro.classifier_def() ,
Macro.type_def() ,
Macro.function_def() ,
Macro.predicate_def() ,
let_annotation() ,
Macro.binder_def()
# record_def
# enter_namespace
)
def macro():
"""A macros are like definitions, but they
can have local scope, they can be chained,
they include binder defs and let annotations,
and they does not have a label.
Macros are expanded immediately, but definitions
create a new constant and are not expanded.
The expansion of a macro must be a plain_term,
but a definition may contain bound variables.
>>> pstream(Macro.macro(),'In this section, we write x << y iff it is wrong that STATEMENT.')
Etok(macro,'in this section , we write x << y iff it is wrong that STATEMENT .')
"""
sep = semicolon + next_word('and').possibly()
def f(acc):
((s,b),_)=acc
return Etok('macro',etoks=(s,b[0::2]),raw=acc)
return (
Macro.in_section().possibly() +
Macro.macro_body().plus(sep) + period
).treat(f,'macro')
@memo
def declaration():
"""Parser for axiom, definition, or theorem
"""
return first(
theorem() ,
axiom() ,
Macro.definition()
)
@memo
def utterance():
"""A text item is a major block of texts
in a document.
Every item must end with a period or be an
instruction in [].
"""
return first(
section_label() ,
# not implemented. namespace() ,
Instruction.instruction() ,
synonym_item() ,
declaration() ,
Macro.macro() ,
inductive_decl() ,
mutual_inductive_decl_item() ,
mutual_inductive_def_item() ,
satisfy_item()
)
@memo
def text():
"""Parse a sequence of utterance
Normally, we work at the level of utterances,
processing each in turn, before moving to the next.
The text Parser skips the processing.
"""
return utterance().many() + lit('done').possibly()
@memo
def program_text():
return text() + Parse.finished()
# initialize lookup tables
def _add_prim1():
def equal():
return next_value('=').treat(Etok.etok)
add_lookup_parse('prim_binary_relation_op',equal())
def bool():
return (rawtrue | rawfalse)
add_lookup_parse('prim_relation', bool().name('prim_relation','True-False'))
pass
_add_prim1()
lookup = {
'affirm_proof':affirm_proof,
'alt_term': alt_term,
'binder_prop': binder_prop,
'delimited_term':delimited_term,
'does_pred':does_pred,
'general_type':general_type,
'head_statement':head_statement,
'is_pred':is_pred,
'opentail_term':opentail_term,
'plain_term':plain_term,
'post_colon_type':post_colon_type,
'proof_script':proof_script,
'prop':prop,
'pseudoterm':pseudoterm,
'sort_expr':sort_expr,
'statement':statement,
'tdop_term':tdop_term,
'term':term,
'tightest_expr':tightest_expr,
'word_pattern':Pattern.word_pattern,
}
def _init_lookup_parse():
for s in {}:
add_lookup_parse(s,c.lazy_call(lookup[s]))
_init_lookup_parse()
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
|
[
"parser_combinator.Parse.first",
"tokenlib.Etok.etok",
"parser_combinator.Parse",
"parser_combinator.Parse.next_token",
"parser_combinator.plus_andcomma",
"tokenlib.Etok",
"tokenlib.update",
"parser_combinator.Parse.fail",
"parser_combinator.next_type",
"parser_combinator.next_value",
"doctest.testmod",
"exception.ErrorItem",
"parser_combinator.Parse.finished",
"parser_combinator.first_word",
"parser_combinator.getvalue",
"lib.fflatten",
"parser_combinator.next_any_word_except",
"parser_combinator.first_phrase",
"parser_combinator.brace_semif",
"sample.word_net",
"parser_combinator.singularize",
"tokenlib.is_lex",
"parser_combinator.can_wordify",
"parser_combinator.synonymize",
"parser_combinator.next_phrase",
"parser_combinator.next_word",
"parser_combinator.balanced_condition",
"parser_combinator.next_any_word",
"lib.flatten",
"parser_combinator.balanced",
"parser_combinator.Parse.word",
"parser_combinator.lazy_call",
"tokenlib.Etok.rawupdate",
"parser_combinator.bracket"
] |
[((6977, 6992), 'parser_combinator.next_value', 'next_value', (['"""."""'], {}), "('.')\n", (6987, 6992), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((7001, 7016), 'parser_combinator.next_value', 'next_value', (['""","""'], {}), "(',')\n", (7011, 7016), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((7029, 7044), 'parser_combinator.next_value', 'next_value', (['""";"""'], {}), "(';')\n", (7039, 7044), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((7135, 7150), 'parser_combinator.next_value', 'next_value', (['""":"""'], {}), "(':')\n", (7145, 7150), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((1439, 1456), 'parser_combinator.Parse.first', 'Parse.first', (['args'], {}), '(args)\n', (1450, 1456), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((7084, 7099), 'parser_combinator.next_value', 'next_value', (['""":"""'], {}), "(':')\n", (7094, 7099), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((7175, 7193), 'parser_combinator.first_word', 'first_word', (['"""a an"""'], {}), "('a an')\n", (7185, 7193), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((7223, 7245), 'parser_combinator.first_word', 'first_word', (['"""a an the"""'], {}), "('a an the')\n", (7233, 7245), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((7261, 7289), 'parser_combinator.first_word', 'first_word', (['"""assume suppose"""'], {}), "('assume suppose')\n", (7271, 7289), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((7304, 7362), 'parser_combinator.first_word', 'first_word', (['"""axiom conjecture hypothesis equation formula"""'], {}), "('axiom conjecture hypothesis equation formula')\n", (7314, 7362), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((7378, 7408), 'parser_combinator.first_word', 'first_word', (['"""take choose pick"""'], {}), "('take choose pick')\n", (7388, 7408), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((7432, 7468), 'parser_combinator.first_word', 'first_word', (['"""contradiction contrary"""'], {}), "('contradiction contrary')\n", (7442, 7468), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((7507, 7538), 'parser_combinator.next_phrase', 'next_phrase', (['"""mutual inductive"""'], {}), "('mutual inductive')\n", (7518, 7538), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((7576, 7611), 'parser_combinator.next_phrase', 'next_phrase', (['"""mutual inductive def"""'], {}), "('mutual inductive def')\n", (7587, 7611), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((7624, 7652), 'parser_combinator.first_word', 'first_word', (['"""def definition"""'], {}), "('def definition')\n", (7634, 7652), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((7673, 7732), 'parser_combinator.first_phrase', 'first_phrase', (["['said to be', 'defined as', 'defined to be']"], {}), "(['said to be', 'defined as', 'defined to be'])\n", (7685, 7732), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((7746, 7783), 'parser_combinator.first_phrase', 'first_phrase', (["['denote', 'stand for']"], {}), "(['denote', 'stand for'])\n", (7758, 7783), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((7794, 7815), 'parser_combinator.first_word', 'first_word', (['"""do does"""'], {}), "('do does')\n", (7804, 7815), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((7829, 7852), 'parser_combinator.first_word', 'first_word', (['"""done quit"""'], {}), "('done quit')\n", (7839, 7852), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((7867, 7890), 'parser_combinator.next_phrase', 'next_phrase', (['"""equal to"""'], {}), "('equal to')\n", (7878, 7890), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((7999, 8025), 'parser_combinator.first_word', 'first_word', (['"""off false no"""'], {}), "('off false no')\n", (8009, 8025), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((8038, 8059), 'parser_combinator.first_word', 'first_word', (['"""fix let"""'], {}), "('fix let')\n", (8048, 8059), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((8134, 8160), 'parser_combinator.first_word', 'first_word', (['"""has have had"""'], {}), "('has have had')\n", (8144, 8160), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((8353, 8395), 'parser_combinator.first_phrase', 'first_phrase', (["['is', 'are', 'be', 'to be']"], {}), "(['is', 'are', 'be', 'to be'])\n", (8365, 8395), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((8406, 8453), 'parser_combinator.first_phrase', 'first_phrase', (["['let us', 'let', 'we can', 'we']"], {}), "(['let us', 'let', 'we can', 'we'])\n", (8418, 8453), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((8465, 8494), 'parser_combinator.next_phrase', 'next_phrase', (['"""with parameter"""'], {}), "('with parameter')\n", (8476, 8494), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((8509, 8533), 'parser_combinator.first_word', 'first_word', (['"""prove show"""'], {}), "('prove show')\n", (8519, 8533), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((8546, 8583), 'parser_combinator.first_word', 'first_word', (['"""end qed obvious literal"""'], {}), "('end qed obvious literal')\n", (8556, 8583), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((8601, 8655), 'parser_combinator.first_phrase', 'first_phrase', (["['satisfy', 'give rise to', 'determine']"], {}), "(['satisfy', 'give rise to', 'determine'])\n", (8613, 8655), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((8666, 8689), 'parser_combinator.first_word', 'first_word', (['"""say write"""'], {}), "('say write')\n", (8676, 8689), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((8703, 8737), 'parser_combinator.first_word', 'first_word', (['"""then therefore hence"""'], {}), "('then therefore hence')\n", (8713, 8737), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((8754, 8803), 'parser_combinator.first_word', 'first_word', (['"""proposition theorem lemma corollary"""'], {}), "('proposition theorem lemma corollary')\n", (8764, 8803), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((8817, 8842), 'parser_combinator.first_word', 'first_word', (['"""on true yes"""'], {}), "('on true yes')\n", (8827, 8842), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((8861, 8885), 'parser_combinator.next_phrase', 'next_phrase', (['"""we record"""'], {}), "('we record')\n", (8872, 8885), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((9082, 9110), 'parser_combinator.first_word', 'first_word', (['"""with of having"""'], {}), "('with of having')\n", (9092, 9110), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((9133, 9161), 'parser_combinator.next_phrase', 'next_phrase', (['"""with property"""'], {}), "('with property')\n", (9144, 9161), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((9176, 9207), 'parser_combinator.next_phrase', 'next_phrase', (['"""it is wrong that"""'], {}), "('it is wrong that')\n", (9187, 9207), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((12966, 13002), 'parser_combinator.Parse', 'Parse', (['f', 'primitive_nonterminal', '"""!"""'], {}), "(f, primitive_nonterminal, '!')\n", (12971, 13002), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((30755, 30772), 'parser_combinator.next_word', 'next_word', (['"""over"""'], {}), "('over')\n", (30764, 30772), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((57346, 57367), 'tokenlib.is_lex', 'tokenlib.is_lex', (['etok'], {}), '(etok)\n', (57361, 57367), False, 'import tokenlib\n'), ((72704, 72720), 'parser_combinator.next_word', 'next_word', (['"""and"""'], {}), "('and')\n", (72713, 72720), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((72740, 72755), 'parser_combinator.next_word', 'next_word', (['"""or"""'], {}), "('or')\n", (72749, 72755), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((77554, 77566), 'parser_combinator.Parse.fail', 'Parse.fail', ([], {}), '()\n', (77564, 77566), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((80779, 80818), 'parser_combinator.first_phrase', 'first_phrase', (["['this exist', 'this is']"], {}), "(['this exist', 'this is'])\n", (80791, 80818), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((84499, 84520), 'parser_combinator.first_word', 'first_word', (['"""let fix"""'], {}), "('let fix')\n", (84509, 84520), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((114953, 115029), 'doctest.testmod', 'doctest.testmod', ([], {'optionflags': '(doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)'}), '(optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)\n', (114968, 115029), False, 'import doctest\n'), ((3788, 3846), 'tokenlib.Etok', 'Etok', ([], {'name': 'nonterminal', 'etoks': 'b', 'raw': 'acc', 'rule': '"""backdoor2"""'}), "(name=nonterminal, etoks=b, raw=acc, rule='backdoor2')\n", (3792, 3846), False, 'from tokenlib import Etok\n'), ((6012, 6054), 'tokenlib.update', 'tokenlib.update', (['([acc1] + item2.acc)', 'item2'], {}), '([acc1] + item2.acc, item2)\n', (6027, 6054), False, 'import tokenlib\n'), ((6745, 6760), 'parser_combinator.next_any_word', 'next_any_word', ([], {}), '()\n', (6758, 6760), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((6791, 6806), 'parser_combinator.next_any_word', 'next_any_word', ([], {}), '()\n', (6804, 6806), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((6837, 6852), 'parser_combinator.next_any_word', 'next_any_word', ([], {}), '()\n', (6850, 6852), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((6883, 6898), 'parser_combinator.next_any_word', 'next_any_word', ([], {}), '()\n', (6896, 6898), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((6930, 6945), 'parser_combinator.next_any_word', 'next_any_word', ([], {}), '()\n', (6943, 6945), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((8076, 8095), 'parser_combinator.next_word', 'next_word', (['"""forall"""'], {}), "('forall')\n", (8085, 8095), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((8098, 8120), 'parser_combinator.next_phrase', 'next_phrase', (['"""for all"""'], {}), "('for all')\n", (8109, 8120), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((8175, 8214), 'parser_combinator.first_phrase', 'first_phrase', (["['iff', 'if and only if']"], {}), "(['iff', 'if and only if'])\n", (8187, 8214), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((9548, 9573), 'tokenlib.Etok', 'Etok', (['"""LIT"""', '[]', '[acc]', 's'], {}), "('LIT', [], [acc], s)\n", (9552, 9573), False, 'from tokenlib import Etok\n'), ((10284, 10311), 'parser_combinator.first_word', 'first_word', (['"""left right no"""'], {}), "('left right no')\n", (10294, 10311), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((10338, 10402), 'parser_combinator.first_word', 'first_word', (['"""coercion notationless notation parameter type call"""'], {}), "('coercion notationless notation parameter type call')\n", (10348, 10402), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((10449, 10538), 'parser_combinator.first_word', 'first_word', (['"""document article section subsection subsubsection subdivision division"""'], {}), "(\n 'document article section subsection subsubsection subdivision division')\n", (10459, 10538), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((10563, 10650), 'parser_combinator.first_word', 'first_word', (['"""endsection endsubsection endsubsubsection enddivision endsubdivision"""'], {}), "(\n 'endsection endsubsection endsubsubsection enddivision endsubdivision')\n", (10573, 10650), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((10993, 11029), 'tokenlib.Etok', 'Etok', ([], {'name': '"""any"""', 'etoks': 'acc', 'raw': 'acc'}), "(name='any', etoks=acc, raw=acc)\n", (10997, 11029), False, 'from tokenlib import Etok\n'), ((11599, 11672), 'tokenlib.Etok', 'Etok', ([], {'name': '"""cs_brace"""', 'etoks': '(cs, bs)', 'raw': 'acc', 'rule': 'cs_parse.nonterminal'}), "(name='cs_brace', etoks=(cs, bs), raw=acc, rule=cs_parse.nonterminal)\n", (11603, 11672), False, 'from tokenlib import Etok\n'), ((12450, 12501), 'tokenlib.Etok', 'Etok', ([], {'name': '"""ATOMIC"""', 'etoks': '[]', 'raw': '[acc]', 'rule': 'rule'}), "(name='ATOMIC', etoks=[], raw=[acc], rule=rule)\n", (12454, 12501), False, 'from tokenlib import Etok\n'), ((13247, 13291), 'tokenlib.Etok', 'Etok', ([], {'name': '"""section_label"""', 'etoks': 'e', 'raw': 'acc'}), "(name='section_label', etoks=e, raw=acc)\n", (13251, 13291), False, 'from tokenlib import Etok\n'), ((15596, 15664), 'tokenlib.Etok', 'Etok', ([], {'name': '"""instruction"""', 'etoks': '[]', 'raw': 'acc', 'rule': '"""synonym"""', 'misc': 'tt'}), "(name='instruction', etoks=[], raw=acc, rule='synonym', misc=tt)\n", (15600, 15664), False, 'from tokenlib import Etok\n'), ((16640, 16661), 'parser_combinator.first_word', 'first_word', (['_keywords'], {}), '(_keywords)\n', (16650, 16661), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((18220, 18257), 'tokenlib.Etok', 'Etok', (['"""coerce_as"""'], {'etoks': '[e]', 'raw': 'acc'}), "('coerce_as', etoks=[e], raw=acc)\n", (18224, 18257), False, 'from tokenlib import Etok\n'), ((18719, 18756), 'tokenlib.Etok', 'Etok', (['"""coerce_as"""'], {'etoks': '[e]', 'raw': 'acc'}), "('coerce_as', etoks=[e], raw=acc)\n", (18723, 18756), False, 'from tokenlib import Etok\n'), ((19805, 19861), 'tokenlib.Etok', 'Etok', (['"""annotated"""'], {'etoks': 'vs', 'raw': 'acc', 'rule': 'p.nonterminal'}), "('annotated', etoks=vs, raw=acc, rule=p.nonterminal)\n", (19809, 19861), False, 'from tokenlib import Etok\n'), ((20427, 20491), 'tokenlib.Etok', 'Etok', (['"""annotateds"""'], {'etoks': '(vs, ann)', 'raw': 'acc', 'rule': 'p.nonterminal'}), "('annotateds', etoks=(vs, ann), raw=acc, rule=p.nonterminal)\n", (20431, 20491), False, 'from tokenlib import Etok\n'), ((21109, 21131), 'tokenlib.Etok.rawupdate', 'Etok.rawupdate', (['e', 'acc'], {}), '(e, acc)\n', (21123, 21131), False, 'from tokenlib import Etok\n'), ((23049, 23093), 'tokenlib.Etok', 'Etok', ([], {'name': '"""brace_assign"""', 'etoks': 'ps', 'raw': 'acc'}), "(name='brace_assign', etoks=ps, raw=acc)\n", (23053, 23093), False, 'from tokenlib import Etok\n'), ((24115, 24161), 'tokenlib.Etok', 'Etok', ([], {'name': '"""brace_noassign"""', 'etoks': 'ps', 'raw': 'acc'}), "(name='brace_noassign', etoks=ps, raw=acc)\n", (24119, 24161), False, 'from tokenlib import Etok\n'), ((24624, 24665), 'tokenlib.Etok', 'Etok', ([], {'name': '"""app_args"""', 'etoks': 'acc', 'raw': 'acc'}), "(name='app_args', etoks=acc, raw=acc)\n", (24628, 24665), False, 'from tokenlib import Etok\n'), ((25401, 25448), 'tokenlib.Etok', 'Etok', ([], {'name': '"""annotated_args"""', 'etoks': 'acc', 'raw': 'acc'}), "(name='annotated_args', etoks=acc, raw=acc)\n", (25405, 25448), False, 'from tokenlib import Etok\n'), ((25935, 25981), 'tokenlib.Etok', 'Etok', ([], {'name': '"""args_template"""', 'etoks': 'acc', 'raw': 'acc'}), "(name='args_template', etoks=acc, raw=acc)\n", (25939, 25981), False, 'from tokenlib import Etok\n'), ((27006, 27055), 'tokenlib.Etok', 'Etok', ([], {'name': '"""tightest_arg"""', 'etoks': '(vs, o)', 'raw': 'acc'}), "(name='tightest_arg', etoks=(vs, o), raw=acc)\n", (27010, 27055), False, 'from tokenlib import Etok\n'), ((27696, 27746), 'tokenlib.Etok', 'Etok', ([], {'name': '"""holding_vars"""', 'etoks': 'cs[0::2]', 'raw': 'acc'}), "(name='holding_vars', etoks=cs[0::2], raw=acc)\n", (27700, 27746), False, 'from tokenlib import Etok\n'), ((28595, 28641), 'tokenlib.Etok', 'Etok', ([], {'name': '"""sort_expr"""', 'etoks': '(m1, s)', 'raw': 'acc'}), "(name='sort_expr', etoks=(m1, s), raw=acc)\n", (28599, 28641), False, 'from tokenlib import Etok\n'), ((30003, 30041), 'tokenlib.Etok', 'Etok', (['"""field_type"""'], {'etoks': 'acc', 'raw': 'acc'}), "('field_type', etoks=acc, raw=acc)\n", (30007, 30041), False, 'from tokenlib import Etok\n'), ((32302, 32353), 'tokenlib.Etok', 'Etok', ([], {'name': '"""overstructure_type"""', 'etoks': 'acc', 'raw': 'acc'}), "(name='overstructure_type', etoks=acc, raw=acc)\n", (32306, 32353), False, 'from tokenlib import Etok\n'), ((32896, 32918), 'tokenlib.Etok.rawupdate', 'Etok.rawupdate', (['v', 'acc'], {}), '(v, acc)\n', (32910, 32918), False, 'from tokenlib import Etok\n'), ((33285, 33331), 'tokenlib.Etok', 'Etok', ([], {'name': '"""subtype"""', 'etoks': '(t, h, s)', 'raw': 'acc'}), "(name='subtype', etoks=(t, h, s), raw=acc)\n", (33289, 33331), False, 'from tokenlib import Etok\n'), ((33724, 33787), 'tokenlib.Etok', 'Etok', ([], {'name': '"""app_type"""', 'etoks': 'acc', 'raw': 'acc', 'rule': '"""tightest_type"""'}), "(name='app_type', etoks=acc, raw=acc, rule='tightest_type')\n", (33728, 33787), False, 'from tokenlib import Etok\n'), ((34412, 34462), 'tokenlib.Etok', 'Etok', ([], {'name': '"""binder_type"""', 'etoks': '(p, a, b)', 'raw': 'acc'}), "(name='binder_type', etoks=(p, a, b), raw=acc)\n", (34416, 34462), False, 'from tokenlib import Etok\n'), ((34910, 34952), 'tokenlib.Etok', 'Etok', ([], {'name': '"""agda_vars"""', 'etoks': 'acc', 'raw': 'acc'}), "(name='agda_vars', etoks=acc, raw=acc)\n", (34914, 34952), False, 'from tokenlib import Etok\n'), ((36174, 36226), 'tokenlib.Etok', 'Etok', ([], {'name': '"""binop_type"""', 'etoks': '(p, m + [b])', 'raw': 'acc'}), "(name='binop_type', etoks=(p, m + [b]), raw=acc)\n", (36178, 36226), False, 'from tokenlib import Etok\n'), ((36605, 36654), 'tokenlib.Etok', 'Etok', ([], {'name': '"""quotient_type"""', 'etoks': '(g, t)', 'raw': 'acc'}), "(name='quotient_type', etoks=(g, t), raw=acc)\n", (36609, 36654), False, 'from tokenlib import Etok\n'), ((37060, 37106), 'tokenlib.Etok', 'Etok', ([], {'name': '"""coercion_type"""', 'etoks': '[t]', 'raw': 'acc'}), "(name='coercion_type', etoks=[t], raw=acc)\n", (37064, 37106), False, 'from tokenlib import Etok\n'), ((37400, 37448), 'tokenlib.Etok', 'Etok', ([], {'name': '"""coercion_type"""', 'etoks': '[acc]', 'raw': 'acc'}), "(name='coercion_type', etoks=[acc], raw=acc)\n", (37404, 37448), False, 'from tokenlib import Etok\n'), ((37900, 37958), 'tokenlib.Etok', 'Etok', ([], {'name': '"""post_colon_type"""', 'etoks': 'acc', 'raw': 'acc', 'rule': '"""2"""'}), "(name='post_colon_type', etoks=acc, raw=acc, rule='2')\n", (37904, 37958), False, 'from tokenlib import Etok\n'), ((38844, 38898), 'tokenlib.Etok', 'Etok', ([], {'name': '"""alt_constructor"""', 'etoks': '(i, a, t)', 'raw': 'acc'}), "(name='alt_constructor', etoks=(i, a, t), raw=acc)\n", (38848, 38898), False, 'from tokenlib import Etok\n'), ((40013, 40059), 'tokenlib.Etok', 'Etok', ([], {'name': '"""field_prefix"""', 'etoks': 'keys', 'raw': 'acc'}), "(name='field_prefix', etoks=keys, raw=acc)\n", (40017, 40059), False, 'from tokenlib import Etok\n'), ((40598, 40649), 'tokenlib.Etok', 'Etok', ([], {'name': '"""PROOF"""', 'etoks': '[]', 'raw': 'acc', 'rule': '"""proof"""'}), "(name='PROOF', etoks=[], raw=acc, rule='proof')\n", (40602, 40649), False, 'from tokenlib import Etok\n'), ((40678, 40727), 'tokenlib.Etok', 'Etok', ([], {'name': '"""field_identifier"""', 'etoks': 'acc', 'raw': 'acc'}), "(name='field_identifier', etoks=acc, raw=acc)\n", (40682, 40727), False, 'from tokenlib import Etok\n'), ((41213, 41257), 'tokenlib.Etok', 'Etok', ([], {'name': '"""field"""', 'etoks': '(a, b, c)', 'raw': 'acc'}), "(name='field', etoks=(a, b, c), raw=acc)\n", (41217, 41257), False, 'from tokenlib import Etok\n'), ((41759, 41813), 'tokenlib.Etok', 'Etok', ([], {'name': '"""structure"""', 'etoks': '(n, t, b[0::2])', 'raw': 'acc'}), "(name='structure', etoks=(n, t, b[0::2]), raw=acc)\n", (41763, 41813), False, 'from tokenlib import Etok\n'), ((43413, 43444), 'parser_combinator.lazy_call', 'c.lazy_call', (['tightest_subscript'], {}), '(tightest_subscript)\n', (43424, 43444), True, 'import parser_combinator as c\n'), ((43716, 43762), 'tokenlib.Etok', 'Etok', ([], {'name': '"""tightest_term"""', 'etoks': 'acc', 'raw': 'acc'}), "(name='tightest_term', etoks=acc, raw=acc)\n", (43720, 43762), False, 'from tokenlib import Etok\n'), ((44310, 44350), 'tokenlib.Etok', 'Etok', ([], {'name': '"""apply_sub"""', 'etoks': 't', 'raw': 'acc'}), "(name='apply_sub', etoks=t, raw=acc)\n", (44314, 44350), False, 'from tokenlib import Etok\n'), ((44886, 44930), 'tokenlib.Etok', 'Etok', ([], {'name': '"""set_enum_term"""', 'etoks': 't', 'raw': 'acc'}), "(name='set_enum_term', etoks=t, raw=acc)\n", (44890, 44930), False, 'from tokenlib import Etok\n'), ((45336, 45397), 'tokenlib.Etok', 'Etok', ([], {'name': '"""set_comprehension_term"""', 'etoks': '(p, h, s)', 'raw': 'acc'}), "(name='set_comprehension_term', etoks=(p, h, s), raw=acc)\n", (45340, 45397), False, 'from tokenlib import Etok\n'), ((45911, 45953), 'tokenlib.Etok', 'Etok', ([], {'name': '"""tuple_term"""', 'etoks': 'ps', 'raw': 'acc'}), "(name='tuple_term', etoks=ps, raw=acc)\n", (45915, 45953), False, 'from tokenlib import Etok\n'), ((46368, 46409), 'tokenlib.Etok', 'Etok', ([], {'name': '"""list_term"""', 'etoks': 'ps', 'raw': 'acc'}), "(name='list_term', etoks=ps, raw=acc)\n", (46372, 46409), False, 'from tokenlib import Etok\n'), ((47329, 47370), 'tokenlib.Etok', 'Etok', (['"""make_term"""'], {'etoks': '(t, bs)', 'raw': 'acc'}), "('make_term', etoks=(t, bs), raw=acc)\n", (47333, 47370), False, 'from tokenlib import Etok\n'), ((47805, 47827), 'tokenlib.Etok.rawupdate', 'Etok.rawupdate', (['t', 'acc'], {}), '(t, acc)\n', (47819, 47827), False, 'from tokenlib import Etok\n'), ((48581, 48625), 'tokenlib.Etok', 'Etok', ([], {'name': '"""alt_case"""', 'etoks': '(p, t)', 'raw': 'acc'}), "(name='alt_case', etoks=(p, t), raw=acc)\n", (48585, 48625), False, 'from tokenlib import Etok\n'), ((49055, 49098), 'tokenlib.Etok', 'Etok', ([], {'name': '"""case_term"""', 'etoks': 'a[0]', 'raw': 'acc'}), "(name='case_term', etoks=a[0], raw=acc)\n", (49059, 49098), False, 'from tokenlib import Etok\n'), ((49314, 49355), 'tokenlib.Etok', 'Etok', ([], {'name': '"""app_term"""', 'etoks': 'acc', 'raw': 'acc'}), "(name='app_term', etoks=acc, raw=acc)\n", (49318, 49355), False, 'from tokenlib import Etok\n'), ((49635, 49681), 'tokenlib.Etok', 'Etok', ([], {'name': '"""alt_match"""', 'etoks': '(p, p2)', 'raw': 'acc'}), "(name='alt_match', etoks=(p, p2), raw=acc)\n", (49639, 49681), False, 'from tokenlib import Etok\n'), ((50155, 50205), 'tokenlib.Etok', 'Etok', ([], {'name': '"""match_term"""', 'etoks': '(mp, b[0])', 'raw': 'acc'}), "(name='match_term', etoks=(mp, b[0]), raw=acc)\n", (50159, 50205), False, 'from tokenlib import Etok\n'), ((50729, 50782), 'tokenlib.Etok', 'Etok', ([], {'name': '"""match_function"""', 'etoks': '(t, o, b)', 'raw': 'acc'}), "(name='match_function', etoks=(t, o, b), raw=acc)\n", (50733, 50782), False, 'from tokenlib import Etok\n'), ((51501, 51543), 'tokenlib.Etok', 'Etok', ([], {'name': '"""mapsto"""', 'etoks': '(t, o)', 'raw': 'acc'}), "(name='mapsto', etoks=(t, o), raw=acc)\n", (51505, 51543), False, 'from tokenlib import Etok\n'), ((51599, 51649), 'tokenlib.Etok', 'Etok', ([], {'name': '"""lambda_term"""', 'etoks': '(p, a, o)', 'raw': 'acc'}), "(name='lambda_term', etoks=(p, a, o), raw=acc)\n", (51603, 51649), False, 'from tokenlib import Etok\n'), ((51705, 51749), 'tokenlib.Etok', 'Etok', ([], {'name': '"""fun_term"""', 'etoks': '(t, o)', 'raw': 'acc'}), "(name='fun_term', etoks=(t, o), raw=acc)\n", (51709, 51749), False, 'from tokenlib import Etok\n'), ((52419, 52461), 'tokenlib.Etok', 'Etok', ([], {'name': '"""let"""', 'etoks': '(p, t, o)', 'raw': 'acc'}), "(name='let', etoks=(p, t, o), raw=acc)\n", (52423, 52461), False, 'from tokenlib import Etok\n'), ((52955, 53011), 'tokenlib.Etok', 'Etok', ([], {'name': '"""if_then_else_term"""', 'etoks': '(p, t, f)', 'raw': 'acc'}), "(name='if_then_else_term', etoks=(p, t, f), raw=acc)\n", (52959, 53011), False, 'from tokenlib import Etok\n'), ((53651, 53675), 'parser_combinator.lazy_call', 'c.lazy_call', (['lambda_term'], {}), '(lambda_term)\n', (53662, 53675), True, 'import parser_combinator as c\n'), ((53690, 53711), 'parser_combinator.lazy_call', 'c.lazy_call', (['let_term'], {}), '(let_term)\n', (53701, 53711), True, 'import parser_combinator as c\n'), ((53726, 53756), 'parser_combinator.lazy_call', 'c.lazy_call', (['if_then_else_term'], {}), '(if_then_else_term)\n', (53737, 53756), True, 'import parser_combinator as c\n'), ((54500, 54544), 'tokenlib.Etok', 'Etok', ([], {'name': '"""where_suffix"""', 'etoks': 'bs', 'raw': 'acc'}), "(name='where_suffix', etoks=bs, raw=acc)\n", (54504, 54544), False, 'from tokenlib import Etok\n'), ((54971, 55009), 'tokenlib.Etok', 'Etok', (['"""where_term"""'], {'etoks': 'acc', 'raw': 'acc'}), "('where_term', etoks=acc, raw=acc)\n", (54975, 55009), False, 'from tokenlib import Etok\n'), ((55769, 55791), 'tokenlib.Etok.rawupdate', 'Etok.rawupdate', (['t', 'acc'], {}), '(t, acc)\n', (55783, 55791), False, 'from tokenlib import Etok\n'), ((56010, 56049), 'tokenlib.Etok', 'Etok', ([], {'name': '"""any_args"""', 'etoks': 'b', 'raw': 'acc'}), "(name='any_args', etoks=b, raw=acc)\n", (56014, 56049), False, 'from tokenlib import Etok\n'), ((56389, 56430), 'tokenlib.Etok', 'Etok', ([], {'name': '"""any_name"""', 'etoks': 'acc', 'raw': 'acc'}), "(name='any_name', etoks=acc, raw=acc)\n", (56393, 56430), False, 'from tokenlib import Etok\n'), ((56749, 56771), 'tokenlib.Etok.rawupdate', 'Etok.rawupdate', (['t', 'acc'], {}), '(t, acc)\n', (56763, 56771), False, 'from tokenlib import Etok\n'), ((56948, 56992), 'tokenlib.Etok', 'Etok', ([], {'name': '"""terms"""', 'etoks': 'acc[0::2]', 'raw': 'acc'}), "(name='terms', etoks=acc[0::2], raw=acc)\n", (56952, 56992), False, 'from tokenlib import Etok\n'), ((59031, 59066), 'tokenlib.Etok', 'Etok', (['"""tdop_term"""'], {'etoks': 'r', 'raw': 'acc'}), "('tdop_term', etoks=r, raw=acc)\n", (59035, 59066), False, 'from tokenlib import Etok\n'), ((59334, 59391), 'tokenlib.Etok', 'Etok', ([], {'name': '"""adjective_left_attribute"""', 'etoks': 'acc', 'raw': 'acc'}), "(name='adjective_left_attribute', etoks=acc, raw=acc)\n", (59338, 59391), False, 'from tokenlib import Etok\n'), ((59820, 59877), 'tokenlib.Etok', 'Etok', ([], {'name': '"""is_right_attribute"""', 'etoks': 'acc[0::2]', 'raw': 'acc'}), "(name='is_right_attribute', etoks=acc[0::2], raw=acc)\n", (59824, 59877), False, 'from tokenlib import Etok\n'), ((60046, 60103), 'tokenlib.Etok', 'Etok', ([], {'name': '"""does_right_attribute"""', 'etoks': 't[0::2]', 'raw': 'acc'}), "(name='does_right_attribute', etoks=t[0::2], raw=acc)\n", (60050, 60103), False, 'from tokenlib import Etok\n'), ((60303, 60325), 'tokenlib.Etok.rawupdate', 'Etok.rawupdate', (['t', 'acc'], {}), '(t, acc)\n', (60317, 60325), False, 'from tokenlib import Etok\n'), ((60672, 60714), 'tokenlib.Etok', 'Etok', ([], {'name': '"""attribute"""', 'etoks': 'acc', 'raw': 'acc'}), "(name='attribute', etoks=acc, raw=acc)\n", (60676, 60714), False, 'from tokenlib import Etok\n'), ((62204, 62256), 'tokenlib.Etok', 'Etok', ([], {'name': '"""tdop_rel_prop"""', 'etoks': '(op0 + op1)', 'raw': 'acc'}), "(name='tdop_rel_prop', etoks=op0 + op1, raw=acc)\n", (62208, 62256), False, 'from tokenlib import Etok\n'), ((63841, 63883), 'tokenlib.Etok', 'Etok', (['"""annotated_prop"""'], {'etoks': '[p]', 'raw': 'acc'}), "('annotated_prop', etoks=[p], raw=acc)\n", (63845, 63883), False, 'from tokenlib import Etok\n'), ((64185, 64228), 'tokenlib.Etok', 'Etok', ([], {'name': '"""field_prop"""', 'etoks': 'acc', 'raw': 'acc'}), "(name='field_prop', etoks=acc, raw=acc)\n", (64189, 64228), False, 'from tokenlib import Etok\n'), ((64418, 64461), 'tokenlib.Etok', 'Etok', ([], {'name': '"""prop_var"""', 'etoks': '[acc]', 'raw': 'acc'}), "(name='prop_var', etoks=[acc], raw=acc)\n", (64422, 64461), False, 'from tokenlib import Etok\n'), ((64636, 64658), 'tokenlib.Etok.rawupdate', 'Etok.rawupdate', (['s', 'acc'], {}), '(s, acc)\n', (64650, 64658), False, 'from tokenlib import Etok\n'), ((64945, 64986), 'tokenlib.Etok', 'Etok', ([], {'name': '"""app_prop"""', 'etoks': 'acc', 'raw': 'acc'}), "(name='app_prop', etoks=acc, raw=acc)\n", (64949, 64986), False, 'from tokenlib import Etok\n'), ((65375, 65427), 'tokenlib.Etok', 'Etok', ([], {'name': '"""lambda_predicate"""', 'etoks': '(t, p)', 'raw': 'acc'}), "(name='lambda_predicate', etoks=(t, p), raw=acc)\n", (65379, 65427), False, 'from tokenlib import Etok\n'), ((65928, 65979), 'tokenlib.Etok', 'Etok', ([], {'name': '"""binder_prop"""', 'etoks': '(b, a, b2)', 'raw': 'acc'}), "(name='binder_prop', etoks=(b, a, b2), raw=acc)\n", (65932, 65979), False, 'from tokenlib import Etok\n'), ((67377, 67416), 'tokenlib.Etok', 'Etok', ([], {'name': '"""has_pred"""', 'etoks': 't', 'raw': 'acc'}), "(name='has_pred', etoks=t, raw=acc)\n", (67381, 67416), False, 'from tokenlib import Etok\n'), ((67447, 67491), 'tokenlib.Etok', 'Etok', ([], {'name': '"""no_has_pred"""', 'etoks': 'acc', 'raw': 'acc'}), "(name='no_has_pred', etoks=acc, raw=acc)\n", (67451, 67491), False, 'from tokenlib import Etok\n'), ((67662, 67678), 'parser_combinator.next_word', 'next_word', (['"""not"""'], {}), "('not')\n", (67671, 67678), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((67933, 67984), 'tokenlib.Etok', 'Etok', ([], {'name': '"""indefinite_pred"""', 'etoks': '(n, g)', 'raw': 'acc'}), "(name='indefinite_pred', etoks=(n, g), raw=acc)\n", (67937, 67984), False, 'from tokenlib import Etok\n'), ((68014, 68060), 'tokenlib.Etok', 'Etok', ([], {'name': '"""definite_pred"""', 'etoks': 'acc', 'raw': 'acc'}), "(name='definite_pred', etoks=acc, raw=acc)\n", (68018, 68060), False, 'from tokenlib import Etok\n'), ((68720, 68765), 'tokenlib.Etok', 'Etok', ([], {'name': '"""is_adjective"""', 'etoks': 'acc', 'raw': 'acc'}), "(name='is_adjective', etoks=acc, raw=acc)\n", (68724, 68765), False, 'from tokenlib import Etok\n'), ((68818, 68882), 'tokenlib.Etok', 'Etok', ([], {'name': '"""is_adjective_multisubject"""', 'etoks': '(n, p, m)', 'raw': 'acc'}), "(name='is_adjective_multisubject', etoks=(n, p, m), raw=acc)\n", (68822, 68882), False, 'from tokenlib import Etok\n'), ((68911, 68955), 'tokenlib.Etok', 'Etok', ([], {'name': '"""is_with"""', 'etoks': 'acc[1:]', 'raw': 'acc'}), "(name='is_with', etoks=acc[1:], raw=acc)\n", (68915, 68955), False, 'from tokenlib import Etok\n'), ((69486, 69529), 'tokenlib.Etok', 'Etok', ([], {'name': '"""do_verb"""', 'etoks': '(n, v)', 'raw': 'acc'}), "(name='do_verb', etoks=(n, v), raw=acc)\n", (69490, 69529), False, 'from tokenlib import Etok\n'), ((69581, 69637), 'tokenlib.Etok', 'Etok', ([], {'name': '"""do_verb_multisubject"""', 'etoks': '(n, v)', 'raw': 'acc'}), "(name='do_verb_multisubject', etoks=(n, v), raw=acc)\n", (69585, 69637), False, 'from tokenlib import Etok\n'), ((69685, 69729), 'tokenlib.Etok', 'Etok', ([], {'name': '"""do_has_pred"""', 'etoks': '[h]', 'raw': 'acc'}), "(name='do_has_pred', etoks=[h], raw=acc)\n", (69689, 69729), False, 'from tokenlib import Etok\n'), ((69779, 69822), 'tokenlib.Etok', 'Etok', ([], {'name': '"""does_is_adj"""', 'etoks': 'ps', 'raw': 'acc'}), "(name='does_is_adj', etoks=ps, raw=acc)\n", (69783, 69822), False, 'from tokenlib import Etok\n'), ((69872, 69914), 'tokenlib.Etok', 'Etok', ([], {'name': '"""is_nominal"""', 'etoks': 'ps', 'raw': 'acc'}), "(name='is_nominal', etoks=ps, raw=acc)\n", (69876, 69914), False, 'from tokenlib import Etok\n'), ((70934, 70986), 'tokenlib.Etok', 'Etok', ([], {'name': '"""plain_pred_pseudoterm"""', 'etoks': 't', 'raw': 'acc'}), "(name='plain_pred_pseudoterm', etoks=t, raw=acc)\n", (70938, 70986), False, 'from tokenlib import Etok\n'), ((71739, 71761), 'tokenlib.Etok.rawupdate', 'Etok.rawupdate', (['t', 'acc'], {}), '(t, acc)\n', (71753, 71761), False, 'from tokenlib import Etok\n'), ((71818, 71860), 'tokenlib.Etok', 'Etok', (['"""annotated"""'], {'etoks': '(v, ann)', 'raw': 'acc'}), "('annotated', etoks=(v, ann), raw=acc)\n", (71822, 71860), False, 'from tokenlib import Etok\n'), ((71911, 71934), 'tokenlib.Etok.rawupdate', 'Etok.rawupdate', (['ps', 'acc'], {}), '(ps, acc)\n', (71925, 71934), False, 'from tokenlib import Etok\n'), ((72494, 72537), 'tokenlib.Etok', 'Etok', ([], {'name': '"""pseudoterm"""', 'etoks': 'acc', 'raw': 'acc'}), "(name='pseudoterm', etoks=acc, raw=acc)\n", (72498, 72537), False, 'from tokenlib import Etok\n'), ((72898, 72947), 'tokenlib.Etok', 'Etok', ([], {'name': '"""simple_statement"""', 'etoks': 'acc', 'raw': 'acc'}), "(name='simple_statement', etoks=acc, raw=acc)\n", (72902, 72947), False, 'from tokenlib import Etok\n'), ((73134, 73188), 'tokenlib.Etok', 'Etok', ([], {'name': '"""there_is_statement"""', 'etoks': '(n, p)', 'raw': 'acc'}), "(name='there_is_statement', etoks=(n, p), raw=acc)\n", (73138, 73188), False, 'from tokenlib import Etok\n'), ((73362, 73410), 'tokenlib.Etok', 'Etok', ([], {'name': '"""const_statement"""', 'etoks': 'acc', 'raw': 'acc'}), "(name='const_statement', etoks=acc, raw=acc)\n", (73366, 73410), False, 'from tokenlib import Etok\n'), ((74075, 74134), 'tokenlib.Etok', 'Etok', ([], {'name': '"""forall_symbol_statement"""', 'etoks': '(a, s)', 'raw': 'acc'}), "(name='forall_symbol_statement', etoks=(a, s), raw=acc)\n", (74079, 74134), False, 'from tokenlib import Etok\n'), ((74195, 74253), 'tokenlib.Etok', 'Etok', ([], {'name': '"""exist_symbol_statement"""', 'etoks': '(a, s)', 'raw': 'acc'}), "(name='exist_symbol_statement', etoks=(a, s), raw=acc)\n", (74199, 74253), False, 'from tokenlib import Etok\n'), ((74286, 74342), 'tokenlib.Etok', 'Etok', ([], {'name': '"""not_symbol_statement"""', 'etoks': 'acc[1]', 'raw': 'acc'}), "(name='not_symbol_statement', etoks=acc[1], raw=acc)\n", (74290, 74342), False, 'from tokenlib import Etok\n'), ((74392, 74414), 'tokenlib.Etok.rawupdate', 'Etok.rawupdate', (['s', 'acc'], {}), '(s, acc)\n', (74406, 74414), False, 'from tokenlib import Etok\n'), ((75357, 75408), 'tokenlib.Etok', 'Etok', ([], {'name': '"""or_chain"""', 'etoks': '(p[0::2] + [h])', 'raw': 'acc'}), "(name='or_chain', etoks=p[0::2] + [h], raw=acc)\n", (75361, 75408), False, 'from tokenlib import Etok\n'), ((75623, 75675), 'tokenlib.Etok', 'Etok', ([], {'name': '"""and_chain"""', 'etoks': '(p[0::2] + [h])', 'raw': 'acc'}), "(name='and_chain', etoks=p[0::2] + [h], raw=acc)\n", (75627, 75675), False, 'from tokenlib import Etok\n'), ((76089, 76134), 'tokenlib.Etok', 'Etok', (['"""iff_statement"""'], {'etoks': '(ao, s)', 'raw': 'acc'}), "('iff_statement', etoks=(ao, s), raw=acc)\n", (76093, 76134), False, 'from tokenlib import Etok\n'), ((76536, 76585), 'tokenlib.Etok', 'Etok', ([], {'name': '"""for_statement"""', 'etoks': '(p, s)', 'raw': 'acc'}), "(name='for_statement', etoks=(p, s), raw=acc)\n", (76540, 76585), False, 'from tokenlib import Etok\n'), ((76652, 76706), 'tokenlib.Etok', 'Etok', ([], {'name': '"""if_then_statement"""', 'etoks': '(s, s2)', 'raw': 'acc'}), "(name='if_then_statement', etoks=(s, s2), raw=acc)\n", (76656, 76706), False, 'from tokenlib import Etok\n'), ((76741, 76793), 'tokenlib.Etok', 'Etok', ([], {'name': '"""wrong_statement"""', 'etoks': 'acc[1:]', 'raw': 'acc'}), "(name='wrong_statement', etoks=acc[1:], raw=acc)\n", (76745, 76793), False, 'from tokenlib import Etok\n'), ((77896, 77917), 'parser_combinator.next_word', 'next_word', (['"""synonyms"""'], {}), "('synonyms')\n", (77905, 77917), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((78022, 78048), 'tokenlib.Etok.rawupdate', 'Etok.rawupdate', (['b1[0]', 'acc'], {}), '(b1[0], acc)\n', (78036, 78048), False, 'from tokenlib import Etok\n'), ((78657, 78714), 'tokenlib.Etok', 'Etok', ([], {'name': '"""inductive_decl"""', 'etoks': '(i, a, s, c1)', 'raw': 'acc'}), "(name='inductive_decl', etoks=(i, a, s, c1), raw=acc)\n", (78661, 78714), False, 'from tokenlib import Etok\n'), ((79104, 79168), 'tokenlib.Etok', 'Etok', (['"""mutual_inductive_decl_item"""'], {'etoks': '(w[0::2], pa)', 'raw': 'acc'}), "('mutual_inductive_decl_item', etoks=(w[0::2], pa), raw=acc)\n", (79108, 79168), False, 'from tokenlib import Etok\n'), ((79550, 79613), 'tokenlib.Etok', 'Etok', (['"""mutual_inductive_def_item"""'], {'etoks': '(w[0::2], pa)', 'raw': 'acc'}), "('mutual_inductive_def_item', etoks=(w[0::2], pa), raw=acc)\n", (79554, 79613), False, 'from tokenlib import Etok\n'), ((80601, 80628), 'parser_combinator.next_phrase', 'next_phrase', (['"""by recursion"""'], {}), "('by recursion')\n", (80612, 80628), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((81517, 81563), 'tokenlib.Etok', 'Etok', (['"""satisfy_item"""'], {'etoks': '(p, f, s)', 'raw': 'acc'}), "('satisfy_item', etoks=(p, f, s), raw=acc)\n", (81521, 81563), False, 'from tokenlib import Etok\n'), ((82180, 82226), 'tokenlib.Etok', 'Etok', ([], {'name': '"""decl_label"""', 'etoks': '(a, l)', 'raw': 'acc'}), "(name='decl_label', etoks=(a, l), raw=acc)\n", (82184, 82226), False, 'from tokenlib import Etok\n'), ((83116, 83173), 'tokenlib.Etok', 'Etok', ([], {'name': '"""let_annotation"""', 'etoks': 'acc', 'raw': 'acc', 'rule': '"""1"""'}), "(name='let_annotation', etoks=acc, raw=acc, rule='1')\n", (83120, 83173), False, 'from tokenlib import Etok\n'), ((83203, 83260), 'tokenlib.Etok', 'Etok', ([], {'name': '"""let_annotation"""', 'etoks': 'acc', 'raw': 'acc', 'rule': '"""2"""'}), "(name='let_annotation', etoks=acc, raw=acc, rule='2')\n", (83207, 83260), False, 'from tokenlib import Etok\n'), ((83290, 83347), 'tokenlib.Etok', 'Etok', ([], {'name': '"""let_annotation"""', 'etoks': 'acc', 'raw': 'acc', 'rule': '"""3"""'}), "(name='let_annotation', etoks=acc, raw=acc, rule='3')\n", (83294, 83347), False, 'from tokenlib import Etok\n'), ((83872, 83921), 'tokenlib.Etok', 'Etok', ([], {'name': '"""assumption_prefix"""', 'etoks': '[]', 'raw': 'acc'}), "(name='assumption_prefix', etoks=[], raw=acc)\n", (83876, 83921), False, 'from tokenlib import Etok\n'), ((84336, 84379), 'tokenlib.Etok', 'Etok', ([], {'name': '"""assumption"""', 'etoks': '[s]', 'raw': 'acc'}), "(name='assumption', etoks=[s], raw=acc)\n", (84340, 84379), False, 'from tokenlib import Etok\n'), ((84428, 84450), 'tokenlib.Etok.rawupdate', 'Etok.rawupdate', (['l', 'acc'], {}), '(l, acc)\n', (84442, 84450), False, 'from tokenlib import Etok\n'), ((85270, 85319), 'tokenlib.Etok', 'Etok', ([], {'name': '"""axiom"""', 'etoks': '(aa, [s] + ms)', 'raw': 'acc'}), "(name='axiom', etoks=(aa, [s] + ms), raw=acc)\n", (85274, 85319), False, 'from tokenlib import Etok\n'), ((85740, 85784), 'tokenlib.Etok', 'Etok', ([], {'name': '"""theorem"""', 'etoks': '(aa, p)', 'raw': 'acc'}), "(name='theorem', etoks=(aa, p), raw=acc)\n", (85744, 85784), False, 'from tokenlib import Etok\n'), ((87017, 87059), 'tokenlib.Etok', 'Etok', ([], {'name': '"""proof-step"""', 'etoks': '[]', 'raw': 'acc'}), "(name='proof-step', etoks=[], raw=acc)\n", (87021, 87059), False, 'from tokenlib import Etok\n'), ((92349, 92388), 'parser_combinator.lazy_call', 'c.lazy_call', (['Proof_step.statement_proof'], {}), '(Proof_step.statement_proof)\n', (92360, 92388), True, 'import parser_combinator as c\n'), ((92403, 92437), 'parser_combinator.lazy_call', 'c.lazy_call', (['Proof_step.goal_proof'], {}), '(Proof_step.goal_proof)\n', (92414, 92437), True, 'import parser_combinator as c\n'), ((113814, 113830), 'parser_combinator.Parse.finished', 'Parse.finished', ([], {}), '()\n', (113828, 113830), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((4127, 4142), 'parser_combinator.Parse.first', 'Parse.first', (['ps'], {}), '(ps)\n', (4138, 4142), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((5968, 5996), 'tokenlib.update', 'tokenlib.update', (['None', 'item1'], {}), '(None, item1)\n', (5983, 5996), False, 'import tokenlib\n'), ((6099, 6118), 'sample.word_net', 'sample.word_net', (['wn'], {}), '(wn)\n', (6114, 6118), False, 'import sample\n'), ((6627, 6664), 'parser_combinator.first_word', 'first_word', (['"""put write have know see"""'], {}), "('put write have know see')\n", (6637, 6664), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((7100, 7115), 'parser_combinator.next_word', 'next_word', (['"""as"""'], {}), "('as')\n", (7109, 7115), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((8316, 8338), 'parser_combinator.next_word', 'next_word', (['"""predicate"""'], {}), "('predicate')\n", (8325, 8338), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((8958, 8981), 'parser_combinator.first_word', 'first_word', (['"""say write"""'], {}), "('say write')\n", (8968, 8981), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((12368, 12391), 'parser_combinator.synonymize', 'c.synonymize', (['acc.value'], {}), '(acc.value)\n', (12380, 12391), True, 'import parser_combinator as c\n'), ((17253, 17275), 'tokenlib.Etok.rawupdate', 'Etok.rawupdate', (['s', 'acc'], {}), '(s, acc)\n', (17267, 17275), False, 'from tokenlib import Etok\n'), ((17692, 17792), 'tokenlib.Etok', 'Etok', (['"""expr"""'], {'etoks': 'acc.etoks', 'raw': 'acc.raw', 'rule': 'nonterminal', 'misc': 'acc.misc', 'altrepr': 'acc.altrepr'}), "('expr', etoks=acc.etoks, raw=acc.raw, rule=nonterminal, misc=acc.misc,\n altrepr=acc.altrepr)\n", (17696, 17792), False, 'from tokenlib import Etok\n'), ((18183, 18205), 'tokenlib.Etok.rawupdate', 'Etok.rawupdate', (['e', 'acc'], {}), '(e, acc)\n', (18197, 18205), False, 'from tokenlib import Etok\n'), ((18682, 18704), 'tokenlib.Etok.rawupdate', 'Etok.rawupdate', (['e', 'acc'], {}), '(e, acc)\n', (18696, 18704), False, 'from tokenlib import Etok\n'), ((27987, 28012), 'parser_combinator.next_type', 'c.next_type', (['"""SYMBOL_QED"""'], {}), "('SYMBOL_QED')\n", (27998, 28012), True, 'import parser_combinator as c\n'), ((31263, 31319), 'tokenlib.Etok', 'Etok', ([], {'name': '"""over_args"""', 'etoks': 'b[0::2]', 'raw': 'acc', 'rule': '"""1"""'}), "(name='over_args', etoks=b[0::2], raw=acc, rule='1')\n", (31267, 31319), False, 'from tokenlib import Etok\n'), ((31521, 31571), 'tokenlib.Etok', 'Etok', ([], {'name': '"""over_args"""', 'etoks': 'b', 'raw': 'acc', 'rule': '"""2"""'}), "(name='over_args', etoks=b, raw=acc, rule='2')\n", (31525, 31571), False, 'from tokenlib import Etok\n'), ((31741, 31797), 'tokenlib.Etok', 'Etok', ([], {'name': '"""over_args"""', 'etoks': 'b[0::2]', 'raw': 'acc', 'rule': '"""3"""'}), "(name='over_args', etoks=b[0::2], raw=acc, rule='3')\n", (31745, 31797), False, 'from tokenlib import Etok\n'), ((38290, 38328), 'parser_combinator.next_type', 'c.next_type', (['"""HIERARCHICAL_IDENTIFIER"""'], {}), "('HIERARCHICAL_IDENTIFIER')\n", (38301, 38328), True, 'import parser_combinator as c\n'), ((72984, 73000), 'parser_combinator.next_word', 'next_word', (['"""and"""'], {}), "('and')\n", (72993, 73000), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((73457, 73476), 'parser_combinator.next_word', 'next_word', (['"""thesis"""'], {}), "('thesis')\n", (73466, 73476), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((81570, 81588), 'parser_combinator.next_word', 'next_word', (['"""every"""'], {}), "('every')\n", (81579, 81588), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((86261, 86310), 'parser_combinator.next_type', 'c.next_type', (["['VAR', 'WORD', 'ATOMIC_IDENTIFIER']"], {}), "(['VAR', 'WORD', 'ATOMIC_IDENTIFIER'])\n", (86272, 86310), True, 'import parser_combinator as c\n'), ((86481, 86506), 'parser_combinator.next_type', 'c.next_type', (['"""CONTROLSEQ"""'], {}), "('CONTROLSEQ')\n", (86492, 86506), True, 'import parser_combinator as c\n'), ((93685, 93733), 'tokenlib.Etok', 'Etok', (['"""word_extended"""'], {'etoks': '(w, wp, o)', 'raw': 'acc'}), "('word_extended', etoks=(w, wp, o), raw=acc)\n", (93689, 93733), False, 'from tokenlib import Etok\n'), ((94784, 94824), 'tokenlib.Etok', 'Etok', (['"""word_pattern"""'], {'etoks': 'vws', 'raw': 'acc'}), "('word_pattern', etoks=vws, raw=acc)\n", (94788, 94824), False, 'from tokenlib import Etok\n'), ((95495, 95541), 'tokenlib.Etok', 'Etok', (['"""notion_pattern"""'], {'etoks': '(v, wp)', 'raw': 'acc'}), "('notion_pattern', etoks=(v, wp), raw=acc)\n", (95499, 95541), False, 'from tokenlib import Etok\n'), ((95874, 95923), 'tokenlib.Etok', 'Etok', (['"""adjective_pattern"""'], {'etoks': '(v, wp)', 'raw': 'acc'}), "('adjective_pattern', etoks=(v, wp), raw=acc)\n", (95878, 95923), False, 'from tokenlib import Etok\n'), ((96314, 96382), 'tokenlib.Etok', 'Etok', ([], {'name': '"""var_multisubject_pattern"""', 'etoks': '(v1, v2, None)', 'raw': 'acc'}), "(name='var_multisubject_pattern', etoks=(v1, v2, None), raw=acc)\n", (96318, 96382), False, 'from tokenlib import Etok\n'), ((96457, 96522), 'tokenlib.Etok', 'Etok', ([], {'name': '"""var_multisubject_pattern"""', 'etoks': '(v1, v2, o)', 'raw': 'acc'}), "(name='var_multisubject_pattern', etoks=(v1, v2, o), raw=acc)\n", (96461, 96522), False, 'from tokenlib import Etok\n'), ((96919, 96980), 'tokenlib.Etok', 'Etok', (['"""adjective_multisubject_pattern"""'], {'etoks': '(v, w)', 'raw': 'acc'}), "('adjective_multisubject_pattern', etoks=(v, w), raw=acc)\n", (96923, 96980), False, 'from tokenlib import Etok\n'), ((97261, 97301), 'tokenlib.Etok', 'Etok', (['"""verb_pattern"""'], {'etoks': 'acc', 'raw': 'acc'}), "('verb_pattern', etoks=acc, raw=acc)\n", (97265, 97301), False, 'from tokenlib import Etok\n'), ((97467, 97520), 'tokenlib.Etok', 'Etok', (['"""verb_multisubject_pattern"""'], {'etoks': 'acc', 'raw': 'acc'}), "('verb_multisubject_pattern', etoks=acc, raw=acc)\n", (97471, 97520), False, 'from tokenlib import Etok\n'), ((98266, 98316), 'tokenlib.Etok', 'Etok', (['"""controlseq_pattern"""'], {'etoks': '(a, vs)', 'raw': 'acc'}), "('controlseq_pattern', etoks=(a, vs), raw=acc)\n", (98270, 98316), False, 'from tokenlib import Etok\n'), ((98545, 98605), 'tokenlib.Etok', 'Etok', (['"""binary_controlseq_pattern"""'], {'etoks': '(v, c, v2)', 'raw': 'acc'}), "('binary_controlseq_pattern', etoks=(v, c, v2), raw=acc)\n", (98549, 98605), False, 'from tokenlib import Etok\n'), ((98819, 98865), 'tokenlib.Etok', 'Etok', (['"""identifier_pattern"""'], {'etoks': 'acc', 'raw': 'acc'}), "('identifier_pattern', etoks=acc, raw=acc)\n", (98823, 98865), False, 'from tokenlib import Etok\n'), ((99556, 99603), 'tokenlib.Etok', 'Etok', (['"""precedence_level"""'], {'etoks': '(i, a)', 'raw': 'acc'}), "('precedence_level', etoks=(i, a), raw=acc)\n", (99560, 99603), False, 'from tokenlib import Etok\n'), ((100118, 100131), 'parser_combinator.getvalue', 'c.getvalue', (['a'], {}), '(a)\n', (100128, 100131), True, 'import parser_combinator as c\n'), ((101150, 101196), 'tokenlib.Etok', 'Etok', (['"""symbol_pattern"""'], {'etoks': '(vs, p)', 'raw': 'acc'}), "('symbol_pattern', etoks=(vs, p), raw=acc)\n", (101154, 101196), False, 'from tokenlib import Etok\n'), ((102029, 102085), 'tokenlib.Etok', 'Etok', (['"""binary_symbol_pattern"""'], {'etoks': '(v, s, v2)', 'raw': 'acc'}), "('binary_symbol_pattern', etoks=(v, s, v2), raw=acc)\n", (102033, 102085), False, 'from tokenlib import Etok\n'), ((102634, 102677), 'tokenlib.Etok', 'Etok', ([], {'name': '"""in_section"""', 'etoks': '[d]', 'raw': 'acc'}), "(name='in_section', etoks=[d], raw=acc)\n", (102638, 102677), False, 'from tokenlib import Etok\n'), ((103114, 103147), 'tokenlib.Etok', 'Etok', (['"""copula"""'], {'etoks': '[]', 'raw': 'acc'}), "('copula', etoks=[], raw=acc)\n", (103118, 103147), False, 'from tokenlib import Etok\n'), ((103638, 103686), 'tokenlib.Etok', 'Etok', ([], {'name': '"""function_copula"""', 'etoks': '[o]', 'raw': 'acc'}), "(name='function_copula', etoks=[o], raw=acc)\n", (103642, 103686), False, 'from tokenlib import Etok\n'), ((104331, 104368), 'tokenlib.Etok', 'Etok', (['"""opt_define"""'], {'etoks': '[]', 'raw': 'acc'}), "('opt_define', etoks=[], raw=acc)\n", (104335, 104368), False, 'from tokenlib import Etok\n'), ((104651, 104713), 'tokenlib.Etok', 'Etok', ([], {'name': '"""classifier_word_pattern"""', 'etoks': 'acc[0::2]', 'raw': 'acc'}), "(name='classifier_word_pattern', etoks=acc[0::2], raw=acc)\n", (104655, 104713), False, 'from tokenlib import Etok\n'), ((105273, 105295), 'tokenlib.Etok.rawupdate', 'Etok.rawupdate', (['w', 'acc'], {}), '(w, acc)\n', (105287, 105295), False, 'from tokenlib import Etok\n'), ((106361, 106400), 'tokenlib.Etok', 'Etok', (['"""type_def"""'], {'etoks': '(h, t)', 'raw': 'acc'}), "('type_def', etoks=(h, t), raw=acc)\n", (106365, 106400), False, 'from tokenlib import Etok\n'), ((106472, 106511), 'tokenlib.Etok', 'Etok', (['"""type_def"""'], {'etoks': '(h, t)', 'raw': 'acc'}), "('type_def', etoks=(h, t), raw=acc)\n", (106476, 106511), False, 'from tokenlib import Etok\n'), ((107551, 107594), 'tokenlib.Etok', 'Etok', (['"""function_def"""'], {'etoks': '(h, p)', 'raw': 'acc'}), "('function_def', etoks=(h, p), raw=acc)\n", (107555, 107594), False, 'from tokenlib import Etok\n'), ((108495, 108539), 'tokenlib.Etok', 'Etok', (['"""predicate_def"""'], {'etoks': '(h, s)', 'raw': 'acc'}), "('predicate_def', etoks=(h, s), raw=acc)\n", (108499, 108539), False, 'from tokenlib import Etok\n'), ((109125, 109176), 'tokenlib.Etok', 'Etok', (['"""binder_def"""'], {'etoks': '(s, v, t, v2, p)', 'raw': 'acc'}), "('binder_def', etoks=(s, v, t, v2, p), raw=acc)\n", (109129, 109176), False, 'from tokenlib import Etok\n'), ((109822, 109844), 'tokenlib.Etok.rawupdate', 'Etok.rawupdate', (['d', 'acc'], {}), '(d, acc)\n', (109836, 109844), False, 'from tokenlib import Etok\n'), ((110167, 110214), 'tokenlib.Etok', 'Etok', ([], {'name': '"""definition_label"""', 'etoks': 'l', 'raw': 'acc'}), "(name='definition_label', etoks=l, raw=acc)\n", (110171, 110214), False, 'from tokenlib import Etok\n'), ((111228, 111272), 'tokenlib.Etok', 'Etok', (['"""definition"""'], {'etoks': '(p, a, d)', 'raw': 'acc'}), "('definition', etoks=(p, a, d), raw=acc)\n", (111232, 111272), False, 'from tokenlib import Etok\n'), ((112629, 112671), 'tokenlib.Etok', 'Etok', (['"""macro"""'], {'etoks': '(s, b[0::2])', 'raw': 'acc'}), "('macro', etoks=(s, b[0::2]), raw=acc)\n", (112633, 112671), False, 'from tokenlib import Etok\n'), ((114854, 114876), 'parser_combinator.lazy_call', 'c.lazy_call', (['lookup[s]'], {}), '(lookup[s])\n', (114865, 114876), True, 'import parser_combinator as c\n'), ((3669, 3683), 'tokenlib.Etok.etok', 'Etok.etok', (['acc'], {}), '(acc)\n', (3678, 3683), False, 'from tokenlib import Etok\n'), ((6423, 6440), 'parser_combinator.next_word', 'next_word', (['"""that"""'], {}), "('that')\n", (6432, 6440), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((6680, 6697), 'parser_combinator.next_word', 'next_word', (['"""that"""'], {}), "('that')\n", (6689, 6697), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((7938, 7956), 'parser_combinator.next_word', 'next_word', (['"""exist"""'], {}), "('exist')\n", (7947, 7956), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((8244, 8286), 'parser_combinator.first_phrase', 'first_phrase', (["['is', 'are', 'be', 'to be']"], {}), "(['is', 'are', 'be', 'to be'])\n", (8256, 8286), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((9011, 9028), 'parser_combinator.next_word', 'next_word', (['"""that"""'], {}), "('that')\n", (9020, 9028), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((10806, 10892), 'parser_combinator.Parse.first', 'Parse.first', (["[local_lit_dict['document'], lit_dict['theorem'], lit_dict['axiom']]"], {}), "([local_lit_dict['document'], lit_dict['theorem'], lit_dict[\n 'axiom']])\n", (10817, 10892), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((11040, 11090), 'parser_combinator.first_phrase', 'first_phrase', (["['each and every', 'some and every']"], {}), "(['each and every', 'some and every'])\n", (11052, 11090), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((11092, 11132), 'parser_combinator.first_word', 'first_word', (['"""every each all any some no"""'], {}), "('every each all any some no')\n", (11102, 11132), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((12822, 12884), 'exception.ErrorItem', 'ErrorItem', (['item', 'primitive_nonterminal', '"""undeclared primitive"""'], {}), "(item, primitive_nonterminal, 'undeclared primitive')\n", (12831, 12884), False, 'from exception import ParseError, ParseNoCatch, ErrorItem\n'), ((15082, 15100), 'parser_combinator.can_wordify', 'c.can_wordify', (['tok'], {}), '(tok)\n', (15095, 15100), True, 'import parser_combinator as c\n'), ((15171, 15195), 'parser_combinator.plus_andcomma', 'c.plus_andcomma', (['synlist'], {}), '(synlist)\n', (15186, 15195), True, 'import parser_combinator as c\n'), ((16690, 16708), 'parser_combinator.Parse.next_token', 'Parse.next_token', ([], {}), '()\n', (16706, 16708), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((21939, 21954), 'parser_combinator.next_value', 'next_value', (['"""_"""'], {}), "('_')\n", (21949, 21954), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((37117, 37140), 'parser_combinator.next_type', 'c.next_type', (['"""COERCION"""'], {}), "('COERCION')\n", (37128, 37140), True, 'import parser_combinator as c\n'), ((44361, 44384), 'parser_combinator.next_type', 'c.next_type', (['"""APPLYSUB"""'], {}), "('APPLYSUB')\n", (44372, 44384), True, 'import parser_combinator as c\n'), ((49022, 49037), 'lib.fflatten', 'lib.fflatten', (['a'], {}), '(a)\n', (49034, 49037), False, 'import lib, word_lists\n'), ((49161, 49179), 'parser_combinator.next_word', 'c.next_word', (['"""end"""'], {}), "('end')\n", (49172, 49179), True, 'import parser_combinator as c\n'), ((50122, 50137), 'lib.fflatten', 'lib.fflatten', (['b'], {}), '(b)\n', (50134, 50137), False, 'import lib, word_lists\n'), ((50315, 50331), 'parser_combinator.next_word', 'next_word', (['"""end"""'], {}), "('end')\n", (50324, 50331), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((50696, 50711), 'lib.fflatten', 'lib.fflatten', (['b'], {}), '(b)\n', (50708, 50711), False, 'import lib, word_lists\n'), ((50909, 50925), 'parser_combinator.next_word', 'next_word', (['"""end"""'], {}), "('end')\n", (50918, 50925), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((54613, 54631), 'parser_combinator.next_word', 'next_word', (['"""where"""'], {}), "('where')\n", (54622, 54631), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((57217, 57236), 'lib.fflatten', 'lib.fflatten', (['etoks'], {}), '(etoks)\n', (57229, 57236), False, 'import lib, word_lists\n'), ((60114, 60131), 'parser_combinator.next_word', 'next_word', (['"""that"""'], {}), "('that')\n", (60123, 60131), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((60337, 60363), 'parser_combinator.next_phrase', 'c.next_phrase', (['"""such that"""'], {}), "('such that')\n", (60350, 60363), True, 'import parser_combinator as c\n'), ((78623, 78639), 'lib.fflatten', 'lib.fflatten', (['c1'], {}), '(c1)\n', (78635, 78639), False, 'import lib, word_lists\n'), ((78845, 78863), 'parser_combinator.next_word', 'c.next_word', (['"""end"""'], {}), "('end')\n", (78856, 78863), True, 'import parser_combinator as c\n'), ((83971, 83988), 'parser_combinator.next_word', 'next_word', (['"""that"""'], {}), "('that')\n", (83980, 83988), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((86211, 86237), 'parser_combinator.singularize', 'c.singularize', (['token.value'], {}), '(token.value)\n', (86224, 86237), True, 'import parser_combinator as c\n'), ((94741, 94764), 'lib.fflatten', 'lib.fflatten', (['(vws + [v])'], {}), '(vws + [v])\n', (94753, 94764), False, 'import lib, word_lists\n'), ((113911, 113926), 'parser_combinator.next_value', 'next_value', (['"""="""'], {}), "('=')\n", (113921, 113926), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((3858, 3876), 'parser_combinator.Parse.next_token', 'Parse.next_token', ([], {}), '()\n', (3874, 3876), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((4010, 4022), 'parser_combinator.balanced', 'c.balanced', ([], {}), '()\n', (4020, 4022), True, 'import parser_combinator as c\n'), ((5575, 5590), 'parser_combinator.next_any_word', 'next_any_word', ([], {}), '()\n', (5588, 5590), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((5681, 5713), 'exception.ErrorItem', 'ErrorItem', (['item', '"""next_word_net"""'], {}), "(item, 'next_word_net')\n", (5690, 5713), False, 'from exception import ParseError, ParseNoCatch, ErrorItem\n'), ((5812, 5837), 'tokenlib.update', 'tokenlib.update', (['[]', 'item'], {}), '([], item)\n', (5827, 5837), False, 'import tokenlib\n'), ((6598, 6613), 'parser_combinator.next_word', 'next_word', (['"""we"""'], {}), "('we')\n", (6607, 6613), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((8902, 8917), 'parser_combinator.next_word', 'next_word', (['"""we"""'], {}), "('we')\n", (8911, 8917), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((17368, 17408), 'parser_combinator.bracket', 'c.bracket', (['Instruction._keyword_instruct'], {}), '(Instruction._keyword_instruct)\n', (17377, 17408), True, 'import parser_combinator as c\n'), ((19163, 19181), 'parser_combinator.next_type', 'c.next_type', (['"""VAR"""'], {}), "('VAR')\n", (19174, 19181), True, 'import parser_combinator as c\n'), ((27765, 27785), 'parser_combinator.next_word', 'next_word', (['"""holding"""'], {}), "('holding')\n", (27774, 27785), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((34044, 34058), 'tokenlib.Etok.etok', 'Etok.etok', (['acc'], {}), '(acc)\n', (34053, 34058), False, 'from tokenlib import Etok\n'), ((34570, 34594), 'parser_combinator.lazy_call', 'c.lazy_call', (['binder_type'], {}), '(binder_type)\n', (34581, 34594), True, 'import parser_combinator as c\n'), ((36765, 36780), 'parser_combinator.next_word', 'next_word', (['"""by"""'], {}), "('by')\n", (36774, 36780), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((40793, 40811), 'parser_combinator.next_word', 'next_word', (['"""proof"""'], {}), "('proof')\n", (40802, 40811), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((40812, 40829), 'parser_combinator.next_value', 'c.next_value', (['"""_"""'], {}), "('_')\n", (40824, 40829), True, 'import parser_combinator as c\n'), ((47475, 47492), 'parser_combinator.next_word', 'next_word', (['"""make"""'], {}), "('make')\n", (47484, 47492), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((48683, 48704), 'parser_combinator.next_type', 'c.next_type', (['"""ASSIGN"""'], {}), "('ASSIGN')\n", (48694, 48704), True, 'import parser_combinator as c\n'), ((49109, 49128), 'parser_combinator.next_word', 'c.next_word', (['"""case"""'], {}), "('case')\n", (49120, 49128), True, 'import parser_combinator as c\n'), ((49130, 49159), 'parser_combinator.balanced_condition', 'c.balanced_condition', (['not_end'], {}), '(not_end)\n', (49150, 49159), True, 'import parser_combinator as c\n'), ((49723, 49744), 'parser_combinator.next_type', 'c.next_type', (['"""ASSIGN"""'], {}), "('ASSIGN')\n", (49734, 49744), True, 'import parser_combinator as c\n'), ((50283, 50312), 'parser_combinator.balanced_condition', 'c.balanced_condition', (['not_end'], {}), '(not_end)\n', (50303, 50312), True, 'import parser_combinator as c\n'), ((50865, 50894), 'parser_combinator.balanced_condition', 'c.balanced_condition', (['not_end'], {}), '(not_end)\n', (50885, 50894), True, 'import parser_combinator as c\n'), ((52579, 52594), 'parser_combinator.next_word', 'next_word', (['"""in"""'], {}), "('in')\n", (52588, 52594), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((53131, 53148), 'parser_combinator.next_word', 'next_word', (['"""else"""'], {}), "('else')\n", (53140, 53148), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((55836, 55852), 'parser_combinator.next_word', 'next_word', (['"""the"""'], {}), "('the')\n", (55845, 55852), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((63307, 63321), 'lib.flatten', 'lib.flatten', (['m'], {}), '(m)\n', (63318, 63321), False, 'import lib, word_lists\n'), ((65503, 65524), 'parser_combinator.next_type', 'c.next_type', (['"""ASSIGN"""'], {}), "('ASSIGN')\n", (65514, 65524), True, 'import parser_combinator as c\n'), ((66190, 66214), 'parser_combinator.lazy_call', 'c.lazy_call', (['binder_prop'], {}), '(binder_prop)\n', (66201, 66214), True, 'import parser_combinator as c\n'), ((67587, 67602), 'parser_combinator.next_word', 'next_word', (['"""no"""'], {}), "('no')\n", (67596, 67602), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((72237, 72278), 'parser_combinator.lazy_call', 'c.lazy_call', (['pseudoterm_without_attribute'], {}), '(pseudoterm_without_attribute)\n', (72248, 72278), True, 'import parser_combinator as c\n'), ((73427, 73443), 'parser_combinator.next_word', 'next_word', (['"""the"""'], {}), "('the')\n", (73436, 73443), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((74506, 74535), 'parser_combinator.lazy_call', 'c.lazy_call', (['symbol_statement'], {}), '(symbol_statement)\n', (74517, 74535), True, 'import parser_combinator as c\n'), ((74631, 74660), 'parser_combinator.lazy_call', 'c.lazy_call', (['symbol_statement'], {}), '(symbol_statement)\n', (74642, 74660), True, 'import parser_combinator as c\n'), ((74697, 74713), 'parser_combinator.next_word', 'next_word', (['"""not"""'], {}), "('not')\n", (74706, 74713), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((74716, 74745), 'parser_combinator.lazy_call', 'c.lazy_call', (['symbol_statement'], {}), '(symbol_statement)\n', (74727, 74745), True, 'import parser_combinator as c\n'), ((74785, 74814), 'parser_combinator.lazy_call', 'c.lazy_call', (['symbol_statement'], {}), '(symbol_statement)\n', (74796, 74814), True, 'import parser_combinator as c\n'), ((77831, 77846), 'parser_combinator.next_word', 'next_word', (['"""we"""'], {}), "('we')\n", (77840, 77846), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((77860, 77882), 'parser_combinator.next_word', 'next_word', (['"""introduce"""'], {}), "('introduce')\n", (77869, 77882), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((78813, 78842), 'parser_combinator.balanced_condition', 'c.balanced_condition', (['not_end'], {}), '(not_end)\n', (78833, 78842), True, 'import parser_combinator as c\n'), ((80186, 80201), 'parser_combinator.brace_semif', 'c.brace_semif', ([], {}), '()\n', (80199, 80201), True, 'import parser_combinator as c\n'), ((80729, 80747), 'parser_combinator.Parse.next_token', 'Parse.next_token', ([], {}), '()\n', (80745, 80747), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((82662, 82674), 'tokenlib.Etok.etok', 'Etok.etok', (['l'], {}), '(l)\n', (82671, 82674), False, 'from tokenlib import Etok\n'), ((83372, 83393), 'parser_combinator.first_word', 'first_word', (['"""fix let"""'], {}), "('fix let')\n", (83382, 83393), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((87774, 87810), 'parser_combinator.next_phrase', 'next_phrase', (['"""we proceed as follows"""'], {}), "('we proceed as follows')\n", (87785, 87810), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((88030, 88072), 'parser_combinator.next_phrase', 'next_phrase', (['"""the other cases are similar"""'], {}), "('the other cases are similar')\n", (88041, 88072), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((89036, 89051), 'parser_combinator.next_word', 'next_word', (['"""by"""'], {}), "('by')\n", (89045, 89051), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((90406, 90425), 'parser_combinator.next_word', 'next_word', (['"""indeed"""'], {}), "('indeed')\n", (90415, 90425), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((95317, 95333), 'parser_combinator.next_word', 'next_word', (['"""the"""'], {}), "('the')\n", (95326, 95333), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((100211, 100232), 'parser_combinator.next_type', 'c.next_type', (['"""SYMBOL"""'], {}), "('SYMBOL')\n", (100222, 100232), True, 'import parser_combinator as c\n'), ((103238, 103254), 'parser_combinator.next_value', 'next_value', (['""":="""'], {}), "(':=')\n", (103248, 103254), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((105423, 105446), 'parser_combinator.next_word', 'next_word', (['"""classifier"""'], {}), "('classifier')\n", (105432, 105446), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((112536, 112552), 'parser_combinator.next_word', 'next_word', (['"""and"""'], {}), "('and')\n", (112545, 112552), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((7906, 7924), 'parser_combinator.next_word', 'next_word', (['"""there"""'], {}), "('there')\n", (7915, 7924), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((8286, 8302), 'parser_combinator.next_word', 'next_word', (['"""the"""'], {}), "('the')\n", (8295, 8302), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((15119, 15137), 'parser_combinator.Parse.next_token', 'Parse.next_token', ([], {}), '()\n', (15135, 15137), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((21143, 21159), 'parser_combinator.next_value', 'next_value', (['""":="""'], {}), "(':=')\n", (21153, 21159), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((33402, 33421), 'parser_combinator.next_type', 'c.next_type', (['"""TMID"""'], {}), "('TMID')\n", (33413, 33421), True, 'import parser_combinator as c\n'), ((38907, 38925), 'parser_combinator.next_type', 'c.next_type', (['"""ALT"""'], {}), "('ALT')\n", (38918, 38925), True, 'import parser_combinator as c\n'), ((42842, 42860), 'parser_combinator.Parse.next_token', 'Parse.next_token', ([], {}), '()\n', (42858, 42860), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((45463, 45481), 'parser_combinator.next_type', 'c.next_type', (['"""MID"""'], {}), "('MID')\n", (45474, 45481), True, 'import parser_combinator as c\n'), ((48635, 48653), 'parser_combinator.next_type', 'c.next_type', (['"""ALT"""'], {}), "('ALT')\n", (48646, 48653), True, 'import parser_combinator as c\n'), ((49691, 49709), 'parser_combinator.next_type', 'c.next_type', (['"""ALT"""'], {}), "('ALT')\n", (49702, 49709), True, 'import parser_combinator as c\n'), ((50251, 50268), 'parser_combinator.next_word', 'next_word', (['"""with"""'], {}), "('with')\n", (50260, 50268), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((51797, 51818), 'parser_combinator.next_type', 'c.next_type', (['"""MAPSTO"""'], {}), "('MAPSTO')\n", (51808, 51818), True, 'import parser_combinator as c\n'), ((52076, 52097), 'parser_combinator.next_type', 'c.next_type', (['"""ASSIGN"""'], {}), "('ASSIGN')\n", (52087, 52097), True, 'import parser_combinator as c\n'), ((73198, 73216), 'parser_combinator.next_word', 'next_word', (['"""there"""'], {}), "('there')\n", (73207, 73216), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((82746, 82761), 'parser_combinator.next_word', 'next_word', (['"""be"""'], {}), "('be')\n", (82755, 82761), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((82791, 82809), 'parser_combinator.next_word', 'next_word', (['"""fixed"""'], {}), "('fixed')\n", (82800, 82809), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((87990, 88010), 'parser_combinator.next_word', 'next_word', (['"""follows"""'], {}), "('follows')\n", (87999, 88010), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((88092, 88119), 'parser_combinator.next_phrase', 'next_phrase', (['"""the proof is"""'], {}), "('the proof is')\n", (88103, 88119), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((88121, 88163), 'parser_combinator.first_word', 'first_word', (['"""obvious trivial easy routine"""'], {}), "('obvious trivial easy routine')\n", (88131, 88163), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((90700, 90717), 'parser_combinator.next_word', 'next_word', (['"""that"""'], {}), "('that')\n", (90709, 90717), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((92521, 92555), 'parser_combinator.lazy_call', 'c.lazy_call', (['Proof_step.proof_body'], {}), '(Proof_step.proof_body)\n', (92532, 92555), True, 'import parser_combinator as c\n'), ((93044, 93079), 'parser_combinator.next_any_word_except', 'c.next_any_word_except', (['pattern_key'], {}), '(pattern_key)\n', (93066, 93079), True, 'import parser_combinator as c\n'), ((100359, 100380), 'parser_combinator.next_type', 'c.next_type', (['"""SYMBOL"""'], {}), "('SYMBOL')\n", (100370, 100380), True, 'import parser_combinator as c\n'), ((102692, 102714), 'parser_combinator.next_phrase', 'next_phrase', (['"""in this"""'], {}), "('in this')\n", (102703, 102714), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((103768, 103784), 'parser_combinator.next_value', 'next_value', (['""":="""'], {}), "(':=')\n", (103778, 103784), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((3956, 3974), 'parser_combinator.Parse.next_token', 'Parse.next_token', ([], {}), '()\n', (3972, 3974), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((9654, 9683), 'parser_combinator.first_word', 'first_word', (['"""record register"""'], {}), "('record register')\n", (9664, 9683), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((9760, 9778), 'parser_combinator.Parse.word', 'Parse.word', (['"""that"""'], {}), "('that')\n", (9770, 9778), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((12510, 12528), 'parser_combinator.Parse.next_token', 'Parse.next_token', ([], {}), '()\n', (12526, 12528), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((17302, 17322), 'parser_combinator.next_word', 'next_word', (['"""synonym"""'], {}), "('synonym')\n", (17311, 17322), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((36664, 36685), 'parser_combinator.next_word', 'next_word', (['"""quotient"""'], {}), "('quotient')\n", (36673, 36685), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((41956, 41978), 'parser_combinator.next_word', 'next_word', (['"""structure"""'], {}), "('structure')\n", (41965, 41978), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((42084, 42101), 'parser_combinator.next_word', 'next_word', (['"""with"""'], {}), "('with')\n", (42093, 42101), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((50215, 50233), 'parser_combinator.next_word', 'next_word', (['"""match"""'], {}), "('match')\n", (50224, 50233), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((50791, 50812), 'parser_combinator.next_word', 'next_word', (['"""function"""'], {}), "('function')\n", (50800, 50812), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((52040, 52056), 'parser_combinator.next_word', 'next_word', (['"""fun"""'], {}), "('fun')\n", (52049, 52056), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((52522, 52543), 'parser_combinator.next_type', 'c.next_type', (['"""ASSIGN"""'], {}), "('ASSIGN')\n", (52533, 52543), True, 'import parser_combinator as c\n'), ((53078, 53095), 'parser_combinator.next_word', 'next_word', (['"""then"""'], {}), "('then')\n", (53087, 53095), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((72111, 72128), 'parser_combinator.next_word', 'next_word', (['"""type"""'], {}), "('type')\n", (72120, 72128), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((73230, 73245), 'parser_combinator.next_word', 'next_word', (['"""no"""'], {}), "('no')\n", (73239, 73245), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((78066, 78098), 'parser_combinator.balanced_condition', 'c.balanced_condition', (['not_period'], {}), '(not_period)\n', (78086, 78098), True, 'import parser_combinator as c\n'), ((82700, 82716), 'parser_combinator.next_word', 'next_word', (['"""let"""'], {}), "('let')\n", (82709, 82716), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((90327, 90345), 'parser_combinator.next_word', 'next_word', (['"""proof"""'], {}), "('proof')\n", (90336, 90345), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((91767, 91784), 'parser_combinator.next_word', 'next_word', (['"""case"""'], {}), "('case')\n", (91776, 91784), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((95572, 95587), 'parser_combinator.next_word', 'next_word', (['"""is"""'], {}), "('is')\n", (95581, 95587), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((95954, 95969), 'parser_combinator.next_word', 'next_word', (['"""is"""'], {}), "('is')\n", (95963, 95969), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((97044, 97060), 'parser_combinator.next_word', 'next_word', (['"""are"""'], {}), "('are')\n", (97053, 97060), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((99646, 99676), 'parser_combinator.next_phrase', 'next_phrase', (['"""with precedence"""'], {}), "('with precedence')\n", (99657, 99676), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((104411, 104430), 'parser_combinator.next_word', 'next_word', (['"""define"""'], {}), "('define')\n", (104420, 104430), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((104757, 104800), 'parser_combinator.next_any_word_except', 'c.next_any_word_except', (["['is', 'are', 'be']"], {}), "(['is', 'are', 'be'])\n", (104779, 104800), True, 'import parser_combinator as c\n'), ((106833, 106858), 'parser_combinator.next_phrase', 'c.next_phrase', (['"""the type"""'], {}), "('the type')\n", (106846, 106858), True, 'import parser_combinator as c\n'), ((9702, 9730), 'parser_combinator.Parse.word', 'Parse.word', (['"""identification"""'], {}), "('identification')\n", (9712, 9730), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((52470, 52486), 'parser_combinator.next_word', 'next_word', (['"""let"""'], {}), "('let')\n", (52479, 52486), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((53020, 53035), 'parser_combinator.next_word', 'next_word', (['"""if"""'], {}), "('if')\n", (53029, 53035), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((59402, 59418), 'parser_combinator.next_word', 'next_word', (['"""non"""'], {}), "('non')\n", (59411, 59418), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((65437, 65453), 'parser_combinator.next_word', 'next_word', (['"""fun"""'], {}), "('fun')\n", (65446, 65453), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((77065, 77082), 'parser_combinator.next_word', 'next_word', (['"""then"""'], {}), "('then')\n", (77074, 77082), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((78722, 78746), 'parser_combinator.next_word', 'c.next_word', (['"""inductive"""'], {}), "('inductive')\n", (78733, 78746), True, 'import parser_combinator as c\n'), ((80106, 80127), 'parser_combinator.next_word', 'next_word', (['"""moreover"""'], {}), "('moreover')\n", (80115, 80127), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((85436, 85457), 'parser_combinator.next_word', 'next_word', (['"""moreover"""'], {}), "('moreover')\n", (85445, 85457), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((87830, 87846), 'parser_combinator.next_word', 'next_word', (['"""the"""'], {}), "('the')\n", (87839, 87846), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((87867, 87923), 'parser_combinator.first_word', 'first_word', (['"""result lemma theorem proposition corollary"""'], {}), "('result lemma theorem proposition corollary')\n", (87877, 87923), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((88627, 88642), 'parser_combinator.next_word', 'next_word', (['"""by"""'], {}), "('by')\n", (88636, 88642), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((89072, 89120), 'parser_combinator.first_phrase', 'first_phrase', (["['contradiction', 'case analysis']"], {}), "(['contradiction', 'case analysis'])\n", (89084, 89120), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((90766, 90783), 'parser_combinator.next_word', 'next_word', (['"""that"""'], {}), "('that')\n", (90775, 90783), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((95989, 96008), 'parser_combinator.next_word', 'next_word', (['"""called"""'], {}), "('called')\n", (95998, 96008), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((97076, 97095), 'parser_combinator.next_word', 'next_word', (['"""called"""'], {}), "('called')\n", (97085, 97095), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((98946, 98966), 'parser_combinator.next_type', 'c.next_type', (['"""BLANK"""'], {}), "('BLANK')\n", (98957, 98966), True, 'import parser_combinator as c\n'), ((105324, 105340), 'parser_combinator.next_word', 'next_word', (['"""let"""'], {}), "('let')\n", (105333, 105340), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((9608, 9624), 'parser_combinator.Parse.word', 'Parse.word', (['"""we"""'], {}), "('we')\n", (9618, 9624), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((28696, 28716), 'parser_combinator.next_type', 'c.next_type', (['"""ARROW"""'], {}), "('ARROW')\n", (28707, 28716), True, 'import parser_combinator as c\n'), ((36688, 36703), 'parser_combinator.next_word', 'next_word', (['"""of"""'], {}), "('of')\n", (36697, 36703), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((76871, 76887), 'parser_combinator.next_word', 'next_word', (['"""for"""'], {}), "('for')\n", (76880, 76887), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((87943, 87959), 'parser_combinator.next_word', 'next_word', (['"""now"""'], {}), "('now')\n", (87952, 87959), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((99679, 99701), 'parser_combinator.next_type', 'c.next_type', (['"""INTEGER"""'], {}), "('INTEGER')\n", (99690, 99701), True, 'import parser_combinator as c\n'), ((99799, 99825), 'parser_combinator.next_word', 'next_word', (['"""associativity"""'], {}), "('associativity')\n", (99808, 99825), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((69092, 69113), 'parser_combinator.next_word', 'next_word', (['"""pairwise"""'], {}), "('pairwise')\n", (69101, 69113), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((77008, 77023), 'parser_combinator.next_word', 'next_word', (['"""if"""'], {}), "('if')\n", (77017, 77023), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((89139, 89161), 'parser_combinator.next_word', 'next_word', (['"""induction"""'], {}), "('induction')\n", (89148, 89161), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((91440, 91461), 'parser_combinator.next_word', 'next_word', (['"""moreover"""'], {}), "('moreover')\n", (91449, 91461), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((93799, 93814), 'parser_combinator.next_word', 'next_word', (['"""or"""'], {}), "('or')\n", (93808, 93814), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((99739, 99755), 'parser_combinator.next_word', 'next_word', (['"""and"""'], {}), "('and')\n", (99748, 99755), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((109200, 109229), 'parser_combinator.next_phrase', 'next_phrase', (['"""let the binder"""'], {}), "('let the binder')\n", (109211, 109229), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((41890, 41913), 'parser_combinator.next_word', 'next_word', (['"""notational"""'], {}), "('notational')\n", (41899, 41913), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((89246, 89263), 'parser_combinator.next_word', 'next_word', (['"""that"""'], {}), "('that')\n", (89255, 89263), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n'), ((89183, 89198), 'parser_combinator.next_word', 'next_word', (['"""on"""'], {}), "('on')\n", (89192, 89198), False, 'from parser_combinator import Parse, next_word, next_any_word, next_value, first_word, first_phrase, next_phrase, pstream\n')]
|
from pyraf import iraf
import glob, os
import numpy as np
import pylab as py
import math, datetime
import pyfits
from gcwork import objects
from . import dar
def diffDarOnOff(cleanDir1, cleanDir2):
files1tmp = glob.glob(cleanDir1 + '/c????.fits')
files2tmp = glob.glob(cleanDir2 + '/c????.fits')
for f1 in files1tmp:
cname1 = f1.split('/')[-1]
for f2 in files2tmp:
cname2 = f2.split('/')[-1]
if (cname1 == cname2):
outname = cname1.replace('c', 'diff')
print('IMARITH: %s - %s = %s' % (cname1, cname2, outname))
if (os.path.exists(outname)):
iraf.imdelete(outname)
iraf.imarith(f1, '-', f2, outname)
def plotScalePosangOverNight(alignRoot, imgDir):
# Read in the list of images used in the alignment
listFile = open(alignRoot+'.list', 'r')
parang = []
for line in listFile:
_data = line.split()
lisFile = _data[0].split('/')[-1]
if (lisFile.startswith('mag')):
continue
fitsFile = imgDir + lisFile.split('_')[0] + '.fits'
# Get header info
hdr = pyfits.getheader( fitsFile )
parang.append( hdr['PARANG'] )
parang = np.array(parang)
numEpochs = len(parang)
# Load scales/angles
scale = np.zeros(numEpochs, float)
angle = np.zeros(numEpochs, float)
sgrax = np.zeros(numEpochs, float)
sgray = np.zeros(numEpochs, float)
scaleErr = np.zeros(numEpochs, float)
angleErr = np.zeros(numEpochs, float)
sgraxErr = np.zeros(numEpochs, float)
sgrayErr = np.zeros(numEpochs, float)
imgPA = np.zeros(numEpochs, float)
for e in range(numEpochs):
trans = objects.Transform()
trans.loadFromAbsolute(root='./', align=alignRoot + '.trans', idx=e+1)
trans.linearToSpherical(silent=1, override=False)
scale[e] = trans.scale
angle[e] = math.degrees(trans.angle)
scale *= 9.96
py.clf()
py.subplot(2, 1, 1)
py.plot(parang, scale, 'k.')
py.ylabel('Plate Scale (mas/pix)')
py.xlabel('Parallactic Angle (deg)')
py.title('Relative Transformation')
py.subplot(2, 1, 2)
py.plot(parang, angle, 'k.')
py.ylabel('Position Angle (deg)')
py.xlabel('Parallactic Angle (deg)')
py.savefig('plots/scale_pa_vs_parang.png')
def plotDarCoeffsVsZenith():
effWave = 2.12 # microns
utc = datetime.datetime(2008, 6, 15, 0, 0, 0)
utc2hst = datetime.timedelta(hours=-10)
hst = utc + utc2hst
(refA, refB) = dar.keckDARcoeffs(effWave, hst.year, hst.month, hst.day,
hst.hour, hst.minute)
elevation = np.arange(30.0, 90.0, 1.0)
tanz = np.tan((90.0 - elevation) * math.pi / 180.0)
tmp = 1.0 + tanz**2
darCoeffL = tmp * (refA + 3.0 * refB * tanz**2)
darCoeffQ = -tmp * (refA*tanz +
3.0 * refB * (tanz + 2.0*tanz**3))
# Convert DAR coefficients for use with arcseconds
darCoeffL *= 1.0
darCoeffQ *= 1.0 / 206265.0
# 1" sep
linear1 = darCoeffL * 1.0 * 10**3 # in mas
quadra1 = darCoeffQ * 1.0**2 * 10**3 # in mas
# 10" sep
linear2 = darCoeffL * 10.0 * 10**3 # in mas
quadra2 = darCoeffQ * 10.0**2 * 10**3 # in mas
# 60" sep
linear3 = darCoeffL * 60.0 * 10**3 # in mas
quadra3 = darCoeffQ * 60.0**2 * 10**3 # in mas
print(' Linear(mas) Quardatic(mas)')
print('1" sep %12.7f %12.7f' % (linear1.mean(), quadra1.mean()))
print('10" sep %12.7f %12.7f' % (linear2.mean(), quadra2.mean()))
print('60" sep %12.7f %12.7f' % (linear3.mean(), quadra3.mean()))
py.clf()
py.semilogy(elevation, linear1, 'r-')
py.semilogy(elevation, -quadra1, 'r--')
py.semilogy(elevation, linear2, 'b-')
py.semilogy(elevation, -quadra2, 'b--')
py.semilogy(elevation, linear3, 'g-')
py.semilogy(elevation, -quadra3, 'g--')
py.legend(('1" lin', '1" quad',
'10" lin', '10" quad', '60" lin', '60" quad'), loc='lower left')
py.xlabel('Elevation (deg)')
py.ylabel('Delta-R (mas)')
py.savefig('dar_linear_vs_quad_terms.png')
py.savefig('dar_linear_vs_quad_terms.eps')
|
[
"pyraf.iraf.imarith",
"numpy.arange",
"glob.glob",
"pylab.title",
"pylab.ylabel",
"os.path.exists",
"numpy.tan",
"datetime.timedelta",
"pylab.xlabel",
"pylab.legend",
"gcwork.objects.Transform",
"datetime.datetime",
"pylab.subplot",
"pylab.savefig",
"math.degrees",
"pyraf.iraf.imdelete",
"pylab.semilogy",
"numpy.zeros",
"pyfits.getheader",
"numpy.array",
"pylab.clf",
"pylab.plot"
] |
[((215, 251), 'glob.glob', 'glob.glob', (["(cleanDir1 + '/c????.fits')"], {}), "(cleanDir1 + '/c????.fits')\n", (224, 251), False, 'import glob, os\n'), ((268, 304), 'glob.glob', 'glob.glob', (["(cleanDir2 + '/c????.fits')"], {}), "(cleanDir2 + '/c????.fits')\n", (277, 304), False, 'import glob, os\n'), ((1292, 1308), 'numpy.array', 'np.array', (['parang'], {}), '(parang)\n', (1300, 1308), True, 'import numpy as np\n'), ((1375, 1401), 'numpy.zeros', 'np.zeros', (['numEpochs', 'float'], {}), '(numEpochs, float)\n', (1383, 1401), True, 'import numpy as np\n'), ((1414, 1440), 'numpy.zeros', 'np.zeros', (['numEpochs', 'float'], {}), '(numEpochs, float)\n', (1422, 1440), True, 'import numpy as np\n'), ((1453, 1479), 'numpy.zeros', 'np.zeros', (['numEpochs', 'float'], {}), '(numEpochs, float)\n', (1461, 1479), True, 'import numpy as np\n'), ((1492, 1518), 'numpy.zeros', 'np.zeros', (['numEpochs', 'float'], {}), '(numEpochs, float)\n', (1500, 1518), True, 'import numpy as np\n'), ((1534, 1560), 'numpy.zeros', 'np.zeros', (['numEpochs', 'float'], {}), '(numEpochs, float)\n', (1542, 1560), True, 'import numpy as np\n'), ((1576, 1602), 'numpy.zeros', 'np.zeros', (['numEpochs', 'float'], {}), '(numEpochs, float)\n', (1584, 1602), True, 'import numpy as np\n'), ((1618, 1644), 'numpy.zeros', 'np.zeros', (['numEpochs', 'float'], {}), '(numEpochs, float)\n', (1626, 1644), True, 'import numpy as np\n'), ((1660, 1686), 'numpy.zeros', 'np.zeros', (['numEpochs', 'float'], {}), '(numEpochs, float)\n', (1668, 1686), True, 'import numpy as np\n'), ((1699, 1725), 'numpy.zeros', 'np.zeros', (['numEpochs', 'float'], {}), '(numEpochs, float)\n', (1707, 1725), True, 'import numpy as np\n'), ((2044, 2052), 'pylab.clf', 'py.clf', ([], {}), '()\n', (2050, 2052), True, 'import pylab as py\n'), ((2057, 2076), 'pylab.subplot', 'py.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (2067, 2076), True, 'import pylab as py\n'), ((2081, 2109), 'pylab.plot', 'py.plot', (['parang', 'scale', '"""k."""'], {}), "(parang, scale, 'k.')\n", (2088, 2109), True, 'import pylab as py\n'), ((2114, 2148), 'pylab.ylabel', 'py.ylabel', (['"""Plate Scale (mas/pix)"""'], {}), "('Plate Scale (mas/pix)')\n", (2123, 2148), True, 'import pylab as py\n'), ((2153, 2189), 'pylab.xlabel', 'py.xlabel', (['"""Parallactic Angle (deg)"""'], {}), "('Parallactic Angle (deg)')\n", (2162, 2189), True, 'import pylab as py\n'), ((2194, 2229), 'pylab.title', 'py.title', (['"""Relative Transformation"""'], {}), "('Relative Transformation')\n", (2202, 2229), True, 'import pylab as py\n'), ((2235, 2254), 'pylab.subplot', 'py.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (2245, 2254), True, 'import pylab as py\n'), ((2259, 2287), 'pylab.plot', 'py.plot', (['parang', 'angle', '"""k."""'], {}), "(parang, angle, 'k.')\n", (2266, 2287), True, 'import pylab as py\n'), ((2292, 2325), 'pylab.ylabel', 'py.ylabel', (['"""Position Angle (deg)"""'], {}), "('Position Angle (deg)')\n", (2301, 2325), True, 'import pylab as py\n'), ((2330, 2366), 'pylab.xlabel', 'py.xlabel', (['"""Parallactic Angle (deg)"""'], {}), "('Parallactic Angle (deg)')\n", (2339, 2366), True, 'import pylab as py\n'), ((2372, 2414), 'pylab.savefig', 'py.savefig', (['"""plots/scale_pa_vs_parang.png"""'], {}), "('plots/scale_pa_vs_parang.png')\n", (2382, 2414), True, 'import pylab as py\n'), ((2485, 2524), 'datetime.datetime', 'datetime.datetime', (['(2008)', '(6)', '(15)', '(0)', '(0)', '(0)'], {}), '(2008, 6, 15, 0, 0, 0)\n', (2502, 2524), False, 'import math, datetime\n'), ((2539, 2568), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(-10)'}), '(hours=-10)\n', (2557, 2568), False, 'import math, datetime\n'), ((2746, 2772), 'numpy.arange', 'np.arange', (['(30.0)', '(90.0)', '(1.0)'], {}), '(30.0, 90.0, 1.0)\n', (2755, 2772), True, 'import numpy as np\n'), ((2784, 2828), 'numpy.tan', 'np.tan', (['((90.0 - elevation) * math.pi / 180.0)'], {}), '((90.0 - elevation) * math.pi / 180.0)\n', (2790, 2828), True, 'import numpy as np\n'), ((3753, 3761), 'pylab.clf', 'py.clf', ([], {}), '()\n', (3759, 3761), True, 'import pylab as py\n'), ((3766, 3803), 'pylab.semilogy', 'py.semilogy', (['elevation', 'linear1', '"""r-"""'], {}), "(elevation, linear1, 'r-')\n", (3777, 3803), True, 'import pylab as py\n'), ((3808, 3847), 'pylab.semilogy', 'py.semilogy', (['elevation', '(-quadra1)', '"""r--"""'], {}), "(elevation, -quadra1, 'r--')\n", (3819, 3847), True, 'import pylab as py\n'), ((3853, 3890), 'pylab.semilogy', 'py.semilogy', (['elevation', 'linear2', '"""b-"""'], {}), "(elevation, linear2, 'b-')\n", (3864, 3890), True, 'import pylab as py\n'), ((3895, 3934), 'pylab.semilogy', 'py.semilogy', (['elevation', '(-quadra2)', '"""b--"""'], {}), "(elevation, -quadra2, 'b--')\n", (3906, 3934), True, 'import pylab as py\n'), ((3940, 3977), 'pylab.semilogy', 'py.semilogy', (['elevation', 'linear3', '"""g-"""'], {}), "(elevation, linear3, 'g-')\n", (3951, 3977), True, 'import pylab as py\n'), ((3982, 4021), 'pylab.semilogy', 'py.semilogy', (['elevation', '(-quadra3)', '"""g--"""'], {}), "(elevation, -quadra3, 'g--')\n", (3993, 4021), True, 'import pylab as py\n'), ((4027, 4127), 'pylab.legend', 'py.legend', (['(\'1" lin\', \'1" quad\', \'10" lin\', \'10" quad\', \'60" lin\', \'60" quad\')'], {'loc': '"""lower left"""'}), '((\'1" lin\', \'1" quad\', \'10" lin\', \'10" quad\', \'60" lin\',\n \'60" quad\'), loc=\'lower left\')\n', (4036, 4127), True, 'import pylab as py\n'), ((4144, 4172), 'pylab.xlabel', 'py.xlabel', (['"""Elevation (deg)"""'], {}), "('Elevation (deg)')\n", (4153, 4172), True, 'import pylab as py\n'), ((4177, 4203), 'pylab.ylabel', 'py.ylabel', (['"""Delta-R (mas)"""'], {}), "('Delta-R (mas)')\n", (4186, 4203), True, 'import pylab as py\n'), ((4209, 4251), 'pylab.savefig', 'py.savefig', (['"""dar_linear_vs_quad_terms.png"""'], {}), "('dar_linear_vs_quad_terms.png')\n", (4219, 4251), True, 'import pylab as py\n'), ((4256, 4298), 'pylab.savefig', 'py.savefig', (['"""dar_linear_vs_quad_terms.eps"""'], {}), "('dar_linear_vs_quad_terms.eps')\n", (4266, 4298), True, 'import pylab as py\n'), ((1210, 1236), 'pyfits.getheader', 'pyfits.getheader', (['fitsFile'], {}), '(fitsFile)\n', (1226, 1236), False, 'import pyfits\n'), ((1774, 1793), 'gcwork.objects.Transform', 'objects.Transform', ([], {}), '()\n', (1791, 1793), False, 'from gcwork import objects\n'), ((1994, 2019), 'math.degrees', 'math.degrees', (['trans.angle'], {}), '(trans.angle)\n', (2006, 2019), False, 'import math, datetime\n'), ((621, 644), 'os.path.exists', 'os.path.exists', (['outname'], {}), '(outname)\n', (635, 644), False, 'import glob, os\n'), ((706, 740), 'pyraf.iraf.imarith', 'iraf.imarith', (['f1', '"""-"""', 'f2', 'outname'], {}), "(f1, '-', f2, outname)\n", (718, 740), False, 'from pyraf import iraf\n'), ((667, 689), 'pyraf.iraf.imdelete', 'iraf.imdelete', (['outname'], {}), '(outname)\n', (680, 689), False, 'from pyraf import iraf\n')]
|
import re
import urllib
from collections import OrderedDict
from django.http import HttpResponseRedirect, FileResponse
from django.utils.text import slugify
from rest_framework import viewsets, renderers, mixins
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.reverse import reverse
from capapi.middleware import add_cache_header
from capdb import models
from capapi import serializers, filters, permissions, pagination
from capapi import renderers as capapi_renderers
from capdb.models import Citation
from django_elasticsearch_dsl_drf.constants import (
LOOKUP_FILTER_RANGE,
LOOKUP_QUERY_IN,
LOOKUP_QUERY_GT,
LOOKUP_QUERY_GTE,
LOOKUP_QUERY_LT,
LOOKUP_QUERY_LTE,
)
from django_elasticsearch_dsl_drf.filter_backends import (
FilteringFilterBackend,
IdsFilterBackend,
OrderingFilterBackend,
DefaultOrderingFilterBackend,
SearchFilterBackend,
)
from django_elasticsearch_dsl_drf.viewsets import BaseDocumentViewSet
from django_elasticsearch_dsl_drf.pagination import PageNumberPagination
from capapi.documents import CaseDocument
from capapi.serializers import CaseDocumentSerializer
class BaseViewSet(viewsets.ReadOnlyModelViewSet):
http_method_names = ['get']
class JurisdictionViewSet(BaseViewSet):
serializer_class = serializers.JurisdictionSerializer
filterset_class = filters.JurisdictionFilter
queryset = models.Jurisdiction.objects.order_by('name', 'pk')
lookup_field = 'slug'
class VolumeViewSet(BaseViewSet):
serializer_class = serializers.VolumeSerializer
queryset = models.VolumeMetadata.objects.order_by('pk').select_related(
'reporter'
).prefetch_related('reporter__jurisdictions')
class ReporterViewSet(BaseViewSet):
serializer_class = serializers.ReporterSerializer
filterset_class = filters.ReporterFilter
queryset = models.Reporter.objects.order_by('full_name', 'pk').prefetch_related('jurisdictions')
class CourtViewSet(BaseViewSet):
serializer_class = serializers.CourtSerializer
filterset_class = filters.CourtFilter
queryset = models.Court.objects.order_by('name', 'pk').select_related('jurisdiction')
lookup_field = 'slug'
class CitationViewSet(BaseViewSet):
serializer_class = serializers.CitationWithCaseSerializer
queryset = models.Citation.objects.order_by('pk')
class CaseViewSet(BaseViewSet):
serializer_class = serializers.CaseSerializer
queryset = models.CaseMetadata.objects.in_scope().select_related(
'volume',
'reporter',
).prefetch_related(
'citations'
).order_by(
'decision_date', 'id' # include id to get consistent ordering for cases with same date
)
renderer_classes = (
renderers.JSONRenderer,
capapi_renderers.BrowsableAPIRenderer,
capapi_renderers.XMLRenderer,
capapi_renderers.HTMLRenderer,
)
filterset_class = filters.CaseFilter
lookup_field = 'id'
def is_full_case_request(self):
return True if self.request.query_params.get('full_case', 'false').lower() == 'true' else False
def get_queryset(self):
if self.is_full_case_request():
return self.queryset.select_related('case_xml', 'body_cache')
else:
return self.queryset
def get_serializer_class(self, *args, **kwargs):
if self.is_full_case_request():
return serializers.CaseSerializerWithCasebody
else:
return self.serializer_class
def list(self, *args, **kwargs):
jur_value = self.request.query_params.get('jurisdiction', None)
jur_slug = slugify(jur_value)
if not jur_value or jur_slug == jur_value:
return super(CaseViewSet, self).list(*args, **kwargs)
query_string = urllib.parse.urlencode(dict(self.request.query_params, jurisdiction=jur_slug), doseq=True)
new_url = reverse('casemetadata-list') + "?" + query_string
return HttpResponseRedirect(new_url)
def retrieve(self, *args, **kwargs):
# for user's convenience, if user gets /cases/casecitation or /cases/Case Citation (or any non-numeric value)
# we redirect to /cases/?cite=casecitation
id = kwargs[self.lookup_field]
if re.search(r'\D', id):
normalized_cite = Citation.normalize_cite(id)
query_string = urllib.parse.urlencode(dict(self.request.query_params, cite=normalized_cite), doseq=True)
new_url = reverse('casemetadata-list') + "?" + query_string
return HttpResponseRedirect(new_url)
return super(CaseViewSet, self).retrieve(*args, **kwargs)
class CaseDocumentViewSet(BaseDocumentViewSet):
"""The CaseDocument view."""
document = CaseDocument
serializer_class = CaseDocumentSerializer
pagination_class = PageNumberPagination
lookup_field = 'id'
filter_backends = [
FilteringFilterBackend,
IdsFilterBackend,
OrderingFilterBackend,
DefaultOrderingFilterBackend,
SearchFilterBackend,
]
# Define search fields
search_fields = (
'case_body__data__text',
'name',
'jurisdiction__name_long',
'court__name',
)
# Define filter fields
filter_fields = {
'id': {
'field': 'id',
# Note, that we limit the lookups of id field in this example,
# to `range`, `in`, `gt`, `gte`, `lt` and `lte` filters.
'lookups': [
LOOKUP_FILTER_RANGE,
LOOKUP_QUERY_IN,
LOOKUP_QUERY_GT,
LOOKUP_QUERY_GTE,
LOOKUP_QUERY_LT,
LOOKUP_QUERY_LTE,
],
},
'name': 'name',
}
# Define ordering fields
ordering_fields = {
'decision_date': 'decision_date',
'name_abbreviation': 'name_abbreviation.raw',
'id': 'id',
}
# Specify default ordering
ordering = ('decision_date', 'name_abbreviation', 'id',)
def is_full_case_request(self):
return True if self.request.query_params.get('full_case', 'false').lower() == 'true' else False
def get_serializer_class(self, *args, **kwargs):
if self.is_full_case_request():
return serializers.CaseDocumentSerializerWithCasebody
else:
return self.serializer_class
class CaseExportViewSet(BaseViewSet):
serializer_class = serializers.CaseExportSerializer
queryset = models.CaseExport.objects.order_by('pk')
filterset_class = filters.CaseExportFilter
def list(self, request, *args, **kwargs):
# mark list requests to filter out superseded downloads by default
self.request.hide_old_by_default = True
return super().list(request, *args, **kwargs)
def filter_queryset(self, queryset):
queryset = super().filter_queryset(queryset)
# filter out superseded downloads for list requests unless with_old=true
try:
if self.request.hide_old_by_default and self.request.GET.get('with_old') != 'true':
queryset = queryset.exclude_old()
except AttributeError:
pass
return queryset
@action(
methods=['get'],
detail=True,
renderer_classes=(capapi_renderers.PassthroughRenderer,),
permission_classes=(permissions.CanDownloadCaseExport,),
)
def download(self, *args, **kwargs):
instance = self.get_object()
# send file
response = FileResponse(instance.file.open(), content_type='application/zip')
response['Content-Length'] = instance.file.size
response['Content-Disposition'] = 'attachment; filename="%s"' % instance.file_name
# public downloads are cacheable
if instance.public:
add_cache_header(response)
return response
class NgramViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
http_method_names = ['get']
queryset = models.Ngram.objects.order_by('pk').select_related('w1', 'w2', 'w3')
filterset_class = filters.NgramFilter
pagination_class = pagination.SmallCapPagination
renderer_classes = (
renderers.JSONRenderer,
capapi_renderers.NgramBrowsableAPIRenderer,
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# cache translation table between jurisdiction slug and ID
self.jurisdiction_id_to_slug = {v:k for k,v in filters.jurisdiction_slug_to_id.items()}
self.jurisdiction_id_to_slug[None] = 'total'
def list(self, request, *args, **kwargs):
# without specific ngram search, return nothing
q = self.request.GET.get('q', '').strip()
if not q:
return Response({})
# fetch all unique ngrams for query, and paginate
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
# get counts for each ngram
out = OrderedDict()
if page:
# build lookup table
ngrams_by_id = {}
for ngram in page:
out[str(ngram)] = ngrams_by_id[ngram.pk] = {}
# fetch all observations, using same query parameters
observations = models.NgramObservation.objects.filter(ngram__in=page)
obs_filter = filters.NgramObservationFilter(data=request.query_params, queryset=observations, request=request)
if not obs_filter.is_valid():
raise obs_filter.errors
observations = list(obs_filter.qs.values_list('ngram_id', 'jurisdiction_id', 'year', 'instance_count', 'document_count'))
# sort with None values first
observations.sort(key=lambda x: [[y is not None, y] for y in x])
# organize all observations by ngram, then jurisdiction, then year
for ngram_id, jurisdiction_id, year, instance_count, document_count in observations:
jurs = ngrams_by_id[ngram_id]
jurisdiction_slug = self.jurisdiction_id_to_slug[jurisdiction_id]
if jurisdiction_slug not in jurs:
jurs[jurisdiction_slug] = OrderedDict()
years = jurs[jurisdiction_slug]
years[year or "total"] = [instance_count, document_count]
return self.get_paginated_response(out)
|
[
"capdb.models.Court.objects.order_by",
"capdb.models.VolumeMetadata.objects.order_by",
"capdb.models.CaseExport.objects.order_by",
"django.utils.text.slugify",
"capapi.filters.jurisdiction_slug_to_id.items",
"rest_framework.response.Response",
"django.http.HttpResponseRedirect",
"capdb.models.Reporter.objects.order_by",
"capapi.filters.NgramObservationFilter",
"capdb.models.Ngram.objects.order_by",
"capdb.models.Citation.objects.order_by",
"re.search",
"capdb.models.NgramObservation.objects.filter",
"rest_framework.reverse.reverse",
"capdb.models.CaseMetadata.objects.in_scope",
"capdb.models.Jurisdiction.objects.order_by",
"capdb.models.Citation.normalize_cite",
"capapi.middleware.add_cache_header",
"rest_framework.decorators.action",
"collections.OrderedDict"
] |
[((1442, 1492), 'capdb.models.Jurisdiction.objects.order_by', 'models.Jurisdiction.objects.order_by', (['"""name"""', '"""pk"""'], {}), "('name', 'pk')\n", (1478, 1492), False, 'from capdb import models\n'), ((2349, 2387), 'capdb.models.Citation.objects.order_by', 'models.Citation.objects.order_by', (['"""pk"""'], {}), "('pk')\n", (2381, 2387), False, 'from capdb import models\n'), ((6501, 6541), 'capdb.models.CaseExport.objects.order_by', 'models.CaseExport.objects.order_by', (['"""pk"""'], {}), "('pk')\n", (6535, 6541), False, 'from capdb import models\n'), ((7228, 7389), 'rest_framework.decorators.action', 'action', ([], {'methods': "['get']", 'detail': '(True)', 'renderer_classes': '(capapi_renderers.PassthroughRenderer,)', 'permission_classes': '(permissions.CanDownloadCaseExport,)'}), "(methods=['get'], detail=True, renderer_classes=(capapi_renderers.\n PassthroughRenderer,), permission_classes=(permissions.\n CanDownloadCaseExport,))\n", (7234, 7389), False, 'from rest_framework.decorators import action\n'), ((3662, 3680), 'django.utils.text.slugify', 'slugify', (['jur_value'], {}), '(jur_value)\n', (3669, 3680), False, 'from django.utils.text import slugify\n'), ((3997, 4026), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['new_url'], {}), '(new_url)\n', (4017, 4026), False, 'from django.http import HttpResponseRedirect, FileResponse\n'), ((4288, 4308), 're.search', 're.search', (['"""\\\\D"""', 'id'], {}), "('\\\\D', id)\n", (4297, 4308), False, 'import re\n'), ((9003, 9016), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9014, 9016), False, 'from collections import OrderedDict\n'), ((1904, 1955), 'capdb.models.Reporter.objects.order_by', 'models.Reporter.objects.order_by', (['"""full_name"""', '"""pk"""'], {}), "('full_name', 'pk')\n", (1936, 1955), False, 'from capdb import models\n'), ((2133, 2176), 'capdb.models.Court.objects.order_by', 'models.Court.objects.order_by', (['"""name"""', '"""pk"""'], {}), "('name', 'pk')\n", (2162, 2176), False, 'from capdb import models\n'), ((4340, 4367), 'capdb.models.Citation.normalize_cite', 'Citation.normalize_cite', (['id'], {}), '(id)\n', (4363, 4367), False, 'from capdb.models import Citation\n'), ((4576, 4605), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['new_url'], {}), '(new_url)\n', (4596, 4605), False, 'from django.http import HttpResponseRedirect, FileResponse\n'), ((7833, 7859), 'capapi.middleware.add_cache_header', 'add_cache_header', (['response'], {}), '(response)\n', (7849, 7859), False, 'from capapi.middleware import add_cache_header\n'), ((8002, 8037), 'capdb.models.Ngram.objects.order_by', 'models.Ngram.objects.order_by', (['"""pk"""'], {}), "('pk')\n", (8031, 8037), False, 'from capdb import models\n'), ((8771, 8783), 'rest_framework.response.Response', 'Response', (['{}'], {}), '({})\n', (8779, 8783), False, 'from rest_framework.response import Response\n'), ((9285, 9339), 'capdb.models.NgramObservation.objects.filter', 'models.NgramObservation.objects.filter', ([], {'ngram__in': 'page'}), '(ngram__in=page)\n', (9323, 9339), False, 'from capdb import models\n'), ((9365, 9467), 'capapi.filters.NgramObservationFilter', 'filters.NgramObservationFilter', ([], {'data': 'request.query_params', 'queryset': 'observations', 'request': 'request'}), '(data=request.query_params, queryset=\n observations, request=request)\n', (9395, 9467), False, 'from capapi import serializers, filters, permissions, pagination\n'), ((3932, 3960), 'rest_framework.reverse.reverse', 'reverse', (['"""casemetadata-list"""'], {}), "('casemetadata-list')\n", (3939, 3960), False, 'from rest_framework.reverse import reverse\n'), ((8487, 8526), 'capapi.filters.jurisdiction_slug_to_id.items', 'filters.jurisdiction_slug_to_id.items', ([], {}), '()\n', (8524, 8526), False, 'from capapi import serializers, filters, permissions, pagination\n'), ((1622, 1666), 'capdb.models.VolumeMetadata.objects.order_by', 'models.VolumeMetadata.objects.order_by', (['"""pk"""'], {}), "('pk')\n", (1660, 1666), False, 'from capdb import models\n'), ((4507, 4535), 'rest_framework.reverse.reverse', 'reverse', (['"""casemetadata-list"""'], {}), "('casemetadata-list')\n", (4514, 4535), False, 'from rest_framework.reverse import reverse\n'), ((10200, 10213), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10211, 10213), False, 'from collections import OrderedDict\n'), ((2487, 2525), 'capdb.models.CaseMetadata.objects.in_scope', 'models.CaseMetadata.objects.in_scope', ([], {}), '()\n', (2523, 2525), False, 'from capdb import models\n')]
|
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class InvoiceLineSummary(object):
"""
Product items of the invoice
"""
def __init__(self, **kwargs):
"""
Initializes a new InvoiceLineSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param product:
The value to assign to the product property of this InvoiceLineSummary.
:type product: str
:param order_no:
The value to assign to the order_no property of this InvoiceLineSummary.
:type order_no: str
:param part_number:
The value to assign to the part_number property of this InvoiceLineSummary.
:type part_number: str
:param time_start:
The value to assign to the time_start property of this InvoiceLineSummary.
:type time_start: datetime
:param time_end:
The value to assign to the time_end property of this InvoiceLineSummary.
:type time_end: datetime
:param quantity:
The value to assign to the quantity property of this InvoiceLineSummary.
:type quantity: float
:param net_unit_price:
The value to assign to the net_unit_price property of this InvoiceLineSummary.
:type net_unit_price: float
:param total_price:
The value to assign to the total_price property of this InvoiceLineSummary.
:type total_price: float
:param currency:
The value to assign to the currency property of this InvoiceLineSummary.
:type currency: oci.osp_gateway.models.Currency
"""
self.swagger_types = {
'product': 'str',
'order_no': 'str',
'part_number': 'str',
'time_start': 'datetime',
'time_end': 'datetime',
'quantity': 'float',
'net_unit_price': 'float',
'total_price': 'float',
'currency': 'Currency'
}
self.attribute_map = {
'product': 'product',
'order_no': 'orderNo',
'part_number': 'partNumber',
'time_start': 'timeStart',
'time_end': 'timeEnd',
'quantity': 'quantity',
'net_unit_price': 'netUnitPrice',
'total_price': 'totalPrice',
'currency': 'currency'
}
self._product = None
self._order_no = None
self._part_number = None
self._time_start = None
self._time_end = None
self._quantity = None
self._net_unit_price = None
self._total_price = None
self._currency = None
@property
def product(self):
"""
**[Required]** Gets the product of this InvoiceLineSummary.
Product of the item
:return: The product of this InvoiceLineSummary.
:rtype: str
"""
return self._product
@product.setter
def product(self, product):
"""
Sets the product of this InvoiceLineSummary.
Product of the item
:param product: The product of this InvoiceLineSummary.
:type: str
"""
self._product = product
@property
def order_no(self):
"""
Gets the order_no of this InvoiceLineSummary.
Product of the item
:return: The order_no of this InvoiceLineSummary.
:rtype: str
"""
return self._order_no
@order_no.setter
def order_no(self, order_no):
"""
Sets the order_no of this InvoiceLineSummary.
Product of the item
:param order_no: The order_no of this InvoiceLineSummary.
:type: str
"""
self._order_no = order_no
@property
def part_number(self):
"""
Gets the part_number of this InvoiceLineSummary.
Part number
:return: The part_number of this InvoiceLineSummary.
:rtype: str
"""
return self._part_number
@part_number.setter
def part_number(self, part_number):
"""
Sets the part_number of this InvoiceLineSummary.
Part number
:param part_number: The part_number of this InvoiceLineSummary.
:type: str
"""
self._part_number = part_number
@property
def time_start(self):
"""
Gets the time_start of this InvoiceLineSummary.
Start date
:return: The time_start of this InvoiceLineSummary.
:rtype: datetime
"""
return self._time_start
@time_start.setter
def time_start(self, time_start):
"""
Sets the time_start of this InvoiceLineSummary.
Start date
:param time_start: The time_start of this InvoiceLineSummary.
:type: datetime
"""
self._time_start = time_start
@property
def time_end(self):
"""
Gets the time_end of this InvoiceLineSummary.
End date
:return: The time_end of this InvoiceLineSummary.
:rtype: datetime
"""
return self._time_end
@time_end.setter
def time_end(self, time_end):
"""
Sets the time_end of this InvoiceLineSummary.
End date
:param time_end: The time_end of this InvoiceLineSummary.
:type: datetime
"""
self._time_end = time_end
@property
def quantity(self):
"""
Gets the quantity of this InvoiceLineSummary.
Quantity of the ordered product
:return: The quantity of this InvoiceLineSummary.
:rtype: float
"""
return self._quantity
@quantity.setter
def quantity(self, quantity):
"""
Sets the quantity of this InvoiceLineSummary.
Quantity of the ordered product
:param quantity: The quantity of this InvoiceLineSummary.
:type: float
"""
self._quantity = quantity
@property
def net_unit_price(self):
"""
Gets the net_unit_price of this InvoiceLineSummary.
Unit price of the ordered product
:return: The net_unit_price of this InvoiceLineSummary.
:rtype: float
"""
return self._net_unit_price
@net_unit_price.setter
def net_unit_price(self, net_unit_price):
"""
Sets the net_unit_price of this InvoiceLineSummary.
Unit price of the ordered product
:param net_unit_price: The net_unit_price of this InvoiceLineSummary.
:type: float
"""
self._net_unit_price = net_unit_price
@property
def total_price(self):
"""
Gets the total_price of this InvoiceLineSummary.
Total price of the ordered product (Net unit price x quantity)
:return: The total_price of this InvoiceLineSummary.
:rtype: float
"""
return self._total_price
@total_price.setter
def total_price(self, total_price):
"""
Sets the total_price of this InvoiceLineSummary.
Total price of the ordered product (Net unit price x quantity)
:param total_price: The total_price of this InvoiceLineSummary.
:type: float
"""
self._total_price = total_price
@property
def currency(self):
"""
Gets the currency of this InvoiceLineSummary.
:return: The currency of this InvoiceLineSummary.
:rtype: oci.osp_gateway.models.Currency
"""
return self._currency
@currency.setter
def currency(self, currency):
"""
Sets the currency of this InvoiceLineSummary.
:param currency: The currency of this InvoiceLineSummary.
:type: oci.osp_gateway.models.Currency
"""
self._currency = currency
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
[
"oci.util.formatted_flat_dict"
] |
[((8398, 8423), 'oci.util.formatted_flat_dict', 'formatted_flat_dict', (['self'], {}), '(self)\n', (8417, 8423), False, 'from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel\n')]
|
from mcpi.minecraft import Minecraft
mc = Minecraft.create()
answer = input("Create a crater? Y/N ")
if answer == "Y":
pos = mc.player.getPos()
mc.setBlocks(pos.x + 1, pos.y + 1, pos.z + 1, pos.x - 1, pos.y - 1, pos.z - 1, 0)
mc.postToChat("Boom!")
|
[
"mcpi.minecraft.Minecraft.create"
] |
[((42, 60), 'mcpi.minecraft.Minecraft.create', 'Minecraft.create', ([], {}), '()\n', (58, 60), False, 'from mcpi.minecraft import Minecraft\n')]
|
import time
import logging
from typing import List
import json
from spaceone.inventory.connector.aws_sqs_connector.schema.data import QueData, RedrivePolicy
from spaceone.inventory.connector.aws_sqs_connector.schema.resource import SQSResponse, QueResource
from spaceone.inventory.connector.aws_sqs_connector.schema.service_type import CLOUD_SERVICE_TYPES
from spaceone.inventory.libs.connector import SchematicAWSConnector
_LOGGER = logging.getLogger(__name__)
class SQSConnector(SchematicAWSConnector):
service_name = 'sqs'
def get_resources(self) -> List[SQSResponse]:
print("** SQS START **")
resources = []
start_time = time.time()
collect_resource = {
'request_method': self.request_data,
'resource': QueResource,
'response_schema': SQSResponse
}
# init cloud service type
for cst in CLOUD_SERVICE_TYPES:
resources.append(cst)
# merge data
for region_name in self.region_names:
self.reset_region(region_name)
resources.extend(self.collect_data_by_region(self.service_name, region_name, collect_resource))
print(f' SQS Finished {time.time() - start_time} Seconds')
return resources
def request_data(self, region_name) -> List[QueData]:
resource = self.session.resource('sqs')
for que in resource.queues.all():
attr = que.attributes
if 'RedrivePolicy' in attr:
attr['RedrivePolicy'] = RedrivePolicy(json.loads(attr.get('RedrivePolicy')), strict=False)
result = QueData(attr)
result.region_name = region_name
result.url = que.url
result.account_id = self.account_id
yield result, result.name
|
[
"spaceone.inventory.connector.aws_sqs_connector.schema.data.QueData",
"logging.getLogger",
"time.time"
] |
[((436, 463), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (453, 463), False, 'import logging\n'), ((662, 673), 'time.time', 'time.time', ([], {}), '()\n', (671, 673), False, 'import time\n'), ((1617, 1630), 'spaceone.inventory.connector.aws_sqs_connector.schema.data.QueData', 'QueData', (['attr'], {}), '(attr)\n', (1624, 1630), False, 'from spaceone.inventory.connector.aws_sqs_connector.schema.data import QueData, RedrivePolicy\n'), ((1203, 1214), 'time.time', 'time.time', ([], {}), '()\n', (1212, 1214), False, 'import time\n')]
|
from __future__ import unicode_literals
import importlib
import inspect
import json
from django.conf import settings
from django.contrib import messages
from django.db.models.base import ModelBase
from django.http.response import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils.text import slugify
from django.utils.timezone import now
from django.utils.translation import ugettext
from advanced_reports.backoffice.base import BackOfficeView
from .models import SavedQuery
from .builder import QueryBuilder
class QueryBuilderView(BackOfficeView):
template = 'backoffice/views/querybuilder.html'
def __init__(self):
qb_models = self.get_models_from_settings()
self.qb = QueryBuilder(models=qb_models)
def get_models_from_settings(self):
model_paths = getattr(settings, 'QUERYBUILDER_MODELS', [])
qb_models = []
for model_path in model_paths:
module_path, model_name = model_path.rsplit('.', 1)
module = importlib.import_module(module_path)
if model_name == '*':
model_classes = inspect.getmembers(module, lambda c: type(c) == ModelBase)
for model_tuple in model_classes:
qb_models.append(getattr(module, model_tuple[0]))
else:
model = getattr(module, model_name)
qb_models.append(model)
return qb_models
def get_models(self, request):
return self.qb.get_models()
def execute_query(self, request):
id = request.action_params.get('id')
query = request.action_params.get('query')
if id is not None:
sq = SavedQuery.objects.get(pk=int(id))
sq.last_run = now()
sq.save()
return self.qb.run(query)
def save_query(self, request):
query = request.action_params.get('query')
name = request.action_params.get('name')
existing_id = request.action_params.get('id')
if existing_id is not None:
sq = SavedQuery.objects.get(pk=existing_id)
sq.name = name
sq.query = json.dumps(query, indent=2)
sq.save()
else:
sq = SavedQuery.objects.create(
name=name,
query=json.dumps(query, indent=2),
created_by=request.user
)
messages.success(request, ugettext('Successfully saved query "%s"') % name)
return sq.serialize()
def get_saved_queries(self, request):
queries = SavedQuery.objects.filter(created_by=request.user).order_by('-last_run')
return {'queries': [query.serialize() for query in queries]}
def delete_query(self, request):
id = request.action_params.get('id')
sq = SavedQuery.objects.get(pk=int(id))
sq.delete()
messages.success(request, ugettext('Successfully deleted query "%s"') % sq.name)
def export_to_excel(self, request):
id = int(request.GET.get('id'))
sq = get_object_or_404(SavedQuery, pk=int(id))
query = json.loads(sq.query)
result = self.qb.run(query, stream=True)
import xlsxwriter
import StringIO
output = StringIO.StringIO()
wb = xlsxwriter.Workbook(filename=output, options=dict(in_memory=True))
ws = wb.add_worksheet()
for c, value in enumerate(query['values']):
ws.write(0, c, value.get('label', value['expression']))
ws.set_column(0, len(query['values'])-1, width=20)
for r, obj in enumerate(result['objects']):
for c, value in enumerate(query['values']):
ws.write(r + 1, c, obj[value['expression']])
wb.close()
output.seek(0)
filename = '%s.xlsx' % slugify(sq.name)
response = HttpResponse()
response['Content-Disposition'] = 'attachment; filename="%s"' % filename
response['Content-Type'] = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
response.write(output.read())
return response
|
[
"json.loads",
"importlib.import_module",
"django.utils.timezone.now",
"json.dumps",
"django.utils.text.slugify",
"django.utils.translation.ugettext",
"StringIO.StringIO",
"django.http.response.HttpResponse"
] |
[((3088, 3108), 'json.loads', 'json.loads', (['sq.query'], {}), '(sq.query)\n', (3098, 3108), False, 'import json\n'), ((3226, 3245), 'StringIO.StringIO', 'StringIO.StringIO', ([], {}), '()\n', (3243, 3245), False, 'import StringIO\n'), ((3820, 3834), 'django.http.response.HttpResponse', 'HttpResponse', ([], {}), '()\n', (3832, 3834), False, 'from django.http.response import HttpResponse\n'), ((1013, 1049), 'importlib.import_module', 'importlib.import_module', (['module_path'], {}), '(module_path)\n', (1036, 1049), False, 'import importlib\n'), ((1743, 1748), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (1746, 1748), False, 'from django.utils.timezone import now\n'), ((2139, 2166), 'json.dumps', 'json.dumps', (['query'], {'indent': '(2)'}), '(query, indent=2)\n', (2149, 2166), False, 'import json\n'), ((3784, 3800), 'django.utils.text.slugify', 'slugify', (['sq.name'], {}), '(sq.name)\n', (3791, 3800), False, 'from django.utils.text import slugify\n'), ((2413, 2454), 'django.utils.translation.ugettext', 'ugettext', (['"""Successfully saved query "%s\\""""'], {}), '(\'Successfully saved query "%s"\')\n', (2421, 2454), False, 'from django.utils.translation import ugettext\n'), ((2881, 2924), 'django.utils.translation.ugettext', 'ugettext', (['"""Successfully deleted query "%s\\""""'], {}), '(\'Successfully deleted query "%s"\')\n', (2889, 2924), False, 'from django.utils.translation import ugettext\n'), ((2296, 2323), 'json.dumps', 'json.dumps', (['query'], {'indent': '(2)'}), '(query, indent=2)\n', (2306, 2323), False, 'import json\n')]
|
# Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
from typing import Dict
from jinja2 import Template
from tempfile import NamedTemporaryFile, TemporaryDirectory
from .base import RedHatFamily, RedHatFamilyPrimitives, REPO_CONFIGURATION_TEMPLATE
class CentOs7Primitives(RedHatFamilyPrimitives):
"""
Represent locations of needed primitives
from the CentOS 7 distributions.
"""
def __new__(cls) -> Dict[str, str]:
"""
:return: None
"""
return super(CentOs7Primitives, cls).__new__(cls, rpm_gpg_key='<KEY>')
class CentOs7(RedHatFamily):
"""
Represents a CentOS 7 distribution.
"""
__abstract__: bool = False
def __init__(self, source_path: str, architecture: str = 'x86_64') -> None:
"""
:param source_path: String local path or remote uri
:param architecture: String targeted architecture
:returns: None
"""
super(CentOs7, self).__init__(
source_path,
'centos',
7,
0,
architecture
)
self._primitives: CentOs7Primitives = CentOs7Primitives()
def _update_version(self) -> None:
"""
:return: None
"""
template: Template = Template(REPO_CONFIGURATION_TEMPLATE)
if self.is_remote:
context: Dict[str, str] = {
'base_url': self._source_path,
'gpg_key': os.path.join(self._source_path, self._primitives['rpm_gpg_key'])
}
else:
context: Dict[str, str] = {
'base_url': 'file://{}'.format(self._source_path),
'gpg_key': 'file://{}'.format(os.path.join(self._source_path, self._primitives['rpm_gpg_key']))
}
rendered: str = template.render(context)
with TemporaryDirectory() as repo_directory:
with NamedTemporaryFile() as repo_configuration:
repo_configuration.write(rendered.encode())
repo_configuration.flush()
output: bytes = subprocess.check_output([
'yum',
'--disableplugin=*',
'--installroot', repo_directory,
'-c', repo_configuration.name,
'--disablerepo=*',
'--enablerepo=temp',
'info', 'centos-release'
])
if b'Release' in output:
for line in output.split(b'\n'):
if line.startswith(b'Version'):
major: int = int(line.split(b'Version : ')[1])
self.major: int = major
elif line.startswith(b'Release'):
minor: int = int(line.split(b'Release : ')[1].split(b'.')[0])
self.minor: int = minor
break
else:
raise RuntimeError('Could not update OS version')
|
[
"jinja2.Template",
"tempfile.NamedTemporaryFile",
"tempfile.TemporaryDirectory",
"subprocess.check_output",
"os.path.join"
] |
[((1820, 1857), 'jinja2.Template', 'Template', (['REPO_CONFIGURATION_TEMPLATE'], {}), '(REPO_CONFIGURATION_TEMPLATE)\n', (1828, 1857), False, 'from jinja2 import Template\n'), ((2390, 2410), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (2408, 2410), False, 'from tempfile import NamedTemporaryFile, TemporaryDirectory\n'), ((2000, 2064), 'os.path.join', 'os.path.join', (['self._source_path', "self._primitives['rpm_gpg_key']"], {}), "(self._source_path, self._primitives['rpm_gpg_key'])\n", (2012, 2064), False, 'import os\n'), ((2447, 2467), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {}), '()\n', (2465, 2467), False, 'from tempfile import NamedTemporaryFile, TemporaryDirectory\n'), ((2627, 2818), 'subprocess.check_output', 'subprocess.check_output', (["['yum', '--disableplugin=*', '--installroot', repo_directory, '-c',\n repo_configuration.name, '--disablerepo=*', '--enablerepo=temp', 'info',\n 'centos-release']"], {}), "(['yum', '--disableplugin=*', '--installroot',\n repo_directory, '-c', repo_configuration.name, '--disablerepo=*',\n '--enablerepo=temp', 'info', 'centos-release'])\n", (2650, 2818), False, 'import subprocess\n'), ((2246, 2310), 'os.path.join', 'os.path.join', (['self._source_path', "self._primitives['rpm_gpg_key']"], {}), "(self._source_path, self._primitives['rpm_gpg_key'])\n", (2258, 2310), False, 'import os\n')]
|
import glob, os, random
import keras
import numpy as np
from keras import backend as K
from keras.optimizers import Adam
from keras.metrics import categorical_crossentropy
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
from keras.models import Model
from keras.applications import imagenet_utils
from keras.layers import Dense, GlobalAveragePooling2D, Dropout
from keras.applications import MobileNet
from keras.applications.mobilenet import preprocess_input
import matplotlib.pyplot as plt
# Adjust these
NUM_CLASSES = 6
NAMES = ["cardboard", "glass", "metal", "paper", "plastic", "trash"]
IMAGE_WIDTH = 224
IMAGE_HEIGHT = 224
TRAINING_DIR = 'dataset/train'
VALIDATION_DIR = 'dataset/test'
img_list = glob.glob(os.path.join(TRAINING_DIR, '*/*.jpg'))
for i, img_path in enumerate(random.sample(img_list, 6)):
img = image.load_img(img_path, target_size=(224, 224))
img = image.img_to_array(img, dtype=np.uint8)
plt.subplot(2, 3, i+1)
plt.imshow(img.squeeze())
base_model=keras.applications.mobilenet.MobileNet(input_shape=(IMAGE_WIDTH, IMAGE_HEIGHT, 3), alpha = 0.75,depth_multiplier = 1, dropout = 0.001,include_top = False, weights = "imagenet", classes = 1000)
# Additional Layers
x=base_model.output
x=GlobalAveragePooling2D()(x)
x=Dense(128,activation='relu')(x) #we add dense layers so that the model can learn more complex functions and classify for better results.
x=Dropout(0.7)(x)
x=Dense(64, activation='relu')(x) #dense layer 3
preds=Dense(NUM_CLASSES, activation='softmax')(x) #final layer with softmax activation
model=Model(inputs=base_model.input,outputs=preds)
for i,layer in enumerate(model.layers):
print(i,layer.name)
# or if we want to set the first 20 layers of the network to be non-trainable
for layer in model.layers[:86]:
layer.trainable=False
for layer in model.layers[86:]:
layer.trainable=True
train_datagen=ImageDataGenerator(preprocessing_function=preprocess_input, validation_split=0.1) #included in our dependencies
# train_datagen = ImageDataGenerator( rescale = 1./255,
# rotation_range=45,
# width_shift_range=0.1,
# height_shift_range=0.1,
# shear_range=0.1,
# zoom_range=[0.9, 1.2],
# horizontal_flip=True,
# vertical_flip=False,
# fill_mode='constant',
# brightness_range=[0.7, 1.3])
train_generator=train_datagen.flow_from_directory(TRAINING_DIR,
target_size=(IMAGE_WIDTH,IMAGE_HEIGHT),
batch_size=16,
class_mode='categorical', subset='training',
seed=0
# save_to_dir='dataset/gen',
# save_prefix='gen-',
# save_format='jpeg'
)
validation_datagen = ImageDataGenerator(preprocessing_function=preprocess_input, validation_split=0.1)
# validation_datagen = ImageDataGenerator(rescale = 1./255,
# rotation_range=45,
# zoom_range=[0.9, 1.2],
# shear_range=0.1,)
validation_generator = validation_datagen.flow_from_directory(TRAINING_DIR,
target_size=(IMAGE_WIDTH,IMAGE_HEIGHT),
subset='validation', seed=0,
batch_size=16,
class_mode='categorical'
)
# model.summary()
model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])
# Adam optimizer, loss function will be categorical cross entropy, evaluation metric will be accuracy
step_size_train = (train_generator.n//train_generator.batch_size)
validation_steps = (validation_generator.n//validation_generator.batch_size)
model.fit_generator(generator=train_generator,
steps_per_epoch=step_size_train,
epochs=10,
#workers=4,
validation_data = validation_generator,
validation_steps = validation_steps,
verbose = 1)
model.save('garbageclassifier.h5')
#model.load_weights('logoclassifier.h5')
# def prepare_test_image(file):
# img_path = 'dataset/test/'
# img = image.load_img(img_path + file, target_size=(IMAGE_WIDTH, IMAGE_HEIGHT))
# img_array = image.img_to_array(img)
# img_array_expanded_dims = np.expand_dims(img_array, axis=0)
# return keras.applications.mobilenet.preprocess_input(img_array_expanded_dims)
# print("\n=========")
# preprocessed_image = prepare_test_image('01-dycodex/x0.jpg')
# predictions = model.predict(preprocessed_image)
# print("Dycode {0:.2f}".format(predictions[0][0]*100))
# print("DycodeX {0:.2f}".format(predictions[0][1]*100))
# predIdx = np.argmax(predictions, axis=1)[0]
# print("Prediction class: {:d} - {}".format(predIdx, NAMES[predIdx]))
# print("=========")
# preprocessed_image = prepare_test_image('02-adidas/a0.jpg')
# predictions = model.predict(preprocessed_image)
# print("Adidas {0:.2f}".format(predictions[0][2]*100))
# print("DycodeX {0:.2f}".format(predictions[0][1]*100))
# predIdx = np.argmax(predictions, axis=1)[0]
# print("Prediction class: {:d} - {}".format(predIdx, NAMES[predIdx]))
# print("=========")
# preprocessed_image = prepare_test_image('00-dycode/d0.jpg')
# predictions = model.predict(preprocessed_image)
# print("Adidas {0:.2f}".format(predictions[0][2]*100))
# print("Dycode {0:.2f}".format(predictions[0][0]*100))
# predIdx = np.argmax(predictions, axis=1)[0]
# print("Prediction class: {:d} - {}".format(predIdx, NAMES[predIdx]))
|
[
"keras.preprocessing.image.ImageDataGenerator",
"matplotlib.pyplot.subplot",
"random.sample",
"keras.layers.Dropout",
"keras.applications.mobilenet.MobileNet",
"keras.models.Model",
"keras.layers.GlobalAveragePooling2D",
"keras.preprocessing.image.img_to_array",
"keras.preprocessing.image.load_img",
"keras.layers.Dense",
"os.path.join"
] |
[((1038, 1228), 'keras.applications.mobilenet.MobileNet', 'keras.applications.mobilenet.MobileNet', ([], {'input_shape': '(IMAGE_WIDTH, IMAGE_HEIGHT, 3)', 'alpha': '(0.75)', 'depth_multiplier': '(1)', 'dropout': '(0.001)', 'include_top': '(False)', 'weights': '"""imagenet"""', 'classes': '(1000)'}), "(input_shape=(IMAGE_WIDTH,\n IMAGE_HEIGHT, 3), alpha=0.75, depth_multiplier=1, dropout=0.001,\n include_top=False, weights='imagenet', classes=1000)\n", (1076, 1228), False, 'import keras\n'), ((1603, 1648), 'keras.models.Model', 'Model', ([], {'inputs': 'base_model.input', 'outputs': 'preds'}), '(inputs=base_model.input, outputs=preds)\n', (1608, 1648), False, 'from keras.models import Model\n'), ((1922, 2007), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'preprocessing_function': 'preprocess_input', 'validation_split': '(0.1)'}), '(preprocessing_function=preprocess_input,\n validation_split=0.1)\n', (1940, 2007), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((3294, 3379), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'preprocessing_function': 'preprocess_input', 'validation_split': '(0.1)'}), '(preprocessing_function=preprocess_input,\n validation_split=0.1)\n', (3312, 3379), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((762, 799), 'os.path.join', 'os.path.join', (['TRAINING_DIR', '"""*/*.jpg"""'], {}), "(TRAINING_DIR, '*/*.jpg')\n", (774, 799), False, 'import glob, os, random\n'), ((830, 856), 'random.sample', 'random.sample', (['img_list', '(6)'], {}), '(img_list, 6)\n', (843, 856), False, 'import glob, os, random\n'), ((869, 917), 'keras.preprocessing.image.load_img', 'image.load_img', (['img_path'], {'target_size': '(224, 224)'}), '(img_path, target_size=(224, 224))\n', (883, 917), False, 'from keras.preprocessing import image\n'), ((928, 967), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (946, 967), False, 'from keras.preprocessing import image\n'), ((973, 997), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(i + 1)'], {}), '(2, 3, i + 1)\n', (984, 997), True, 'import matplotlib.pyplot as plt\n'), ((1274, 1298), 'keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (1296, 1298), False, 'from keras.layers import Dense, GlobalAveragePooling2D, Dropout\n'), ((1304, 1333), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (1309, 1333), False, 'from keras.layers import Dense, GlobalAveragePooling2D, Dropout\n'), ((1443, 1455), 'keras.layers.Dropout', 'Dropout', (['(0.7)'], {}), '(0.7)\n', (1450, 1455), False, 'from keras.layers import Dense, GlobalAveragePooling2D, Dropout\n'), ((1461, 1489), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (1466, 1489), False, 'from keras.layers import Dense, GlobalAveragePooling2D, Dropout\n'), ((1514, 1554), 'keras.layers.Dense', 'Dense', (['NUM_CLASSES'], {'activation': '"""softmax"""'}), "(NUM_CLASSES, activation='softmax')\n", (1519, 1554), False, 'from keras.layers import Dense, GlobalAveragePooling2D, Dropout\n')]
|
import operator
from functools import reduce
MSG_LEN = 27
IDLE = bytes.fromhex("436d640001001200010404000a000000808080802020202000550f")
CMD_PREFIX = IDLE[:-11]
def _c(cmd: str) -> bytes:
data = bytes.fromhex(cmd)
assert len(data) == MSG_LEN - len(CMD_PREFIX)
return CMD_PREFIX + data
def checksum(msg: bytes) -> bytes:
msg = msg[: MSG_LEN - 2]
b9 = reduce(operator.xor, msg, 0xb9) & 0xFF
b10 = reduce(operator.add, msg, b9) & 0xFF
return bytes((b9, b10))
class SymaMSGs:
INIT = b"\x00"
IDLE = _c("808080802020202000550f")
POWER_TOGGLE = _c("808080802020202010652f")
LAND = _c("8080808020202020085d1f")
LIFT = _c("808080802020206000958f")
CALIBRATE = _c("808080802020202020754f")
|
[
"functools.reduce"
] |
[((375, 405), 'functools.reduce', 'reduce', (['operator.xor', 'msg', '(185)'], {}), '(operator.xor, msg, 185)\n', (381, 405), False, 'from functools import reduce\n'), ((424, 453), 'functools.reduce', 'reduce', (['operator.add', 'msg', 'b9'], {}), '(operator.add, msg, b9)\n', (430, 453), False, 'from functools import reduce\n')]
|
import json
import time
def get_key(store, key):
while True:
res = store.get(key+"_output")
if res is None:
time.sleep(0.5)
else:
result = json.loads(res.decode('utf-8'))
store.delete(key)
store.delete(key+"_output")
break
return result
|
[
"time.sleep"
] |
[((142, 157), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (152, 157), False, 'import time\n')]
|
import json
import os
import uuid
import mongoengine
from flask import Blueprint, jsonify, request
from werkzeug.utils import secure_filename
from tess.config import UPLOAD_FILES
from tess.server.models import SummarizationDocument
summarization_bp = Blueprint('summarization_api', __name__)
@summarization_bp.route('', methods=['GET', 'POST'])
def summarize():
if request.method == 'POST':
body = request.form.to_dict()
if 'file' not in request.files:
return jsonify({'error': 400, 'message': 'Must supply a file'}), 400
if not allowed_file(request.files['file'].filename):
return jsonify({'error': 400, 'message': 'The file must be .txt'}), 400
if 'algorithm' not in body:
return jsonify({'error': 400, 'message': 'Algorithm should be specified'}), 400
file = request.files['file']
filename = secure_filename(file.filename)
renamed_file = f"{uuid.uuid4().hex}.{get_extension(filename)}"
file.save(os.path.join(UPLOAD_FILES, renamed_file))
doc = SummarizationDocument(document_path=os.path.join(UPLOAD_FILES, renamed_file), algorithm=body['algorithm'])
if 'tokenized' in body and body['tokenized']:
doc.tokenized = True
with open(os.path.join(UPLOAD_FILES, renamed_file)) as f:
doc.processed_text = f.read().strip()
if 'tokenizer' in body:
doc.tokenizer = body['tokenizer']
if 'target' in request.files:
target_summary = request.files['target'].stream.read().decode()
doc.target_summary = target_summary
doc.status = 'NEW'
doc.save()
return jsonify(json.loads(doc.to_json())), 201
if request.method == 'GET':
documents = []
for doc in SummarizationDocument.objects():
documents.append({'id': str(doc.id),
'status': doc.status,
'algorithm': doc.algorithm,
'created_at': doc.created_at
})
return jsonify(documents)
@summarization_bp.route('/<doc_id>', methods=['GET'])
def get_summary(doc_id):
try:
document = SummarizationDocument.objects.get(pk=doc_id)
return jsonify(json.loads(document.to_json()))
except mongoengine.errors.DoesNotExist as _:
return jsonify({'error': 400, 'message': 'ID does not exist'})
except mongoengine.errors.ValidationError as _:
return jsonify({'error': 400, 'message': 'ID is invalid format'})
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() == 'txt'
def get_extension(filename):
return filename.rsplit('.', 1)[1].lower()
|
[
"tess.server.models.SummarizationDocument.objects",
"uuid.uuid4",
"flask.Blueprint",
"tess.server.models.SummarizationDocument.objects.get",
"werkzeug.utils.secure_filename",
"flask.jsonify",
"flask.request.form.to_dict",
"os.path.join"
] |
[((254, 294), 'flask.Blueprint', 'Blueprint', (['"""summarization_api"""', '__name__'], {}), "('summarization_api', __name__)\n", (263, 294), False, 'from flask import Blueprint, jsonify, request\n'), ((415, 437), 'flask.request.form.to_dict', 'request.form.to_dict', ([], {}), '()\n', (435, 437), False, 'from flask import Blueprint, jsonify, request\n'), ((889, 919), 'werkzeug.utils.secure_filename', 'secure_filename', (['file.filename'], {}), '(file.filename)\n', (904, 919), False, 'from werkzeug.utils import secure_filename\n'), ((1806, 1837), 'tess.server.models.SummarizationDocument.objects', 'SummarizationDocument.objects', ([], {}), '()\n', (1835, 1837), False, 'from tess.server.models import SummarizationDocument\n'), ((2104, 2122), 'flask.jsonify', 'jsonify', (['documents'], {}), '(documents)\n', (2111, 2122), False, 'from flask import Blueprint, jsonify, request\n'), ((2232, 2276), 'tess.server.models.SummarizationDocument.objects.get', 'SummarizationDocument.objects.get', ([], {'pk': 'doc_id'}), '(pk=doc_id)\n', (2265, 2276), False, 'from tess.server.models import SummarizationDocument\n'), ((1009, 1049), 'os.path.join', 'os.path.join', (['UPLOAD_FILES', 'renamed_file'], {}), '(UPLOAD_FILES, renamed_file)\n', (1021, 1049), False, 'import os\n'), ((2396, 2451), 'flask.jsonify', 'jsonify', (["{'error': 400, 'message': 'ID does not exist'}"], {}), "({'error': 400, 'message': 'ID does not exist'})\n", (2403, 2451), False, 'from flask import Blueprint, jsonify, request\n'), ((2519, 2577), 'flask.jsonify', 'jsonify', (["{'error': 400, 'message': 'ID is invalid format'}"], {}), "({'error': 400, 'message': 'ID is invalid format'})\n", (2526, 2577), False, 'from flask import Blueprint, jsonify, request\n'), ((497, 553), 'flask.jsonify', 'jsonify', (["{'error': 400, 'message': 'Must supply a file'}"], {}), "({'error': 400, 'message': 'Must supply a file'})\n", (504, 553), False, 'from flask import Blueprint, jsonify, request\n'), ((639, 698), 'flask.jsonify', 'jsonify', (["{'error': 400, 'message': 'The file must be .txt'}"], {}), "({'error': 400, 'message': 'The file must be .txt'})\n", (646, 698), False, 'from flask import Blueprint, jsonify, request\n'), ((759, 826), 'flask.jsonify', 'jsonify', (["{'error': 400, 'message': 'Algorithm should be specified'}"], {}), "({'error': 400, 'message': 'Algorithm should be specified'})\n", (766, 826), False, 'from flask import Blueprint, jsonify, request\n'), ((1102, 1142), 'os.path.join', 'os.path.join', (['UPLOAD_FILES', 'renamed_file'], {}), '(UPLOAD_FILES, renamed_file)\n', (1114, 1142), False, 'import os\n'), ((946, 958), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (956, 958), False, 'import uuid\n'), ((1283, 1323), 'os.path.join', 'os.path.join', (['UPLOAD_FILES', 'renamed_file'], {}), '(UPLOAD_FILES, renamed_file)\n', (1295, 1323), False, 'import os\n')]
|
# Source: https://medium.com/@datamonsters/text-preprocessing-in-python-steps-tools-and-examples-bf025f872908
# reading level of posts coming out as negative for most posts because of the way they are written
import re
import pandas as pd
import numpy as np
import time
import nltk
nltk.download('wordnet') #TODO: should add this to the make file?
# for sentiment analysis
from textblob import TextBlob
# for readability score
import textstat
DEBUG = True
TIME_DEBUG = True
INCLUDE_SENTIMENT_FEATURE = False
INCLUDE_READABILITY_FEATURE = False
PATH_TO_DATA = '../../data/'
DATA_FILE_NAME = 'rspct.tsv'
DATA = PATH_TO_DATA+DATA_FILE_NAME
OUTPUT_FILE_NAME = 'rspct_preprocessed_sentiment_readability_stemmed.tsv'
def lowercase(df):
if DEBUG:
print('Lowercasing the dataset')
df=df.apply(lambda x: x.astype(str).str.lower())
return df
def remove_nums(df):
if DEBUG:
print('Removing numbers from all attributes except id')
for col in df.columns:
if col not in ['id']:
df[col] = df[col].str.replace('\d+', '')
return df
def remove_tags_puncts_whites(text):
text = text.strip()
# to remove > tags. TODO: there might be other such tags that need to be removed
p1 = re.compile(r'>|&|<')
text = p1.sub(' ', text)
# to remove tags inside {}, [] and HTML tags
p2 = re.compile(r'[<{\[].*?[>}\]]')
text = p2.sub(' ', text)
# remove single quotes only if they preceded or follow a word
text1 = re.sub(r"((?P<a>\s)'(?P<x>\w))|((?P<y>\w)'(?P<b>\s))|((?P<c>\s)'(?P<d>\s))", r'\g<a>\g<x>\g<y>\g<b>\g<c>\g<d>', text)
text = re.sub(r"((?P<a>\s)'(?P<x>\w))|((?P<y>\w)'(?P<b>\s))|((?P<c>\s)'(?P<d>\s))", r'\g<a>\g<x>\g<y>\g<b>\g<c>\g<d>', text1)
text = text.strip("'")
# to remove punctuations (after removing tags etc.)
#puncts_to_remove = """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
puncts_to_remove = """!"#$%&()*+,-./:;<=>?@[\]^_`{|}~"""
text = text.translate({ord(c): ' ' for c in puncts_to_remove})
# print('ret:', text.strip())
return text.strip()
def remove_tags_puncts_whitespaces(data):
"""
Removes punctuations, HTML tags, and other tags inside {} or [] brackets and whitespaces
"""
if DEBUG:
print('Removing punctuations, tags and whitespaces')
for col in data.columns:
if col not in ['id', 'subreddit', 'sentiment_val', 'readability_score']:
data[col] = data[col].apply(remove_tags_puncts_whites)
return data
def stem_text(text):
stemmer = nltk.stem.PorterStemmer()
tokenized_text = nltk.tokenize.word_tokenize(text)
stemmed_words = [stemmer.stem(word) for word in tokenized_text]
#print('stemmed=',' '.join(stemmed_words))
return ' '.join(stemmed_words)
def stem_data(data):
"""
Replace all words with their stem words
"""
if DEBUG:
print ('Stemming the data')
for col in data.columns:
if col not in ['id', 'subreddit', 'sentiment_val', 'readability_score']:
data[col] = data[col].apply(stem_text)
return data
def lemmatize_text(text):
lemmatizer = nltk.stem.WordNetLemmatizer()
tokenized_text=nltk.tokenize.word_tokenize(text)
lemmatized_words = [lemmatizer.lemmatize(word) for word in tokenized_text]
#print('lemmatized:',' '.join(lemmatized_words))
return ' '.join(lemmatized_words)
def lemmatize_data(data):
if DEBUG:
print ('Lemmatizing the data')
for col in data.columns:
if col not in ['id', 'subreddit', 'sentiment_val', 'readability_score']:
data[col] = data[col].apply(lemmatize_text)
return data
def text_sentiment(text):
"""
Given input text, returns a scalar estimate of the sentiment of that text.
Polarity is float which lies in the range of [-1,1] where 1 means positive statement and -1 means a negative statement.
"""
# return indicoio.sentiment_hq(text)
return TextBlob(text).sentiment.polarity
def include_sentiment(data):
data['title_selftext'] = data['title'] + ' ' + data['selftext']
data['sentiment_val'] = data['title_selftext'].apply(text_sentiment)
data = data.drop(['title_selftext'], 1)
return data
def text_readability_score(text):
return textstat.flesch_reading_ease(text)
def include_readability_score(data):
data['title_selftext'] = data['title'] + ' ' + data['selftext']
data['readability_score'] = data['title_selftext'].apply(text_readability_score)
data = data.drop(['title_selftext'], 1)
return data
def preprocess(data):
t0 = time.time()
data = lowercase(data)
t1 = time.time()
if TIME_DEBUG:
print('Lowercasing took time: {}'.format(t1-t0))
t0 = time.time()
if INCLUDE_SENTIMENT_FEATURE:
data = include_sentiment(data)
if TIME_DEBUG:
print('Sentiment calculations took time: {}'.format(time.time()-t0))
t0 = time.time()
if INCLUDE_READABILITY_FEATURE:
data = include_readability_score(data)
if TIME_DEBUG:
print('Readability score calculations took time: {}'.format(time.time()-t0))
# t0 = time.time()
# data = remove_nums(data)
# t1 = time.time()
# if TIME_DEBUG:
# print('That took time: {}'.format(t1-t0))
"""
t0 = time.time()
data = remove_tags_puncts_whitespaces(data)
t1 = time.time()
if TIME_DEBUG:
print('Removing punctuations took time: {}'.format(t1-t0))
"""
"""
t0 = time.time()
data = stem_data(data)
t1 = time.time()
if DEBUG:
print('Stemming took time: {}'.format(t1-t0))
"""
"""
t0 = time.time()
data = lemmatize_data(data)
t1 = time.time()
if DEBUG:
print('Lemmatization took time: {}'.format(t1-t0))
"""
return data
def main():
if DEBUG:
print('Reading the data')
t0 = time.time()
df = pd.read_csv(DATA, sep='\t')
# df = pd.read_csv(DATA, sep='\t', nrows=1000)
preprocessed_df = preprocess(df)
# preprocessed_df.to_csv(OUTPUT_FILE_NAME, sep='\t', index=False)
t1 = time.time()
if TIME_DEBUG:
print('Total time taken: {}'.format(t1-t0))
if __name__ == '__main__':
main()
|
[
"textstat.flesch_reading_ease",
"nltk.stem.PorterStemmer",
"nltk.stem.WordNetLemmatizer",
"pandas.read_csv",
"time.time",
"textblob.TextBlob",
"nltk.download",
"re.sub",
"nltk.tokenize.word_tokenize",
"re.compile"
] |
[((288, 312), 'nltk.download', 'nltk.download', (['"""wordnet"""'], {}), "('wordnet')\n", (301, 312), False, 'import nltk\n'), ((1202, 1228), 're.compile', 're.compile', (['""">|&|<"""'], {}), "('>|&|<')\n", (1212, 1228), False, 'import re\n'), ((1309, 1340), 're.compile', 're.compile', (['"""[<{\\\\[].*?[>}\\\\]]"""'], {}), "('[<{\\\\[].*?[>}\\\\]]')\n", (1319, 1340), False, 'import re\n'), ((1440, 1576), 're.sub', 're.sub', (['"""((?P<a>\\\\s)\'(?P<x>\\\\w))|((?P<y>\\\\w)\'(?P<b>\\\\s))|((?P<c>\\\\s)\'(?P<d>\\\\s))"""', '"""\\\\g<a>\\\\g<x>\\\\g<y>\\\\g<b>\\\\g<c>\\\\g<d>"""', 'text'], {}), '(\n "((?P<a>\\\\s)\'(?P<x>\\\\w))|((?P<y>\\\\w)\'(?P<b>\\\\s))|((?P<c>\\\\s)\'(?P<d>\\\\s))",\n \'\\\\g<a>\\\\g<x>\\\\g<y>\\\\g<b>\\\\g<c>\\\\g<d>\', text)\n', (1446, 1576), False, 'import re\n'), ((1566, 1703), 're.sub', 're.sub', (['"""((?P<a>\\\\s)\'(?P<x>\\\\w))|((?P<y>\\\\w)\'(?P<b>\\\\s))|((?P<c>\\\\s)\'(?P<d>\\\\s))"""', '"""\\\\g<a>\\\\g<x>\\\\g<y>\\\\g<b>\\\\g<c>\\\\g<d>"""', 'text1'], {}), '(\n "((?P<a>\\\\s)\'(?P<x>\\\\w))|((?P<y>\\\\w)\'(?P<b>\\\\s))|((?P<c>\\\\s)\'(?P<d>\\\\s))",\n \'\\\\g<a>\\\\g<x>\\\\g<y>\\\\g<b>\\\\g<c>\\\\g<d>\', text1)\n', (1572, 1703), False, 'import re\n'), ((2413, 2438), 'nltk.stem.PorterStemmer', 'nltk.stem.PorterStemmer', ([], {}), '()\n', (2436, 2438), False, 'import nltk\n'), ((2457, 2490), 'nltk.tokenize.word_tokenize', 'nltk.tokenize.word_tokenize', (['text'], {}), '(text)\n', (2484, 2490), False, 'import nltk\n'), ((2946, 2975), 'nltk.stem.WordNetLemmatizer', 'nltk.stem.WordNetLemmatizer', ([], {}), '()\n', (2973, 2975), False, 'import nltk\n'), ((2992, 3025), 'nltk.tokenize.word_tokenize', 'nltk.tokenize.word_tokenize', (['text'], {}), '(text)\n', (3019, 3025), False, 'import nltk\n'), ((4019, 4053), 'textstat.flesch_reading_ease', 'textstat.flesch_reading_ease', (['text'], {}), '(text)\n', (4047, 4053), False, 'import textstat\n'), ((4325, 4336), 'time.time', 'time.time', ([], {}), '()\n', (4334, 4336), False, 'import time\n'), ((4369, 4380), 'time.time', 'time.time', ([], {}), '()\n', (4378, 4380), False, 'import time\n'), ((4455, 4466), 'time.time', 'time.time', ([], {}), '()\n', (4464, 4466), False, 'import time\n'), ((4627, 4638), 'time.time', 'time.time', ([], {}), '()\n', (4636, 4638), False, 'import time\n'), ((5456, 5467), 'time.time', 'time.time', ([], {}), '()\n', (5465, 5467), False, 'import time\n'), ((5475, 5502), 'pandas.read_csv', 'pd.read_csv', (['DATA'], {'sep': '"""\t"""'}), "(DATA, sep='\\t')\n", (5486, 5502), True, 'import pandas as pd\n'), ((5662, 5673), 'time.time', 'time.time', ([], {}), '()\n', (5671, 5673), False, 'import time\n'), ((3721, 3735), 'textblob.TextBlob', 'TextBlob', (['text'], {}), '(text)\n', (3729, 3735), False, 'from textblob import TextBlob\n'), ((4603, 4614), 'time.time', 'time.time', ([], {}), '()\n', (4612, 4614), False, 'import time\n'), ((4793, 4804), 'time.time', 'time.time', ([], {}), '()\n', (4802, 4804), False, 'import time\n')]
|
import re
from binascii import unhexlify
from datetime import datetime, timedelta, date
from decimal import Decimal
from email.mime.text import MIMEText
from fractions import Fraction
from uuid import UUID
import pytest
from cbor2.compat import timezone
from cbor2.encoder import dumps, CBOREncodeError, dump, shareable_encoder
from cbor2.types import CBORTag, undefined, CBORSimpleValue
@pytest.mark.parametrize('value, expected', [
(0, '00'),
(1, '01'),
(10, '0a'),
(23, '17'),
(24, '1818'),
(100, '1864'),
(1000, '1903e8'),
(1000000, '1a000f4240'),
(1000000000000, '1b000000e8d4a51000'),
(18446744073709551615, '1bffffffffffffffff'),
(18446744073709551616, 'c249010000000000000000'),
(-18446744073709551616, '3bffffffffffffffff'),
(-18446744073709551617, 'c349010000000000000000'),
(-1, '20'),
(-10, '29'),
(-100, '3863'),
(-1000, '3903e7')
])
def test_integer(value, expected):
expected = unhexlify(expected)
assert dumps(value) == expected
@pytest.mark.parametrize('value, expected', [
(1.1, 'fb3ff199999999999a'),
(1.0e+300, 'fb7e37e43c8800759c'),
(-4.1, 'fbc010666666666666'),
(float('inf'), 'f97c00'),
(float('nan'), 'f97e00'),
(float('-inf'), 'f9fc00')
])
def test_float(value, expected):
expected = unhexlify(expected)
assert dumps(value) == expected
@pytest.mark.parametrize('value, expected', [
(b'', '40'),
(b'\x01\x02\x03\x04', '4401020304'),
])
def test_bytestring(value, expected):
expected = unhexlify(expected)
assert dumps(value) == expected
def test_bytearray():
expected = unhexlify('4401020304')
assert dumps(bytearray(b'\x01\x02\x03\x04')) == expected
@pytest.mark.parametrize('value, expected', [
(u'', '60'),
(u'a', '6161'),
(u'IETF', '6449455446'),
(u'"\\', '62225c'),
(u'\u00fc', '62c3bc'),
(u'\u6c34', '63e6b0b4')
])
def test_string(value, expected):
expected = unhexlify(expected)
assert dumps(value) == expected
@pytest.mark.parametrize('value, expected', [
(False, 'f4'),
(True, 'f5'),
(None, 'f6'),
(undefined, 'f7')
], ids=['false', 'true', 'null', 'undefined'])
def test_special(value, expected):
expected = unhexlify(expected)
assert dumps(value) == expected
@pytest.mark.parametrize('value, expected', [
(CBORSimpleValue(0), 'e0'),
(CBORSimpleValue(2), 'e2'),
(CBORSimpleValue(19), 'f3'),
(CBORSimpleValue(32), 'f820')
])
def test_simple_value(value, expected):
expected = unhexlify(expected)
assert dumps(value) == expected
#
# Tests for extension tags
#
@pytest.mark.parametrize('value, as_timestamp, expected', [
(datetime(2013, 3, 21, 20, 4, 0, tzinfo=timezone.utc), False,
'c074323031332d30332d32315432303a30343a30305a'),
(datetime(2013, 3, 21, 20, 4, 0, 380841, tzinfo=timezone.utc), False,
'c0781b323031332d30332d32315432303a30343a30302e3338303834315a'),
(datetime(2013, 3, 21, 22, 4, 0, tzinfo=timezone(timedelta(hours=2))), False,
'c07819323031332d30332d32315432323a30343a30302b30323a3030'),
(datetime(2013, 3, 21, 20, 4, 0), False, 'c074323031332d30332d32315432303a30343a30305a'),
(datetime(2013, 3, 21, 20, 4, 0, tzinfo=timezone.utc), True, 'c11a514b67b0'),
(datetime(2013, 3, 21, 22, 4, 0, tzinfo=timezone(timedelta(hours=2))), True, 'c11a514b67b0')
], ids=['datetime/utc', 'datetime+micro/utc', 'datetime/eet', 'naive', 'timestamp/utc',
'timestamp/eet'])
def test_datetime(value, as_timestamp, expected):
expected = unhexlify(expected)
assert dumps(value, datetime_as_timestamp=as_timestamp, timezone=timezone.utc) == expected
def test_date():
expected = unhexlify('c074323031332d30332d32315430303a30303a30305a')
assert dumps(date(2013, 3, 21), timezone=timezone.utc) == expected
def test_naive_datetime():
"""Test that naive datetimes are gracefully rejected when no timezone has been set."""
exc = pytest.raises(CBOREncodeError, dumps, datetime(2013, 3, 21))
exc.match('naive datetime encountered and no default timezone has been set')
@pytest.mark.parametrize('value, expected', [
(Decimal('14.123'), 'c4822219372b'),
(Decimal('NaN'), 'f97e00'),
(Decimal('Infinity'), 'f97c00'),
(Decimal('-Infinity'), 'f9fc00')
], ids=['normal', 'nan', 'inf', 'neginf'])
def test_decimal(value, expected):
expected = unhexlify(expected)
assert dumps(value) == expected
def test_rational():
expected = unhexlify('d81e820205')
assert dumps(Fraction(2, 5)) == expected
def test_regex():
expected = unhexlify('d8236d68656c6c6f2028776f726c6429')
assert dumps(re.compile(u'hello (world)')) == expected
def test_mime():
expected = unhexlify(
'd824787b436f6e74656e742d547970653a20746578742f706c61696e3b20636861727365743d2269736f2d38'
'3835392d3135220a4d494d452d56657273696f6e3a20312e300a436f6e74656e742d5472616e736665722d456'
'e636f64696e673a2071756f7465642d7072696e7461626c650a0a48656c6c6f203d413475726f')
message = MIMEText(u'Hello \u20acuro', 'plain', 'iso-8859-15')
assert dumps(message) == expected
def test_uuid():
expected = unhexlify('d825505eaffac8b51e480581277fdcc7842faf')
assert dumps(UUID(hex='5eaffac8b51e480581277fdcc7842faf')) == expected
def test_custom_tag():
expected = unhexlify('d917706548656c6c6f')
assert dumps(CBORTag(6000, u'Hello')) == expected
def test_cyclic_array():
"""Test that an array that contains itself can be serialized with value sharing enabled."""
expected = unhexlify('d81c81d81c81d81d00')
a = [[]]
a[0].append(a)
assert dumps(a, value_sharing=True) == expected
def test_cyclic_array_nosharing():
"""Test that serializing a cyclic structure w/o value sharing will blow up gracefully."""
a = []
a.append(a)
exc = pytest.raises(CBOREncodeError, dumps, a)
exc.match('cyclic data structure detected but value sharing is disabled')
def test_cyclic_map():
"""Test that a dict that contains itself can be serialized with value sharing enabled."""
expected = unhexlify('d81ca100d81d00')
a = {}
a[0] = a
assert dumps(a, value_sharing=True) == expected
def test_cyclic_map_nosharing():
"""Test that serializing a cyclic structure w/o value sharing will fail gracefully."""
a = {}
a[0] = a
exc = pytest.raises(CBOREncodeError, dumps, a)
exc.match('cyclic data structure detected but value sharing is disabled')
@pytest.mark.parametrize('value_sharing, expected', [
(False, '828080'),
(True, 'd81c82d81c80d81d01')
], ids=['nosharing', 'sharing'])
def test_not_cyclic_same_object(value_sharing, expected):
"""Test that the same shareable object can be included twice if not in a cyclic structure."""
expected = unhexlify(expected)
a = []
b = [a, a]
assert dumps(b, value_sharing=value_sharing) == expected
def test_unsupported_type():
exc = pytest.raises(CBOREncodeError, dumps, lambda: None)
exc.match('cannot serialize type function')
def test_default():
class DummyType(object):
def __init__(self, state):
self.state = state
def default_encoder(encoder, value):
encoder.encode(value.state)
expected = unhexlify('820305')
obj = DummyType([3, 5])
serialized = dumps(obj, default=default_encoder)
assert serialized == expected
def test_default_cyclic():
class DummyType(object):
def __init__(self, value=None):
self.value = value
@shareable_encoder
def default_encoder(encoder, value):
state = encoder.encode_to_bytes(value.value)
encoder.encode(CBORTag(3000, state))
expected = unhexlify('D81CD90BB849D81CD90BB843D81D00')
obj = DummyType()
obj2 = DummyType(obj)
obj.value = obj2
serialized = dumps(obj, value_sharing=True, default=default_encoder)
assert serialized == expected
def test_dump_to_file(tmpdir):
path = tmpdir.join('testdata.cbor')
with path.open('wb') as fp:
dump([1, 10], fp)
assert path.read_binary() == b'\x82\x01\x0a'
|
[
"decimal.Decimal",
"email.mime.text.MIMEText",
"datetime.date",
"cbor2.types.CBORTag",
"datetime.datetime",
"pytest.raises",
"binascii.unhexlify",
"cbor2.encoder.dumps",
"cbor2.encoder.dump",
"cbor2.types.CBORSimpleValue",
"uuid.UUID",
"datetime.timedelta",
"pytest.mark.parametrize",
"fractions.Fraction",
"re.compile"
] |
[((393, 875), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value, expected"""', "[(0, '00'), (1, '01'), (10, '0a'), (23, '17'), (24, '1818'), (100, '1864'),\n (1000, '1903e8'), (1000000, '1a000f4240'), (1000000000000,\n '1b000000e8d4a51000'), (18446744073709551615, '1bffffffffffffffff'), (\n 18446744073709551616, 'c249010000000000000000'), (-18446744073709551616,\n '3bffffffffffffffff'), (-18446744073709551617, 'c349010000000000000000'\n ), (-1, '20'), (-10, '29'), (-100, '3863'), (-1000, '3903e7')]"], {}), "('value, expected', [(0, '00'), (1, '01'), (10, '0a'\n ), (23, '17'), (24, '1818'), (100, '1864'), (1000, '1903e8'), (1000000,\n '1a000f4240'), (1000000000000, '1b000000e8d4a51000'), (\n 18446744073709551615, '1bffffffffffffffff'), (18446744073709551616,\n 'c249010000000000000000'), (-18446744073709551616, '3bffffffffffffffff'\n ), (-18446744073709551617, 'c349010000000000000000'), (-1, '20'), (-10,\n '29'), (-100, '3863'), (-1000, '3903e7')])\n", (416, 875), False, 'import pytest\n'), ((1378, 1477), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value, expected"""', "[(b'', '40'), (b'\\x01\\x02\\x03\\x04', '4401020304')]"], {}), "('value, expected', [(b'', '40'), (\n b'\\x01\\x02\\x03\\x04', '4401020304')])\n", (1401, 1477), False, 'import pytest\n'), ((1720, 1885), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value, expected"""', '[(u\'\', \'60\'), (u\'a\', \'6161\'), (u\'IETF\', \'6449455446\'), (u\'"\\\\\', \'62225c\'),\n (u\'ü\', \'62c3bc\'), (u\'水\', \'63e6b0b4\')]'], {}), '(\'value, expected\', [(u\'\', \'60\'), (u\'a\', \'6161\'), (\n u\'IETF\', \'6449455446\'), (u\'"\\\\\', \'62225c\'), (u\'ü\', \'62c3bc\'), (u\'水\',\n \'63e6b0b4\')])\n', (1743, 1885), False, 'import pytest\n'), ((2021, 2181), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value, expected"""', "[(False, 'f4'), (True, 'f5'), (None, 'f6'), (undefined, 'f7')]"], {'ids': "['false', 'true', 'null', 'undefined']"}), "('value, expected', [(False, 'f4'), (True, 'f5'), (\n None, 'f6'), (undefined, 'f7')], ids=['false', 'true', 'null', 'undefined']\n )\n", (2044, 2181), False, 'import pytest\n'), ((6474, 6610), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value_sharing, expected"""', "[(False, '828080'), (True, 'd81c82d81c80d81d01')]"], {'ids': "['nosharing', 'sharing']"}), "('value_sharing, expected', [(False, '828080'), (\n True, 'd81c82d81c80d81d01')], ids=['nosharing', 'sharing'])\n", (6497, 6610), False, 'import pytest\n'), ((969, 988), 'binascii.unhexlify', 'unhexlify', (['expected'], {}), '(expected)\n', (978, 988), False, 'from binascii import unhexlify\n'), ((1319, 1338), 'binascii.unhexlify', 'unhexlify', (['expected'], {}), '(expected)\n', (1328, 1338), False, 'from binascii import unhexlify\n'), ((1537, 1556), 'binascii.unhexlify', 'unhexlify', (['expected'], {}), '(expected)\n', (1546, 1556), False, 'from binascii import unhexlify\n'), ((1632, 1655), 'binascii.unhexlify', 'unhexlify', (['"""4401020304"""'], {}), "('4401020304')\n", (1641, 1655), False, 'from binascii import unhexlify\n'), ((1962, 1981), 'binascii.unhexlify', 'unhexlify', (['expected'], {}), '(expected)\n', (1971, 1981), False, 'from binascii import unhexlify\n'), ((2240, 2259), 'binascii.unhexlify', 'unhexlify', (['expected'], {}), '(expected)\n', (2249, 2259), False, 'from binascii import unhexlify\n'), ((2533, 2552), 'binascii.unhexlify', 'unhexlify', (['expected'], {}), '(expected)\n', (2542, 2552), False, 'from binascii import unhexlify\n'), ((3547, 3566), 'binascii.unhexlify', 'unhexlify', (['expected'], {}), '(expected)\n', (3556, 3566), False, 'from binascii import unhexlify\n'), ((3696, 3753), 'binascii.unhexlify', 'unhexlify', (['"""c074323031332d30332d32315430303a30303a30305a"""'], {}), "('c074323031332d30332d32315430303a30303a30305a')\n", (3705, 3753), False, 'from binascii import unhexlify\n'), ((4385, 4404), 'binascii.unhexlify', 'unhexlify', (['expected'], {}), '(expected)\n', (4394, 4404), False, 'from binascii import unhexlify\n'), ((4479, 4502), 'binascii.unhexlify', 'unhexlify', (['"""d81e820205"""'], {}), "('d81e820205')\n", (4488, 4502), False, 'from binascii import unhexlify\n'), ((4583, 4628), 'binascii.unhexlify', 'unhexlify', (['"""d8236d68656c6c6f2028776f726c6429"""'], {}), "('d8236d68656c6c6f2028776f726c6429')\n", (4592, 4628), False, 'from binascii import unhexlify\n'), ((4722, 4999), 'binascii.unhexlify', 'unhexlify', (['"""d824787b436f6e74656e742d547970653a20746578742f706c61696e3b20636861727365743d2269736f2d383835392d3135220a4d494d452d56657273696f6e3a20312e300a436f6e74656e742d5472616e736665722d456e636f64696e673a2071756f7465642d7072696e7461626c650a0a48656c6c6f203d413475726f"""'], {}), "(\n 'd824787b436f6e74656e742d547970653a20746578742f706c61696e3b20636861727365743d2269736f2d383835392d3135220a4d494d452d56657273696f6e3a20312e300a436f6e74656e742d5472616e736665722d456e636f64696e673a2071756f7465642d7072696e7461626c650a0a48656c6c6f203d413475726f'\n )\n", (4731, 4999), False, 'from binascii import unhexlify\n'), ((5035, 5082), 'email.mime.text.MIMEText', 'MIMEText', (['u"""Hello €uro"""', '"""plain"""', '"""iso-8859-15"""'], {}), "(u'Hello €uro', 'plain', 'iso-8859-15')\n", (5043, 5082), False, 'from email.mime.text import MIMEText\n'), ((5160, 5211), 'binascii.unhexlify', 'unhexlify', (['"""d825505eaffac8b51e480581277fdcc7842faf"""'], {}), "('d825505eaffac8b51e480581277fdcc7842faf')\n", (5169, 5211), False, 'from binascii import unhexlify\n'), ((5327, 5358), 'binascii.unhexlify', 'unhexlify', (['"""d917706548656c6c6f"""'], {}), "('d917706548656c6c6f')\n", (5336, 5358), False, 'from binascii import unhexlify\n'), ((5551, 5582), 'binascii.unhexlify', 'unhexlify', (['"""d81c81d81c81d81d00"""'], {}), "('d81c81d81c81d81d00')\n", (5560, 5582), False, 'from binascii import unhexlify\n'), ((5835, 5875), 'pytest.raises', 'pytest.raises', (['CBOREncodeError', 'dumps', 'a'], {}), '(CBOREncodeError, dumps, a)\n', (5848, 5875), False, 'import pytest\n'), ((6088, 6115), 'binascii.unhexlify', 'unhexlify', (['"""d81ca100d81d00"""'], {}), "('d81ca100d81d00')\n", (6097, 6115), False, 'from binascii import unhexlify\n'), ((6352, 6392), 'pytest.raises', 'pytest.raises', (['CBOREncodeError', 'dumps', 'a'], {}), '(CBOREncodeError, dumps, a)\n', (6365, 6392), False, 'import pytest\n'), ((6787, 6806), 'binascii.unhexlify', 'unhexlify', (['expected'], {}), '(expected)\n', (6796, 6806), False, 'from binascii import unhexlify\n'), ((6935, 6987), 'pytest.raises', 'pytest.raises', (['CBOREncodeError', 'dumps', '(lambda : None)'], {}), '(CBOREncodeError, dumps, lambda : None)\n', (6948, 6987), False, 'import pytest\n'), ((7246, 7265), 'binascii.unhexlify', 'unhexlify', (['"""820305"""'], {}), "('820305')\n", (7255, 7265), False, 'from binascii import unhexlify\n'), ((7311, 7346), 'cbor2.encoder.dumps', 'dumps', (['obj'], {'default': 'default_encoder'}), '(obj, default=default_encoder)\n', (7316, 7346), False, 'from cbor2.encoder import dumps, CBOREncodeError, dump, shareable_encoder\n'), ((7689, 7732), 'binascii.unhexlify', 'unhexlify', (['"""D81CD90BB849D81CD90BB843D81D00"""'], {}), "('D81CD90BB849D81CD90BB843D81D00')\n", (7698, 7732), False, 'from binascii import unhexlify\n'), ((7819, 7874), 'cbor2.encoder.dumps', 'dumps', (['obj'], {'value_sharing': '(True)', 'default': 'default_encoder'}), '(obj, value_sharing=True, default=default_encoder)\n', (7824, 7874), False, 'from cbor2.encoder import dumps, CBOREncodeError, dump, shareable_encoder\n'), ((1000, 1012), 'cbor2.encoder.dumps', 'dumps', (['value'], {}), '(value)\n', (1005, 1012), False, 'from cbor2.encoder import dumps, CBOREncodeError, dump, shareable_encoder\n'), ((1350, 1362), 'cbor2.encoder.dumps', 'dumps', (['value'], {}), '(value)\n', (1355, 1362), False, 'from cbor2.encoder import dumps, CBOREncodeError, dump, shareable_encoder\n'), ((1568, 1580), 'cbor2.encoder.dumps', 'dumps', (['value'], {}), '(value)\n', (1573, 1580), False, 'from cbor2.encoder import dumps, CBOREncodeError, dump, shareable_encoder\n'), ((1993, 2005), 'cbor2.encoder.dumps', 'dumps', (['value'], {}), '(value)\n', (1998, 2005), False, 'from cbor2.encoder import dumps, CBOREncodeError, dump, shareable_encoder\n'), ((2271, 2283), 'cbor2.encoder.dumps', 'dumps', (['value'], {}), '(value)\n', (2276, 2283), False, 'from cbor2.encoder import dumps, CBOREncodeError, dump, shareable_encoder\n'), ((2564, 2576), 'cbor2.encoder.dumps', 'dumps', (['value'], {}), '(value)\n', (2569, 2576), False, 'from cbor2.encoder import dumps, CBOREncodeError, dump, shareable_encoder\n'), ((3578, 3649), 'cbor2.encoder.dumps', 'dumps', (['value'], {'datetime_as_timestamp': 'as_timestamp', 'timezone': 'timezone.utc'}), '(value, datetime_as_timestamp=as_timestamp, timezone=timezone.utc)\n', (3583, 3649), False, 'from cbor2.encoder import dumps, CBOREncodeError, dump, shareable_encoder\n'), ((3993, 4014), 'datetime.datetime', 'datetime', (['(2013)', '(3)', '(21)'], {}), '(2013, 3, 21)\n', (4001, 4014), False, 'from datetime import datetime, timedelta, date\n'), ((4416, 4428), 'cbor2.encoder.dumps', 'dumps', (['value'], {}), '(value)\n', (4421, 4428), False, 'from cbor2.encoder import dumps, CBOREncodeError, dump, shareable_encoder\n'), ((5099, 5113), 'cbor2.encoder.dumps', 'dumps', (['message'], {}), '(message)\n', (5104, 5113), False, 'from cbor2.encoder import dumps, CBOREncodeError, dump, shareable_encoder\n'), ((5626, 5654), 'cbor2.encoder.dumps', 'dumps', (['a'], {'value_sharing': '(True)'}), '(a, value_sharing=True)\n', (5631, 5654), False, 'from cbor2.encoder import dumps, CBOREncodeError, dump, shareable_encoder\n'), ((6151, 6179), 'cbor2.encoder.dumps', 'dumps', (['a'], {'value_sharing': '(True)'}), '(a, value_sharing=True)\n', (6156, 6179), False, 'from cbor2.encoder import dumps, CBOREncodeError, dump, shareable_encoder\n'), ((6844, 6881), 'cbor2.encoder.dumps', 'dumps', (['b'], {'value_sharing': 'value_sharing'}), '(b, value_sharing=value_sharing)\n', (6849, 6881), False, 'from cbor2.encoder import dumps, CBOREncodeError, dump, shareable_encoder\n'), ((8022, 8039), 'cbor2.encoder.dump', 'dump', (['[1, 10]', 'fp'], {}), '([1, 10], fp)\n', (8026, 8039), False, 'from cbor2.encoder import dumps, CBOREncodeError, dump, shareable_encoder\n'), ((2349, 2367), 'cbor2.types.CBORSimpleValue', 'CBORSimpleValue', (['(0)'], {}), '(0)\n', (2364, 2367), False, 'from cbor2.types import CBORTag, undefined, CBORSimpleValue\n'), ((2381, 2399), 'cbor2.types.CBORSimpleValue', 'CBORSimpleValue', (['(2)'], {}), '(2)\n', (2396, 2399), False, 'from cbor2.types import CBORTag, undefined, CBORSimpleValue\n'), ((2413, 2432), 'cbor2.types.CBORSimpleValue', 'CBORSimpleValue', (['(19)'], {}), '(19)\n', (2428, 2432), False, 'from cbor2.types import CBORTag, undefined, CBORSimpleValue\n'), ((2446, 2465), 'cbor2.types.CBORSimpleValue', 'CBORSimpleValue', (['(32)'], {}), '(32)\n', (2461, 2465), False, 'from cbor2.types import CBORTag, undefined, CBORSimpleValue\n'), ((2688, 2740), 'datetime.datetime', 'datetime', (['(2013)', '(3)', '(21)', '(20)', '(4)', '(0)'], {'tzinfo': 'timezone.utc'}), '(2013, 3, 21, 20, 4, 0, tzinfo=timezone.utc)\n', (2696, 2740), False, 'from datetime import datetime, timedelta, date\n'), ((2808, 2868), 'datetime.datetime', 'datetime', (['(2013)', '(3)', '(21)', '(20)', '(4)', '(0)', '(380841)'], {'tzinfo': 'timezone.utc'}), '(2013, 3, 21, 20, 4, 0, 380841, tzinfo=timezone.utc)\n', (2816, 2868), False, 'from datetime import datetime, timedelta, date\n'), ((3100, 3131), 'datetime.datetime', 'datetime', (['(2013)', '(3)', '(21)', '(20)', '(4)', '(0)'], {}), '(2013, 3, 21, 20, 4, 0)\n', (3108, 3131), False, 'from datetime import datetime, timedelta, date\n'), ((3194, 3246), 'datetime.datetime', 'datetime', (['(2013)', '(3)', '(21)', '(20)', '(4)', '(0)'], {'tzinfo': 'timezone.utc'}), '(2013, 3, 21, 20, 4, 0, tzinfo=timezone.utc)\n', (3202, 3246), False, 'from datetime import datetime, timedelta, date\n'), ((3771, 3788), 'datetime.date', 'date', (['(2013)', '(3)', '(21)'], {}), '(2013, 3, 21)\n', (3775, 3788), False, 'from datetime import datetime, timedelta, date\n'), ((4150, 4167), 'decimal.Decimal', 'Decimal', (['"""14.123"""'], {}), "('14.123')\n", (4157, 4167), False, 'from decimal import Decimal\n'), ((4191, 4205), 'decimal.Decimal', 'Decimal', (['"""NaN"""'], {}), "('NaN')\n", (4198, 4205), False, 'from decimal import Decimal\n'), ((4223, 4242), 'decimal.Decimal', 'Decimal', (['"""Infinity"""'], {}), "('Infinity')\n", (4230, 4242), False, 'from decimal import Decimal\n'), ((4260, 4280), 'decimal.Decimal', 'Decimal', (['"""-Infinity"""'], {}), "('-Infinity')\n", (4267, 4280), False, 'from decimal import Decimal\n'), ((4520, 4534), 'fractions.Fraction', 'Fraction', (['(2)', '(5)'], {}), '(2, 5)\n', (4528, 4534), False, 'from fractions import Fraction\n'), ((4646, 4674), 're.compile', 're.compile', (['u"""hello (world)"""'], {}), "(u'hello (world)')\n", (4656, 4674), False, 'import re\n'), ((5229, 5273), 'uuid.UUID', 'UUID', ([], {'hex': '"""5eaffac8b51e480581277fdcc7842faf"""'}), "(hex='5eaffac8b51e480581277fdcc7842faf')\n", (5233, 5273), False, 'from uuid import UUID\n'), ((5376, 5399), 'cbor2.types.CBORTag', 'CBORTag', (['(6000)', 'u"""Hello"""'], {}), "(6000, u'Hello')\n", (5383, 5399), False, 'from cbor2.types import CBORTag, undefined, CBORSimpleValue\n'), ((7651, 7671), 'cbor2.types.CBORTag', 'CBORTag', (['(3000)', 'state'], {}), '(3000, state)\n', (7658, 7671), False, 'from cbor2.types import CBORTag, undefined, CBORSimpleValue\n'), ((3000, 3018), 'datetime.timedelta', 'timedelta', ([], {'hours': '(2)'}), '(hours=2)\n', (3009, 3018), False, 'from datetime import datetime, timedelta, date\n'), ((3324, 3342), 'datetime.timedelta', 'timedelta', ([], {'hours': '(2)'}), '(hours=2)\n', (3333, 3342), False, 'from datetime import datetime, timedelta, date\n')]
|
import pytest
from opentrons.protocol_api.module_validation_and_errors import (
validate_heater_shaker_temperature,
validate_heater_shaker_speed,
InvalidTargetTemperatureError,
InvalidTargetSpeedError,
)
@pytest.mark.parametrize("valid_celsius_value", [37.0, 37.1, 50, 94.99, 95])
def test_validate_heater_shaker_temperature(valid_celsius_value: float) -> None:
"""It should return the validated temperature value."""
validated = validate_heater_shaker_temperature(celsius=valid_celsius_value)
assert validated == valid_celsius_value
@pytest.mark.parametrize("invalid_celsius_value", [-1, 0, 36.99, 95.01])
def test_validate_heater_shaker_temperature_raises(
invalid_celsius_value: float,
) -> None:
"""It should raise an error for invalid temperature values."""
with pytest.raises(InvalidTargetTemperatureError):
validate_heater_shaker_temperature(celsius=invalid_celsius_value)
@pytest.mark.parametrize("valid_rpm_value", [200, 201, 1000, 2999, 3000])
def test_validate_heater_shaker_speed(valid_rpm_value: int) -> None:
"""It should return the validated speed value."""
validated = validate_heater_shaker_speed(rpm=valid_rpm_value)
assert validated == valid_rpm_value
@pytest.mark.parametrize("invalid_rpm_value", [0, 199, 3001])
def test_validate_heater_shaker_speed_raises(invalid_rpm_value: int) -> None:
"""It should raise an error for invalid speed values."""
with pytest.raises(InvalidTargetSpeedError):
validate_heater_shaker_speed(rpm=invalid_rpm_value)
|
[
"pytest.mark.parametrize",
"pytest.raises",
"opentrons.protocol_api.module_validation_and_errors.validate_heater_shaker_speed",
"opentrons.protocol_api.module_validation_and_errors.validate_heater_shaker_temperature"
] |
[((224, 299), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""valid_celsius_value"""', '[37.0, 37.1, 50, 94.99, 95]'], {}), "('valid_celsius_value', [37.0, 37.1, 50, 94.99, 95])\n", (247, 299), False, 'import pytest\n'), ((568, 639), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""invalid_celsius_value"""', '[-1, 0, 36.99, 95.01]'], {}), "('invalid_celsius_value', [-1, 0, 36.99, 95.01])\n", (591, 639), False, 'import pytest\n'), ((936, 1008), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""valid_rpm_value"""', '[200, 201, 1000, 2999, 3000]'], {}), "('valid_rpm_value', [200, 201, 1000, 2999, 3000])\n", (959, 1008), False, 'import pytest\n'), ((1241, 1301), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""invalid_rpm_value"""', '[0, 199, 3001]'], {}), "('invalid_rpm_value', [0, 199, 3001])\n", (1264, 1301), False, 'import pytest\n'), ((457, 520), 'opentrons.protocol_api.module_validation_and_errors.validate_heater_shaker_temperature', 'validate_heater_shaker_temperature', ([], {'celsius': 'valid_celsius_value'}), '(celsius=valid_celsius_value)\n', (491, 520), False, 'from opentrons.protocol_api.module_validation_and_errors import validate_heater_shaker_temperature, validate_heater_shaker_speed, InvalidTargetTemperatureError, InvalidTargetSpeedError\n'), ((1148, 1197), 'opentrons.protocol_api.module_validation_and_errors.validate_heater_shaker_speed', 'validate_heater_shaker_speed', ([], {'rpm': 'valid_rpm_value'}), '(rpm=valid_rpm_value)\n', (1176, 1197), False, 'from opentrons.protocol_api.module_validation_and_errors import validate_heater_shaker_temperature, validate_heater_shaker_speed, InvalidTargetTemperatureError, InvalidTargetSpeedError\n'), ((813, 857), 'pytest.raises', 'pytest.raises', (['InvalidTargetTemperatureError'], {}), '(InvalidTargetTemperatureError)\n', (826, 857), False, 'import pytest\n'), ((867, 932), 'opentrons.protocol_api.module_validation_and_errors.validate_heater_shaker_temperature', 'validate_heater_shaker_temperature', ([], {'celsius': 'invalid_celsius_value'}), '(celsius=invalid_celsius_value)\n', (901, 932), False, 'from opentrons.protocol_api.module_validation_and_errors import validate_heater_shaker_temperature, validate_heater_shaker_speed, InvalidTargetTemperatureError, InvalidTargetSpeedError\n'), ((1450, 1488), 'pytest.raises', 'pytest.raises', (['InvalidTargetSpeedError'], {}), '(InvalidTargetSpeedError)\n', (1463, 1488), False, 'import pytest\n'), ((1498, 1549), 'opentrons.protocol_api.module_validation_and_errors.validate_heater_shaker_speed', 'validate_heater_shaker_speed', ([], {'rpm': 'invalid_rpm_value'}), '(rpm=invalid_rpm_value)\n', (1526, 1549), False, 'from opentrons.protocol_api.module_validation_and_errors import validate_heater_shaker_temperature, validate_heater_shaker_speed, InvalidTargetTemperatureError, InvalidTargetSpeedError\n')]
|
from jupyterthemes import jtplot
import numpy as np
import os
import matplotlib.pyplot as plt
from pathlib import Path
from scipy.ndimage import filters
from textwrap import wrap
import torch
import vectorized_agents as va
import vectorized_env as ve
jtplot.style()
DEVICE = torch.device('cuda')
if DEVICE == torch.device('cpu'):
os.environ['OMP_NUM_THREADS'] = '4'
n_envs = 50
else:
os.environ['OMP_NUM_THREADS'] = '8'
n_envs = 200
ENV_KWARGS = dict(
n_envs=n_envs,
env_device=DEVICE,
out_device=DEVICE,
reward_type=ve.EVERY_STEP_EV_ZEROSUM
)
all_ensemble_names = ['a3c_agent_small_8_32', 'awac_agent_small_8_64_32_1_norm', 'a3c_agent_small_8_64_32_2']
PLAYER_1s = [
#va.SavedRLAgentMultiObsEnsemble(all_ensemble_names[:2], weight_logits=False, deterministic_policy=True),
#va.SavedRLAgentMultiObsEnsemble([all_ensemble_names[0], all_ensemble_names[2]], weight_logits=False, deterministic_policy=True),
#va.SavedRLAgentMultiObsEnsemble(all_ensemble_names[-2:], weight_logits=False, deterministic_policy=True),
va.SavedRLAgentMultiObsEnsemble(all_ensemble_names, weight_logits=False, deterministic_policy=True),
#va.SavedRLAgentMultiObsEnsemble(all_ensemble_names[:2], weight_logits=True, deterministic_policy=True),
#va.SavedRLAgentMultiObsEnsemble([all_ensemble_names[0], all_ensemble_names[2]], weight_logits=True, deterministic_policy=True),
### LEFT OFF HERE:
#va.SavedRLAgentMultiObsEnsemble(all_ensemble_names[-2:], weight_logits=True, deterministic_policy=True),
va.SavedRLAgentMultiObsEnsemble(all_ensemble_names, weight_logits=True, deterministic_policy=True),
# va.SavedRLAgentEnsemble('a3c_agent_small_8_64_32_2', weight_logits=True, device=DEVICE, deterministic_policy=True),
# va.SavedRLAgentEnsemble('awac_agent_small_8_64_32_1_norm', weight_logits=False, device=DEVICE, deterministic_policy=True),
# va.SavedRLAgentEnsemble('a3c_agent_small_8_32', weight_logits=True, device=DEVICE, deterministic_policy=True),
# va.SavedRLAgent('awac_agent_small_8_64_32_1_norm_v1-230', device=DEVICE, deterministic_policy=True),
# va.SavedRLAgent('a3c_agent_small_8_32-790', device=DEVICE, deterministic_policy=True),
# va.SavedRLAgent('a3c_agent_small_8_64_32_2_v2-30', device=DEVICE, deterministic_policy=False)
]
PLAYER_2s = [
va.BasicThompsonSampling(),
va.PullVegasSlotMachines(),
va.PullVegasSlotMachinesImproved(),
va.SavedRLAgent('a3c_agent_small_8_32-790', device=DEVICE, deterministic_policy=True),
va.SavedRLAgent('awac_agent_small_8_64_32_1_norm_v1-230', deterministic_policy=True),
va.SavedRLAgent('a3c_agent_small_8_64_32_2_v2-30', device=DEVICE, deterministic_policy=False),
#va.SavedRLAgentEnsemble('a3c_agent_small_8_32', weight_logits=True, device=DEVICE, deterministic_policy=True),
va.SavedRLAgentEnsemble('a3c_agent_small_8_64_32_2', weight_logits=True, device=DEVICE, deterministic_policy=True),
#va.SavedRLAgentEnsemble('awac_agent_small_8_64_32_1_norm', weight_logits=False, device=DEVICE, deterministic_policy=True),
]
def wrap_title(title):
return '\n'.join(wrap(title, 55, break_long_words=True))
if __name__ == '__main__':
for player_1 in PLAYER_1s:
for player_2 in PLAYER_2s:
if player_1 == player_2:
continue
p1_score, rewards_over_time = va.run_vectorized_vs(player_1, player_2, display_out=True, **ENV_KWARGS)
rewards_over_time = rewards_over_time.cpu().numpy().squeeze()
cumulative_ymax = 10
expected_ymax = 0.10
q = np.linspace(0., 100., 11)
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
n_rows = 2
n_cols = 2
fig, axes = plt.subplots(n_rows, n_cols, figsize=(8 * n_cols, 8 * n_rows))
fig_title = (f'{player_1.name} -vs- {player_2.name}\n'
f'{p1_score * 100.:.2f}% winrate over {rewards_over_time.shape[1]} games')
fig.suptitle(wrap_title(fig_title))
axes = axes.ravel()
axes[0].plot(np.cumsum(rewards_over_time.mean(axis=1)))
axes[0].set_ylim((-cumulative_ymax, cumulative_ymax))
axes[0].set_title(wrap_title(f"{player_1.name} cumulative expected advantage"))
window_size = 50
axes[1].plot(filters.uniform_filter1d(rewards_over_time.mean(axis=1), window_size, mode='constant'))
axes[1].set_ylim((-expected_ymax, expected_ymax))
axes[1].set_title(wrap_title(f"{player_1.name} per-step expected advantage"))
for i, quantile, val in zip(
range(len(q)),
q,
np.percentile(np.cumsum(rewards_over_time,
axis=0),
q, axis=1)):
color_idx = int(abs((len(q) - 1.) / 2. - i))
axes[2].plot(val, label=f'Percentile: {quantile:.0f}',
color=colors[color_idx],
alpha=1. / (color_idx + 1),
# linewidth=3./(color_idx+1)
)
axes[2].set_ylim((-cumulative_ymax * 5, cumulative_ymax * 5))
if len(q) <= 5:
axes[2].legend()
axes[2].set_title(wrap_title(f"{player_1.name} cumulative expected advantage (percentiles)"))
for i, quantile, val in zip(
range(len(q)),
q,
np.percentile(filters.uniform_filter1d(rewards_over_time,
window_size * 5,
mode='reflect',
axis=0),
q, axis=1)):
color_idx = int(abs((len(q) - 1.) / 2. - i))
axes[3].plot(val, label=f'Percentile: {quantile:.0f}',
color=colors[color_idx],
alpha=1. / (color_idx + 1),
# linewidth=3./(color_idx+1)
)
axes[3].set_ylim((-expected_ymax, expected_ymax))
if len(q) <= 5:
axes[3].legend()
axes[3].set_title(wrap_title(f"{player_1.name} per-step expected advantage over time (percentiles)"))
plt.tight_layout(rect=[0., 0., 1., 0.9])
p_names_abbrev = []
for p in (player_1, player_2):
if type(p) == va.SavedRLAgent:
p_names_abbrev.append(p.agent_name)
if p.name.endswith('_deterministic'):
p_names_abbrev[-1] += '_deterministic'
else:
p_names_abbrev[-1] += '_stochastic'
elif type(p) in (va.SavedRLAgentEnsemble, va.SavedRLAgentMultiObsEnsemble):
if type(p) == va.SavedRLAgentEnsemble:
p_names_abbrev.append(f'ensemble_{p.ensemble_name}')
else:
p_names_abbrev.append(f'multiObsEnsemble_{p.ensemble_name}')
if p.ensemble_model.weight_logits:
p_names_abbrev[-1] += '_weight_logits'
else:
p_names_abbrev[-1] += '_weight_probs'
if p.name.endswith('_deterministic'):
p_names_abbrev[-1] += '_deterministic'
else:
p_names_abbrev[-1] += '_stochastic'
else:
p_names_abbrev.append(p.name)
save_fig_title = f'{p_names_abbrev[0]}__{p_names_abbrev[1]}'
if type(player_1) in (va.SavedRLAgent, va.SavedRLAgentEnsemble, va.SavedRLAgentMultiObsEnsemble):
save_fig_folder = f'saved_figures/{p_names_abbrev[0]}'
else:
save_fig_folder = 'saved_figures'
Path(save_fig_folder).mkdir(exist_ok=True)
fig.savefig(f'{save_fig_folder}/{save_fig_title}.png', dpi=100)
plt.close(fig)
|
[
"matplotlib.pyplot.tight_layout",
"jupyterthemes.jtplot.style",
"vectorized_agents.SavedRLAgent",
"textwrap.wrap",
"vectorized_agents.run_vectorized_vs",
"matplotlib.pyplot.close",
"vectorized_agents.PullVegasSlotMachines",
"matplotlib.pyplot.subplots",
"numpy.cumsum",
"pathlib.Path",
"scipy.ndimage.filters.uniform_filter1d",
"vectorized_agents.SavedRLAgentMultiObsEnsemble",
"torch.device",
"numpy.linspace",
"vectorized_agents.SavedRLAgentEnsemble",
"vectorized_agents.PullVegasSlotMachinesImproved",
"vectorized_agents.BasicThompsonSampling"
] |
[((253, 267), 'jupyterthemes.jtplot.style', 'jtplot.style', ([], {}), '()\n', (265, 267), False, 'from jupyterthemes import jtplot\n'), ((278, 298), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (290, 298), False, 'import torch\n'), ((312, 331), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (324, 331), False, 'import torch\n'), ((1064, 1167), 'vectorized_agents.SavedRLAgentMultiObsEnsemble', 'va.SavedRLAgentMultiObsEnsemble', (['all_ensemble_names'], {'weight_logits': '(False)', 'deterministic_policy': '(True)'}), '(all_ensemble_names, weight_logits=False,\n deterministic_policy=True)\n', (1095, 1167), True, 'import vectorized_agents as va\n'), ((1545, 1647), 'vectorized_agents.SavedRLAgentMultiObsEnsemble', 'va.SavedRLAgentMultiObsEnsemble', (['all_ensemble_names'], {'weight_logits': '(True)', 'deterministic_policy': '(True)'}), '(all_ensemble_names, weight_logits=True,\n deterministic_policy=True)\n', (1576, 1647), True, 'import vectorized_agents as va\n'), ((2333, 2359), 'vectorized_agents.BasicThompsonSampling', 'va.BasicThompsonSampling', ([], {}), '()\n', (2357, 2359), True, 'import vectorized_agents as va\n'), ((2365, 2391), 'vectorized_agents.PullVegasSlotMachines', 'va.PullVegasSlotMachines', ([], {}), '()\n', (2389, 2391), True, 'import vectorized_agents as va\n'), ((2397, 2431), 'vectorized_agents.PullVegasSlotMachinesImproved', 'va.PullVegasSlotMachinesImproved', ([], {}), '()\n', (2429, 2431), True, 'import vectorized_agents as va\n'), ((2437, 2526), 'vectorized_agents.SavedRLAgent', 'va.SavedRLAgent', (['"""a3c_agent_small_8_32-790"""'], {'device': 'DEVICE', 'deterministic_policy': '(True)'}), "('a3c_agent_small_8_32-790', device=DEVICE,\n deterministic_policy=True)\n", (2452, 2526), True, 'import vectorized_agents as va\n'), ((2528, 2616), 'vectorized_agents.SavedRLAgent', 'va.SavedRLAgent', (['"""awac_agent_small_8_64_32_1_norm_v1-230"""'], {'deterministic_policy': '(True)'}), "('awac_agent_small_8_64_32_1_norm_v1-230',\n deterministic_policy=True)\n", (2543, 2616), True, 'import vectorized_agents as va\n'), ((2618, 2715), 'vectorized_agents.SavedRLAgent', 'va.SavedRLAgent', (['"""a3c_agent_small_8_64_32_2_v2-30"""'], {'device': 'DEVICE', 'deterministic_policy': '(False)'}), "('a3c_agent_small_8_64_32_2_v2-30', device=DEVICE,\n deterministic_policy=False)\n", (2633, 2715), True, 'import vectorized_agents as va\n'), ((2833, 2951), 'vectorized_agents.SavedRLAgentEnsemble', 'va.SavedRLAgentEnsemble', (['"""a3c_agent_small_8_64_32_2"""'], {'weight_logits': '(True)', 'device': 'DEVICE', 'deterministic_policy': '(True)'}), "('a3c_agent_small_8_64_32_2', weight_logits=True,\n device=DEVICE, deterministic_policy=True)\n", (2856, 2951), True, 'import vectorized_agents as va\n'), ((3125, 3163), 'textwrap.wrap', 'wrap', (['title', '(55)'], {'break_long_words': '(True)'}), '(title, 55, break_long_words=True)\n', (3129, 3163), False, 'from textwrap import wrap\n'), ((3364, 3436), 'vectorized_agents.run_vectorized_vs', 'va.run_vectorized_vs', (['player_1', 'player_2'], {'display_out': '(True)'}), '(player_1, player_2, display_out=True, **ENV_KWARGS)\n', (3384, 3436), True, 'import vectorized_agents as va\n'), ((3594, 3621), 'numpy.linspace', 'np.linspace', (['(0.0)', '(100.0)', '(11)'], {}), '(0.0, 100.0, 11)\n', (3605, 3621), True, 'import numpy as np\n'), ((3762, 3824), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n_rows', 'n_cols'], {'figsize': '(8 * n_cols, 8 * n_rows)'}), '(n_rows, n_cols, figsize=(8 * n_cols, 8 * n_rows))\n', (3774, 3824), True, 'import matplotlib.pyplot as plt\n'), ((6456, 6499), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'rect': '[0.0, 0.0, 1.0, 0.9]'}), '(rect=[0.0, 0.0, 1.0, 0.9])\n', (6472, 6499), True, 'import matplotlib.pyplot as plt\n'), ((8172, 8186), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (8181, 8186), True, 'import matplotlib.pyplot as plt\n'), ((4728, 4764), 'numpy.cumsum', 'np.cumsum', (['rewards_over_time'], {'axis': '(0)'}), '(rewards_over_time, axis=0)\n', (4737, 4764), True, 'import numpy as np\n'), ((5564, 5652), 'scipy.ndimage.filters.uniform_filter1d', 'filters.uniform_filter1d', (['rewards_over_time', '(window_size * 5)'], {'mode': '"""reflect"""', 'axis': '(0)'}), "(rewards_over_time, window_size * 5, mode='reflect',\n axis=0)\n", (5588, 5652), False, 'from scipy.ndimage import filters\n'), ((8041, 8062), 'pathlib.Path', 'Path', (['save_fig_folder'], {}), '(save_fig_folder)\n', (8045, 8062), False, 'from pathlib import Path\n')]
|
from unittest import TestCase
import numpy as np
import xarray as xr
from xarray.testing import assert_equal, assert_allclose
import numpy.testing as npt
from sklearn_xarray import wrap
from sklearn.base import clone
from sklearn.preprocessing import StandardScaler, KernelCenterer
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.svm import SVC
from tests.mocks import (
DummyEstimator,
DummyTransformer,
ReshapingEstimator,
)
class EstimatorWrapperTests(TestCase):
def setUp(self):
self.X = xr.Dataset(
{
"var_2d": (["sample", "feat_1"], np.random.random((100, 10))),
"var_3d": (
["sample", "feat_1", "feat_2"],
np.random.random((100, 10, 10)),
),
},
{
"sample": range(100),
"feat_1": range(10),
"feat_2": range(10),
"dummy": (["sample", "feat_1"], np.random.random((100, 10))),
},
)
def test_update_restore_dims(self):
estimator = wrap(
ReshapingEstimator(new_shape=(-1, 0, 5)),
reshapes={"feature": ["feat_1", "feat_2"]},
)
X = self.X.var_3d
estimator.fit(X)
X_out = estimator.estimator_.transform(X.values)
dims_new = estimator._update_dims(X, X_out)
Xt = xr.DataArray(X_out, dims=dims_new)
assert dims_new == ["sample", "feature"]
Xr_out = estimator.estimator_.inverse_transform(X_out)
dims_old = estimator._restore_dims(Xt, Xr_out)
assert dims_old == ["sample", "feat_1", "feat_2"]
def test_update_coords(self):
pass
def test_params(self):
estimator = StandardScaler(with_mean=False)
params = estimator.get_params()
params.update(
{"estimator": estimator, "reshapes": None, "sample_dim": None}
)
# check params set in constructor
wrapper = wrap(estimator)
self.assertEqual(wrapper.get_params(), params)
self.assertEqual(wrapper.with_mean, False)
# check params set by attribute
wrapper.with_std = False
params.update({"with_std": False})
self.assertEqual(wrapper.get_params(), params)
# check params set with set_params
wrapper.set_params(copy=False)
params.update({"copy": False})
self.assertEqual(wrapper.get_params(), params)
def test_attributes(self):
estimator = wrap(StandardScaler())
# check pass-through wrapper
estimator.fit(self.X.var_2d.values)
npt.assert_allclose(estimator.mean_, estimator.estimator_.mean_)
# check DataArray wrapper
estimator.fit(self.X.var_2d)
npt.assert_allclose(estimator.mean_, estimator.estimator_.mean_)
# check Dataset wrapper
estimator.fit(self.X.var_2d.to_dataset())
npt.assert_allclose(
estimator.mean_["var_2d"],
estimator.estimator_dict_["var_2d"].mean_,
)
class PublicInterfaceTests(TestCase):
def setUp(self):
self.X = xr.Dataset(
{
"var_2d": (["sample", "feat_1"], np.random.random((100, 10))),
"var_3d": (
["sample", "feat_1", "feat_2"],
np.random.random((100, 10, 10)),
),
},
{
"sample": range(100),
"feat_1": range(10),
"feat_2": range(10),
"dummy": (["sample", "feat_1"], np.random.random((100, 10))),
},
)
def test_dummy_estimator(self):
estimator = wrap(DummyEstimator())
# test DataArray
X_da = self.X.var_2d
estimator.fit(X_da)
yp = estimator.predict(X_da)
assert_equal(yp, X_da)
# test Dataset
X_ds = self.X
estimator.fit(X_ds)
yp = estimator.predict(X_ds)
assert_equal(yp, X_ds)
def test_dummy_transformer(self):
estimator = wrap(DummyTransformer())
# test DataArray
X_da = self.X.var_2d
estimator.fit(X_da)
yp = estimator.transform(X_da)
assert_equal(yp, X_da)
# test Dataset
X_ds = self.X
estimator.fit(X_ds)
yp = estimator.transform(X_ds)
assert_equal(yp, X_ds)
def test_wrapped_transformer(self):
estimator = wrap(StandardScaler())
# test DataArray
X_da = self.X.var_2d
estimator.partial_fit(X_da)
assert_allclose(
X_da, estimator.inverse_transform(estimator.transform(X_da))
)
# test Dataset
X_ds = self.X.var_2d.to_dataset()
estimator.fit(X_ds)
assert_allclose(
X_ds, estimator.inverse_transform(estimator.transform(X_ds))
)
def test_ndim_dummy_estimator(self):
estimator = wrap(DummyEstimator())
# test DataArray
X_da = self.X.var_3d
estimator.fit(X_da)
yp = estimator.predict(X_da)
assert_equal(yp, X_da)
# test Dataset
X_ds = self.X
estimator.fit(X_ds)
yp = estimator.predict(X_ds)
assert_equal(yp, X_ds)
def test_reshaping_estimator(self):
estimator = wrap(
ReshapingEstimator(new_shape=(-1, 2)), reshapes="feat_1"
)
# test DataArray
X_da = self.X.var_2d
y = X_da[:, :2].drop("feat_1")
y["dummy"] = y.dummy[:, 0]
estimator.fit(X_da)
yp = estimator.predict(X_da)
assert_allclose(yp, y)
# test Dataset
X_ds = self.X.var_2d.to_dataset()
y = X_ds.var_2d[:, :2].drop("feat_1")
y["dummy"] = y.dummy[:, 0]
estimator.fit(X_ds)
yp = estimator.predict(X_ds).var_2d
assert_allclose(yp, y)
def test_reshaping_transformer(self):
estimator = wrap(
ReshapingEstimator(new_shape=(-1, 2)), reshapes="feat_1"
)
# test DataArray
X_da = self.X.var_3d
y = X_da[:, :2].drop("feat_1")
y["dummy"] = y.dummy[:, 0]
estimator.fit(X_da)
yp = estimator.transform(X_da)
assert_allclose(yp, y)
# test Dataset
X_ds = self.X.var_2d.to_dataset()
y = X_ds.var_2d[:, :2].drop("feat_1")
y["dummy"] = y.dummy[:, 0]
estimator.fit(X_ds)
yp = estimator.transform(X_ds).var_2d
assert_allclose(yp, y)
def test_reshaping_estimator_singleton(self):
estimator = wrap(
ReshapingEstimator(new_shape=(-1, 0)), reshapes="feat_1"
)
# test DataArray
X_da = self.X.var_2d
y = X_da[:, 0].drop("feat_1")
estimator.fit(X_da)
yp = estimator.predict(X_da)
assert_allclose(yp, y)
# test Dataset
X_ds = self.X
y = X_ds.var_2d[:, 0].drop("feat_1")
estimator.fit(X_ds)
yp = estimator.predict(X_ds).var_2d
assert_allclose(yp, y)
def test_ndim_reshaping_estimator(self):
estimator = wrap(
ReshapingEstimator(new_shape=(-1, 5, 0)),
reshapes={"feature": ["feat_1", "feat_2"]},
)
# test DataArray
X_da = self.X.var_3d
Xt = (
X_da[:, :5, 0]
.drop(["feat_1", "feat_2"])
.rename({"feat_1": "feature"})
)
Xt["dummy"] = Xt.dummy[:, 0]
estimator.fit(X_da)
Xt_da = estimator.transform(X_da)
estimator.inverse_transform(Xt_da)
assert_allclose(Xt_da, Xt)
# test Dataset
X_ds = self.X.var_3d.to_dataset()
y = X_ds.var_3d[:, :5, 0].drop(["feat_1", "feat_2"])
y = y.rename({"feat_1": "feature"})
y["dummy"] = y.dummy[:, 0]
estimator.fit(X_ds)
yp = estimator.predict(X_ds).var_3d
assert_allclose(yp, y)
def test_sample_dim(self):
from sklearn.decomposition import PCA
estimator = wrap(
PCA(n_components=5), reshapes="feat_1", sample_dim="sample"
)
# test DataArray
X_da = self.X.var_2d
Xt_da = estimator.fit_transform(X_da)
Xr_da = estimator.inverse_transform(Xt_da)
npt.assert_equal(Xt_da.shape, (100, 5))
npt.assert_equal(Xr_da.shape, (100, 10))
# test Dataset
X_ds = self.X.var_2d.to_dataset()
Xt = estimator.fit_transform(X_ds)
npt.assert_equal(Xt.var_2d.shape, (100, 5))
def test_score(self):
from sklearn.linear_model import LinearRegression
estimator = wrap(LinearRegression, reshapes="feat_1")
# test DataArray
X_da = self.X.var_2d
y = np.random.random(100)
estimator.fit(X_da, y)
estimator.score(X_da, y)
# test Dataset
X_ds = self.X.var_2d.to_dataset()
wrapper = estimator.fit(X_ds, y)
wrapper.score(X_ds, y)
def test_partial_fit(self):
estimator = wrap(StandardScaler())
# check pass-through wrapper
estimator.partial_fit(self.X.var_2d.values)
assert hasattr(estimator, "mean_")
with self.assertRaises(ValueError):
estimator.partial_fit(self.X.var_2d)
with self.assertRaises(ValueError):
estimator.partial_fit(self.X)
# check DataArray wrapper
estimator = clone(estimator)
estimator.partial_fit(self.X.var_2d)
with self.assertRaises(ValueError):
estimator.partial_fit(self.X.var_2d.values)
with self.assertRaises(ValueError):
estimator.partial_fit(self.X)
assert hasattr(estimator, "mean_")
# check Dataset wrapper
estimator = clone(estimator)
estimator.partial_fit(self.X.var_2d.to_dataset())
with self.assertRaises(ValueError):
estimator.partial_fit(self.X.var_2d.values)
with self.assertRaises(ValueError):
estimator.partial_fit(self.X.var_2d)
assert hasattr(estimator, "mean_")
def test_classifier():
lr = wrap(LogisticRegression)
# wrappers don't pass check_estimator anymore because estimators
# "should not set any attribute apart from parameters during init"
assert hasattr(lr, "predict")
assert hasattr(lr, "decision_function")
lr = wrap(LogisticRegression)
assert hasattr(lr, "C")
svc_proba = wrap(SVC(probability=True))
# check_estimator(svc_proba) fails because the wrapper is not excluded
# from tests that are known to fail for SVC...
assert hasattr(svc_proba, "predict_proba")
assert hasattr(svc_proba, "predict_log_proba")
def test_regressor():
lr = wrap(LinearRegression, compat=True)
assert hasattr(lr, "predict")
assert hasattr(lr, "score")
lr = wrap(LinearRegression)
assert hasattr(lr, "normalize")
def test_transformer():
wrap(KernelCenterer, compat=True)
tr = wrap(KernelCenterer)
assert hasattr(tr, "transform")
ss = wrap(StandardScaler)
# check_estimator(ss) fails because the wrapper is not excluded
# from tests that are known to fail for StandardScaler...
assert hasattr(ss, "partial_fit")
assert hasattr(ss, "inverse_transform")
assert hasattr(ss, "fit_transform")
|
[
"sklearn.base.clone",
"xarray.testing.assert_equal",
"sklearn.preprocessing.StandardScaler",
"tests.mocks.ReshapingEstimator",
"tests.mocks.DummyEstimator",
"xarray.testing.assert_allclose",
"numpy.random.random",
"xarray.DataArray",
"numpy.testing.assert_equal",
"sklearn.svm.SVC",
"sklearn.decomposition.PCA",
"numpy.testing.assert_allclose",
"tests.mocks.DummyTransformer",
"sklearn_xarray.wrap"
] |
[((10179, 10203), 'sklearn_xarray.wrap', 'wrap', (['LogisticRegression'], {}), '(LogisticRegression)\n', (10183, 10203), False, 'from sklearn_xarray import wrap\n'), ((10432, 10456), 'sklearn_xarray.wrap', 'wrap', (['LogisticRegression'], {}), '(LogisticRegression)\n', (10436, 10456), False, 'from sklearn_xarray import wrap\n'), ((10788, 10823), 'sklearn_xarray.wrap', 'wrap', (['LinearRegression'], {'compat': '(True)'}), '(LinearRegression, compat=True)\n', (10792, 10823), False, 'from sklearn_xarray import wrap\n'), ((10900, 10922), 'sklearn_xarray.wrap', 'wrap', (['LinearRegression'], {}), '(LinearRegression)\n', (10904, 10922), False, 'from sklearn_xarray import wrap\n'), ((10990, 11023), 'sklearn_xarray.wrap', 'wrap', (['KernelCenterer'], {'compat': '(True)'}), '(KernelCenterer, compat=True)\n', (10994, 11023), False, 'from sklearn_xarray import wrap\n'), ((11034, 11054), 'sklearn_xarray.wrap', 'wrap', (['KernelCenterer'], {}), '(KernelCenterer)\n', (11038, 11054), False, 'from sklearn_xarray import wrap\n'), ((11101, 11121), 'sklearn_xarray.wrap', 'wrap', (['StandardScaler'], {}), '(StandardScaler)\n', (11105, 11121), False, 'from sklearn_xarray import wrap\n'), ((1423, 1457), 'xarray.DataArray', 'xr.DataArray', (['X_out'], {'dims': 'dims_new'}), '(X_out, dims=dims_new)\n', (1435, 1457), True, 'import xarray as xr\n'), ((1784, 1815), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'with_mean': '(False)'}), '(with_mean=False)\n', (1798, 1815), False, 'from sklearn.preprocessing import StandardScaler, KernelCenterer\n'), ((2025, 2040), 'sklearn_xarray.wrap', 'wrap', (['estimator'], {}), '(estimator)\n', (2029, 2040), False, 'from sklearn_xarray import wrap\n'), ((2662, 2726), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['estimator.mean_', 'estimator.estimator_.mean_'], {}), '(estimator.mean_, estimator.estimator_.mean_)\n', (2681, 2726), True, 'import numpy.testing as npt\n'), ((2807, 2871), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['estimator.mean_', 'estimator.estimator_.mean_'], {}), '(estimator.mean_, estimator.estimator_.mean_)\n', (2826, 2871), True, 'import numpy.testing as npt\n'), ((2963, 3057), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (["estimator.mean_['var_2d']", "estimator.estimator_dict_['var_2d'].mean_"], {}), "(estimator.mean_['var_2d'], estimator.estimator_dict_[\n 'var_2d'].mean_)\n", (2982, 3057), True, 'import numpy.testing as npt\n'), ((3879, 3901), 'xarray.testing.assert_equal', 'assert_equal', (['yp', 'X_da'], {}), '(yp, X_da)\n', (3891, 3901), False, 'from xarray.testing import assert_equal, assert_allclose\n'), ((4023, 4045), 'xarray.testing.assert_equal', 'assert_equal', (['yp', 'X_ds'], {}), '(yp, X_ds)\n', (4035, 4045), False, 'from xarray.testing import assert_equal, assert_allclose\n'), ((4263, 4285), 'xarray.testing.assert_equal', 'assert_equal', (['yp', 'X_da'], {}), '(yp, X_da)\n', (4275, 4285), False, 'from xarray.testing import assert_equal, assert_allclose\n'), ((4409, 4431), 'xarray.testing.assert_equal', 'assert_equal', (['yp', 'X_ds'], {}), '(yp, X_ds)\n', (4421, 4431), False, 'from xarray.testing import assert_equal, assert_allclose\n'), ((5138, 5160), 'xarray.testing.assert_equal', 'assert_equal', (['yp', 'X_da'], {}), '(yp, X_da)\n', (5150, 5160), False, 'from xarray.testing import assert_equal, assert_allclose\n'), ((5282, 5304), 'xarray.testing.assert_equal', 'assert_equal', (['yp', 'X_ds'], {}), '(yp, X_ds)\n', (5294, 5304), False, 'from xarray.testing import assert_equal, assert_allclose\n'), ((5657, 5679), 'xarray.testing.assert_allclose', 'assert_allclose', (['yp', 'y'], {}), '(yp, y)\n', (5672, 5679), False, 'from xarray.testing import assert_equal, assert_allclose\n'), ((5910, 5932), 'xarray.testing.assert_allclose', 'assert_allclose', (['yp', 'y'], {}), '(yp, y)\n', (5925, 5932), False, 'from xarray.testing import assert_equal, assert_allclose\n'), ((6289, 6311), 'xarray.testing.assert_allclose', 'assert_allclose', (['yp', 'y'], {}), '(yp, y)\n', (6304, 6311), False, 'from xarray.testing import assert_equal, assert_allclose\n'), ((6544, 6566), 'xarray.testing.assert_allclose', 'assert_allclose', (['yp', 'y'], {}), '(yp, y)\n', (6559, 6566), False, 'from xarray.testing import assert_equal, assert_allclose\n'), ((6892, 6914), 'xarray.testing.assert_allclose', 'assert_allclose', (['yp', 'y'], {}), '(yp, y)\n', (6907, 6914), False, 'from xarray.testing import assert_equal, assert_allclose\n'), ((7089, 7111), 'xarray.testing.assert_allclose', 'assert_allclose', (['yp', 'y'], {}), '(yp, y)\n', (7104, 7111), False, 'from xarray.testing import assert_equal, assert_allclose\n'), ((7656, 7682), 'xarray.testing.assert_allclose', 'assert_allclose', (['Xt_da', 'Xt'], {}), '(Xt_da, Xt)\n', (7671, 7682), False, 'from xarray.testing import assert_equal, assert_allclose\n'), ((7972, 7994), 'xarray.testing.assert_allclose', 'assert_allclose', (['yp', 'y'], {}), '(yp, y)\n', (7987, 7994), False, 'from xarray.testing import assert_equal, assert_allclose\n'), ((8345, 8384), 'numpy.testing.assert_equal', 'npt.assert_equal', (['Xt_da.shape', '(100, 5)'], {}), '(Xt_da.shape, (100, 5))\n', (8361, 8384), True, 'import numpy.testing as npt\n'), ((8393, 8433), 'numpy.testing.assert_equal', 'npt.assert_equal', (['Xr_da.shape', '(100, 10)'], {}), '(Xr_da.shape, (100, 10))\n', (8409, 8433), True, 'import numpy.testing as npt\n'), ((8553, 8596), 'numpy.testing.assert_equal', 'npt.assert_equal', (['Xt.var_2d.shape', '(100, 5)'], {}), '(Xt.var_2d.shape, (100, 5))\n', (8569, 8596), True, 'import numpy.testing as npt\n'), ((8704, 8745), 'sklearn_xarray.wrap', 'wrap', (['LinearRegression'], {'reshapes': '"""feat_1"""'}), "(LinearRegression, reshapes='feat_1')\n", (8708, 8745), False, 'from sklearn_xarray import wrap\n'), ((8814, 8835), 'numpy.random.random', 'np.random.random', (['(100)'], {}), '(100)\n', (8830, 8835), True, 'import numpy as np\n'), ((9487, 9503), 'sklearn.base.clone', 'clone', (['estimator'], {}), '(estimator)\n', (9492, 9503), False, 'from sklearn.base import clone\n'), ((9832, 9848), 'sklearn.base.clone', 'clone', (['estimator'], {}), '(estimator)\n', (9837, 9848), False, 'from sklearn.base import clone\n'), ((10507, 10528), 'sklearn.svm.SVC', 'SVC', ([], {'probability': '(True)'}), '(probability=True)\n', (10510, 10528), False, 'from sklearn.svm import SVC\n'), ((1139, 1179), 'tests.mocks.ReshapingEstimator', 'ReshapingEstimator', ([], {'new_shape': '(-1, 0, 5)'}), '(new_shape=(-1, 0, 5))\n', (1157, 1179), False, 'from tests.mocks import DummyEstimator, DummyTransformer, ReshapingEstimator\n'), ((2554, 2570), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2568, 2570), False, 'from sklearn.preprocessing import StandardScaler, KernelCenterer\n'), ((3731, 3747), 'tests.mocks.DummyEstimator', 'DummyEstimator', ([], {}), '()\n', (3745, 3747), False, 'from tests.mocks import DummyEstimator, DummyTransformer, ReshapingEstimator\n'), ((4111, 4129), 'tests.mocks.DummyTransformer', 'DummyTransformer', ([], {}), '()\n', (4127, 4129), False, 'from tests.mocks import DummyEstimator, DummyTransformer, ReshapingEstimator\n'), ((4499, 4515), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4513, 4515), False, 'from sklearn.preprocessing import StandardScaler, KernelCenterer\n'), ((4990, 5006), 'tests.mocks.DummyEstimator', 'DummyEstimator', ([], {}), '()\n', (5004, 5006), False, 'from tests.mocks import DummyEstimator, DummyTransformer, ReshapingEstimator\n'), ((5385, 5422), 'tests.mocks.ReshapingEstimator', 'ReshapingEstimator', ([], {'new_shape': '(-1, 2)'}), '(new_shape=(-1, 2))\n', (5403, 5422), False, 'from tests.mocks import DummyEstimator, DummyTransformer, ReshapingEstimator\n'), ((6015, 6052), 'tests.mocks.ReshapingEstimator', 'ReshapingEstimator', ([], {'new_shape': '(-1, 2)'}), '(new_shape=(-1, 2))\n', (6033, 6052), False, 'from tests.mocks import DummyEstimator, DummyTransformer, ReshapingEstimator\n'), ((6657, 6694), 'tests.mocks.ReshapingEstimator', 'ReshapingEstimator', ([], {'new_shape': '(-1, 0)'}), '(new_shape=(-1, 0))\n', (6675, 6694), False, 'from tests.mocks import DummyEstimator, DummyTransformer, ReshapingEstimator\n'), ((7197, 7237), 'tests.mocks.ReshapingEstimator', 'ReshapingEstimator', ([], {'new_shape': '(-1, 5, 0)'}), '(new_shape=(-1, 5, 0))\n', (7215, 7237), False, 'from tests.mocks import DummyEstimator, DummyTransformer, ReshapingEstimator\n'), ((8113, 8132), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(5)'}), '(n_components=5)\n', (8116, 8132), False, 'from sklearn.decomposition import PCA\n'), ((9101, 9117), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (9115, 9117), False, 'from sklearn.preprocessing import StandardScaler, KernelCenterer\n'), ((633, 660), 'numpy.random.random', 'np.random.random', (['(100, 10)'], {}), '((100, 10))\n', (649, 660), True, 'import numpy as np\n'), ((763, 794), 'numpy.random.random', 'np.random.random', (['(100, 10, 10)'], {}), '((100, 10, 10))\n', (779, 794), True, 'import numpy as np\n'), ((1004, 1031), 'numpy.random.random', 'np.random.random', (['(100, 10)'], {}), '((100, 10))\n', (1020, 1031), True, 'import numpy as np\n'), ((3242, 3269), 'numpy.random.random', 'np.random.random', (['(100, 10)'], {}), '((100, 10))\n', (3258, 3269), True, 'import numpy as np\n'), ((3372, 3403), 'numpy.random.random', 'np.random.random', (['(100, 10, 10)'], {}), '((100, 10, 10))\n', (3388, 3403), True, 'import numpy as np\n'), ((3613, 3640), 'numpy.random.random', 'np.random.random', (['(100, 10)'], {}), '((100, 10))\n', (3629, 3640), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import sys
import pickle
import nose
from nose.tools.trivial import eq_
from nose.tools.trivial import ok_
from jpgrep.util import binary2unicode
from jpgrep.util import FileObjectWrapper
from jpgrep.util import ByteWrapper
class Test_binary2unicode(object):
def test(self):
""" バイト列をユニコード文字列に変換する """
expect = u'吾輩は猫である'
binary = expect.encode('utf-8')
text = binary2unicode(binary)
eq_(text, expect)
class Test_FileObjectWrapper(object):
def test_str(self):
""" パスからラッパーオブジェクトを作る """
wrapper = FileObjectWrapper('/dev/null')
eq_(wrapper.name, '/dev/null')
with wrapper.file as f:
f.read(1)
def test_file(self):
""" ファイルオブジェクトからラッパーオブジェクトを作る """
f = open('/dev/null')
wrapper = FileObjectWrapper(f)
eq_(wrapper.name, '/dev/null')
with wrapper.file as f:
f.read(1)
def test_std(self):
""" sys.stdin からラッパーオブジェクトを作る """
wrapper = FileObjectWrapper(sys.stdin)
eq_(wrapper.name, '<stdin>')
ok_(hasattr(wrapper.file, 'read'))
with wrapper.file as _:
pass
def test_pickle(self):
""" Pickle 化、非 Pickle 化する """
wrapper = FileObjectWrapper('/dev/null')
binary = pickle.dumps(wrapper)
restored_object = pickle.loads(binary)
eq_(restored_object.name, '/dev/null')
with restored_object.file as f:
f.read(1)
def test_pickle_std(self):
""" sts.stdin のラッパーオブジェクトの Pickle を確認する """
wrapper = FileObjectWrapper(sys.stdin)
binary = pickle.dumps(wrapper)
restored_object = pickle.loads(binary)
ok_(hasattr(restored_object.file, 'read'))
def test_pickle_unicode_file(self):
""" 文字列モードで開いたファイルからバイト列を取り出す """
message = u'こんにちは、世界'
file_ = io.StringIO(message)
byte_wrapper = ByteWrapper(file_)
file_wrapper = FileObjectWrapper(byte_wrapper)
with file_wrapper.file as f:
binary = f.read()
expect = message.encode(encoding='utf-8')
eq_(expect, binary)
class Test_ByteWrapper(object):
def test(self):
""" 文字列モードのファイルライクオブジェクトからバイト列を取り出す """
message = u'こんにちは、世界'
file_ = io.StringIO(message)
wrapper = ByteWrapper(file_)
with wrapper as f:
data = f.read()
expect = message.encode('utf-8')
eq_(data, expect)
if __name__ == '__main__':
nose.main(argv=['nosetests', '-s', '-v'], defaultTest=__file__)
|
[
"pickle.loads",
"io.StringIO",
"nose.main",
"jpgrep.util.binary2unicode",
"jpgrep.util.ByteWrapper",
"nose.tools.trivial.eq_",
"jpgrep.util.FileObjectWrapper",
"pickle.dumps"
] |
[((2571, 2634), 'nose.main', 'nose.main', ([], {'argv': "['nosetests', '-s', '-v']", 'defaultTest': '__file__'}), "(argv=['nosetests', '-s', '-v'], defaultTest=__file__)\n", (2580, 2634), False, 'import nose\n'), ((460, 482), 'jpgrep.util.binary2unicode', 'binary2unicode', (['binary'], {}), '(binary)\n', (474, 482), False, 'from jpgrep.util import binary2unicode\n'), ((492, 509), 'nose.tools.trivial.eq_', 'eq_', (['text', 'expect'], {}), '(text, expect)\n', (495, 509), False, 'from nose.tools.trivial import eq_\n'), ((627, 657), 'jpgrep.util.FileObjectWrapper', 'FileObjectWrapper', (['"""/dev/null"""'], {}), "('/dev/null')\n", (644, 657), False, 'from jpgrep.util import FileObjectWrapper\n'), ((667, 697), 'nose.tools.trivial.eq_', 'eq_', (['wrapper.name', '"""/dev/null"""'], {}), "(wrapper.name, '/dev/null')\n", (670, 697), False, 'from nose.tools.trivial import eq_\n'), ((869, 889), 'jpgrep.util.FileObjectWrapper', 'FileObjectWrapper', (['f'], {}), '(f)\n', (886, 889), False, 'from jpgrep.util import FileObjectWrapper\n'), ((899, 929), 'nose.tools.trivial.eq_', 'eq_', (['wrapper.name', '"""/dev/null"""'], {}), "(wrapper.name, '/dev/null')\n", (902, 929), False, 'from nose.tools.trivial import eq_\n'), ((1070, 1098), 'jpgrep.util.FileObjectWrapper', 'FileObjectWrapper', (['sys.stdin'], {}), '(sys.stdin)\n', (1087, 1098), False, 'from jpgrep.util import FileObjectWrapper\n'), ((1108, 1136), 'nose.tools.trivial.eq_', 'eq_', (['wrapper.name', '"""<stdin>"""'], {}), "(wrapper.name, '<stdin>')\n", (1111, 1136), False, 'from nose.tools.trivial import eq_\n'), ((1314, 1344), 'jpgrep.util.FileObjectWrapper', 'FileObjectWrapper', (['"""/dev/null"""'], {}), "('/dev/null')\n", (1331, 1344), False, 'from jpgrep.util import FileObjectWrapper\n'), ((1363, 1384), 'pickle.dumps', 'pickle.dumps', (['wrapper'], {}), '(wrapper)\n', (1375, 1384), False, 'import pickle\n'), ((1411, 1431), 'pickle.loads', 'pickle.loads', (['binary'], {}), '(binary)\n', (1423, 1431), False, 'import pickle\n'), ((1441, 1479), 'nose.tools.trivial.eq_', 'eq_', (['restored_object.name', '"""/dev/null"""'], {}), "(restored_object.name, '/dev/null')\n", (1444, 1479), False, 'from nose.tools.trivial import eq_\n'), ((1645, 1673), 'jpgrep.util.FileObjectWrapper', 'FileObjectWrapper', (['sys.stdin'], {}), '(sys.stdin)\n', (1662, 1673), False, 'from jpgrep.util import FileObjectWrapper\n'), ((1692, 1713), 'pickle.dumps', 'pickle.dumps', (['wrapper'], {}), '(wrapper)\n', (1704, 1713), False, 'import pickle\n'), ((1740, 1760), 'pickle.loads', 'pickle.loads', (['binary'], {}), '(binary)\n', (1752, 1760), False, 'import pickle\n'), ((1942, 1962), 'io.StringIO', 'io.StringIO', (['message'], {}), '(message)\n', (1953, 1962), False, 'import io\n'), ((1986, 2004), 'jpgrep.util.ByteWrapper', 'ByteWrapper', (['file_'], {}), '(file_)\n', (1997, 2004), False, 'from jpgrep.util import ByteWrapper\n'), ((2028, 2059), 'jpgrep.util.FileObjectWrapper', 'FileObjectWrapper', (['byte_wrapper'], {}), '(byte_wrapper)\n', (2045, 2059), False, 'from jpgrep.util import FileObjectWrapper\n'), ((2187, 2206), 'nose.tools.trivial.eq_', 'eq_', (['expect', 'binary'], {}), '(expect, binary)\n', (2190, 2206), False, 'from nose.tools.trivial import eq_\n'), ((2356, 2376), 'io.StringIO', 'io.StringIO', (['message'], {}), '(message)\n', (2367, 2376), False, 'import io\n'), ((2395, 2413), 'jpgrep.util.ByteWrapper', 'ByteWrapper', (['file_'], {}), '(file_)\n', (2406, 2413), False, 'from jpgrep.util import ByteWrapper\n'), ((2520, 2537), 'nose.tools.trivial.eq_', 'eq_', (['data', 'expect'], {}), '(data, expect)\n', (2523, 2537), False, 'from nose.tools.trivial import eq_\n')]
|
import socket
ip = socket.gethostbyname('localhost.localdomain')
port = 10000
buffer_size = 1024
with open("payload.c", "r") as file:
message_list = file.read()
message = str.encode(message_list)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, port))
s.send(message)
data = s.recv(buffer_size)
s.close()
data = str(data)
print(data[2:-1])
|
[
"socket.socket",
"socket.gethostbyname"
] |
[((20, 65), 'socket.gethostbyname', 'socket.gethostbyname', (['"""localhost.localdomain"""'], {}), "('localhost.localdomain')\n", (40, 65), False, 'import socket\n'), ((210, 259), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (223, 259), False, 'import socket\n')]
|
from tokenizer_tools.conllz.iterator_reader import read_conllz_iterator
from tokenizer_tools.conll.writer import write_conll
def conllz_to_conll(conllz_file, conll_file):
sentence_iterator = read_conllz_iterator(conllz_file)
conll_data = []
for sentence in sentence_iterator:
conll_data.append((sentence.word_lines, sentence.get_attribute_by_index(0)))
write_conll(conll_data, conll_file)
|
[
"tokenizer_tools.conllz.iterator_reader.read_conllz_iterator",
"tokenizer_tools.conll.writer.write_conll"
] |
[((197, 230), 'tokenizer_tools.conllz.iterator_reader.read_conllz_iterator', 'read_conllz_iterator', (['conllz_file'], {}), '(conllz_file)\n', (217, 230), False, 'from tokenizer_tools.conllz.iterator_reader import read_conllz_iterator\n'), ((382, 417), 'tokenizer_tools.conll.writer.write_conll', 'write_conll', (['conll_data', 'conll_file'], {}), '(conll_data, conll_file)\n', (393, 417), False, 'from tokenizer_tools.conll.writer import write_conll\n')]
|
#!/usr/bin/env pythonw
import numpy as np
from astropy.visualization import stretch, interval
from astropy.io import fits
from astropy import wcs
from reproject import reproject_interp
from matplotlib import pyplot as plt
def scaleImage(image, a=1, stretch_type='asinh'):
reagon = interval.AsymmetricPercentileInterval(10., 99.95)
vmin, vmax = reagon.get_limits(image)
if stretch_type == 'log':
scale = stretch.LogStretch(a=a)
elif stretch_type == 'asinh':
scale = stretch.AsinhStretch(a=a)
elif stretch_type == 'sqrt':
scale = stretch.SqrtStretch()
image_scaled = (scale + reagon)(image)
return image_scaled
def removeNaN(data):
bdx = ~np.isfinite(data)
data[bdx] = 0
def make_images(base, index_cut=1300, filters='gri', gzip=False, **kwargs):
hdus = []
images_scaled = []
for fdx, filt in enumerate(filters):
file_name = '{0}-{1}.fits'.format(base, filt)
if gzip:
file_name += '.gz'
hdu = fits.open(file_name)
w = wcs.WCS(hdu[0].header)
newf = fits.PrimaryHDU()
newf.data = hdu[0].data[index_cut:-index_cut, index_cut:-index_cut]
newf.header = hdu[0].header
newf.header.update(w[index_cut:-index_cut, index_cut:-index_cut].to_header())
hdus.append(newf)
if fdx > 0:
scidata, footprint = reproject_interp(newf, hdus[0].header)
scidata = newf.data
scidata[scidata < 0] = 0
image = scaleImage(scidata, **kwargs)
removeNaN(image)
images_scaled.append(image)
plt.imsave('{0}_{1}_{2}.png'.format(base, filt, kwargs.get('stretch_type', 'asinh')), image, cmap='Greys_r', origin='lower')
RGB_image = np.zeros([images_scaled[0].shape[0], images_scaled[0].shape[1], 3])
RGB_image[:, :, 0] = images_scaled[2]
RGB_image[:, :, 1] = images_scaled[1]
RGB_image[:, :, 2] = images_scaled[0]
RGB_image[RGB_image > 1] = 1
RGB_image[RGB_image < 0] = 0
plt.imsave('{0}_{1}_{2}.png'.format(base, filters, kwargs.get('stretch_type', 'asinh')), RGB_image, origin='lower')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Create single band and false color images from fits files'
)
parser.add_argument(
'base_name',
type=str,
help='the base name of the fits files (note: all files must be named `{base_name}-{filter_letter}`)'
)
parser.add_argument(
'-c',
'--crop',
type=int,
default=1,
help='an integer used to corp the fits images (by index of array)'
)
parser.add_argument(
'-f',
'--filters',
type=str,
default='gri',
choices=['gri', 'rbi', 'ugr'],
help='a three letter string representing the filters contained in each fits file'
)
parser.add_argument(
'-a',
type=float,
default=0.1,
help='the `a` parameter used in the streact function'
)
parser.add_argument(
'-s',
'--stretch',
type=str,
default='asinh',
choices=['asinh', 'log', 'sqrt'],
help='the type of stretch to use for the fits image'
)
parser.add_argument(
'-g',
'--gzip',
action='store_true',
help='use this flag if the input files are gzipped'
)
args = parser.parse_args()
make_images(
args.base_name,
index_cut=args.crop,
filters=args.filters,
gzip=args.gzip,
a=args.a,
stretch_type=args.stretch
)
|
[
"astropy.visualization.stretch.LogStretch",
"astropy.visualization.interval.AsymmetricPercentileInterval",
"argparse.ArgumentParser",
"astropy.io.fits.PrimaryHDU",
"numpy.zeros",
"numpy.isfinite",
"astropy.visualization.stretch.AsinhStretch",
"astropy.wcs.WCS",
"reproject.reproject_interp",
"astropy.io.fits.open",
"astropy.visualization.stretch.SqrtStretch"
] |
[((288, 338), 'astropy.visualization.interval.AsymmetricPercentileInterval', 'interval.AsymmetricPercentileInterval', (['(10.0)', '(99.95)'], {}), '(10.0, 99.95)\n', (325, 338), False, 'from astropy.visualization import stretch, interval\n'), ((1729, 1796), 'numpy.zeros', 'np.zeros', (['[images_scaled[0].shape[0], images_scaled[0].shape[1], 3]'], {}), '([images_scaled[0].shape[0], images_scaled[0].shape[1], 3])\n', (1737, 1796), True, 'import numpy as np\n'), ((2171, 2272), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create single band and false color images from fits files"""'}), "(description=\n 'Create single band and false color images from fits files')\n", (2194, 2272), False, 'import argparse\n'), ((426, 449), 'astropy.visualization.stretch.LogStretch', 'stretch.LogStretch', ([], {'a': 'a'}), '(a=a)\n', (444, 449), False, 'from astropy.visualization import stretch, interval\n'), ((698, 715), 'numpy.isfinite', 'np.isfinite', (['data'], {}), '(data)\n', (709, 715), True, 'import numpy as np\n'), ((1006, 1026), 'astropy.io.fits.open', 'fits.open', (['file_name'], {}), '(file_name)\n', (1015, 1026), False, 'from astropy.io import fits\n'), ((1039, 1061), 'astropy.wcs.WCS', 'wcs.WCS', (['hdu[0].header'], {}), '(hdu[0].header)\n', (1046, 1061), False, 'from astropy import wcs\n'), ((1077, 1094), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {}), '()\n', (1092, 1094), False, 'from astropy.io import fits\n'), ((500, 525), 'astropy.visualization.stretch.AsinhStretch', 'stretch.AsinhStretch', ([], {'a': 'a'}), '(a=a)\n', (520, 525), False, 'from astropy.visualization import stretch, interval\n'), ((1372, 1410), 'reproject.reproject_interp', 'reproject_interp', (['newf', 'hdus[0].header'], {}), '(newf, hdus[0].header)\n', (1388, 1410), False, 'from reproject import reproject_interp\n'), ((575, 596), 'astropy.visualization.stretch.SqrtStretch', 'stretch.SqrtStretch', ([], {}), '()\n', (594, 596), False, 'from astropy.visualization import stretch, interval\n')]
|
import os
import subprocess
# Test different input formats
for ifile, odir in [
('input.fasta', 'output_bin_fa'),
('input.fasta.gz', 'output_bin_gz'),
('input.fasta.bz2', 'output_bin_bz2'),
('input.fasta.xz', 'output_bin_xz'),
]:
odir = f'test-outputs/{odir}'
subprocess.check_call(
['SemiBin', 'bin',
'--data', 'test/bin_data/data.csv',
'--minfasta-kbs', '200',
'--max-edges', '20',
'--max-node', '1',
'--model', 'test/bin_data/model.h5',
'-i', f'test/bin_data/{ifile}',
'-o', odir,
'-m', '2500',
'--ratio', '0.05',
'-p', '1'])
assert len(os.listdir(f'{odir}/output_bins')) > 0
assert len(os.listdir(f'{odir}/output_recluster_bins')) > 0
ifile = 'input.fasta'
odir = 'test-outputs/no_recluster'
subprocess.check_call(
['SemiBin', 'bin',
'--data', 'test/bin_data/data.csv',
'--minfasta-kbs', '200',
'--max-edges', '20',
'--max-node', '1',
'--no-recluster',
'--model', 'test/bin_data/model.h5',
'-i', f'test/bin_data/{ifile}',
'-o', odir,
'-m', '2500',
'--ratio', '0.05',
'-p', '1'])
assert len(os.listdir(f'{odir}/output_bins')) > 0
assert not os.path.exists(f'{odir}/output_recluster_bins')
# Different pretrained models
for env,odir in [
('human_gut', 'output_human_gut'),
('dog_gut', 'output_dog_gut'),
('ocean', 'output_ocean'),
]:
odir = f'test-outputs/{odir}'
subprocess.check_call(
['SemiBin', 'bin',
'--data', 'test/bin_data/data.csv',
'--minfasta-kbs', '200',
'--max-edges', '20',
'--max-node', '1',
'--environment', env,
'-i', 'test/bin_data/input.fasta.xz',
'-o', odir,
'-m', '2500',
'--ratio', '0.05',
'-p', '1'])
assert len(os.listdir(odir+'/output_bins')) > 0
assert len(os.listdir(odir+'/output_recluster_bins')) > 0
|
[
"os.path.exists",
"os.listdir",
"subprocess.check_call"
] |
[((852, 1153), 'subprocess.check_call', 'subprocess.check_call', (["['SemiBin', 'bin', '--data', 'test/bin_data/data.csv', '--minfasta-kbs',\n '200', '--max-edges', '20', '--max-node', '1', '--no-recluster',\n '--model', 'test/bin_data/model.h5', '-i', f'test/bin_data/{ifile}',\n '-o', odir, '-m', '2500', '--ratio', '0.05', '-p', '1']"], {}), "(['SemiBin', 'bin', '--data', 'test/bin_data/data.csv',\n '--minfasta-kbs', '200', '--max-edges', '20', '--max-node', '1',\n '--no-recluster', '--model', 'test/bin_data/model.h5', '-i',\n f'test/bin_data/{ifile}', '-o', odir, '-m', '2500', '--ratio', '0.05',\n '-p', '1'])\n", (873, 1153), False, 'import subprocess\n'), ((309, 588), 'subprocess.check_call', 'subprocess.check_call', (["['SemiBin', 'bin', '--data', 'test/bin_data/data.csv', '--minfasta-kbs',\n '200', '--max-edges', '20', '--max-node', '1', '--model',\n 'test/bin_data/model.h5', '-i', f'test/bin_data/{ifile}', '-o', odir,\n '-m', '2500', '--ratio', '0.05', '-p', '1']"], {}), "(['SemiBin', 'bin', '--data', 'test/bin_data/data.csv',\n '--minfasta-kbs', '200', '--max-edges', '20', '--max-node', '1',\n '--model', 'test/bin_data/model.h5', '-i', f'test/bin_data/{ifile}',\n '-o', odir, '-m', '2500', '--ratio', '0.05', '-p', '1'])\n", (330, 588), False, 'import subprocess\n'), ((1259, 1306), 'os.path.exists', 'os.path.exists', (['f"""{odir}/output_recluster_bins"""'], {}), "(f'{odir}/output_recluster_bins')\n", (1273, 1306), False, 'import os\n'), ((1522, 1792), 'subprocess.check_call', 'subprocess.check_call', (["['SemiBin', 'bin', '--data', 'test/bin_data/data.csv', '--minfasta-kbs',\n '200', '--max-edges', '20', '--max-node', '1', '--environment', env,\n '-i', 'test/bin_data/input.fasta.xz', '-o', odir, '-m', '2500',\n '--ratio', '0.05', '-p', '1']"], {}), "(['SemiBin', 'bin', '--data', 'test/bin_data/data.csv',\n '--minfasta-kbs', '200', '--max-edges', '20', '--max-node', '1',\n '--environment', env, '-i', 'test/bin_data/input.fasta.xz', '-o', odir,\n '-m', '2500', '--ratio', '0.05', '-p', '1'])\n", (1543, 1792), False, 'import subprocess\n'), ((1209, 1242), 'os.listdir', 'os.listdir', (['f"""{odir}/output_bins"""'], {}), "(f'{odir}/output_bins')\n", (1219, 1242), False, 'import os\n'), ((691, 724), 'os.listdir', 'os.listdir', (['f"""{odir}/output_bins"""'], {}), "(f'{odir}/output_bins')\n", (701, 724), False, 'import os\n'), ((745, 788), 'os.listdir', 'os.listdir', (['f"""{odir}/output_recluster_bins"""'], {}), "(f'{odir}/output_recluster_bins')\n", (755, 788), False, 'import os\n'), ((1895, 1928), 'os.listdir', 'os.listdir', (["(odir + '/output_bins')"], {}), "(odir + '/output_bins')\n", (1905, 1928), False, 'import os\n'), ((1947, 1990), 'os.listdir', 'os.listdir', (["(odir + '/output_recluster_bins')"], {}), "(odir + '/output_recluster_bins')\n", (1957, 1990), False, 'import os\n')]
|
"""
## box2lake_sensor.py
Example using Box.com API.
- Demonstrates a Box sensor for file availability before proceeding with ETL.
### References
Box APIs used
- REST: https://developer.box.com/reference/
- Python SDK: https://box-python-sdk.readthedocs.io/en/stable/boxsdk.html
"""
from datetime import datetime, timedelta
from airflow import DAG
from airflow.contrib.operators.kubernetes_pod_operator import (
KubernetesPodOperator
)
from airflow.kubernetes.secret import Secret
from airflow.operators.bash_operator import BashOperator
from airflow.operators.email_operator import EmailOperator
from airflow.operators.python_operator import PythonOperator
from airflow.utils.dates import days_ago
from bsh_azure.sensors.box_sensor import BoxSensor, BoxItemType
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'email': ['<EMAIL>'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(seconds=5),
# 'retry_exponential_backoff': True,
'queue': 'airq2',
'catchup': False,
# 'pool': 'backfill',
# 'priority_weight': 10,
# 'end_date': datetime(2016, 1, 1),
# 'wait_for_downstream': False,
# 'dag': dag,
# 'sla': timedelta(hours=2),
# 'execution_timeout': timedelta(minutes=30),
# 'on_failure_callback': some_function,
# 'on_success_callback': some_other_function,
# 'on_retry_callback': another_function,
# 'sla_miss_callback': yet_another_function,
# 'trigger_rule': 'all_success'
}
with DAG('box2lake_sensor',
default_args=default_args,
description='Example using Box.com api',
schedule_interval="0 0 * * *", # "0 0 * * *" or "@daily" or timedelta(hours=2)
start_date=days_ago(1),
tags=['azure', 'aks', 'box.com']
) as dag:
dag.doc_md = __doc__
wait_for_box_daily = BoxSensor(
task_id='wait_for_daily_box_task',
box_item_path='Utilization Reports/Daily Schedule Status Reports/2020 Reports/11-November/Branch Scheduled Hours Breakdown_11_15_2020.xlsx',
box_item_type=BoxItemType.FILE,
poke_interval=5,
timeout=600,
mode='poke'
)
wait_for_box_weekly = BoxSensor(
task_id='wait_for_weekly_box_task',
box_item_path='Utilization Reports/Weekly Utilization Reports/2020 Reports/11-November/November - 13/Telephony Usage By Branch 11.13.2020.xlsx',
box_item_type=BoxItemType.FILE,
poke_interval=5,
timeout=300,
mode='poke'
)
box2adls_pod_task = KubernetesPodOperator(
task_id="box2adls_pod_task",
namespace='airflow-tls',
service_account_name='airflow-rbac',
name='boxflow',
image='rkoH1pVL.azurecr.io/box2adls:latest',
image_pull_policy='Always',
labels={'name': 'boxflow', 'instance': 'boxflow-pod',
'version': '1.0.0', 'component': 'batch-service',
'part-of': 'pods'},
env_vars={
"SIMMER": "False",
"BROKER_URL": "redis://airflow-redis-service:6379/0",
"BOX_CONFIG": "/opt/airflow/box-sec/box-auth",
"BOX_FOLDER_PATH": "Utilization Reports/Daily Schedule Status Reports/2020 Reports/11-November",
"BOX_FOLDER_PATH2": "Utilization Reports/Weekly Utilization Reports/2020 Reports/11-November/November - 13",
"BOX_FILE_MASK": "Branch Scheduled Hours Breakdown_11_14_2020.xlsx",
"BOX_FILE_MASK2": "Telephony Usage By Branch 11.13.2020.xlsx",
"BOX_FILE_RENAME": "Branch Scheduled Hours Breakdown_af-on-k8s.xlsx",
"WS_PREV_NAME": "PriorMonth",
"WS_CURR_NAME": "CurrentMonth",
"WS_NEXT_NAME": "NextMonth",
"BOX_FILE_RENAME2": "Telephony Usage By Branch_af-on-k8s.xlsx",
"WS_HIDDEN_NAME": "{0} Tele Stats",
"WS_HIDDEN_RENAME": "Tele Stats",
"LAKE_ACCOUNT_NAME": "airflowstoragesandbox",
# "LAKE_ACCOUNT_KEY": "",
"LAKE_CONTAINER_NAME": "enterprisedata",
"LAKE_FOLDER_PATH": "Raw/BOX Reports"
},
secrets=[
Secret(deploy_type='env', deploy_target='LAKE_ACCOUNT_KEY',
secret='az-file-secret', key='azurestorageaccountkey'),
Secret(deploy_type='volume', deploy_target='/opt/airflow/box-sec',
secret='box-secret', key=None)
],
resources={
'request_memory': '200Mi', 'request_cpu': '200m',
'limit_memory': '2Gi', 'limit_cpu': '2000m'
},
in_cluster=True,
is_delete_operator_pod=True,
get_logs=True,
log_events_on_failure=True
# config_file='/opt/airflow/dags/config/kube.config',
# NOTE: this will not work until 1.10.13
# pod_template_file='/opt/airflow/dags/config/aks-geonames.yaml'
)
# body = """
# Log: <a href="{{ ti.log_url }}">Link</a><br>
# Host: {{ ti.hostname }}<br>
# Log file: {{ ti.log_filepath }}<br>
# Mark success: <a href="{{ ti.mark_success_url }}">Link</a><br>
# """
#
# email_task = EmailOperator(
# task_id= 'email_task',
# to='<EMAIL>',
# subject="Test from Airflow: {{ ti.xcom_pull(task_ids='wait_for_box_daily') }}",
# html_content=body,
# pool='utility_pool',
# )
print_date2 = BashOperator(
task_id='print_date2',
bash_command="echo {{ ts }}"
)
[wait_for_box_daily, wait_for_box_weekly] >> box2adls_pod_task
box2adls_pod_task >> print_date2
|
[
"bsh_azure.sensors.box_sensor.BoxSensor",
"datetime.timedelta",
"airflow.operators.bash_operator.BashOperator",
"airflow.utils.dates.days_ago",
"airflow.kubernetes.secret.Secret"
] |
[((968, 988), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(5)'}), '(seconds=5)\n', (977, 988), False, 'from datetime import datetime, timedelta\n'), ((1884, 2159), 'bsh_azure.sensors.box_sensor.BoxSensor', 'BoxSensor', ([], {'task_id': '"""wait_for_daily_box_task"""', 'box_item_path': '"""Utilization Reports/Daily Schedule Status Reports/2020 Reports/11-November/Branch Scheduled Hours Breakdown_11_15_2020.xlsx"""', 'box_item_type': 'BoxItemType.FILE', 'poke_interval': '(5)', 'timeout': '(600)', 'mode': '"""poke"""'}), "(task_id='wait_for_daily_box_task', box_item_path=\n 'Utilization Reports/Daily Schedule Status Reports/2020 Reports/11-November/Branch Scheduled Hours Breakdown_11_15_2020.xlsx'\n , box_item_type=BoxItemType.FILE, poke_interval=5, timeout=600, mode='poke'\n )\n", (1893, 2159), False, 'from bsh_azure.sensors.box_sensor import BoxSensor, BoxItemType\n'), ((2226, 2506), 'bsh_azure.sensors.box_sensor.BoxSensor', 'BoxSensor', ([], {'task_id': '"""wait_for_weekly_box_task"""', 'box_item_path': '"""Utilization Reports/Weekly Utilization Reports/2020 Reports/11-November/November - 13/Telephony Usage By Branch 11.13.2020.xlsx"""', 'box_item_type': 'BoxItemType.FILE', 'poke_interval': '(5)', 'timeout': '(300)', 'mode': '"""poke"""'}), "(task_id='wait_for_weekly_box_task', box_item_path=\n 'Utilization Reports/Weekly Utilization Reports/2020 Reports/11-November/November - 13/Telephony Usage By Branch 11.13.2020.xlsx'\n , box_item_type=BoxItemType.FILE, poke_interval=5, timeout=300, mode='poke'\n )\n", (2235, 2506), False, 'from bsh_azure.sensors.box_sensor import BoxSensor, BoxItemType\n'), ((5414, 5479), 'airflow.operators.bash_operator.BashOperator', 'BashOperator', ([], {'task_id': '"""print_date2"""', 'bash_command': '"""echo {{ ts }}"""'}), "(task_id='print_date2', bash_command='echo {{ ts }}')\n", (5426, 5479), False, 'from airflow.operators.bash_operator import BashOperator\n'), ((1758, 1769), 'airflow.utils.dates.days_ago', 'days_ago', (['(1)'], {}), '(1)\n', (1766, 1769), False, 'from airflow.utils.dates import days_ago\n'), ((4166, 4285), 'airflow.kubernetes.secret.Secret', 'Secret', ([], {'deploy_type': '"""env"""', 'deploy_target': '"""LAKE_ACCOUNT_KEY"""', 'secret': '"""az-file-secret"""', 'key': '"""azurestorageaccountkey"""'}), "(deploy_type='env', deploy_target='LAKE_ACCOUNT_KEY', secret=\n 'az-file-secret', key='azurestorageaccountkey')\n", (4172, 4285), False, 'from airflow.kubernetes.secret import Secret\n'), ((4313, 4415), 'airflow.kubernetes.secret.Secret', 'Secret', ([], {'deploy_type': '"""volume"""', 'deploy_target': '"""/opt/airflow/box-sec"""', 'secret': '"""box-secret"""', 'key': 'None'}), "(deploy_type='volume', deploy_target='/opt/airflow/box-sec', secret=\n 'box-secret', key=None)\n", (4319, 4415), False, 'from airflow.kubernetes.secret import Secret\n')]
|
"""."""
import pytest
from .hash_table import HashTable as HT
from .left_join import left_join
def test_left_join_true(six_key_ht, five_key_ht):
"""True case for left join."""
result = left_join(six_key_ht, five_key_ht)
assert result.get('cost') == (0, None)
def test_both_empty_hash_table():
"""Result when both inputs are empty HashTable."""
hash1 = HT()
hash2 = HT()
result = left_join(hash1, hash2)
for i in range(0, 1023):
assert result.buckets[i]._len == 0
def test_value_error(five_key_ht):
"""Value error check."""
with pytest.raises(ValueError) as err:
left_join(five_key_ht)
assert err == 'At least one input must be HashTable'
def test_type_error_one(six_key_ht):
"""Type Error check when only one input is not HashTable."""
with pytest.raises(TypeError) as err:
left_join(six_key_ht, 15)
assert err == 'Input must be HashTable.'
|
[
"pytest.raises"
] |
[((584, 609), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (597, 609), False, 'import pytest\n'), ((823, 847), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (836, 847), False, 'import pytest\n')]
|
import math
import pytz
import sys
import time
from datetime import date
from . import wait_times, util, arrival_history, trip_times, errors, constants, timetables, routeconfig
import pandas as pd
import numpy as np
# Represents a range of days with a time range within each day.
# RouteMetrics can calculate various statistics over a range.
class Range:
def __init__(self, dates: list, start_time_str: str, end_time_str: str, tz: pytz.timezone):
self.dates = dates # list of datetime.date objects
self.start_time_str = start_time_str # if None, no start time filter
self.end_time_str = end_time_str # if None, no end time filter
self.tz = tz
# RouteMetrics allows computing various metrics for a particular route,
# such as headways, wait times, and trip times,
# including over various date and time ranges.
#
# It caches the arrival history and data frames so that the different
# metrics calculations can reuse the same arrivals data without
# needing to reload it from disk each time.
#
class RouteMetrics:
def __init__(self, agency_id, route_id):
self.agency_id = agency_id
self.route_id = route_id
self.arrival_histories = {}
self.data_frames = {}
self.timetables = {}
def get_arrival_history(self, d):
if d in self.arrival_histories:
return self.arrival_histories[d]
print(f'loading arrival history for route {self.route_id} on {d}', file=sys.stderr)
try:
self.arrival_histories[d] = history = arrival_history.get_by_date(self.agency_id, self.route_id, d)
except FileNotFoundError as ex:
print(f'Arrival history not found for route {self.route_id} on {d}', file=sys.stderr)
history = arrival_history.ArrivalHistory(self.agency_id, self.route_id, {});
return history
def get_history_data_frame(self, d, direction_id=None, stop_id=None):
key = f'history_{str(d)}_{stop_id}_{direction_id}'
if key in self.data_frames:
return self.data_frames[key]
history = self.get_arrival_history(d)
print(f'loading data frame {key} for route {self.route_id}', file=sys.stderr)
df = history.get_data_frame(stop_id=stop_id, direction_id=direction_id)
self.data_frames[key] = df
return df
def get_timetable(self, d):
if d not in self.timetables.keys():
self.timetables[d] = timetables.get_by_date(self.agency_id, self.route_id, d)
return self.timetables[d]
def get_timetable_data_frame(self, d, direction_id=None, stop_id=None):
timetable = self.get_timetable(d)
timetable_key = f'timetable_{str(d)}_{stop_id}_{direction_id}'
if timetable_key not in self.data_frames:
self.data_frames[timetable_key] = timetable.get_data_frame(stop_id=stop_id, direction_id=direction_id)
return self.data_frames[timetable_key]
def get_wait_time_stats(self, direction_id, stop_id, rng: Range):
return self._get_wait_time_stats(direction_id, stop_id, rng, self.get_history_data_frame)
def get_scheduled_wait_time_stats(self, direction_id, stop_id, rng: Range):
return self._get_wait_time_stats(direction_id, stop_id, rng, self.get_timetable_data_frame)
def _get_wait_time_stats(self, direction_id, stop_id, rng: Range, get_data_frame):
wait_stats_arr = []
for d in rng.dates:
start_time = util.get_timestamp_or_none(d, rng.start_time_str, rng.tz)
end_time = util.get_timestamp_or_none(d, rng.end_time_str, rng.tz)
df = get_data_frame(d, stop_id=stop_id, direction_id=direction_id)
departure_time_values = np.sort(df['DEPARTURE_TIME'].values)
wait_stats = wait_times.get_stats(departure_time_values, start_time, end_time)
wait_stats_arr.append(wait_stats)
if len(wait_stats_arr) == 1:
return wait_stats_arr[0]
else:
return wait_times.combine_stats(wait_stats_arr)
def get_arrivals(self, direction_id, stop_id, rng: Range):
return self._get_count(direction_id, stop_id, rng, self.get_history_data_frame, 'TIME')
def get_departures(self, direction_id, stop_id, rng: Range):
return self._get_count(direction_id, stop_id, rng, self.get_history_data_frame, 'DEPARTURE_TIME')
def get_scheduled_arrivals(self, direction_id, stop_id, rng: Range):
return self._get_count(direction_id, stop_id, rng, self.get_timetable_data_frame, 'TIME')
def get_scheduled_departures(self, direction_id, stop_id, rng: Range):
return self._get_count(direction_id, stop_id, rng, self.get_timetable_data_frame, 'DEPARTURE_TIME')
def _get_count(self, direction_id, stop_id, rng: Range, get_data_frame, time_field):
if stop_id is None:
return None
count = 0
for d in rng.dates:
df = get_data_frame(d, direction_id=direction_id, stop_id=stop_id)
start_time = util.get_timestamp_or_none(d, rng.start_time_str, rng.tz)
end_time = util.get_timestamp_or_none(d, rng.end_time_str, rng.tz)
if start_time is not None:
df = df[df[time_field] >= start_time]
if end_time is not None:
df = df[df[time_field] < end_time]
count += len(df)
return count
def get_departure_schedule_adherence(self, direction_id, stop_id, early_sec, late_sec, rng: Range):
return self._get_schedule_adherence(direction_id, stop_id, early_sec, late_sec, rng, 'DEPARTURE_TIME')
def get_arrival_schedule_adherence(self, direction_id, stop_id, early_sec, late_sec, rng: Range):
return self._get_schedule_adherence(direction_id, stop_id, early_sec, late_sec, rng, 'TIME')
def _get_schedule_adherence(self, direction_id, stop_id, early_sec, late_sec, rng: Range, time_field):
if stop_id is None:
return None
compared_timetable_arr = []
now = time.time()
for d in rng.dates:
stop_timetable = self.get_timetable_data_frame(d, direction_id=direction_id, stop_id=stop_id)
stop_arrivals = self.get_history_data_frame(d, direction_id=direction_id, stop_id=stop_id)
scheduled_time_values = np.sort(stop_timetable[time_field].values)
actual_time_values = np.sort(stop_arrivals[time_field].values)
comparison_df = timetables.match_schedule_to_actual_times(
scheduled_time_values,
actual_time_values,
early_sec = early_sec,
late_sec = late_sec,
)
comparison_df[time_field] = scheduled_time_values
if len(comparison_df) and comparison_df[time_field].iloc[-1] >= now:
comparison_df = comparison_df[comparison_df[time_field] < now]
start_time = util.get_timestamp_or_none(d, rng.start_time_str, rng.tz)
end_time = util.get_timestamp_or_none(d, rng.end_time_str, rng.tz)
if start_time is not None:
comparison_df = comparison_df[comparison_df[time_field] >= start_time]
if end_time is not None:
comparison_df = comparison_df[comparison_df[time_field] < end_time]
compared_timetable_arr.append(comparison_df)
return pd.concat(compared_timetable_arr)
def get_headway_schedule_deltas(self, direction_id, stop_id, rng: Range):
headway_delta_arr = []
now = time.time()
for d in rng.dates:
timetable_df = self.get_timetable_data_frame(d, direction_id=direction_id, stop_id=stop_id)
history_df = self.get_history_data_frame(d, direction_id=direction_id, stop_id=stop_id)
departure_time_values = np.sort(history_df['DEPARTURE_TIME'].values)
scheduled_departure_time_values = np.sort(timetable_df['DEPARTURE_TIME'].values)
comparison_df = timetables.match_actual_times_to_schedule(
departure_time_values,
scheduled_departure_time_values
)
comparison_df['DEPARTURE_TIME'] = departure_time_values
comparison_df['headway'] = np.r_[np.nan, compute_headway_minutes(departure_time_values)]
comparison_df = comparison_df[np.isfinite(comparison_df['headway'].values) & np.isfinite(comparison_df['closest_scheduled_headway'].values)]
if len(comparison_df) and comparison_df['DEPARTURE_TIME'].iloc[-1] >= now:
comparison_df = comparison_df[comparison_df['DEPARTURE_TIME'] < now]
start_time = util.get_timestamp_or_none(d, rng.start_time_str, rng.tz)
end_time = util.get_timestamp_or_none(d, rng.end_time_str, rng.tz)
if start_time is not None:
comparison_df = comparison_df[comparison_df['DEPARTURE_TIME'] >= start_time]
if end_time is not None:
comparison_df = comparison_df[comparison_df['DEPARTURE_TIME'] < end_time]
headway_delta = comparison_df['headway'].values - comparison_df['closest_scheduled_headway'].values
headway_delta_arr.append(headway_delta)
return np.concatenate(headway_delta_arr)
def get_scheduled_trip_times(self, direction_id, start_stop_id, end_stop_id, rng: Range):
return self._get_trip_times(direction_id, start_stop_id, end_stop_id, rng, self.get_timetable_data_frame)
def get_trip_times(self, direction_id, start_stop_id, end_stop_id, rng: Range):
return self._get_trip_times(direction_id, start_stop_id, end_stop_id, rng, self.get_history_data_frame)
def _get_trip_times(self, direction_id, start_stop_id, end_stop_id, rng: Range, get_data_frame):
completed_trips_arr = []
if end_stop_id is None:
return None
is_loop = False
route_config = routeconfig.get_route_config(self.agency_id, self.route_id)
if route_config is not None:
if direction_id is not None:
dir_info = route_config.get_direction_info(direction_id)
else:
direction_ids = route_config.get_directions_for_stop(start_stop_id)
dir_info = route_config.get_direction_info(direction_ids[0]) if len(direction_ids) > 0 else None
if dir_info is not None:
is_loop = dir_info.is_loop()
for d in rng.dates:
s1_df = get_data_frame(d, stop_id=start_stop_id, direction_id=direction_id)
s2_df = get_data_frame(d, stop_id=end_stop_id, direction_id=direction_id)
start_time = util.get_timestamp_or_none(d, rng.start_time_str, rng.tz)
end_time = util.get_timestamp_or_none(d, rng.end_time_str, rng.tz)
if start_time is not None:
s1_df = s1_df[s1_df['DEPARTURE_TIME'] >= start_time]
if end_time is not None:
s1_df = s1_df[s1_df['DEPARTURE_TIME'] < end_time]
completed_trip_times = trip_times.get_completed_trip_times(
s1_df['TRIP'].values,
s1_df['DEPARTURE_TIME'].values,
s2_df['TRIP'].values,
s2_df['TIME'].values,
is_loop = is_loop
)
completed_trips_arr.append(completed_trip_times)
return np.concatenate(completed_trips_arr)
def get_headways(self, direction_id, stop_id, rng: Range):
return self._get_headways(direction_id, stop_id, rng, self.get_history_data_frame)
def get_scheduled_headways(self, direction_id, stop_id, rng: Range):
return self._get_headways(direction_id, stop_id, rng, self.get_timetable_data_frame)
def _get_headways(self, direction_id, stop_id, rng: Range, get_data_frame):
headway_min_arr = []
for d in rng.dates:
df = get_data_frame(d, direction_id=direction_id, stop_id=stop_id)
start_time = util.get_timestamp_or_none(d, rng.start_time_str, rng.tz)
end_time = util.get_timestamp_or_none(d, rng.end_time_str, rng.tz)
departure_time_values = np.sort(df['DEPARTURE_TIME'].values)
headway_min = compute_headway_minutes(departure_time_values, start_time, end_time)
headway_min_arr.append(headway_min)
return np.concatenate(headway_min_arr)
def compute_headway_minutes(time_values, start_time=None, end_time=None):
if start_time is not None:
start_index = np.searchsorted(time_values, start_time, 'left')
else:
start_index = 0
if end_time is not None:
end_index = np.searchsorted(time_values, end_time, 'left')
else:
end_index = len(time_values)
if start_index == 0:
start_index = 1
if start_index > end_index:
end_index = start_index
return (time_values[start_index:end_index] - time_values[start_index - 1 : end_index - 1]) / 60
|
[
"numpy.searchsorted",
"numpy.isfinite",
"time.time",
"numpy.sort",
"pandas.concat",
"numpy.concatenate"
] |
[((6056, 6067), 'time.time', 'time.time', ([], {}), '()\n', (6065, 6067), False, 'import time\n'), ((7407, 7440), 'pandas.concat', 'pd.concat', (['compared_timetable_arr'], {}), '(compared_timetable_arr)\n', (7416, 7440), True, 'import pandas as pd\n'), ((7567, 7578), 'time.time', 'time.time', ([], {}), '()\n', (7576, 7578), False, 'import time\n'), ((9264, 9297), 'numpy.concatenate', 'np.concatenate', (['headway_delta_arr'], {}), '(headway_delta_arr)\n', (9278, 9297), True, 'import numpy as np\n'), ((11392, 11427), 'numpy.concatenate', 'np.concatenate', (['completed_trips_arr'], {}), '(completed_trips_arr)\n', (11406, 11427), True, 'import numpy as np\n'), ((12366, 12397), 'numpy.concatenate', 'np.concatenate', (['headway_min_arr'], {}), '(headway_min_arr)\n', (12380, 12397), True, 'import numpy as np\n'), ((12526, 12574), 'numpy.searchsorted', 'np.searchsorted', (['time_values', 'start_time', '"""left"""'], {}), "(time_values, start_time, 'left')\n", (12541, 12574), True, 'import numpy as np\n'), ((12659, 12705), 'numpy.searchsorted', 'np.searchsorted', (['time_values', 'end_time', '"""left"""'], {}), "(time_values, end_time, 'left')\n", (12674, 12705), True, 'import numpy as np\n'), ((3744, 3780), 'numpy.sort', 'np.sort', (["df['DEPARTURE_TIME'].values"], {}), "(df['DEPARTURE_TIME'].values)\n", (3751, 3780), True, 'import numpy as np\n'), ((6343, 6385), 'numpy.sort', 'np.sort', (['stop_timetable[time_field].values'], {}), '(stop_timetable[time_field].values)\n', (6350, 6385), True, 'import numpy as np\n'), ((6419, 6460), 'numpy.sort', 'np.sort', (['stop_arrivals[time_field].values'], {}), '(stop_arrivals[time_field].values)\n', (6426, 6460), True, 'import numpy as np\n'), ((7849, 7893), 'numpy.sort', 'np.sort', (["history_df['DEPARTURE_TIME'].values"], {}), "(history_df['DEPARTURE_TIME'].values)\n", (7856, 7893), True, 'import numpy as np\n'), ((7941, 7987), 'numpy.sort', 'np.sort', (["timetable_df['DEPARTURE_TIME'].values"], {}), "(timetable_df['DEPARTURE_TIME'].values)\n", (7948, 7987), True, 'import numpy as np\n'), ((12168, 12204), 'numpy.sort', 'np.sort', (["df['DEPARTURE_TIME'].values"], {}), "(df['DEPARTURE_TIME'].values)\n", (12175, 12204), True, 'import numpy as np\n'), ((8374, 8418), 'numpy.isfinite', 'np.isfinite', (["comparison_df['headway'].values"], {}), "(comparison_df['headway'].values)\n", (8385, 8418), True, 'import numpy as np\n'), ((8421, 8483), 'numpy.isfinite', 'np.isfinite', (["comparison_df['closest_scheduled_headway'].values"], {}), "(comparison_df['closest_scheduled_headway'].values)\n", (8432, 8483), True, 'import numpy as np\n')]
|
from flask.config import Config
import os
ds_settings = os.getenv(
"DS_SETTINGS", "project.config.data_science_config.DsDevelopmentConfig"
)
ds_config=Config(None)
ds_config.from_object(ds_settings)
|
[
"flask.config.Config",
"os.getenv"
] |
[((57, 143), 'os.getenv', 'os.getenv', (['"""DS_SETTINGS"""', '"""project.config.data_science_config.DsDevelopmentConfig"""'], {}), "('DS_SETTINGS',\n 'project.config.data_science_config.DsDevelopmentConfig')\n", (66, 143), False, 'import os\n'), ((157, 169), 'flask.config.Config', 'Config', (['None'], {}), '(None)\n', (163, 169), False, 'from flask.config import Config\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013, <NAME>
# Copyright (c) 2014-2015, <NAME>
# Copyright (c) 2013-2015, B2CK
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""a parser for MT940 files
"""
__version__ = '0.2'
__all__ = ['MT940', 'rabo_description', 'abn_amro_description',
'ing_description']
import datetime
import re
from collections import defaultdict, namedtuple
from decimal import Decimal
SECTIONS = {
'begin': [':940:'],
'statement': [':20:'],
'account': [':25:'],
'information': [':28:', ':28C:'],
'start_balance': [':60F:'],
'transaction': [':61:'],
'description': [':86:'],
'end_balance': [':62F:'],
}
def _parse_date(date):
return datetime.datetime.strptime(date, '%y%m%d').date()
def _parse_amount(amount, sign='C'):
amount = Decimal(amount.replace(',', '.'))
if sign in ('D', 'RC'):
return -amount
return amount
TRANSACTION_RE = re.compile(r"""
(?P<date>\d{6})
(?P<booking>\d{4})?
(?P<sign>D|C|RC|RD)
(?P<code>\w)?? # ING skips this mandatory field
(?P<amount>(\d|,){1,15})
(?P<id>\w{4})
(?P<reference>.{0,34})""", re.VERBOSE)
class MT940(object):
def __init__(self, name):
self.statements = []
with open(name, 'rU') as f:
values = defaultdict(str)
transactions = []
for line in self._readline(f):
for name, sections in SECTIONS.iteritems():
if name == 'begin':
continue
for section in sections:
if line.startswith(section):
if name in values and name == 'statement':
self._set_statement(values, transactions)
if name.endswith('_balance'):
values[name] = self._get_balance(
line[len(section):])
elif name == 'transaction':
transactions.append(
self._get_transaction(line[len(section):]))
elif name == 'description':
transactions[-1] = (transactions[-1][:-1]
+ (line[len(section):],))
else:
values[name] += line[len(section):]
if values:
self._set_statement(values, transactions)
@staticmethod
def _readline(f):
buf = []
for line in f:
line = line.strip('\n')
if buf:
if (line.startswith(':')
or line.startswith('-')):
yield '\n'.join(buf)
del buf[:]
buf.append(line)
if buf:
yield '\n'.join(buf)
@staticmethod
def _get_balance(balance):
date = _parse_date(balance[1:7])
amount = _parse_amount(balance[10:], balance[0])
return Balance(date=date, amount=amount, currency=balance[7:10])
@staticmethod
def _get_transaction(transaction):
lines = transaction.splitlines()
if len(lines) == 1:
transaction, = lines
additional_data = None
else:
transaction, additional_data = lines
transaction = TRANSACTION_RE.match(transaction)
date = _parse_date(transaction.group('date'))
if transaction.group('booking'):
booking = _parse_date(
transaction.group('date')[:2]
+ transaction.group('booking'))
else:
booking = None
amount = _parse_amount(transaction.group('amount'),
transaction.group('sign'))
id_ = transaction.group('id')
reference = transaction.group('reference')
reference, _, institution_reference = reference.partition('//')
return (date, booking, amount, id_, reference,
institution_reference, additional_data, '')
def _set_statement(self, values, transactions):
self.statements.append(
Statement(
transactions=[Transaction(*t) for t in transactions],
**values))
values.clear()
del transactions[:]
Statement = namedtuple('Statement', ['statement', 'account', 'information',
'start_balance', 'transactions', 'end_balance'])
Balance = namedtuple('Balance', ['date', 'amount', 'currency'])
Transaction = namedtuple('Transaction', ['date', 'booking', 'amount', 'id',
'reference', 'institution_reference', 'additional_data',
'description'])
def _find_swift_tags(tags, description):
values = {}
for tag, name in tags:
if description.startswith(tag):
description = description[len(tag):]
try:
i = description.index('/')
except ValueError:
i = len(description)
values[name] = description[:i]
description = description[i:]
if not description:
break
return values
RABO_TAGS = [
('/MARF/', 'marf'),
('/EREF/', 'eref'),
('/PREF/', 'pref'),
('/BENM/', 'benm'),
('/ORDP/', 'ordp'),
('/NAME/', 'name'),
('/ID/', 'id'),
('/ADDR/', 'addr'),
('/REMI/', 'remi'),
('/CDTRREFTP//CD/SCOR/ISSR/CUR/CDTRREF/', 'cdtrref'),
('/CSID/', 'csid'),
('/ISDT/', 'isdt'),
('/RTRN/', 'rtrn'),
]
def rabo_description(description):
"Return dictionnary with Rabo informations"
description = ''.join(description.splitlines())
return _find_swift_tags(RABO_TAGS, description)
ABN_AMRO_ACCOUNT = re.compile(r"""
^([0-9]{1,3}\.[0-9]{1,2}\.[0-9]{1,2}\.[0-9]{1,3})""", re.VERBOSE)
ABN_AMRO_GIRO = re.compile(r"""
^GIRO\ +([0-9]+)""", re.VERBOSE)
ABN_AMRO_TAGS = [
('/TRTP/', 'trtp'),
('/IBAN/', 'iban'),
('/BIC/', 'bic'),
('/CSID', 'csid'),
('/NAME/', 'name'),
('/REMI/', 'remi'),
('/EREF/', 'eref'),
('/ORDP//ID/', 'ordp'),
('/BENM//ID/', 'benm'),
]
def abn_amro_description(description):
"Retrun dictionnary with ABN AMRO informations"
description = ''.join(description.splitlines())
values = {}
m = ABN_AMRO_ACCOUNT.match(description)
if m:
values['account'] = m.group(1).replace('.', '')
m = ABN_AMRO_GIRO.match(description)
if m:
values['account'] = m.group(1)
values.update(_find_swift_tags(ABN_AMRO_TAGS, description))
return values
ING_TAGS = re.compile(r'/(RTRN|EREF|PREF|MARF|CSID|CNTP|REMI|PURP|ULT[CD])/')
ING_TAGS_DEFINITION = {
'RTRN': ('rtrn', []),
'EREF': ('eref', []),
'PREF': ('pref', []),
'MARF': ('marf', []),
'CSID': ('csid', []),
'CNTP': ('cntp', ['account_number', 'bic', 'name', 'city']),
'REMI': ('remi', ['code', 'issuer', 'remittance_info']),
'PURP': ('purp', []),
'ULTC': ('ultc', ['name', 'id']),
'ULTD': ('ultd', ['name', 'id']),
}
def ing_description(description):
"Return dictionnary with ING informations"
description = ''.join(description.splitlines())
values = {}
ing_tags = iter(ING_TAGS.split(description)[1:])
for tag, tag_value in zip(ing_tags, ing_tags):
tag_value = tag_value[:-1]
name, subfields = ING_TAGS_DEFINITION[tag]
if not subfields:
values[name] = tag_value
continue
values[name] = {}
if 'name' in subfields or 'remittance_info' in subfields:
special_tag = 'name' if 'name' in subfields else 'remittance_info'
tag_idx = subfields.index(special_tag)
subtags = tag_value.split('/', tag_idx)
for sf_name, sf_value in zip(subfields[:tag_idx], subtags[:-1]):
values[name][sf_name] = sf_value
subtags = subtags[-1].rsplit('/', len(subfields) - tag_idx - 1)
for sf_name, sf_value in zip(subfields[tag_idx:], subtags):
values[name][sf_name] = sf_value
else:
subtags = tag_value.split('/')
for sf_name, sf_value in zip(subfields, subtags):
values[name][sf_name] = sf_value
return values
|
[
"collections.defaultdict",
"datetime.datetime.strptime",
"collections.namedtuple",
"re.compile"
] |
[((2415, 2655), 're.compile', 're.compile', (['"""\n (?P<date>\\\\d{6})\n (?P<booking>\\\\d{4})?\n (?P<sign>D|C|RC|RD)\n (?P<code>\\\\w)?? # ING skips this mandatory field\n (?P<amount>(\\\\d|,){1,15})\n (?P<id>\\\\w{4})\n (?P<reference>.{0,34})"""', 're.VERBOSE'], {}), '(\n """\n (?P<date>\\\\d{6})\n (?P<booking>\\\\d{4})?\n (?P<sign>D|C|RC|RD)\n (?P<code>\\\\w)?? # ING skips this mandatory field\n (?P<amount>(\\\\d|,){1,15})\n (?P<id>\\\\w{4})\n (?P<reference>.{0,34})"""\n , re.VERBOSE)\n', (2425, 2655), False, 'import re\n'), ((5812, 5928), 'collections.namedtuple', 'namedtuple', (['"""Statement"""', "['statement', 'account', 'information', 'start_balance', 'transactions',\n 'end_balance']"], {}), "('Statement', ['statement', 'account', 'information',\n 'start_balance', 'transactions', 'end_balance'])\n", (5822, 5928), False, 'from collections import defaultdict, namedtuple\n'), ((5943, 5996), 'collections.namedtuple', 'namedtuple', (['"""Balance"""', "['date', 'amount', 'currency']"], {}), "('Balance', ['date', 'amount', 'currency'])\n", (5953, 5996), False, 'from collections import defaultdict, namedtuple\n'), ((6011, 6149), 'collections.namedtuple', 'namedtuple', (['"""Transaction"""', "['date', 'booking', 'amount', 'id', 'reference', 'institution_reference',\n 'additional_data', 'description']"], {}), "('Transaction', ['date', 'booking', 'amount', 'id', 'reference',\n 'institution_reference', 'additional_data', 'description'])\n", (6021, 6149), False, 'from collections import defaultdict, namedtuple\n'), ((7187, 7278), 're.compile', 're.compile', (['"""\n ^([0-9]{1,3}\\\\.[0-9]{1,2}\\\\.[0-9]{1,2}\\\\.[0-9]{1,3})"""', 're.VERBOSE'], {}), '("""\n ^([0-9]{1,3}\\\\.[0-9]{1,2}\\\\.[0-9]{1,2}\\\\.[0-9]{1,3})""",\n re.VERBOSE)\n', (7197, 7278), False, 'import re\n'), ((7289, 7341), 're.compile', 're.compile', (['"""\n ^GIRO\\\\ +([0-9]+)"""', 're.VERBOSE'], {}), '("""\n ^GIRO\\\\ +([0-9]+)""", re.VERBOSE)\n', (7299, 7341), False, 'import re\n'), ((8042, 8107), 're.compile', 're.compile', (['"""/(RTRN|EREF|PREF|MARF|CSID|CNTP|REMI|PURP|ULT[CD])/"""'], {}), "('/(RTRN|EREF|PREF|MARF|CSID|CNTP|REMI|PURP|ULT[CD])/')\n", (8052, 8107), False, 'import re\n'), ((2192, 2234), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date', '"""%y%m%d"""'], {}), "(date, '%y%m%d')\n", (2218, 2234), False, 'import datetime\n'), ((2783, 2799), 'collections.defaultdict', 'defaultdict', (['str'], {}), '(str)\n', (2794, 2799), False, 'from collections import defaultdict, namedtuple\n')]
|
# Copyright 2012 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#ovs-vsctl -- --id=@ft create Flow_Table flow_limit=100 overflow_policy=refuse -- set Bridge br0 flow_tables=0=@ft
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.util import dpidToStr
from pox.lib.packet.ethernet import ethernet, ETHER_BROADCAST
from pox.lib.packet.ipv4 import ipv4
from pox.lib.addresses import IPAddr, EthAddr
log = core.getLogger()
#Tabela de enderecos mac->porta
tabela_mac = {}
def _handle_ConnectionUp (event):
####################### REGRAS PRINCIPAIS #############################
#Regra de encaminhamento para o controlador
#msgc = of.ofp_flow_mod()
#msgc.match.in_port = 3
#msgc.priority = 2
#msgc.actions.append(of.ofp_action_output(port = of.OFPP_CONTROLLER))
#event.connection.send(msgc)
if (dpidToStr(event.dpid) == '00-e0-4c-2a-33-4f'):
log.info("Switch UL conectado.")
else:
log.info("Switch %s conectado.", dpidToStr(event.dpid))
def _handle_PacketIn (event):
global tabela_mac
packet = event.parsed # This is the parsed packet data.
if not packet.parsed:
log.warning("Pacote incompleto!")
return
packet_in = event.ofp # The actual ofp_packet_in message.
#Aprendendo a porta de origem, caso ela nao esteja na tabela
if packet.src not in tabela_mac:
log.info("Aprendendo: MAC " + str(packet.src) + " esta na porta " + str(packet_in.in_port))
tabela_mac[packet.src] = packet_in.in_port
try:
porta = tabela_mac[packet.dst] #Porta destino
log.info(str(packet.dst) + " e um MAC conhecido. Instalando regra: porta " + str(packet_in.in_port) + "->" + str(porta))
msg = of.ofp_flow_mod()
#print packet
#print packet.next
#print packet.next.next
if (packet.find('arp')):
tipo = 0x0806
else:
tipo = 0x0800
msg.match.dl_type = tipo
msg.match.in_port = packet_in.in_port #Porta origem
msg.match.dl_src = packet.src #MAC origem
msg.match.dl_dst = packet.dst #MAC destino
#Packet.next sobe para a proxima camada
#Packet = camada enlace
#Packet.next = camada rede
#Packet.next.next = camada de transporte
msg.match.nw_src = packet.next.srcip #IP origem
msg.match.nw_dst = packet.next.dstip #IP destino
msg.match.nw_proto = packet.next.protocol #Protocolo
msg.match.tp_src = packet.next.next.srcport #Porta de origem (Protocolo)
msg.match.tp_dst = packet.next.next.dstport #Porta de origem (Protocolo)
msg.priority = 10
msg.actions.append(of.ofp_action_output(port = porta)) #Porta destino
event.connection.send(msg)
except:
log.info(str(packet.dst) + " nao e um MAC conhecido, enviando pacote para todos")
porta = of.OFPP_FLOOD #Manda para todas as portas (pode usar of.OFPP_ALL tambem)
msg = of.ofp_packet_out()
msg.actions.append(of.ofp_action_output(port = porta))
msg.data = packet_in
msg.in_port = event.port
event.connection.send(msg)
def launch ():
core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp)
core.openflow.addListenerByName("PacketIn", _handle_PacketIn)
log.info("Executando codigo...")
|
[
"pox.core.core.openflow.addListenerByName",
"pox.openflow.libopenflow_01.ofp_flow_mod",
"pox.openflow.libopenflow_01.ofp_action_output",
"pox.openflow.libopenflow_01.ofp_packet_out",
"pox.lib.util.dpidToStr",
"pox.core.core.getLogger"
] |
[((942, 958), 'pox.core.core.getLogger', 'core.getLogger', ([], {}), '()\n', (956, 958), False, 'from pox.core import core\n'), ((3303, 3322), 'pox.openflow.libopenflow_01.ofp_packet_out', 'of.ofp_packet_out', ([], {}), '()\n', (3320, 3322), True, 'import pox.openflow.libopenflow_01 as of\n'), ((3477, 3546), 'pox.core.core.openflow.addListenerByName', 'core.openflow.addListenerByName', (['"""ConnectionUp"""', '_handle_ConnectionUp'], {}), "('ConnectionUp', _handle_ConnectionUp)\n", (3508, 3546), False, 'from pox.core import core\n'), ((3549, 3610), 'pox.core.core.openflow.addListenerByName', 'core.openflow.addListenerByName', (['"""PacketIn"""', '_handle_PacketIn'], {}), "('PacketIn', _handle_PacketIn)\n", (3580, 3610), False, 'from pox.core import core\n'), ((1349, 1370), 'pox.lib.util.dpidToStr', 'dpidToStr', (['event.dpid'], {}), '(event.dpid)\n', (1358, 1370), False, 'from pox.lib.util import dpidToStr\n'), ((2178, 2195), 'pox.openflow.libopenflow_01.ofp_flow_mod', 'of.ofp_flow_mod', ([], {}), '()\n', (2193, 2195), True, 'import pox.openflow.libopenflow_01 as of\n'), ((3344, 3376), 'pox.openflow.libopenflow_01.ofp_action_output', 'of.ofp_action_output', ([], {'port': 'porta'}), '(port=porta)\n', (3364, 3376), True, 'import pox.openflow.libopenflow_01 as of\n'), ((1478, 1499), 'pox.lib.util.dpidToStr', 'dpidToStr', (['event.dpid'], {}), '(event.dpid)\n', (1487, 1499), False, 'from pox.lib.util import dpidToStr\n'), ((3031, 3063), 'pox.openflow.libopenflow_01.ofp_action_output', 'of.ofp_action_output', ([], {'port': 'porta'}), '(port=porta)\n', (3051, 3063), True, 'import pox.openflow.libopenflow_01 as of\n')]
|
import os
import pandas as pd
import pytest
from whylogs.core.metrics.regression_metrics import RegressionMetrics
from whylogs.proto import RegressionMetricsMessage
TEST_DATA_PATH = os.path.abspath(
os.path.join(
os.path.realpath(os.path.dirname(__file__)),
os.pardir,
os.pardir,
os.pardir,
os.pardir,
"testdata",
)
)
def my_test():
regmet = RegressionMetrics()
assert regmet.count == 0
assert regmet.sum_diff == 0.0
assert regmet.sum2_diff == 0.0
assert regmet.sum_abs_diff == 0.0
assert regmet.mean_squared_error() is None
assert regmet.mean_absolute_error() is None
assert regmet.root_mean_squared_error() is None
def test_load_parquet():
mean_absolute_error = 85.94534216005789
mean_squared_error = 11474.89611670205
root_mean_squared_error = 107.12094154133472
regmet = RegressionMetrics()
df = pd.read_parquet(os.path.join(os.path.join(TEST_DATA_PATH, "metrics", "2021-02-12.parquet")))
regmet.add(df["predictions"].to_list(), df["targets"].to_list())
assert regmet.count == len(df["predictions"].to_list())
assert regmet.mean_squared_error() == pytest.approx(mean_squared_error, 0.01)
assert regmet.mean_absolute_error() == pytest.approx(mean_absolute_error, 0.01)
assert regmet.root_mean_squared_error() == pytest.approx(root_mean_squared_error, 0.01)
msg = regmet.to_protobuf()
new_regmet = RegressionMetrics.from_protobuf(msg)
assert regmet.count == new_regmet.count
assert regmet.mean_squared_error() == new_regmet.mean_squared_error()
assert regmet.root_mean_squared_error() == new_regmet.root_mean_squared_error()
assert regmet.mean_absolute_error() == new_regmet.mean_absolute_error()
def test_empty_protobuf_should_return_none():
empty_message = RegressionMetricsMessage()
assert RegressionMetrics.from_protobuf(empty_message) is None
def test_merging():
regmet_sum = RegressionMetrics()
regmet = RegressionMetrics(prediction_field="predictions", target_field="targets")
df = pd.read_parquet(os.path.join(os.path.join(TEST_DATA_PATH, "metrics", "2021-02-12.parquet")))
regmet.add(df["predictions"].to_list(), df["targets"].to_list())
regmet_sum.add(df["predictions"].to_list(), df["targets"].to_list())
regmet_2 = RegressionMetrics(prediction_field="predictions", target_field="targets")
df_2 = pd.read_parquet(os.path.join(os.path.join(TEST_DATA_PATH, "metrics", "2021-02-13.parquet")))
regmet_2.add(df_2["predictions"].to_list(), df_2["targets"].to_list())
regmet_sum.add(df_2["predictions"].to_list(), df_2["targets"].to_list())
merged_reg_metr = regmet.merge(regmet_2)
assert merged_reg_metr.count == regmet_sum.count
assert merged_reg_metr.mean_squared_error() == pytest.approx(regmet_sum.mean_squared_error(), 0.001)
assert merged_reg_metr.root_mean_squared_error() == pytest.approx(regmet_sum.root_mean_squared_error(), 0.001)
assert merged_reg_metr.mean_absolute_error() == pytest.approx(regmet_sum.mean_absolute_error(), 0.001)
|
[
"whylogs.core.metrics.regression_metrics.RegressionMetrics",
"os.path.dirname",
"whylogs.proto.RegressionMetricsMessage",
"pytest.approx",
"os.path.join",
"whylogs.core.metrics.regression_metrics.RegressionMetrics.from_protobuf"
] |
[((407, 426), 'whylogs.core.metrics.regression_metrics.RegressionMetrics', 'RegressionMetrics', ([], {}), '()\n', (424, 426), False, 'from whylogs.core.metrics.regression_metrics import RegressionMetrics\n'), ((889, 908), 'whylogs.core.metrics.regression_metrics.RegressionMetrics', 'RegressionMetrics', ([], {}), '()\n', (906, 908), False, 'from whylogs.core.metrics.regression_metrics import RegressionMetrics\n'), ((1449, 1485), 'whylogs.core.metrics.regression_metrics.RegressionMetrics.from_protobuf', 'RegressionMetrics.from_protobuf', (['msg'], {}), '(msg)\n', (1480, 1485), False, 'from whylogs.core.metrics.regression_metrics import RegressionMetrics\n'), ((1832, 1858), 'whylogs.proto.RegressionMetricsMessage', 'RegressionMetricsMessage', ([], {}), '()\n', (1856, 1858), False, 'from whylogs.proto import RegressionMetricsMessage\n'), ((1964, 1983), 'whylogs.core.metrics.regression_metrics.RegressionMetrics', 'RegressionMetrics', ([], {}), '()\n', (1981, 1983), False, 'from whylogs.core.metrics.regression_metrics import RegressionMetrics\n'), ((1998, 2071), 'whylogs.core.metrics.regression_metrics.RegressionMetrics', 'RegressionMetrics', ([], {'prediction_field': '"""predictions"""', 'target_field': '"""targets"""'}), "(prediction_field='predictions', target_field='targets')\n", (2015, 2071), False, 'from whylogs.core.metrics.regression_metrics import RegressionMetrics\n'), ((2332, 2405), 'whylogs.core.metrics.regression_metrics.RegressionMetrics', 'RegressionMetrics', ([], {'prediction_field': '"""predictions"""', 'target_field': '"""targets"""'}), "(prediction_field='predictions', target_field='targets')\n", (2349, 2405), False, 'from whylogs.core.metrics.regression_metrics import RegressionMetrics\n'), ((1183, 1222), 'pytest.approx', 'pytest.approx', (['mean_squared_error', '(0.01)'], {}), '(mean_squared_error, 0.01)\n', (1196, 1222), False, 'import pytest\n'), ((1267, 1307), 'pytest.approx', 'pytest.approx', (['mean_absolute_error', '(0.01)'], {}), '(mean_absolute_error, 0.01)\n', (1280, 1307), False, 'import pytest\n'), ((1355, 1399), 'pytest.approx', 'pytest.approx', (['root_mean_squared_error', '(0.01)'], {}), '(root_mean_squared_error, 0.01)\n', (1368, 1399), False, 'import pytest\n'), ((1870, 1916), 'whylogs.core.metrics.regression_metrics.RegressionMetrics.from_protobuf', 'RegressionMetrics.from_protobuf', (['empty_message'], {}), '(empty_message)\n', (1901, 1916), False, 'from whylogs.core.metrics.regression_metrics import RegressionMetrics\n'), ((245, 270), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (260, 270), False, 'import os\n'), ((947, 1008), 'os.path.join', 'os.path.join', (['TEST_DATA_PATH', '"""metrics"""', '"""2021-02-12.parquet"""'], {}), "(TEST_DATA_PATH, 'metrics', '2021-02-12.parquet')\n", (959, 1008), False, 'import os\n'), ((2110, 2171), 'os.path.join', 'os.path.join', (['TEST_DATA_PATH', '"""metrics"""', '"""2021-02-12.parquet"""'], {}), "(TEST_DATA_PATH, 'metrics', '2021-02-12.parquet')\n", (2122, 2171), False, 'import os\n'), ((2446, 2507), 'os.path.join', 'os.path.join', (['TEST_DATA_PATH', '"""metrics"""', '"""2021-02-13.parquet"""'], {}), "(TEST_DATA_PATH, 'metrics', '2021-02-13.parquet')\n", (2458, 2507), False, 'import os\n')]
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import numpy as np
PRECISION = 8 # in signs after dot
def objective_file_name(output_prefix, input_basename, module_basename):
return output_prefix + input_basename + "_F_" + module_basename + ".txt"
def jacobian_file_name(output_prefix, input_basename, module_basename):
return output_prefix + input_basename + "_J_" + module_basename + ".txt"
def time_to_string(objective_time, derivative_time):
obj_time_str = np.format_float_scientific(
objective_time,
unique=False,
precision=PRECISION
)
der_time_str = np.format_float_scientific(
derivative_time,
unique=False,
precision=PRECISION
)
return f"{obj_time_str}\n{der_time_str}"
def save_time_to_file(filepath, objective_time, derivative_time):
# open file in write mode or create new one if it does not exist
out = open(filepath,"w")
out.write(time_to_string(objective_time, derivative_time))
out.close()
def value_to_string(value):
return np.format_float_scientific(value, unique=False, precision=PRECISION)
def save_value_to_file(filepath, value):
out = open(filepath,"w")
out.write(value_to_string(value))
out.close()
def save_vector_to_file(filepath, gradient):
out = open(filepath,"w")
for value in gradient:
out.write(value_to_string(value) + '\n')
out.close()
def save_jacobian_to_file(filepath, jacobian):
out = open(filepath,"w")
# output row-major matrix
for row in jacobian:
out.write(value_to_string(row[0]))
for value in row[1:]:
out.write('\t' + value_to_string(value))
out.write('\n')
out.close()
def save_errors_to_file(filepath, reprojection_error, zach_weight_error):
out = open(filepath,"w")
out.write("Reprojection error:\n")
for value in reprojection_error:
out.write(value_to_string(value) + '\n')
out.write("Zach weight error:\n")
for value in zach_weight_error:
out.write(value_to_string(value) + '\n')
out.close()
def save_sparse_j_to_file(filepath, J):
out = open(filepath,"w")
out.write(f"{J.nrows} {J.ncols}\n")
out.write(f"{len(J.rows)}\n")
for row in J.rows:
out.write(f"{row} ")
out.write('\n')
out.write(f"{len(J.cols)}\n")
for column in J.cols:
out.write(f"{column} ")
out.write('\n')
for value in J.vals:
out.write(value_to_string(value) + ' ')
out.close()
|
[
"numpy.format_float_scientific"
] |
[((507, 584), 'numpy.format_float_scientific', 'np.format_float_scientific', (['objective_time'], {'unique': '(False)', 'precision': 'PRECISION'}), '(objective_time, unique=False, precision=PRECISION)\n', (533, 584), True, 'import numpy as np\n'), ((635, 713), 'numpy.format_float_scientific', 'np.format_float_scientific', (['derivative_time'], {'unique': '(False)', 'precision': 'PRECISION'}), '(derivative_time, unique=False, precision=PRECISION)\n', (661, 713), True, 'import numpy as np\n'), ((1080, 1148), 'numpy.format_float_scientific', 'np.format_float_scientific', (['value'], {'unique': '(False)', 'precision': 'PRECISION'}), '(value, unique=False, precision=PRECISION)\n', (1106, 1148), True, 'import numpy as np\n')]
|
from django.contrib import admin
from .models import Blogpost
from django_summernote.admin import SummernoteModelAdmin
# class BlogpostAdmin(admin.ModelAdmin):
# list_display = ('title', 'slug', 'status','created_on')
# list_filter = ("status",)
# search_fields = ['title', 'content']
# prepopulated_fields = {'slug': ('title',)}
class BlogpostAdmin(SummernoteModelAdmin):
summernote_fields = ('content',)
list_display = ('title', 'slug', 'status', 'updated_on', 'created_on')
list_filter = ("status",)
search_fields = ['title', 'content']
prepopulated_fields = {'slug': ('title',)}
admin.site.register(Blogpost, BlogpostAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((621, 665), 'django.contrib.admin.site.register', 'admin.site.register', (['Blogpost', 'BlogpostAdmin'], {}), '(Blogpost, BlogpostAdmin)\n', (640, 665), False, 'from django.contrib import admin\n')]
|
from bs4 import BeautifulSoup as bs
import json
import uuid
class htmlCreator:
def generate_html_file(self, jsonObject):
soup = self.__getTemplateFileData()
jsonData = json.loads(jsonObject)
self.__appendDivs(soup, jsonData)
self.__saveFile(soup)
def __saveFile(self, soup):
myuuid = uuid.uuid4()
resultFilename = "./output_folder/bingo-{}.html".format(myuuid)
with open(resultFilename, "w") as file:
file.write(str(soup))
def __appendDivs(self, soup, jsonObject):
container = soup.div
freeSpaceLocation = self.__getFreeSpaceLocation(jsonObject)
for i in range(0, 25):
tag = soup.new_tag("div")
tag["class"] = "grid-item"
tag["id"] = "element{}".format(i)
if i is not freeSpaceLocation:
tag.string = jsonObject['spaces'][i]
else:
tag["class"] += " free-item"
tag.string = jsonObject['free_spaces'][0]
container.append(tag)
def __getFreeSpaceLocation(self, jsonObject):
print(jsonObject)
x, y = jsonObject['free space coordinates']
return x + y * 5
def __getTemplateFileData(self):
with open("./resources/templates/websiteTemplate.html") as file:
txt = file.read()
return bs(txt, "lxml")
|
[
"bs4.BeautifulSoup",
"uuid.uuid4",
"json.loads"
] |
[((189, 211), 'json.loads', 'json.loads', (['jsonObject'], {}), '(jsonObject)\n', (199, 211), False, 'import json\n'), ((335, 347), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (345, 347), False, 'import uuid\n'), ((1377, 1392), 'bs4.BeautifulSoup', 'bs', (['txt', '"""lxml"""'], {}), "(txt, 'lxml')\n", (1379, 1392), True, 'from bs4 import BeautifulSoup as bs\n')]
|
import abjad
import auxjad
def test_remove_repeated_time_signatures_01():
staff = abjad.Staff(r"c'4 d'8 | c'4 d'8")
abjad.attach(abjad.TimeSignature((3, 8)), staff[0])
abjad.attach(abjad.TimeSignature((3, 8)), staff[2])
assert abjad.lilypond(staff) == abjad.String.normalize(
r"""
\new Staff
{
\time 3/8
c'4
d'8
\time 3/8
c'4
d'8
}
"""
)
auxjad.mutate.remove_repeated_time_signatures(staff[:])
assert abjad.lilypond(staff) == abjad.String.normalize(
r"""
\new Staff
{
\time 3/8
c'4
d'8
c'4
d'8
}
"""
)
def test_remove_repeated_time_signatures_02():
staff = abjad.Staff(r"c'4 d'8 | e'4. | c'4 d'8")
abjad.attach(abjad.TimeSignature((3, 8)), staff[0])
abjad.attach(abjad.TimeSignature((3, 8)), staff[3])
assert abjad.lilypond(staff) == abjad.String.normalize(
r"""
\new Staff
{
\time 3/8
c'4
d'8
e'4.
\time 3/8
c'4
d'8
}
"""
)
auxjad.mutate.remove_repeated_time_signatures(staff[:])
assert abjad.lilypond(staff) == abjad.String.normalize(
r"""
\new Staff
{
\time 3/8
c'4
d'8
e'4.
c'4
d'8
}
"""
)
def test_remove_repeated_time_signatures_03():
staff = abjad.Staff([abjad.Note("c'2"),
abjad.Chord("<d' f'>2"),
abjad.Tuplet((2, 3), "g2 a2 b2"),
])
abjad.attach(abjad.TimeSignature((2, 2)), staff[0])
abjad.attach(abjad.TimeSignature((2, 2)), staff[2][0])
assert abjad.lilypond(staff) == abjad.String.normalize(
r"""
\new Staff
{
\time 2/2
c'2
<d' f'>2
\times 2/3
{
\time 2/2
g2
a2
b2
}
}
"""
)
auxjad.mutate.remove_repeated_time_signatures(staff[:])
assert abjad.lilypond(staff) == abjad.String.normalize(
r"""
\new Staff
{
\time 2/2
c'2
<d' f'>2
\times 2/3
{
g2
a2
b2
}
}
"""
)
def test_remove_repeated_time_signatures_04():
staff = abjad.Staff(r"c'2 d'2 | e'2 d'2")
abjad.attach(abjad.TimeSignature((4, 4)), staff[2])
auxjad.mutate.remove_repeated_time_signatures(staff[:])
assert abjad.lilypond(staff) == abjad.String.normalize(
r"""
\new Staff
{
c'2
d'2
e'2
d'2
}
"""
)
def test_remove_repeated_time_signatures_05():
staff = abjad.Staff(r"c'4 d'8 | c'4 d'8")
abjad.attach(abjad.TimeSignature((3, 8)), staff[0])
abjad.attach(abjad.TimeSignature((3, 8)), staff[2])
assert abjad.lilypond(staff) == abjad.String.normalize(
r"""
\new Staff
{
\time 3/8
c'4
d'8
\time 3/8
c'4
d'8
}
"""
)
abjad.mutate.remove_repeated_time_signatures(staff[:])
assert abjad.lilypond(staff) == abjad.String.normalize(
r"""
\new Staff
{
\time 3/8
c'4
d'8
c'4
d'8
}
"""
)
|
[
"abjad.mutate.remove_repeated_time_signatures",
"abjad.TimeSignature",
"abjad.lilypond",
"abjad.String.normalize",
"abjad.Chord",
"auxjad.mutate.remove_repeated_time_signatures",
"abjad.Note",
"abjad.Staff",
"abjad.Tuplet"
] |
[((89, 121), 'abjad.Staff', 'abjad.Staff', (['"""c\'4 d\'8 | c\'4 d\'8"""'], {}), '("c\'4 d\'8 | c\'4 d\'8")\n', (100, 121), False, 'import abjad\n'), ((477, 532), 'auxjad.mutate.remove_repeated_time_signatures', 'auxjad.mutate.remove_repeated_time_signatures', (['staff[:]'], {}), '(staff[:])\n', (522, 532), False, 'import auxjad\n'), ((810, 849), 'abjad.Staff', 'abjad.Staff', (['"""c\'4 d\'8 | e\'4. | c\'4 d\'8"""'], {}), '("c\'4 d\'8 | e\'4. | c\'4 d\'8")\n', (821, 849), False, 'import abjad\n'), ((1222, 1277), 'auxjad.mutate.remove_repeated_time_signatures', 'auxjad.mutate.remove_repeated_time_signatures', (['staff[:]'], {}), '(staff[:])\n', (1267, 1277), False, 'import auxjad\n'), ((2183, 2238), 'auxjad.mutate.remove_repeated_time_signatures', 'auxjad.mutate.remove_repeated_time_signatures', (['staff[:]'], {}), '(staff[:])\n', (2228, 2238), False, 'import auxjad\n'), ((2597, 2629), 'abjad.Staff', 'abjad.Staff', (['"""c\'2 d\'2 | e\'2 d\'2"""'], {}), '("c\'2 d\'2 | e\'2 d\'2")\n', (2608, 2629), False, 'import abjad\n'), ((2691, 2746), 'auxjad.mutate.remove_repeated_time_signatures', 'auxjad.mutate.remove_repeated_time_signatures', (['staff[:]'], {}), '(staff[:])\n', (2736, 2746), False, 'import auxjad\n'), ((3002, 3034), 'abjad.Staff', 'abjad.Staff', (['"""c\'4 d\'8 | c\'4 d\'8"""'], {}), '("c\'4 d\'8 | c\'4 d\'8")\n', (3013, 3034), False, 'import abjad\n'), ((3390, 3444), 'abjad.mutate.remove_repeated_time_signatures', 'abjad.mutate.remove_repeated_time_signatures', (['staff[:]'], {}), '(staff[:])\n', (3434, 3444), False, 'import abjad\n'), ((140, 167), 'abjad.TimeSignature', 'abjad.TimeSignature', (['(3, 8)'], {}), '((3, 8))\n', (159, 167), False, 'import abjad\n'), ((196, 223), 'abjad.TimeSignature', 'abjad.TimeSignature', (['(3, 8)'], {}), '((3, 8))\n', (215, 223), False, 'import abjad\n'), ((246, 267), 'abjad.lilypond', 'abjad.lilypond', (['staff'], {}), '(staff)\n', (260, 267), False, 'import abjad\n'), ((271, 470), 'abjad.String.normalize', 'abjad.String.normalize', (['"""\n \\\\new Staff\n {\n \\\\time 3/8\n c\'4\n d\'8\n \\\\time 3/8\n c\'4\n d\'8\n }\n """'], {}), '(\n """\n \\\\new Staff\n {\n \\\\time 3/8\n c\'4\n d\'8\n \\\\time 3/8\n c\'4\n d\'8\n }\n """\n )\n', (293, 470), False, 'import abjad\n'), ((544, 565), 'abjad.lilypond', 'abjad.lilypond', (['staff'], {}), '(staff)\n', (558, 565), False, 'import abjad\n'), ((569, 745), 'abjad.String.normalize', 'abjad.String.normalize', (['"""\n \\\\new Staff\n {\n \\\\time 3/8\n c\'4\n d\'8\n c\'4\n d\'8\n }\n """'], {}), '(\n """\n \\\\new Staff\n {\n \\\\time 3/8\n c\'4\n d\'8\n c\'4\n d\'8\n }\n """\n )\n', (591, 745), False, 'import abjad\n'), ((868, 895), 'abjad.TimeSignature', 'abjad.TimeSignature', (['(3, 8)'], {}), '((3, 8))\n', (887, 895), False, 'import abjad\n'), ((924, 951), 'abjad.TimeSignature', 'abjad.TimeSignature', (['(3, 8)'], {}), '((3, 8))\n', (943, 951), False, 'import abjad\n'), ((974, 995), 'abjad.lilypond', 'abjad.lilypond', (['staff'], {}), '(staff)\n', (988, 995), False, 'import abjad\n'), ((999, 1215), 'abjad.String.normalize', 'abjad.String.normalize', (['"""\n \\\\new Staff\n {\n \\\\time 3/8\n c\'4\n d\'8\n e\'4.\n \\\\time 3/8\n c\'4\n d\'8\n }\n """'], {}), '(\n """\n \\\\new Staff\n {\n \\\\time 3/8\n c\'4\n d\'8\n e\'4.\n \\\\time 3/8\n c\'4\n d\'8\n }\n """\n )\n', (1021, 1215), False, 'import abjad\n'), ((1289, 1310), 'abjad.lilypond', 'abjad.lilypond', (['staff'], {}), '(staff)\n', (1303, 1310), False, 'import abjad\n'), ((1314, 1507), 'abjad.String.normalize', 'abjad.String.normalize', (['"""\n \\\\new Staff\n {\n \\\\time 3/8\n c\'4\n d\'8\n e\'4.\n c\'4\n d\'8\n }\n """'], {}), '(\n """\n \\\\new Staff\n {\n \\\\time 3/8\n c\'4\n d\'8\n e\'4.\n c\'4\n d\'8\n }\n """\n )\n', (1336, 1507), False, 'import abjad\n'), ((1758, 1785), 'abjad.TimeSignature', 'abjad.TimeSignature', (['(2, 2)'], {}), '((2, 2))\n', (1777, 1785), False, 'import abjad\n'), ((1814, 1841), 'abjad.TimeSignature', 'abjad.TimeSignature', (['(2, 2)'], {}), '((2, 2))\n', (1833, 1841), False, 'import abjad\n'), ((1867, 1888), 'abjad.lilypond', 'abjad.lilypond', (['staff'], {}), '(staff)\n', (1881, 1888), False, 'import abjad\n'), ((1892, 2177), 'abjad.String.normalize', 'abjad.String.normalize', (['"""\n \\\\new Staff\n {\n \\\\time 2/2\n c\'2\n <d\' f\'>2\n \\\\times 2/3\n {\n \\\\time 2/2\n g2\n a2\n b2\n }\n }\n """'], {}), '(\n """\n \\\\new Staff\n {\n \\\\time 2/2\n c\'2\n <d\' f\'>2\n \\\\times 2/3\n {\n \\\\time 2/2\n g2\n a2\n b2\n }\n }\n """\n )\n', (1914, 2177), False, 'import abjad\n'), ((2250, 2271), 'abjad.lilypond', 'abjad.lilypond', (['staff'], {}), '(staff)\n', (2264, 2271), False, 'import abjad\n'), ((2275, 2533), 'abjad.String.normalize', 'abjad.String.normalize', (['"""\n \\\\new Staff\n {\n \\\\time 2/2\n c\'2\n <d\' f\'>2\n \\\\times 2/3\n {\n g2\n a2\n b2\n }\n }\n """'], {}), '(\n """\n \\\\new Staff\n {\n \\\\time 2/2\n c\'2\n <d\' f\'>2\n \\\\times 2/3\n {\n g2\n a2\n b2\n }\n }\n """\n )\n', (2297, 2533), False, 'import abjad\n'), ((2648, 2675), 'abjad.TimeSignature', 'abjad.TimeSignature', (['(4, 4)'], {}), '((4, 4))\n', (2667, 2675), False, 'import abjad\n'), ((2758, 2779), 'abjad.lilypond', 'abjad.lilypond', (['staff'], {}), '(staff)\n', (2772, 2779), False, 'import abjad\n'), ((2783, 2936), 'abjad.String.normalize', 'abjad.String.normalize', (['"""\n \\\\new Staff\n {\n c\'2\n d\'2\n e\'2\n d\'2\n }\n """'], {}), '(\n """\n \\\\new Staff\n {\n c\'2\n d\'2\n e\'2\n d\'2\n }\n """\n )\n', (2805, 2936), False, 'import abjad\n'), ((3053, 3080), 'abjad.TimeSignature', 'abjad.TimeSignature', (['(3, 8)'], {}), '((3, 8))\n', (3072, 3080), False, 'import abjad\n'), ((3109, 3136), 'abjad.TimeSignature', 'abjad.TimeSignature', (['(3, 8)'], {}), '((3, 8))\n', (3128, 3136), False, 'import abjad\n'), ((3159, 3180), 'abjad.lilypond', 'abjad.lilypond', (['staff'], {}), '(staff)\n', (3173, 3180), False, 'import abjad\n'), ((3184, 3383), 'abjad.String.normalize', 'abjad.String.normalize', (['"""\n \\\\new Staff\n {\n \\\\time 3/8\n c\'4\n d\'8\n \\\\time 3/8\n c\'4\n d\'8\n }\n """'], {}), '(\n """\n \\\\new Staff\n {\n \\\\time 3/8\n c\'4\n d\'8\n \\\\time 3/8\n c\'4\n d\'8\n }\n """\n )\n', (3206, 3383), False, 'import abjad\n'), ((3456, 3477), 'abjad.lilypond', 'abjad.lilypond', (['staff'], {}), '(staff)\n', (3470, 3477), False, 'import abjad\n'), ((3481, 3657), 'abjad.String.normalize', 'abjad.String.normalize', (['"""\n \\\\new Staff\n {\n \\\\time 3/8\n c\'4\n d\'8\n c\'4\n d\'8\n }\n """'], {}), '(\n """\n \\\\new Staff\n {\n \\\\time 3/8\n c\'4\n d\'8\n c\'4\n d\'8\n }\n """\n )\n', (3503, 3657), False, 'import abjad\n'), ((1585, 1602), 'abjad.Note', 'abjad.Note', (['"""c\'2"""'], {}), '("c\'2")\n', (1595, 1602), False, 'import abjad\n'), ((1629, 1652), 'abjad.Chord', 'abjad.Chord', (['"""<d\' f\'>2"""'], {}), '("<d\' f\'>2")\n', (1640, 1652), False, 'import abjad\n'), ((1679, 1711), 'abjad.Tuplet', 'abjad.Tuplet', (['(2, 3)', '"""g2 a2 b2"""'], {}), "((2, 3), 'g2 a2 b2')\n", (1691, 1711), False, 'import abjad\n')]
|
import pytest
import datetime as dt
from subtypes import DateTime
@pytest.fixture
def example_datetime():
return DateTime(1994, 3, 24, 12, 30, 15)
class TestDateTime:
def test___str__(self): # synced
assert True
def test_shift(self, example_datetime): # synced
assert example_datetime.shift(years=26, months=-2, days=-23, hours=-12, minutes=-30, seconds=-15) == DateTime(2020, 1, 1)
def test_date(self): # synced
assert True
def test_time(self): # synced
assert True
def test_to_stdlib(self): # synced
assert True
def test_to_isoformat(self, example_datetime): # synced
assert example_datetime.to_isoformat() == "1994-03-24 12:30:15"
def test_to_format(self): # synced
assert True
def test_from_datetime(self, example_datetime): # synced
assert example_datetime == DateTime.from_datetime(dt.datetime(1994, 3, 24, 12, 30, 15))
def test_from_isoformat(self): # synced
assert True
def test_from_format(self): # synced
assert True
def test_from_string(self): # synced
assert True
def test_from_parts(self): # synced
assert True
def test_infer(self): # synced
assert True
def test_TimeZone(self): # synced
assert True
def test_Hour(self): # synced
assert True
def test_Minute(self): # synced
assert True
def test_Second(self): # synced
assert True
def test_MicroSecond(self): # synced
assert True
|
[
"subtypes.DateTime",
"datetime.datetime"
] |
[((120, 153), 'subtypes.DateTime', 'DateTime', (['(1994)', '(3)', '(24)', '(12)', '(30)', '(15)'], {}), '(1994, 3, 24, 12, 30, 15)\n', (128, 153), False, 'from subtypes import DateTime\n'), ((398, 418), 'subtypes.DateTime', 'DateTime', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (406, 418), False, 'from subtypes import DateTime\n'), ((908, 944), 'datetime.datetime', 'dt.datetime', (['(1994)', '(3)', '(24)', '(12)', '(30)', '(15)'], {}), '(1994, 3, 24, 12, 30, 15)\n', (919, 944), True, 'import datetime as dt\n')]
|
import argparse
import os
import git
from enum import Enum
script_location = os.path.dirname(__file__)
repo_path = os.path.abspath(os.sep.join([script_location, '..', '..']))
print('Using git repository location {}'.format(repo_path))
repo = git.Repo(repo_path)
repo_submodules = repo.submodules
patches_folder = os.sep.join([repo_path, 'PolyEngine', 'ThirdParty', 'patches'])
for submodule in repo_submodules:
sub_name = os.path.basename(os.path.normpath(submodule.path))
print('Initializing submodule [{}] in path: {}'.format(sub_name, submodule.path))
submodule.update(init=True, force=True)
patch_name = os.sep.join([patches_folder, '{}.patch'.format(sub_name)])
# Apply patch if needed
if os.path.isfile(patch_name):
print('Applying patch to submodule {} found in {}'.format(sub_name,patch_name))
sub_repo = submodule.module()
sub_repo.git.reset(['--hard']) # Reset first
sub_repo.git.apply([patch_name]) # Apply patch
|
[
"os.path.dirname",
"git.Repo",
"os.path.isfile",
"os.path.normpath",
"os.sep.join"
] |
[((78, 103), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (93, 103), False, 'import os\n'), ((243, 262), 'git.Repo', 'git.Repo', (['repo_path'], {}), '(repo_path)\n', (251, 262), False, 'import git\n'), ((315, 378), 'os.sep.join', 'os.sep.join', (["[repo_path, 'PolyEngine', 'ThirdParty', 'patches']"], {}), "([repo_path, 'PolyEngine', 'ThirdParty', 'patches'])\n", (326, 378), False, 'import os\n'), ((132, 174), 'os.sep.join', 'os.sep.join', (["[script_location, '..', '..']"], {}), "([script_location, '..', '..'])\n", (143, 174), False, 'import os\n'), ((727, 753), 'os.path.isfile', 'os.path.isfile', (['patch_name'], {}), '(patch_name)\n', (741, 753), False, 'import os\n'), ((446, 478), 'os.path.normpath', 'os.path.normpath', (['submodule.path'], {}), '(submodule.path)\n', (462, 478), False, 'import os\n')]
|
from django.urls import path
from . import views
urlpatterns = [
path("", views.index, name="index"),
path("reportes", views.reportes, name="reportes"),
path(
"courses_by/<str:teacher_name>", views.teacher_courses, name="teacher_courses"
),
path(
"courses_of/<str:subject_name>", views.subject_courses, name="subject_courses"
),
path(
"classrooms_at/<str:time_slot>",
views.available_classrooms,
name="available_classrooms",
),
path("teaching_at/<str:time_slot>", views.busy_teachers, name="busy_teachers"),
path(
"not_teaching_at/<str:time_slot>",
views.available_teachers,
name="available_teachers",
),
path(
"course_at/<str:day>/<str:classroom>", views.which_course, name="which_course"
),
path(
"check_classroom/<str:classroom>/<str:time_slot>",
views.validate_slot,
name="validate_slot",
),
]
|
[
"django.urls.path"
] |
[((71, 106), 'django.urls.path', 'path', (['""""""', 'views.index'], {'name': '"""index"""'}), "('', views.index, name='index')\n", (75, 106), False, 'from django.urls import path\n'), ((112, 161), 'django.urls.path', 'path', (['"""reportes"""', 'views.reportes'], {'name': '"""reportes"""'}), "('reportes', views.reportes, name='reportes')\n", (116, 161), False, 'from django.urls import path\n'), ((167, 256), 'django.urls.path', 'path', (['"""courses_by/<str:teacher_name>"""', 'views.teacher_courses'], {'name': '"""teacher_courses"""'}), "('courses_by/<str:teacher_name>', views.teacher_courses, name=\n 'teacher_courses')\n", (171, 256), False, 'from django.urls import path\n'), ((271, 360), 'django.urls.path', 'path', (['"""courses_of/<str:subject_name>"""', 'views.subject_courses'], {'name': '"""subject_courses"""'}), "('courses_of/<str:subject_name>', views.subject_courses, name=\n 'subject_courses')\n", (275, 360), False, 'from django.urls import path\n'), ((375, 474), 'django.urls.path', 'path', (['"""classrooms_at/<str:time_slot>"""', 'views.available_classrooms'], {'name': '"""available_classrooms"""'}), "('classrooms_at/<str:time_slot>', views.available_classrooms, name=\n 'available_classrooms')\n", (379, 474), False, 'from django.urls import path\n'), ((506, 584), 'django.urls.path', 'path', (['"""teaching_at/<str:time_slot>"""', 'views.busy_teachers'], {'name': '"""busy_teachers"""'}), "('teaching_at/<str:time_slot>', views.busy_teachers, name='busy_teachers')\n", (510, 584), False, 'from django.urls import path\n'), ((590, 687), 'django.urls.path', 'path', (['"""not_teaching_at/<str:time_slot>"""', 'views.available_teachers'], {'name': '"""available_teachers"""'}), "('not_teaching_at/<str:time_slot>', views.available_teachers, name=\n 'available_teachers')\n", (594, 687), False, 'from django.urls import path\n'), ((719, 808), 'django.urls.path', 'path', (['"""course_at/<str:day>/<str:classroom>"""', 'views.which_course'], {'name': '"""which_course"""'}), "('course_at/<str:day>/<str:classroom>', views.which_course, name=\n 'which_course')\n", (723, 808), False, 'from django.urls import path\n'), ((823, 925), 'django.urls.path', 'path', (['"""check_classroom/<str:classroom>/<str:time_slot>"""', 'views.validate_slot'], {'name': '"""validate_slot"""'}), "('check_classroom/<str:classroom>/<str:time_slot>', views.validate_slot,\n name='validate_slot')\n", (827, 925), False, 'from django.urls import path\n')]
|
from Compiler.program import Program
from .GC import types as GC_types
import sys
import re, tempfile, os
def run(args, options):
""" Compile a file and output a Program object.
If options.merge_opens is set to True, will attempt to merge any
parallelisable open instructions. """
prog = Program(args, options)
VARS['program'] = prog
if options.binary:
VARS['sint'] = GC_types.sbitintvec.get_type(int(options.binary))
VARS['sfix'] = GC_types.sbitfixvec
for i in 'cint', 'cfix', 'cgf2n', 'sintbit', 'sgf2n', 'sgf2nint', \
'sgf2nuint', 'sgf2nuint32', 'sgf2nfloat', 'sfloat', 'cfloat', \
'squant':
del VARS[i]
print('Compiling file', prog.infile)
f = open(prog.infile, 'rb')
changed = False
if options.flow_optimization:
output = []
if_stack = []
for line in open(prog.infile):
if if_stack and not re.match(if_stack[-1][0], line):
if_stack.pop()
m = re.match(
'(\s*)for +([a-zA-Z_]+) +in +range\(([0-9a-zA-Z_]+)\):',
line)
if m:
output.append('%s@for_range_opt(%s)\n' % (m.group(1),
m.group(3)))
output.append('%sdef _(%s):\n' % (m.group(1), m.group(2)))
changed = True
continue
m = re.match('(\s*)if(\W.*):', line)
if m:
if_stack.append((m.group(1), len(output)))
output.append('%s@if_(%s)\n' % (m.group(1), m.group(2)))
output.append('%sdef _():\n' % (m.group(1)))
changed = True
continue
m = re.match('(\s*)elif\s+', line)
if m:
raise CompilerError('elif not supported')
if if_stack:
m = re.match('%selse:' % if_stack[-1][0], line)
if m:
start = if_stack[-1][1]
ws = if_stack[-1][0]
output[start] = re.sub(r'^%s@if_\(' % ws, r'%s@if_e(' % ws,
output[start])
output.append('%s@else_\n' % ws)
output.append('%sdef _():\n' % ws)
continue
output.append(line)
if changed:
infile = tempfile.NamedTemporaryFile('w+', delete=False)
for line in output:
infile.write(line)
infile.seek(0)
else:
infile = open(prog.infile)
else:
infile = open(prog.infile)
# make compiler modules directly accessible
sys.path.insert(0, 'Compiler')
# create the tapes
exec(compile(infile.read(), infile.name, 'exec'), VARS)
if changed and not options.debug:
os.unlink(infile.name)
prog.finalize()
if prog.req_num:
print('Program requires at most:')
for x in prog.req_num.pretty():
print(x)
if prog.verbose:
print('Program requires:', repr(prog.req_num))
print('Cost:', 0 if prog.req_num is None else prog.req_num.cost())
print('Memory size:', dict(prog.allocated_mem))
return prog
|
[
"tempfile.NamedTemporaryFile",
"os.unlink",
"re.match",
"sys.path.insert",
"Compiler.program.Program",
"re.sub"
] |
[((317, 339), 'Compiler.program.Program', 'Program', (['args', 'options'], {}), '(args, options)\n', (324, 339), False, 'from Compiler.program import Program\n'), ((2701, 2731), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""Compiler"""'], {}), "(0, 'Compiler')\n", (2716, 2731), False, 'import sys\n'), ((2862, 2884), 'os.unlink', 'os.unlink', (['infile.name'], {}), '(infile.name)\n', (2871, 2884), False, 'import re, tempfile, os\n'), ((1030, 1104), 're.match', 're.match', (['"""(\\\\s*)for +([a-zA-Z_]+) +in +range\\\\(([0-9a-zA-Z_]+)\\\\):"""', 'line'], {}), "('(\\\\s*)for +([a-zA-Z_]+) +in +range\\\\(([0-9a-zA-Z_]+)\\\\):', line)\n", (1038, 1104), False, 'import re, tempfile, os\n'), ((1441, 1475), 're.match', 're.match', (['"""(\\\\s*)if(\\\\W.*):"""', 'line'], {}), "('(\\\\s*)if(\\\\W.*):', line)\n", (1449, 1475), False, 'import re, tempfile, os\n'), ((1757, 1789), 're.match', 're.match', (['"""(\\\\s*)elif\\\\s+"""', 'line'], {}), "('(\\\\s*)elif\\\\s+', line)\n", (1765, 1789), False, 'import re, tempfile, os\n'), ((2408, 2455), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', (['"""w+"""'], {'delete': '(False)'}), "('w+', delete=False)\n", (2435, 2455), False, 'import re, tempfile, os\n'), ((1909, 1952), 're.match', 're.match', (["('%selse:' % if_stack[-1][0])", 'line'], {}), "('%selse:' % if_stack[-1][0], line)\n", (1917, 1952), False, 'import re, tempfile, os\n'), ((950, 981), 're.match', 're.match', (['if_stack[-1][0]', 'line'], {}), '(if_stack[-1][0], line)\n', (958, 981), False, 'import re, tempfile, os\n'), ((2096, 2153), 're.sub', 're.sub', (["('^%s@if_\\\\(' % ws)", "('%s@if_e(' % ws)", 'output[start]'], {}), "('^%s@if_\\\\(' % ws, '%s@if_e(' % ws, output[start])\n", (2102, 2153), False, 'import re, tempfile, os\n')]
|
#!/bin/python
from __future__ import absolute_import, division, print_function, \
unicode_literals
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import array_ops
class LocalFeatureAlignment(tf.keras.layers.Layer):
def __init__(self, **kwargs):
super(LocalFeatureAlignment, self).__init__(self, **kwargs)
def call(self, inputs):
distance, similarities = inputs
_, i, j, k, d = distance.shape
_, i, j, k_ = similarities.shape
assert(k == k_)
distance = array_ops.reshape(
distance,
(array_ops.shape(distance)[0],)+(i*j, k, d))
argmx = tf.cast(
array_ops.reshape(
tf.keras.backend.argmax(similarities),
(array_ops.shape(similarities)[0],)+(i*j, 1)),
dtype=tf.int32)
ones = tf.cast(tf.keras.backend.ones_like(argmx), dtype=tf.int32)
selector = tf.concat(
[tf.math.multiply(
ones,
tf.keras.backend.reshape(tf.range(i*j), shape=(i*j, 1))),
argmx], axis=-1)
residuals = tf.gather_nd(distance, selector, batch_dims=1)
aligned_residuals = tf.concat(
[residuals, tf.cast(argmx, dtype=tf.float32)],
axis=-1)
return [aligned_residuals]
|
[
"tensorflow.python.ops.array_ops.shape",
"tensorflow.range",
"tensorflow.keras.backend.ones_like",
"tensorflow.keras.backend.argmax",
"tensorflow.gather_nd",
"tensorflow.cast"
] |
[((1123, 1169), 'tensorflow.gather_nd', 'tf.gather_nd', (['distance', 'selector'], {'batch_dims': '(1)'}), '(distance, selector, batch_dims=1)\n', (1135, 1169), True, 'import tensorflow as tf\n'), ((865, 898), 'tensorflow.keras.backend.ones_like', 'tf.keras.backend.ones_like', (['argmx'], {}), '(argmx)\n', (891, 898), True, 'import tensorflow as tf\n'), ((712, 749), 'tensorflow.keras.backend.argmax', 'tf.keras.backend.argmax', (['similarities'], {}), '(similarities)\n', (735, 749), True, 'import tensorflow as tf\n'), ((1233, 1265), 'tensorflow.cast', 'tf.cast', (['argmx'], {'dtype': 'tf.float32'}), '(argmx, dtype=tf.float32)\n', (1240, 1265), True, 'import tensorflow as tf\n'), ((596, 621), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['distance'], {}), '(distance)\n', (611, 621), False, 'from tensorflow.python.ops import array_ops\n'), ((1040, 1055), 'tensorflow.range', 'tf.range', (['(i * j)'], {}), '(i * j)\n', (1048, 1055), True, 'import tensorflow as tf\n'), ((768, 797), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['similarities'], {}), '(similarities)\n', (783, 797), False, 'from tensorflow.python.ops import array_ops\n')]
|
# -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.platform.v20190314 import models
class PlatformClient(AbstractClient):
_apiVersion = '2019-03-14'
_endpoint = 'platform.tencentcloudapi.com'
def DescribePasswords(self, request):
"""获取密码库设备密码
:param request: 调用DescribePasswords所需参数的结构体。
:type request: :class:`tencentcloud.platform.v20190314.models.DescribePasswordsRequest`
:rtype: :class:`tencentcloud.platform.v20190314.models.DescribePasswordsResponse`
"""
try:
params = request._serialize()
body = self.call("DescribePasswords", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribePasswordsResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def ModifyPassword(self, request):
"""修改设备密码
:param request: 调用ModifyPassword所需参数的结构体。
:type request: :class:`tencentcloud.platform.v20190314.models.ModifyPasswordRequest`
:rtype: :class:`tencentcloud.platform.v20190314.models.ModifyPasswordResponse`
"""
try:
params = request._serialize()
body = self.call("ModifyPassword", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ModifyPasswordResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def Passwords(self, request):
"""获取密码
:param request: 调用Passwords所需参数的结构体。
:type request: :class:`tencentcloud.platform.v20190314.models.PasswordsRequest`
:rtype: :class:`tencentcloud.platform.v20190314.models.PasswordsResponse`
"""
try:
params = request._serialize()
body = self.call("Passwords", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.PasswordsResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def QueryPasswords(self, request):
"""查询密码
:param request: 调用QueryPasswords所需参数的结构体。
:type request: :class:`tencentcloud.platform.v20190314.models.QueryPasswordsRequest`
:rtype: :class:`tencentcloud.platform.v20190314.models.QueryPasswordsResponse`
"""
try:
params = request._serialize()
body = self.call("QueryPasswords", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.QueryPasswordsResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def ResetPassword(self, request):
"""重置密码库设备密码
:param request: 调用ResetPassword所需参数的结构体。
:type request: :class:`tencentcloud.platform.v20190314.models.ResetPasswordRequest`
:rtype: :class:`tencentcloud.platform.v20190314.models.ResetPasswordResponse`
"""
try:
params = request._serialize()
body = self.call("ResetPassword", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ResetPasswordResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def SetPasswordTypes(self, request):
"""重新设置密码类型(长期或临时密码)并修改密码
:param request: 调用SetPasswordTypes所需参数的结构体。
:type request: :class:`tencentcloud.platform.v20190314.models.SetPasswordTypesRequest`
:rtype: :class:`tencentcloud.platform.v20190314.models.SetPasswordTypesResponse`
"""
try:
params = request._serialize()
body = self.call("SetPasswordTypes", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.SetPasswordTypesResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
|
[
"tencentcloud.common.exception.tencent_cloud_sdk_exception.TencentCloudSDKException",
"json.loads",
"tencentcloud.platform.v20190314.models.DescribePasswordsResponse",
"tencentcloud.platform.v20190314.models.ResetPasswordResponse",
"tencentcloud.platform.v20190314.models.QueryPasswordsResponse",
"tencentcloud.platform.v20190314.models.ModifyPasswordResponse",
"tencentcloud.platform.v20190314.models.PasswordsResponse",
"tencentcloud.platform.v20190314.models.SetPasswordTypesResponse"
] |
[((1446, 1462), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (1456, 1462), False, 'import json\n'), ((2565, 2581), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (2575, 2581), False, 'import json\n'), ((3654, 3670), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (3664, 3670), False, 'import json\n'), ((4763, 4779), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (4773, 4779), False, 'import json\n'), ((5877, 5893), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (5887, 5893), False, 'import json\n'), ((7018, 7034), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (7028, 7034), False, 'import json\n'), ((1539, 1573), 'tencentcloud.platform.v20190314.models.DescribePasswordsResponse', 'models.DescribePasswordsResponse', ([], {}), '()\n', (1571, 1573), False, 'from tencentcloud.platform.v20190314 import models\n'), ((1886, 1932), 'tencentcloud.common.exception.tencent_cloud_sdk_exception.TencentCloudSDKException', 'TencentCloudSDKException', (['code', 'message', 'reqid'], {}), '(code, message, reqid)\n', (1910, 1932), False, 'from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException\n'), ((2658, 2689), 'tencentcloud.platform.v20190314.models.ModifyPasswordResponse', 'models.ModifyPasswordResponse', ([], {}), '()\n', (2687, 2689), False, 'from tencentcloud.platform.v20190314 import models\n'), ((3002, 3048), 'tencentcloud.common.exception.tencent_cloud_sdk_exception.TencentCloudSDKException', 'TencentCloudSDKException', (['code', 'message', 'reqid'], {}), '(code, message, reqid)\n', (3026, 3048), False, 'from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException\n'), ((3747, 3773), 'tencentcloud.platform.v20190314.models.PasswordsResponse', 'models.PasswordsResponse', ([], {}), '()\n', (3771, 3773), False, 'from tencentcloud.platform.v20190314 import models\n'), ((4086, 4132), 'tencentcloud.common.exception.tencent_cloud_sdk_exception.TencentCloudSDKException', 'TencentCloudSDKException', (['code', 'message', 'reqid'], {}), '(code, message, reqid)\n', (4110, 4132), False, 'from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException\n'), ((4856, 4887), 'tencentcloud.platform.v20190314.models.QueryPasswordsResponse', 'models.QueryPasswordsResponse', ([], {}), '()\n', (4885, 4887), False, 'from tencentcloud.platform.v20190314 import models\n'), ((5200, 5246), 'tencentcloud.common.exception.tencent_cloud_sdk_exception.TencentCloudSDKException', 'TencentCloudSDKException', (['code', 'message', 'reqid'], {}), '(code, message, reqid)\n', (5224, 5246), False, 'from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException\n'), ((5970, 6000), 'tencentcloud.platform.v20190314.models.ResetPasswordResponse', 'models.ResetPasswordResponse', ([], {}), '()\n', (5998, 6000), False, 'from tencentcloud.platform.v20190314 import models\n'), ((6313, 6359), 'tencentcloud.common.exception.tencent_cloud_sdk_exception.TencentCloudSDKException', 'TencentCloudSDKException', (['code', 'message', 'reqid'], {}), '(code, message, reqid)\n', (6337, 6359), False, 'from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException\n'), ((7111, 7144), 'tencentcloud.platform.v20190314.models.SetPasswordTypesResponse', 'models.SetPasswordTypesResponse', ([], {}), '()\n', (7142, 7144), False, 'from tencentcloud.platform.v20190314 import models\n'), ((7457, 7503), 'tencentcloud.common.exception.tencent_cloud_sdk_exception.TencentCloudSDKException', 'TencentCloudSDKException', (['code', 'message', 'reqid'], {}), '(code, message, reqid)\n', (7481, 7503), False, 'from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException\n'), ((2082, 2128), 'tencentcloud.common.exception.tencent_cloud_sdk_exception.TencentCloudSDKException', 'TencentCloudSDKException', (['e.message', 'e.message'], {}), '(e.message, e.message)\n', (2106, 2128), False, 'from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException\n'), ((3198, 3244), 'tencentcloud.common.exception.tencent_cloud_sdk_exception.TencentCloudSDKException', 'TencentCloudSDKException', (['e.message', 'e.message'], {}), '(e.message, e.message)\n', (3222, 3244), False, 'from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException\n'), ((4282, 4328), 'tencentcloud.common.exception.tencent_cloud_sdk_exception.TencentCloudSDKException', 'TencentCloudSDKException', (['e.message', 'e.message'], {}), '(e.message, e.message)\n', (4306, 4328), False, 'from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException\n'), ((5396, 5442), 'tencentcloud.common.exception.tencent_cloud_sdk_exception.TencentCloudSDKException', 'TencentCloudSDKException', (['e.message', 'e.message'], {}), '(e.message, e.message)\n', (5420, 5442), False, 'from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException\n'), ((6509, 6555), 'tencentcloud.common.exception.tencent_cloud_sdk_exception.TencentCloudSDKException', 'TencentCloudSDKException', (['e.message', 'e.message'], {}), '(e.message, e.message)\n', (6533, 6555), False, 'from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException\n'), ((7653, 7699), 'tencentcloud.common.exception.tencent_cloud_sdk_exception.TencentCloudSDKException', 'TencentCloudSDKException', (['e.message', 'e.message'], {}), '(e.message, e.message)\n', (7677, 7699), False, 'from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException\n')]
|
from fastapi import APIRouter, Body, Depends
from ..models.post import ResponseModel
from ..controllers.auth import auth_handler
from ..controllers.verify import verify_post
router = APIRouter()
@router.post("/", response_description="Verify the post's authenticity")
async def verify_post_data(post_id: str = Body(..., embed=True), current_user=Depends(auth_handler.auth_wrapper)):
verified_post = await verify_post(post_id)
if verified_post["is_authentic"]:
return ResponseModel(verified_post, "Verification successful!")
else:
return ResponseModel(verified_post, "Verification failed!")
|
[
"fastapi.Body",
"fastapi.Depends",
"fastapi.APIRouter"
] |
[((184, 195), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (193, 195), False, 'from fastapi import APIRouter, Body, Depends\n'), ((313, 334), 'fastapi.Body', 'Body', (['...'], {'embed': '(True)'}), '(..., embed=True)\n', (317, 334), False, 'from fastapi import APIRouter, Body, Depends\n'), ((349, 383), 'fastapi.Depends', 'Depends', (['auth_handler.auth_wrapper'], {}), '(auth_handler.auth_wrapper)\n', (356, 383), False, 'from fastapi import APIRouter, Body, Depends\n')]
|
# Copyright 2021-xx iiPython
# Modules
from typing import Union
from datetime import datetime
from secrets import token_hex
# Timer class
class Timer(object):
def __init__(self) -> None:
self._st_times = {}
self._ret_keys = {"s": lambda x: x, "ms": lambda x: float(x) * 1000}
def start(self) -> str:
timer_id = token_hex(26)
self._st_times[timer_id] = datetime.now()
return timer_id
def end(self, timer_id: str, return_as: str = "s", as_int: bool = False) -> Union[str, int]:
if timer_id not in self._st_times:
raise RuntimeError("invalid timer id: '{}'".format(timer_id))
st_time = self._st_times[timer_id]
del self._st_times[timer_id]
# Handle return value
secs = round((datetime.now() - st_time).total_seconds(), 2)
vals = self._ret_keys[return_as](secs)
return str(vals) if not as_int else int(round(vals))
# Initialization
timer = Timer()
|
[
"secrets.token_hex",
"datetime.datetime.now"
] |
[((346, 359), 'secrets.token_hex', 'token_hex', (['(26)'], {}), '(26)\n', (355, 359), False, 'from secrets import token_hex\n'), ((395, 409), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (407, 409), False, 'from datetime import datetime\n'), ((783, 797), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (795, 797), False, 'from datetime import datetime\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import requests
from bs4 import BeautifulSoup
def get_populations(url: str) -> dict:
rs = requests.get(url)
root = BeautifulSoup(rs.content, 'html.parser')
# P1082 -- идентификатор для population
population_node = root.select_one('#P1082')
populations = dict()
# Перебор строк в соседнем от population столбце
for row in population_node.select('.wikibase-statementview'):
# Небольшая хитрость -- берем только первые 2 значения, поидеи это будут: количество людей и дата
number_str, data_str = row.select('.wikibase-snakview-value')[:2]
# Вытаскиваем текст из
number_str = number_str.text.strip()
data_str = data_str.text.strip()
# Делаем разделение и берем последнуюю часть, после приводим к числу
# "1 July 2012" -> 2012, "2010" -> 2010
year = int(data_str.split()[-1])
# Добавляем в словарь
populations[year] = number_str
return populations
def get_population_by_year(populations: dict, year: int) -> str:
# Если такой год не будет найден, вернем -1
return populations.get(year, -1)
# Аналогично get_population_by_year, но сначала вытащит данные из
# указанного url, а после достанет значение по year
def get_population_from_url_by_year(url: str, year: int) -> str:
populations = get_populations(url)
return get_population_by_year(populations, year)
if __name__ == '__main__':
url = 'https://www.wikidata.org/wiki/Q148'
populations = get_populations(url)
print(populations) # {2012: '1,375,198,619', 2010: '1,359,755,102', 2015: '1,397,028,553', ...
# Выводим данные с сортировкой по ключу: по возрастанию
for year in sorted(populations):
print("{}: {}".format(year, populations[year]))
# 2010: 1,359,755,102
# 2011: 1,367,480,264
# 2012: 1,375,198,619
# 2013: 1,382,793,212
# 2014: 1,390,110,388
# 2015: 1,397,028,553
# 2016: 1,403,500,365
# 2017: 1,409,517,397
print(get_population_by_year(populations, 2012)) # 1,375,198,619
print(get_population_by_year(populations, 2013)) # 1,382,793,212
print(get_population_by_year(populations, 2014)) # 1,390,110,388
|
[
"bs4.BeautifulSoup",
"requests.get"
] |
[((170, 187), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (182, 187), False, 'import requests\n'), ((199, 239), 'bs4.BeautifulSoup', 'BeautifulSoup', (['rs.content', '"""html.parser"""'], {}), "(rs.content, 'html.parser')\n", (212, 239), False, 'from bs4 import BeautifulSoup\n')]
|
#!/usr/bin/env python
#Generates a wordcloud from a exported whatsapp chat
#3/06/2018
from os import path
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
from PIL import Image
import emoji
import re
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
#d = path.dirname(__file__)
d="./"
# Read the whole text.
text=""
#f= open(path.join(d, '../wappStats/310118.txt'))
f= open(path.join(d, 'agatka.txt'))
i=0
for line in f:
if i ==0:
print(line, datetime.strptime(line[0:15].rstrip(" "),'%d/%m/%y, %H:%M'), line[18:],"".join(line[18:].split(": ")[1:]).rstrip("\n"))
try:
date=datetime.strptime(line[0:15].rstrip(" "),'%d/%m/%y, %H:%M')
restOfLine="".join(line[18:].split(": ")[1:]).rstrip("\n")
except: #messages with \n (date fails)
restOfLine=line.rstrip("\n")
if (restOfLine != "<Media omitted>"):
for word in restOfLine.split():
decode= word#.decode('utf-8')
good= True
for c in decode:
if c in emoji.UNICODE_EMOJI:
good= False
break
if good:
text+=(''.join(decode))
text+=(" ")
i+=1
#print(i)
#print text
stopwords = set(STOPWORDS)
stopwords.add("ye")
stopwords.add("know")
stopwords.add("one")
stopwords.add("lot")
stopwords.add("tell")
stopwords.add("say")
stopwords.add("think")
stopwords.add("yes")
stopwords.add("will")
stopwords.add("maybe")
stopwords.add("even")
stopwords.add("still")
stopwords.add("now")
stopwords.add("really")
stopwords.add("later")
#stopwords.add("ok")
#stopwords.add("going")
stopwords.add("go")
#stopwords.add("well")
stopwords.add("nd")
#stopwords.add("yeah")
stopwords.add("got")
stopwords.add("'m'")
stopwords.add("o")
stopwords.add("ut")
stopwords.add("ou")
stopwords.add("ricardo")
stopwords.add("aby")
stopwords.add("'m'")
stopwords.add("t's")
#print stopwords
# Initializing Dictionary
dic = {}
# Count number of times each word comes up in list of words (in dictionary)
for word in text.split():
if word.lower() not in stopwords:
if word not in dic:
dic[word] = 0
dic[word] += 1
word_freq = []
for key, value in dic.items():
word_freq.append((value, key))
word_freq.sort(reverse=True)
# read the mask image
h_mask = np.array(Image.open(path.join(d, "blue.png")))
wc = WordCloud(background_color="white", max_words=10000, mask=h_mask,
stopwords=stopwords)
# generate word cloud
#print wc.process_text(text)
#print dic
wc.generate_from_frequencies(dic)
#wc.generate_from_frequencies(wc.process_text(text))
#wc.generate(text)
# create coloring from image
image_colors = ImageColorGenerator(h_mask)
# store to file
wc.recolor(color_func=image_colors).to_file(path.join(d, "wordcloudtest.png"))
# show
#plt.imshow(wc, interpolation='bilinear')
plt.imshow(wc.recolor(color_func=image_colors), interpolation="bilinear")
plt.axis("off")
#plt.figure()
#plt.imshow(h_mask, cmap=plt.cm.gray, interpolation='bilinear')
#plt.axis("off")
plt.show()
|
[
"matplotlib.pyplot.show",
"wordcloud.ImageColorGenerator",
"wordcloud.WordCloud",
"matplotlib.pyplot.axis",
"os.path.join"
] |
[((2428, 2519), 'wordcloud.WordCloud', 'WordCloud', ([], {'background_color': '"""white"""', 'max_words': '(10000)', 'mask': 'h_mask', 'stopwords': 'stopwords'}), "(background_color='white', max_words=10000, mask=h_mask, stopwords\n =stopwords)\n", (2437, 2519), False, 'from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator\n'), ((2745, 2772), 'wordcloud.ImageColorGenerator', 'ImageColorGenerator', (['h_mask'], {}), '(h_mask)\n', (2764, 2772), False, 'from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator\n'), ((2994, 3009), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3002, 3009), True, 'import matplotlib.pyplot as plt\n'), ((3105, 3115), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3113, 3115), True, 'import matplotlib.pyplot as plt\n'), ((423, 449), 'os.path.join', 'path.join', (['d', '"""agatka.txt"""'], {}), "(d, 'agatka.txt')\n", (432, 449), False, 'from os import path\n'), ((2834, 2867), 'os.path.join', 'path.join', (['d', '"""wordcloudtest.png"""'], {}), "(d, 'wordcloudtest.png')\n", (2843, 2867), False, 'from os import path\n'), ((2394, 2418), 'os.path.join', 'path.join', (['d', '"""blue.png"""'], {}), "(d, 'blue.png')\n", (2403, 2418), False, 'from os import path\n')]
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import json
import os
import unittest
from infra.libs.buildbot import master
from infra_libs.time_functions import timestamp
from infra_libs.utils import temporary_directory
from infra.services.master_manager_launcher import desired_state_parser
from testing_support import auto_stub
DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
# UNIX timestamp corresponding to 500 seconds past epoch.
UNIX_TIMESTAMP_0500 = '1970-01-01T00:08:20Z'
UNIX_TIMESTAMP_1000 = '1970-01-01T00:16:40Z'
UNIX_TIMESTAMP_4000 = '1970-01-01T01:06:40Z'
UNIX_TIMESTAMP_5000 = '1970-01-01T01:23:20Z'
UNIX_TIMESTAMP_6000 = '1970-01-01T01:40:00Z'
UNIX_TIMESTAMP_7000 = '1970-01-01T01:56:40Z'
UNIX_TIMESTAMP_8000 = '1970-01-01T02:13:20Z'
class TestDesiredStateValidation(auto_stub.TestCase):
def setUp(self):
super(TestDesiredStateValidation, self).setUp()
self.mock(timestamp, 'utcnow_ts', lambda: 5000)
def _stateConfig(self, states, **params):
c = {
'version': desired_state_parser.VERSION,
'master_states': {
'master.chromium.fyi': states,
},
}
if params:
c['master_params'] = {
'master.chromium.fyi': params,
}
return c
def testValidState(self):
desired_state_parser.validate_desired_master_state(self._stateConfig(
[
{'desired_state': 'running',
'transition_time_utc': UNIX_TIMESTAMP_4000},
{'desired_state': 'offline',
'transition_time_utc': UNIX_TIMESTAMP_6000},
],
drain_timeout_sec=1300,
builder_filters=[
r'^valid$',
],
))
def testValidStateZulu(self):
desired_state_parser.validate_desired_master_state(self._stateConfig([
{'desired_state': 'running',
'transition_time_utc': UNIX_TIMESTAMP_4000},
{'desired_state': 'offline',
'transition_time_utc': UNIX_TIMESTAMP_6000},
]))
def testNoDesiredState(self):
with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
desired_state_parser.validate_desired_master_state(self._stateConfig([
{'transition_time_utc': UNIX_TIMESTAMP_4000},
{'desired_state': 'offline',
'transition_time_utc': UNIX_TIMESTAMP_6000},
]))
def testNoTransitionTime(self):
with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
desired_state_parser.validate_desired_master_state(self._stateConfig([
{'desired_state': 'running',
'transition_time_utc': UNIX_TIMESTAMP_4000},
{'desired_state': 'offline'},
]))
def testTransitionTimeInvalid(self):
with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
desired_state_parser.validate_desired_master_state(self._stateConfig([
{'desired_state': 'running', 'transition_time_utc': 'boats'},
{'desired_state': 'offline', 'transition_time_utc': 'llama'},
]))
def testNotSorted(self):
with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
desired_state_parser.validate_desired_master_state(self._stateConfig([
{'desired_state': 'offline',
'transition_time_utc': UNIX_TIMESTAMP_6000},
{'desired_state': 'running',
'transition_time_utc': UNIX_TIMESTAMP_4000},
]))
def testNotSortedZulu(self):
with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
desired_state_parser.validate_desired_master_state(self._stateConfig([
{'desired_state': 'offline',
'transition_time_utc': UNIX_TIMESTAMP_6000},
{'desired_state': 'running',
'transition_time_utc': UNIX_TIMESTAMP_4000},
]))
def testInvalidState(self):
with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
desired_state_parser.validate_desired_master_state(self._stateConfig([
{'desired_state': 'pajamas',
'transition_time_utc': UNIX_TIMESTAMP_4000},
{'desired_state': 'offline',
'transition_time_utc': UNIX_TIMESTAMP_6000},
]))
def testUncertainPresent(self):
with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
desired_state_parser.validate_desired_master_state(self._stateConfig([
{'desired_state': 'running',
'transition_time_utc': UNIX_TIMESTAMP_6000},
{'desired_state': 'offline',
'transition_time_utc': UNIX_TIMESTAMP_8000},
]))
def testUnknownKeyPresent(self):
with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
desired_state_parser.validate_desired_master_state(self._stateConfig(
[
{'desired_state': 'running',
'transition_time_utc': UNIX_TIMESTAMP_4000},
{'desired_state': 'offline',
'transition_time_utc': UNIX_TIMESTAMP_6000},
],
unknown_key=1337,
))
def testNonNumericDrainTimeout(self):
with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
desired_state_parser.validate_desired_master_state(self._stateConfig(
[
{'desired_state': 'running',
'transition_time_utc': UNIX_TIMESTAMP_4000},
{'desired_state': 'offline',
'transition_time_utc': UNIX_TIMESTAMP_6000},
],
drain_timeout_sec='abc',
))
def testInvalidBuilderFilter(self):
with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
desired_state_parser.validate_desired_master_state(self._stateConfig(
[
{'desired_state': 'running',
'transition_time_utc': UNIX_TIMESTAMP_4000},
{'desired_state': 'offline',
'transition_time_utc': UNIX_TIMESTAMP_6000},
],
builder_filters=[
r'+invalid-regex+',
],
))
def testDifferentVersion(self):
# Confirm that the configuration loads.
c = self._stateConfig([
{'desired_state': 'running',
'transition_time_utc': UNIX_TIMESTAMP_4000},
{'desired_state': 'offline',
'transition_time_utc': UNIX_TIMESTAMP_6000},
])
desired_state_parser.validate_desired_master_state(c)
# Modify the version to invalidate it.
c['version'] = 'test'
with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
desired_state_parser.validate_desired_master_state(c)
def testValidFile(self):
desired_state_parser.load_desired_state_file(
os.path.join(DATA_DIR, 'valid.json'))
def testValidPrevVersion(self):
desired_state_parser.load_desired_state_file(
os.path.join(DATA_DIR, 'valid_prev_version.json'))
def testInvalidFile(self):
with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
desired_state_parser.load_desired_state_file(
os.path.join(DATA_DIR, 'invalid.json'))
def testBrokenFile(self):
with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
desired_state_parser.load_desired_state_file(
os.path.join(DATA_DIR, 'broken.json'))
def testIllegallyManaged(self):
with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
desired_state_parser.validate_desired_master_state(self._stateConfig([
{'desired_state': 'running',
'transition_time_utc': UNIX_TIMESTAMP_4000},
{'desired_state': 'offline',
'transition_time_utc': UNIX_TIMESTAMP_6000},
],
manually_managed='<EMAIL>',
))
class TestMasterStateLookup(unittest.TestCase):
STATE_CONFIG = [
{'desired_state': 'pajamas', 'transition_time_utc': UNIX_TIMESTAMP_4000},
{'desired_state': 'offline', 'transition_time_utc': UNIX_TIMESTAMP_6000},
]
def testUnknownPast(self):
state = desired_state_parser.get_master_state(self.STATE_CONFIG, now=300)
self.assertIsNone(state)
def testMiddle(self):
state = desired_state_parser.get_master_state(self.STATE_CONFIG, now=4500)
self.assertEqual(state, self.STATE_CONFIG[0])
def testEnd(self):
state = desired_state_parser.get_master_state(self.STATE_CONFIG, now=8000)
self.assertEqual(state, self.STATE_CONFIG[1])
class TestHostnameLookup(auto_stub.TestCase):
def setUp(self):
super(TestHostnameLookup, self).setUp()
self.mock(master, 'get_mastermap_for_host', lambda _x, _y: [
{'dirname': 'master.chromium', 'internal': False},
{'dirname': 'master.chromium.fyi', 'internal': False},
{'dirname': 'master.supersecret', 'internal': True},
{'dirname': 'master.ultrasecret', 'internal': True},
])
def testHostnameLookup(self):
"""Test that selected masters are triggered and all else are ignored."""
desired_state = {
'version': desired_state_parser.VERSION,
'master_states': {
'master.chromium.fyi': [
{'desired_state': 'running',
'transition_time_utc': UNIX_TIMESTAMP_4000},
],
'master.supersecret': [
{'desired_state': 'running',
'transition_time_utc': UNIX_TIMESTAMP_4000},
],
},
'master_params': {
'master.chromium.fyi': {
'drain_timeout_sec': 1337,
},
},
}
triggered, ignored = desired_state_parser.get_masters_for_host(
desired_state,
'bananas/',
'impenetrablefortress.cool'
)
self.assertEqual(
[t['dirname'] for t in triggered],
['master.chromium.fyi', 'master.supersecret'])
self.assertEqual(ignored, set(['master.chromium', 'master.ultrasecret']))
self.assertEqual(triggered[0]['params'], {
'drain_timeout_sec': 1337,
})
self.assertEqual(triggered[1]['params'], {})
self.assertEqual(sorted(ignored), [
'master.chromium',
'master.ultrasecret',
])
for master_dict in triggered:
self.assertIn(master_dict['dirname'], desired_state['master_states'])
class TestWritingState(auto_stub.TestCase):
def setUp(self):
super(TestWritingState, self).setUp()
self.mock(timestamp, 'utcnow_ts', lambda: 5000)
def testPruneOldEntries(self):
with temporary_directory() as dirname:
filename = os.path.join(dirname, 'desired_state.json')
desired_state_parser.write_master_state({
'master_states': {
'master.chromium.fyi': [
{'desired_state': 'running',
'transition_time_utc': UNIX_TIMESTAMP_0500},
{'desired_state': 'running',
'transition_time_utc': UNIX_TIMESTAMP_1000},
{'desired_state': 'running',
'transition_time_utc': UNIX_TIMESTAMP_4000},
{'desired_state': 'offline',
'transition_time_utc': UNIX_TIMESTAMP_6000},
{'desired_state': 'offline',
'transition_time_utc': UNIX_TIMESTAMP_7000},
]},
'master_params': {},
'version': desired_state_parser.PREV_VERSION,
}, filename)
with open(filename) as f:
parsed_data = json.load(f)
self.assertEqual(parsed_data, {
'master_states': {
'master.chromium.fyi': [
{'desired_state': 'running',
'transition_time_utc': UNIX_TIMESTAMP_1000},
{'desired_state': 'running',
'transition_time_utc': UNIX_TIMESTAMP_4000},
{'desired_state': 'offline',
'transition_time_utc': UNIX_TIMESTAMP_6000},
{'desired_state': 'offline',
'transition_time_utc': UNIX_TIMESTAMP_7000},
]},
'master_params': {},
'version': desired_state_parser.PREV_VERSION,
}, filename)
def testInvalidState(self):
with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
with temporary_directory() as dirname:
filename = os.path.join(dirname, 'desired_state.json')
desired_state_parser.write_master_state({
'master_states': {
'master.chromium.fyi': [
{'desired_state': 'running',
'transition_time_utc': 'toast'},
{'desired_state': 'running',
'transition_time_utc': UNIX_TIMESTAMP_4000},
{'desired_state': 'offline',
'transition_time_utc': UNIX_TIMESTAMP_6000},
{'desired_state': 'offline',
'transition_time_utc': UNIX_TIMESTAMP_7000},
]},
'master_params': {},
'version': desired_state_parser.PREV_VERSION,
}, filename)
def testNothingInPast(self):
with self.assertRaises(desired_state_parser.InvalidDesiredMasterState):
with temporary_directory() as dirname:
filename = os.path.join(dirname, 'desired_state.json')
desired_state_parser.write_master_state({
'master_states': {
'master.chromium.fyi': [
{'desired_state': 'offline',
'transition_time_utc': UNIX_TIMESTAMP_6000},
{'desired_state': 'offline',
'transition_time_utc': UNIX_TIMESTAMP_7000},
]},
'master_params': {},
'version': desired_state_parser.PREV_VERSION,
}, filename)
def testNothing(self):
with temporary_directory() as dirname:
filename = os.path.join(dirname, 'desired_state.json')
desired_state_parser.write_master_state({}, filename)
with open(filename) as f:
parsed_data = json.load(f)
self.assertEqual(parsed_data, {
'master_states': {},
'master_params': {},
'version': desired_state_parser.PREV_VERSION,
})
|
[
"os.path.abspath",
"infra.services.master_manager_launcher.desired_state_parser.get_master_state",
"json.load",
"infra_libs.utils.temporary_directory",
"infra.services.master_manager_launcher.desired_state_parser.validate_desired_master_state",
"infra.services.master_manager_launcher.desired_state_parser.write_master_state",
"infra.services.master_manager_launcher.desired_state_parser.get_masters_for_host",
"os.path.join"
] |
[((508, 533), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (523, 533), False, 'import os\n'), ((6314, 6367), 'infra.services.master_manager_launcher.desired_state_parser.validate_desired_master_state', 'desired_state_parser.validate_desired_master_state', (['c'], {}), '(c)\n', (6364, 6367), False, 'from infra.services.master_manager_launcher import desired_state_parser\n'), ((7965, 8030), 'infra.services.master_manager_launcher.desired_state_parser.get_master_state', 'desired_state_parser.get_master_state', (['self.STATE_CONFIG'], {'now': '(300)'}), '(self.STATE_CONFIG, now=300)\n', (8002, 8030), False, 'from infra.services.master_manager_launcher import desired_state_parser\n'), ((8097, 8163), 'infra.services.master_manager_launcher.desired_state_parser.get_master_state', 'desired_state_parser.get_master_state', (['self.STATE_CONFIG'], {'now': '(4500)'}), '(self.STATE_CONFIG, now=4500)\n', (8134, 8163), False, 'from infra.services.master_manager_launcher import desired_state_parser\n'), ((8248, 8314), 'infra.services.master_manager_launcher.desired_state_parser.get_master_state', 'desired_state_parser.get_master_state', (['self.STATE_CONFIG'], {'now': '(8000)'}), '(self.STATE_CONFIG, now=8000)\n', (8285, 8314), False, 'from infra.services.master_manager_launcher import desired_state_parser\n'), ((9454, 9555), 'infra.services.master_manager_launcher.desired_state_parser.get_masters_for_host', 'desired_state_parser.get_masters_for_host', (['desired_state', '"""bananas/"""', '"""impenetrablefortress.cool"""'], {}), "(desired_state, 'bananas/',\n 'impenetrablefortress.cool')\n", (9495, 9555), False, 'from infra.services.master_manager_launcher import desired_state_parser\n'), ((6520, 6573), 'infra.services.master_manager_launcher.desired_state_parser.validate_desired_master_state', 'desired_state_parser.validate_desired_master_state', (['c'], {}), '(c)\n', (6570, 6573), False, 'from infra.services.master_manager_launcher import desired_state_parser\n'), ((6660, 6696), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""valid.json"""'], {}), "(DATA_DIR, 'valid.json')\n", (6672, 6696), False, 'import os\n'), ((6791, 6840), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""valid_prev_version.json"""'], {}), "(DATA_DIR, 'valid_prev_version.json')\n", (6803, 6840), False, 'import os\n'), ((10333, 10354), 'infra_libs.utils.temporary_directory', 'temporary_directory', ([], {}), '()\n', (10352, 10354), False, 'from infra_libs.utils import temporary_directory\n'), ((10384, 10427), 'os.path.join', 'os.path.join', (['dirname', '"""desired_state.json"""'], {}), "(dirname, 'desired_state.json')\n", (10396, 10427), False, 'import os\n'), ((10434, 11001), 'infra.services.master_manager_launcher.desired_state_parser.write_master_state', 'desired_state_parser.write_master_state', (["{'master_states': {'master.chromium.fyi': [{'desired_state': 'running',\n 'transition_time_utc': UNIX_TIMESTAMP_0500}, {'desired_state':\n 'running', 'transition_time_utc': UNIX_TIMESTAMP_1000}, {\n 'desired_state': 'running', 'transition_time_utc': UNIX_TIMESTAMP_4000},\n {'desired_state': 'offline', 'transition_time_utc': UNIX_TIMESTAMP_6000\n }, {'desired_state': 'offline', 'transition_time_utc':\n UNIX_TIMESTAMP_7000}]}, 'master_params': {}, 'version':\n desired_state_parser.PREV_VERSION}", 'filename'], {}), "({'master_states': {\n 'master.chromium.fyi': [{'desired_state': 'running',\n 'transition_time_utc': UNIX_TIMESTAMP_0500}, {'desired_state':\n 'running', 'transition_time_utc': UNIX_TIMESTAMP_1000}, {\n 'desired_state': 'running', 'transition_time_utc': UNIX_TIMESTAMP_4000},\n {'desired_state': 'offline', 'transition_time_utc': UNIX_TIMESTAMP_6000\n }, {'desired_state': 'offline', 'transition_time_utc':\n UNIX_TIMESTAMP_7000}]}, 'master_params': {}, 'version':\n desired_state_parser.PREV_VERSION}, filename)\n", (10473, 11001), False, 'from infra.services.master_manager_launcher import desired_state_parser\n'), ((13382, 13403), 'infra_libs.utils.temporary_directory', 'temporary_directory', ([], {}), '()\n', (13401, 13403), False, 'from infra_libs.utils import temporary_directory\n'), ((13433, 13476), 'os.path.join', 'os.path.join', (['dirname', '"""desired_state.json"""'], {}), "(dirname, 'desired_state.json')\n", (13445, 13476), False, 'import os\n'), ((13483, 13536), 'infra.services.master_manager_launcher.desired_state_parser.write_master_state', 'desired_state_parser.write_master_state', (['{}', 'filename'], {}), '({}, filename)\n', (13522, 13536), False, 'from infra.services.master_manager_launcher import desired_state_parser\n'), ((7010, 7048), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""invalid.json"""'], {}), "(DATA_DIR, 'invalid.json')\n", (7022, 7048), False, 'import os\n'), ((7217, 7254), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""broken.json"""'], {}), "(DATA_DIR, 'broken.json')\n", (7229, 7254), False, 'import os\n'), ((11204, 11216), 'json.load', 'json.load', (['f'], {}), '(f)\n', (11213, 11216), False, 'import json\n'), ((11948, 11969), 'infra_libs.utils.temporary_directory', 'temporary_directory', ([], {}), '()\n', (11967, 11969), False, 'from infra_libs.utils import temporary_directory\n'), ((12001, 12044), 'os.path.join', 'os.path.join', (['dirname', '"""desired_state.json"""'], {}), "(dirname, 'desired_state.json')\n", (12013, 12044), False, 'import os\n'), ((12053, 12530), 'infra.services.master_manager_launcher.desired_state_parser.write_master_state', 'desired_state_parser.write_master_state', (["{'master_states': {'master.chromium.fyi': [{'desired_state': 'running',\n 'transition_time_utc': 'toast'}, {'desired_state': 'running',\n 'transition_time_utc': UNIX_TIMESTAMP_4000}, {'desired_state':\n 'offline', 'transition_time_utc': UNIX_TIMESTAMP_6000}, {\n 'desired_state': 'offline', 'transition_time_utc': UNIX_TIMESTAMP_7000}\n ]}, 'master_params': {}, 'version': desired_state_parser.PREV_VERSION}", 'filename'], {}), "({'master_states': {\n 'master.chromium.fyi': [{'desired_state': 'running',\n 'transition_time_utc': 'toast'}, {'desired_state': 'running',\n 'transition_time_utc': UNIX_TIMESTAMP_4000}, {'desired_state':\n 'offline', 'transition_time_utc': UNIX_TIMESTAMP_6000}, {\n 'desired_state': 'offline', 'transition_time_utc': UNIX_TIMESTAMP_7000}\n ]}, 'master_params': {}, 'version': desired_state_parser.PREV_VERSION},\n filename)\n", (12092, 12530), False, 'from infra.services.master_manager_launcher import desired_state_parser\n'), ((12804, 12825), 'infra_libs.utils.temporary_directory', 'temporary_directory', ([], {}), '()\n', (12823, 12825), False, 'from infra_libs.utils import temporary_directory\n'), ((12857, 12900), 'os.path.join', 'os.path.join', (['dirname', '"""desired_state.json"""'], {}), "(dirname, 'desired_state.json')\n", (12869, 12900), False, 'import os\n'), ((12909, 13240), 'infra.services.master_manager_launcher.desired_state_parser.write_master_state', 'desired_state_parser.write_master_state', (["{'master_states': {'master.chromium.fyi': [{'desired_state': 'offline',\n 'transition_time_utc': UNIX_TIMESTAMP_6000}, {'desired_state':\n 'offline', 'transition_time_utc': UNIX_TIMESTAMP_7000}]},\n 'master_params': {}, 'version': desired_state_parser.PREV_VERSION}", 'filename'], {}), "({'master_states': {\n 'master.chromium.fyi': [{'desired_state': 'offline',\n 'transition_time_utc': UNIX_TIMESTAMP_6000}, {'desired_state':\n 'offline', 'transition_time_utc': UNIX_TIMESTAMP_7000}]},\n 'master_params': {}, 'version': desired_state_parser.PREV_VERSION},\n filename)\n", (12948, 13240), False, 'from infra.services.master_manager_launcher import desired_state_parser\n'), ((13592, 13604), 'json.load', 'json.load', (['f'], {}), '(f)\n', (13601, 13604), False, 'import json\n')]
|
#!/usr/bin/python
""" DEBUGGING PATTERNS
Both patterns in this exercise contain mistakes and won’t match as expected.
Can you fix them? If you get stuck, try printing the tokens in the doc to see
how the text will be split and adjust the pattern so that each dictionary
represents one token.
"""
# Edit pattern1 so that it correctly matches all case-insensitive mentions
# of "Amazon" plus a title-cased proper noun.
# Edit pattern2 so that it correctly matches all case-insensitive mentions
# of "ad-free", plus the following noun.
import spacy
from spacy.matcher import Matcher
nlp = spacy.load("en_core_web_sm")
doc = nlp(
"Twitch Prime, the perks program for Amazon Prime members offering free "
"loot, games and other benefits, is ditching one of its best features: "
"ad-free viewing. According to an email sent out to Amazon Prime members "
"today, ad-free viewing will no longer be included as a part of Twitch "
"Prime for new members, beginning on September 14. However, members with "
"existing annual subscriptions will be able to continue to enjoy ad-free "
"viewing until their subscription comes up for renewal. Those with "
"monthly subscriptions will have access to ad-free viewing until October 15."
)
# Create the match patterns
pattern1 = [{"LOWER": "amazon"}, {"IS_TITLE": True, "POS": "PROPN"}]
pattern2 = [{"LOWER": "ad"}, {"TEXT": "-"}, {"LOWER": "free"}, {"POS": "NOUN"}]
# Initialize the Matcher and add the patterns
matcher = Matcher(nlp.vocab)
matcher.add("PATTERN1", None, pattern1)
matcher.add("PATTERN2", None, pattern2)
# Iterate over the matches
for match_id, start, end in matcher(doc):
# Print pattern string name and text of matched span
print(doc.vocab.strings[match_id], doc[start:end].text)
|
[
"spacy.load",
"spacy.matcher.Matcher"
] |
[((596, 624), 'spacy.load', 'spacy.load', (['"""en_core_web_sm"""'], {}), "('en_core_web_sm')\n", (606, 624), False, 'import spacy\n'), ((1497, 1515), 'spacy.matcher.Matcher', 'Matcher', (['nlp.vocab'], {}), '(nlp.vocab)\n', (1504, 1515), False, 'from spacy.matcher import Matcher\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 17 14:30:36 2020
@author: Arun
"""
#import Simurgh-multi-agent-main
from Simurgh_multi_agent_main import mddpg
import streamlit as st
################################
## ##
## <NAME> ##
## github.com/arunbalas ##
## ##
################################
if __name__ == "__main__":
st.write("Training Started")
scores = mddpg(n_episodes=1500, max_t=1000, print_every=10)
|
[
"Simurgh_multi_agent_main.mddpg",
"streamlit.write"
] |
[((437, 465), 'streamlit.write', 'st.write', (['"""Training Started"""'], {}), "('Training Started')\n", (445, 465), True, 'import streamlit as st\n'), ((480, 530), 'Simurgh_multi_agent_main.mddpg', 'mddpg', ([], {'n_episodes': '(1500)', 'max_t': '(1000)', 'print_every': '(10)'}), '(n_episodes=1500, max_t=1000, print_every=10)\n', (485, 530), False, 'from Simurgh_multi_agent_main import mddpg\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-03-19 17:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('remind', '0004_custom_cost'),
]
operations = [
migrations.AddField(
model_name='custom',
name='stop_loss',
field=models.FloatField(default=0, help_text='止损线', verbose_name='percent_min'),
),
migrations.AddField(
model_name='custom',
name='target_profit',
field=models.FloatField(default=0, help_text='止盈线', verbose_name='percent_min'),
),
]
|
[
"django.db.models.FloatField"
] |
[((392, 465), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0)', 'help_text': '"""止损线"""', 'verbose_name': '"""percent_min"""'}), "(default=0, help_text='止损线', verbose_name='percent_min')\n", (409, 465), False, 'from django.db import migrations, models\n'), ((592, 665), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0)', 'help_text': '"""止盈线"""', 'verbose_name': '"""percent_min"""'}), "(default=0, help_text='止盈线', verbose_name='percent_min')\n", (609, 665), False, 'from django.db import migrations, models\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015 <NAME> (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# https://doc.qt.io/qtforpython/PySide2/QtSql/QSqlDatabase.html
# https://doc.qt.io/qt-5/sql-programming.html
# https://www.developpez.net/forums/d1590644/autres-langages/python/gui/pyqt/qtsql-probleme-setquery/
# TO MAKE THE TEST DATABASE
# -------------------------
# $ sqlite3 test.sqlite
# sqlite> CREATE TABLE t_pers (nom VARCHAR, age INTEGER);
# sqlite> INSERT INTO t_pers VALUES ("john", 30);
# sqlite> INSERT INTO t_pers VALUES ("billy", 25);
from PyQt5 import QtSql
db = QtSql.QSqlDatabase.addDatabase("QSQLITE")
db.setDatabaseName("test.sqlite")
if not db.open():
db = None # Erreur d'ouverture de la base de données basesql
#query = QtSql.QSqlQuery("SELECT * FROM t_pers", db)
query = QtSql.QSqlQuery()
query.exec("SELECT * FROM t_pers")
while query.next():
name = query.value(0) # QString
age = query.value(1)
print(name, age)
|
[
"PyQt5.QtSql.QSqlQuery",
"PyQt5.QtSql.QSqlDatabase.addDatabase"
] |
[((1637, 1678), 'PyQt5.QtSql.QSqlDatabase.addDatabase', 'QtSql.QSqlDatabase.addDatabase', (['"""QSQLITE"""'], {}), "('QSQLITE')\n", (1667, 1678), False, 'from PyQt5 import QtSql\n'), ((1860, 1877), 'PyQt5.QtSql.QSqlQuery', 'QtSql.QSqlQuery', ([], {}), '()\n', (1875, 1877), False, 'from PyQt5 import QtSql\n')]
|
# -*- coding: utf-8 -*-
"""
log for heka Logfile input
"""
import logbook
from datetime import datetime
logbook.set_datetime_format("local")
import socket
import gevent
logger = logbook.Logger('app')
log = logbook.FileHandler('test.log')
log.push_application()
def main():
while True:
logger.info("info")
gevent.sleep(3)
if __name__ == '__main__':
main()
|
[
"logbook.set_datetime_format",
"gevent.sleep",
"logbook.Logger",
"logbook.FileHandler"
] |
[((107, 143), 'logbook.set_datetime_format', 'logbook.set_datetime_format', (['"""local"""'], {}), "('local')\n", (134, 143), False, 'import logbook\n'), ((183, 204), 'logbook.Logger', 'logbook.Logger', (['"""app"""'], {}), "('app')\n", (197, 204), False, 'import logbook\n'), ((212, 243), 'logbook.FileHandler', 'logbook.FileHandler', (['"""test.log"""'], {}), "('test.log')\n", (231, 243), False, 'import logbook\n'), ((358, 373), 'gevent.sleep', 'gevent.sleep', (['(3)'], {}), '(3)\n', (370, 373), False, 'import gevent\n')]
|
#!/usr/bin/ python3
#! /usr/bin/env python
from subprocess import call
call(['espeak "Welcome to granDome" 2>/dev/null'], shell=True)
"""
User interface to control simultanous captures and leds
-- Using i2c from Raspberry and Arduino
@ mercurio
"""
from tkinter import *
from tkinter.ttk import Progressbar
from PIL import ImageTk, Image, ImageGrab
import os, shutil, subprocess, signal
import smbus, time, datetime
import json
from sh import gphoto2 as gp
import shutil
from glob import glob
import settings
import webbrowser, threading
from i2c_devices import i2c_checker
import RPi.GPIO as GPIO
###### metadata ---------------------------------
# focal = settings.focal_length()
today_time = datetime.datetime.now().strftime("%H:%M")
today_date = datetime.datetime.now().strftime("%d/%m/%Y")
who = {"Actor":"", "Company":""}
where = {"Place":""}
when = {"Date":today_date, "Time":today_time}
what = {"Appelation":"rti", "Description":""}
how = {"Modality":{"Technique":"RTI", "Protocol":{"Automation":"", "Detail":{"AcquisitionType":"", "LPFilename":"LP", "DomeDiameterinmm":750}}}}
which = {"Camera":{"Type":"DSRL", "Model":"", "Focal":"", "Iso":"", "Aperture":"", "Whitebalance":"", "Shutterspeed":""},
"Light":{"SourceType":"LED", "Number":"", "Natural":"True"}}
why = {"Project":""}
def metadata(who=who, where=where, when=when, what=what, how=how, which=which, why=why):
inside_data = {'WHO':who, 'WHERE':where, 'WHEN':when, 'WHAT':what, 'HOW':how, 'WHICH':which, 'WHY':why}
metadata = {'Activity':inside_data}
return metadata
try:
os.mkdir("./json")
except:
pass
subprocess.run(["sudo", "chmod", "777", "./json/"+str(datetime.datetime.now().strftime("%d%m%Y%H%M%S"))+".json"]) #### Get permission to make an empty json file
#### Json file
json_file_name = "./json/"+str(datetime.datetime.now().strftime("%d%m%Y%H%M%S"))+".json"
def json_file(metadata, path=None): ##### Save data
json_object = json.dumps(metadata, indent=4)
with open(json_file_name, "w") as json_file:
json_file.write(json_object)
print(json_file)
if path is None:
pass
else:
shutil.move(json_file_name, path)
######
####### ------------- Clavier
cara = settings.clavier()
class user_interface:
def __init__(self):
self.interface = Tk()
#self.interface.geometry("800x480")
self.interface.attributes("-fullscreen", True)
self.interface.configure(bg="#212121")
self.interface.title("Dome")
self.w = self.interface.winfo_screenwidth()
self.frame = Frame(self.interface, bg="#212121")
self.frame_exit = Frame(self.interface, bg="#212121")
self.frame_menu_reglages = Frame(self.interface, bg="#212121")
self.frame_shutdown = Frame(self.interface, bg="#212121")
self.frame_version = Frame(self.interface, bg="#212121")
self.frame_bienvenue = Frame(self.interface, bg="#212121")
self.label_bienvenue = Label(self.frame_bienvenue, text="DÔME Mercurio V1", bg="#212121", fg="#FFF3AE", font=("Roboto Mono", 35, "bold"))
self.icon_exit = ImageTk.PhotoImage(Image.open(icons_path_+"IconeAnnuler.png").resize((70, 70)), Image.BILINEAR)
self.icon_reglages = ImageTk.PhotoImage(Image.open(icons_path_+"IconeSettings.png").resize((70, 70)), Image.BILINEAR)
self.icon_menu_capture = ImageTk.PhotoImage(Image.open(icons_path_+"menu_capture.png").resize((165, 165)), Image.BILINEAR)
self.icon_menu_projects = ImageTk.PhotoImage(Image.open(icons_path_+"menu_projets.png").resize((165, 165)), Image.BILINEAR)
self.icon_shutdown = ImageTk.PhotoImage(Image.open(icons_path_+"IconeEteindre.png").resize((70, 70)), Image.BILINEAR)
self.icon_mercurio = ImageTk.PhotoImage(Image.open(icons_path_+"logo_mercurio.png").resize((100, 60)), Image.BILINEAR)
self.label_mercurio_icon = Label(self.interface, image=self.icon_mercurio, bg="#212121")
self.info_label = Label(self.frame, bitmap='info', bg="#212121", fg="#FFF3AE")
self.memory_label = Label(self.frame, text="Free Memory : "+str(settings.check_memory()[2])+" Go", fg="#FFF3AE", bg="#212121")
self.button_exit = Button(self.frame_exit, text="Sortir", bg="#212121", fg="#212121", relief="flat"
,cursor="tcross", command=self.close_window)
self.button_exit.grid(row=0, column=0, sticky='news')
self.button_reglages = Button(self.frame_menu_reglages, text="Reglages",relief=FLAT, bg="#212121", fg="#212121", activebackground = "#33B5E5", bd=0
, cursor="tcross", command=self.menu_reglages)
self.button_capture = Button(self.frame, text="Commencer",relief="flat", bg="#212121", fg="#FFF3AE"
,compound=TOP, cursor="tcross", font=("Roboto Mono", 18 * -1), command=self.start_captures)
self.button_projects = Button(self.frame, text="Projets",relief="flat", bg="#212121", fg="#FFF3AE"
,compound=TOP, cursor="tcross", font=("Roboto Mono", 18 * -1), command=self.projects)
self.button_shutdown = Button(self.frame_shutdown, text="Eteindre",relief="flat", bg="#212121", fg="#212121"
,compound=TOP, cursor="tcross", command=self.shutdown)
self.button_exit['image'] = self.icon_exit
self.button_reglages['image'] = self.icon_reglages
self.button_capture['image'] = self.icon_menu_capture
self.button_projects['image'] = self.icon_menu_projects
self.button_shutdown['image'] = self.icon_shutdown
self.interface.rowconfigure(0, weight=1)
self.interface.columnconfigure(0, weight=1)
self.frame_exit.grid(row=0, column=0, stick='nw')
self.frame_menu_reglages.grid(row=0, column=0, stick='ne')
self.frame_shutdown.grid(row=0, column=0, stick='se')
self.frame_version.grid(row=0, column=0, stick='s')
self.frame_bienvenue.grid(row=0, column=0, stick='n')
self.label_bienvenue.grid(row=0, column=0, sticky='n')
self.button_exit.grid(row=0, column=0, sticky='news')
self.button_reglages.grid(row=0, column=0, sticky='news')
self.button_capture.grid(row=4, column=2, padx=10, pady=30, sticky='news')
self.button_projects.grid(row=4, column=3, padx=10, pady=30, sticky='news')
self.info_label.grid(row=5, column=2, padx=10, pady=20, sticky='news')
self.memory_label.grid(row=5, column=3, pady=20, sticky='news')
self.button_shutdown.grid(row=0, column=0, sticky='news')
self.label_mercurio_icon.place(x=-15, y=425)
self.frame.grid(row=0, column=0, padx=10, pady=50, sticky='n')
self.interface.update()
def close_window(self):
try:
bus = smbus.SMBus(1)
bus.write_byte(0x44, 1)
except:
pass
mario_sound(100)
self.interface.destroy()
### ---------------------------------------- Menu Reglages ---------------------------------------------------------------------
def menu_reglages(self):
self.reglage_interface = Toplevel()
self.reglage_interface.attributes('-fullscreen', True)
#self.reglage_interface.geometry("800x480")
self.reglage_interface.configure(bg="#212121")
self.reglage_frame = Frame(self.reglage_interface, bg="#212121", relief=FLAT)
self.reglage_frame_retour = Frame(self.reglage_interface, bg="#212121", relief=FLAT)
self.icon_apropos = ImageTk.PhotoImage(Image.open(icons_path_+"IconeFaq.png").resize((160, 160)), Image.BILINEAR)
self.icon_metadata = ImageTk.PhotoImage(Image.open(icons_path_+"reglage_metadata.png").resize((160, 160)), Image.BILINEAR)
self.icon_dometester = ImageTk.PhotoImage(Image.open(icons_path_+"reglage_dome_tester.png").resize((160, 160)), Image.BILINEAR)
self.icon_cameratester = ImageTk.PhotoImage(Image.open(icons_path_+"reglage_camera_tester.png").resize((160, 160)), Image.BILINEAR)
self.icon_retour = ImageTk.PhotoImage(Image.open(icons_path_+"IconeRetour.png").resize((65, 65)), Image.BILINEAR)
self._icon_mercurio_ = ImageTk.PhotoImage(Image.open(icons_path_+"logo_mercurio.png").resize((100, 60)), Image.BILINEAR)
self.__label_mercurio_icon = Label(self.reglage_frame, image=self._icon_mercurio_, bg="#212121")
self.button_retour = Button(self.reglage_frame_retour, text="Sortir", bg="#212121", fg="#212121",
relief="flat", compound=TOP, cursor="tcross",
command=self.reglage_interface.destroy)
self.button_retour['image'] = self.icon_retour
self.button_apropos = Button(self.reglage_frame, text="A Propos", bg="#212121", fg="#FFF3AE", cursor="tcross", relief="flat",
font=("Roboto Mono", 13 * -1), compound=TOP, command=self.apropos)
self.button_metadata = Button(self.reglage_frame, text='Meta Data', cursor="tcross", bg="#212121", fg="#FFF3AE", relief="flat",
compound=TOP, font=("Roboto Mono", 13 * -1), command=self._reglage_metadata_)
self.button_dometester = Button(self.reglage_frame, text='Tester le Dome', cursor="tcross", bg="#212121", fg="#FFF3AE", relief="flat",
compound=TOP, font=("Roboto Mono", 13 * -1), command=self.reglage_dometester)
self.button_cameratester = Button(self.reglage_frame, text='Réglages de la Camera', cursor="tcross", bg="#212121", fg="#FFF3AE", relief="flat",
compound=TOP, font=("Roboto Mono", 13 * -1), command=self.reglage_cameratester)
self.button_apropos['image'] = self.icon_apropos
self.button_metadata['image'] = self.icon_metadata
self.button_dometester['image'] = self.icon_dometester
self.button_cameratester['image'] = self.icon_cameratester
self.reglage_interface.rowconfigure(0, weight=1)
self.reglage_interface.columnconfigure(0, weight=1)
self.reglage_frame.grid(row=0, column=0, sticky='news')
self.reglage_frame_retour.grid(row=0, column=0, stick='nw')
self.button_retour.pack(anchor=NW)
self.button_metadata.place(x=250, y=250)
self.button_cameratester.place(x=250, y=50)
self.button_dometester.place(x=450, y=50)
self.button_apropos.place(x=450, y=250)
self.__label_mercurio_icon.place(x=-15, y=425)
### ---------------------------------------- start Captures ---------------------------------------------------------------------
def apropos(self):
others()
def start_captures(self):
self.capture_wind = Toplevel()
self.capture_wind.attributes('-fullscreen', True)
#self.capture_wind.geometry("800x480")
self.capture_wind.configure(bg="#212121")
self.capture_frame = Frame(self.capture_wind, bg="#212121")
self.capture_frame_exit = Frame(self.capture_wind, bg="#212121")
self.label_projectName = Label(self.capture_frame, text="Nom du Projet", bg="#212121", fg="#FFF3AE", font=("Roboto Mono", 13 * -1), width=20)
self.entry_projectName = Entry(self.capture_frame, width=50, bg="#212121", fg="#FFF3AE", font=("Roboto Mono", 14 * -1, "bold"))
self.entry_projectName.insert(END, what["Appelation"]) ###
self.icon_mercurio_cap = ImageTk.PhotoImage(Image.open(icons_path_+"logo_mercurio.png").resize((100, 60)), Image.BILINEAR)
self.label_mercurio_icone_ = Label(self.capture_frame, image=self.icon_mercurio_cap, bg="#212121")
self.icon_retour = ImageTk.PhotoImage(Image.open(icons_path_+"IconeRetour.png").resize((65, 65)), Image.BILINEAR)
self.capture_button_exit = Button(self.capture_frame_exit, text="Sortir", bg="#212121", fg="#212121",
relief="flat", cursor="tcross", command=self.capture_wind.destroy)
self.capture_button_exit['image'] = self.icon_retour
self.mode_aq_icon_dense = ImageTk.PhotoImage(Image.open(icons_path_+"allumeLed.png").resize((50, 35)), Image.BILINEAR)
self.mode_aq_icon_rapide = ImageTk.PhotoImage(Image.open(icons_path_+"aq_rapide_icon.png").resize((50, 35)),Image.BILINEAR)
self.button_mode_rapide = Button(self.capture_frame, width=15, text="MODE RAPIDE", font=("Roboto Mono", 16 * -1, "bold"),
bg="#212121", fg="#FFF3AE", command=self._mode_rapide_)
self.button_mode_lent = Button(self.capture_frame, width=15, text="MODE DENSE", font=("Roboto Mono", 16 * -1, "bold"),
bg="#212121", fg="#FFF3AE", command=self._mode_lent_)
self.button_AQ = Button(self.capture_frame, width=15, text="COMMENCER", font=("Roboto Mono", 16 * -1, "bold"),
bg="#212121", fg="#FFF3AE", command=self.__stop__)
self.state_label = Label(self.capture_frame, relief="flat", bg="#212121")
self.progress_bar = Progressbar(self.capture_frame, orient=HORIZONTAL, length=375)
self.label_aq = Label(self.capture_frame, text="", bg="#212121", fg="#FFF3AE", font=("Roboto Mono", 13 * -1))
self.capture_wind.rowconfigure(0, weight=1)
self.capture_wind.columnconfigure(0, weight=1)
self.capture_frame_exit.grid(row=0, column=0, sticky='nw')
self.capture_button_exit.grid(row=0, column=0, sticky='nw')
############### CLAVIER #########################################
keypad_frame = Frame(self.capture_wind, bg='#212121', relief='groove')
for car, grid_value in cara.items():
if grid_value[0] == 5:
button = Button(keypad_frame, text=str(car), bg='#424035', fg='#FFF3AE', activebackground ='green', bd=0, font=("Roboto Mono", 15 * -1, "bold"), width=3,
borderwidth=0, relief='flat', command=lambda x=car: self.set_text_(x)).grid(row=grid_value[0], column=grid_value[1], padx=1, pady=2, sticky='news')
if grid_value[0] == 6:
button = Button(keypad_frame, text=str(car), bg='#424035', fg='#FFF3AE', bd=5, font=("Roboto Mono", 15 * -1, "bold"),
borderwidth=0, command=lambda x=car: self.set_text_(x)).grid(row=grid_value[0], column=grid_value[1], pady=2, sticky='news')
if grid_value[0] == 7:
button = Button(keypad_frame, text=str(car), bg='#424035', fg='#FFF3AE',bd=5, font=("Roboto Mono", 15 * -1, "bold"),
borderwidth=0, command=lambda x=car: self.set_text_(x)).grid(row=grid_value[0], column=grid_value[1], pady=2, sticky='news')
if grid_value[0] == 8:
button = Button(keypad_frame, text=str(car), bg='#424035', fg='#FFF3AE', bd=5, font=("Roboto Mono", 15 * -1, "bold"),
borderwidth=0, command=lambda x=car: self.set_text_(x)).grid(row=grid_value[0], column=grid_value[1], pady=2, sticky='news')
button_del = Button(keypad_frame, text='<', bg='#424035', fg='#FFF3AE', activebackground ='gray', font=('helvetica', 14, 'bold'),
borderwidth=0, command=self.delete_text_).grid(row=8, column=11, pady=2, sticky='news')
keypad_frame.grid(row=0, column=0, sticky='s')
#################################################################
self.capture_frame.grid(row=0, column=0, sticky="news")
self.label_projectName.place(x=125, y=20)
self.entry_projectName.place(x=160, y=40)
self.button_mode_rapide.place(x=175, y=150)
self.button_mode_lent.place(x=425, y=150)
self.state_label.place(x=370, y=147)
self.progress_bar.place(x=200, y=250)
self.button_AQ.place(x=300, y=200)
self.label_aq.place(x=275, y=280)
self.label_mercurio_icone_.place(x=-15, y=425)
def _mode_rapide_(self):
self.project_data()
print("Mode Rapide lancé!")
mario_sound(100)
led_1_ctrl(1)
self.state_label.config(image=self.mode_aq_icon_rapide, bg='#212121')
self.button_AQ['text'] = "ARRETER"
self.button_mode_rapide['bg'] = '#424035'
self.button_mode_lent['bg'] = '#212121'
self._aquisition_(image_nb=85) ############
def _mode_lent_(self):
self.project_data()
print("Mode Dense lancé!")
mario_sound(100)
led_2_ctrl(1)
self.state_label.config(image=self.mode_aq_icon_dense, bg='#212121')
self.button_AQ['text'] = "ARRETER"
self.button_mode_lent['bg'] = '#424035'
self.button_mode_rapide['bg'] = '#212121'
self ._aquisition_(image_nb=155) #############
def project_data(self):
p_name = self.entry_projectName.get() ### p_name == Project name
what['Appelation'] = p_name
json_file(metadata(what=what))
return p_name
def set_text_(self, text):
widget = self.capture_wind.focus_get()
self.entry_projectName.insert("insert", text)
def delete_text_(self):
self.entry_projectName.delete(0, END)
### ---------------------------------------- See Projects ---------------------------------------------------------------------
def projects(self):
self.project_wind = Toplevel()
self.project_wind.attributes('-fullscreen', True)
#self.project_wind.geometry("800x480")
self.project_wind.configure(bg="#212121")
self.frame = Frame(self.project_wind, bg="#212121")
self.icon_retour = ImageTk.PhotoImage(Image.open(icons_path_+"IconeRetour.png").resize((65, 65)), Image.BILINEAR)
self.button_exit_ = Button(self.frame, text="Sortir", bg="#212121", fg='#424035', command=self.project_wind.destroy)
self.button_exit_['image'] = self.icon_retour
self.icon_mercurio_pro = ImageTk.PhotoImage(Image.open(icons_path_+"logo_mercurio.png").resize((100, 60)), Image.BILINEAR)
self.label_mercurio_icone = Label(self.frame, image=self.icon_mercurio_pro, bg="#212121")
self.button_delete_project = Button(self.frame, text="Supprimer", bg="#212121", fg="#FFF3AE", font=("Roboto Mono", 13 * -1, "bold"), width=8,
state=DISABLED, command=self.message_box)
self.button_copy_project = Button(self.frame, text="Copier USB", bg="#212121", fg="#FFF3AE", font=("Roboto Mono", 13 * -1, "bold"), width=8,
state=DISABLED, command=self.copy_to_usb_)
self.label_display = Label(self.frame, height=450, bg="#212121", fg="#424035", relief="ridge", font=("Roboto Mono", 15 * -1, "bold"))
self.label_imageName = Label(self.frame, bg="#212121", font=("Roboto Mono", 10 * -1, "bold"))
self.label_Nombre = Label(self.frame, bg="#212121", fg="#FFF3AE",font=("Roboto Mono", 10 * -1, "bold"))
self.scrollbar = Scrollbar(self.frame, width=45, bg="#FFF3AE", troughcolor="#212121")
self.list_project = os.listdir(rti_path)
self.list_project.sort()
self.listeProjet = Listbox(self.frame, height=20, width=10, yscrollcommand=self.scrollbar.set, bg="#212121", fg='#FFF3AE', font=("Roboto Mono", 20 * -1, "bold"))
for projet in self.list_project:
self.listeProjet.insert(END, projet)
self.listeProjet.bind("<<ListboxSelect>>", self.selection)
self.project_wind.rowconfigure(0, weight=1)
self.project_wind.columnconfigure(0, weight=1)
self.frame.rowconfigure(0, weight=1)
self.frame.columnconfigure(0, weight=1)
self.frame.grid(row=0, column=0, sticky="news")
self.button_exit_.place(x=0, y=0)
self.button_delete_project.pack(anchor=SE)
self.button_copy_project.pack(anchor=SE)
self.scrollbar.place(x=25, y=200)
self.label_display.place(x=100, y=0)
self.listeProjet.place(x=75, y=0)
self.label_mercurio_icone.place(x=-15, y=425)
self.label_Nombre.place(x=300, y=455)
def selection(self, event):
self.button_delete_project['state'] = NORMAL
self.button_copy_project['state'] = NORMAL
projet_select = self.listeProjet.get(self.listeProjet.curselection())
print("----", projet_select)
self.list_project = os.listdir(rti_path+str(projet_select))
root, folder, file = next(os.walk(rti_path+str(projet_select)))
for i in file:
if i.endswith(".JPG"):
thumb_file = i
self.previewImg = Image.open(rti_path+str(projet_select)+"/"+thumb_file).resize((600, 520))
self.image__ = ImageTk.PhotoImage(self.previewImg, Image.BILINEAR)
self.label_display.configure(image=self.image__)
self.label_display.image = self.image__
def copy_to_usb_(self):
self.label_Nombre.config(text="Veuillez attendre -- cela peut prendre quelques temps !")
self.project_wind.update()
self.message_box_usb = Toplevel()
self.message_box_usb.attributes('-fullscreen', True)
#self.message_box_usb.geometry("800x480")
self.message_box_usb.configure(bg="#212121")
self.icon_retusb = Image.open(icons_path_+"IconeRetour.png").resize((65, 65))
self.icn_ret = ImageTk.PhotoImage(master=self.message_box_usb, image=self.icon_retusb)
self.button_quitusb = Button(self.message_box_usb, text="Sortir", bg="#212121", command=self.message_box_usb.destroy)
self.button_quitusb['image'] = self.icn_ret
self.button_quitusb.pack(anchor=NW)
self.label_usb = Label(self.message_box_usb, text="", bg="#424035", fg="#FFF3AE", font=("Roboto Mono", 15 * -1,'bold' ))
self.label_usb.place(x=100, y=100)
self.label_usb_mem = Label(self.message_box_usb, text="", bg="#424035", fg="#FFF3AE", font=("Roboto Mono", 15 * -1,'bold' ))
self.label_usb_mem.place(x=100, y=200)
projet_select = self.listeProjet.get(self.listeProjet.curselection())
print("----selected--2--USB", projet_select)
media_path = "/media/pi/"
folders_in_media = os.listdir(media_path)
if len(folders_in_media) == 0:
print("Inserérez une clé USB")
self.label.config(text="Insérez Une Clé USB SVP !")
else:
usb_path = media_path+folders_in_media[0]
total, used, free = shutil.disk_usage(usb_path)
### Make Zip
self.label_usb_mem.config(text="Disponible : "+str(round((free/2**30), 2))+"/"+str(round((total/2**30), 2))+" GO")
if round((free/2**30), 2) > 1.0 :
self.label_usb.config(text="Veuillez attendre -- cela peut prendre quelques temps !")
shutil.make_archive(usb_path+"/"+projet_select, 'zip', rti_path+str(projet_select))
print("Projet copié avec succès !")
self.label_usb.config(text="Le Projet "+projet_select+" est copié vers la clé USB")
else:
self.label_usb.config(text="Votre espace est insuffisant")
self.label_usb_mem.config(text="Disponible : "+str(round((free/2**30), 2))+"/"+str(round((total/2**30), 2))+" GO")
self.message_box_usb.rowconfigure(0, weight=1)
self.message_box_usb.columnconfigure(0, weight=1)
self._logo_mercurio_usb = Image.open(icons_path_+"logo_mercurio.png").resize((100, 60))
self.__logo__usb = ImageTk.PhotoImage(master=self.message_box_usb, image=self._logo_mercurio_usb)
self.__label_logo__usb = Label(self.message_box_usb, image=self.__logo__usb, bg="#212121").place(x=-15, y=425)
def message_box(self):
self.message_box = Toplevel()
self.message_box.attributes('-fullscreen', True)
#self.message_box.geometry("800x480")
self.message_box.configure(bg="#212121")
projet_select = self.listeProjet.get(self.listeProjet.curselection())
self.label_deleting = Label(self.message_box, text="Voulez-vous supprimer le Projet : "+str(projet_select), bg="#424035", fg='#FFF3AE',
font=("Roboto Mono", 20 * -1, "bold"))
self.button_yes = Button(self.message_box, text="OUI", width=10, height=5, bg="#420035", fg='#FFF3AE',
font=("Roboto Mono", 22 * -1, "bold"), command=self.remove_selected)
self.button_No = Button(self.message_box, text="NON", width=10, height=5, bg="#4240F0", fg='#FFF3AE',
font=("Roboto Mono", 22 * -1, "bold"), command=self.message_box.destroy)
self.message_box.rowconfigure(0, weight=1)
self.message_box.columnconfigure(0, weight=1)
self.label_deleting.grid(row=0, column=0, pady=5, sticky='news')
self.button_yes.grid(row=1, column=0, pady=5, sticky='news')
self.button_No.grid(row=2, column=0, pady=5, sticky='news')
def remove_selected(self):
projet_select = self.listeProjet.get(self.listeProjet.curselection())
directotory_to_remove = rti_path+str(projet_select)
subprocess.run(["rm", "-rf", directotory_to_remove])
trois_colors(150)
self.message_box.destroy()
### ---------------------------------------- Eteindre ---------------------------------------------------------------------
def shutdown(self):
trois_colors(250)
bus.write_byte(0x44, 0)
os.system('sudo shutdown -h now')
###############################################################################################
########################### REGLAGES Nombre LEDs #####################################################
##############################################################################################
def project_exists(self):
print("Exists ! ")
self.window_overwrite = Toplevel()
self.window_overwrite.attributes("-fullscreen", True)
#self.window_overwrite.geometry("800x480")
over_write_label = Label(self.window_overwrite, text="Ecraser ?").grid(row=0, cloumn=0, sticky='n')
btn_OK = Button(self.window_overwrite, text="Oui", command=lambda projectName:self.return_and_rename(rti_path+str(projectName))).grid(row=1, cloumn=0, sticky='n')
btn_NON = Button(self.window_overwrite, text="Non", command=self.window_overwrite.destroy).grid(row=1, cloumn=1, sticky='n')
def thumbnail(self, projectName):
############ ------ ThumNail
bus = smbus.SMBus(1)
settings.killprocess()
gp(clearCMD)
bus.write_block_data(0x44, 0, [2, 15])
thumb_name = "thumbnail.JPG"
print("Thumb Name created !")
#subprocess.run(["gphoto2", "--trigger-capture", "--wait-event=FILEADDED"])
os.system("gphoto2 --trigger-capture --wait-event=FILEADDED")
settings.killprocess()
print("---------------------------1")
os.system("gphoto2 --filename="+thumb_name+" --get-all-files")
time.sleep(0.4)
print("---------------------------2")
settings.killprocess()
bus.write_byte(0x44, 1)
dest = shutil.move(thumb_name, rti_path+str(projectName)+"/")
led_1_ctrl(1)
time.sleep(0.2)
led_1_ctrl(0)
im = Image.open(rti_path+str(projectName)+"/thumbnail.JPG")
im.thumbnail((600, 400), Image.ANTIALIAS)
im.save(rti_path+str(projectName)+"/thumbnail.JPG")
print("Thumb Created !")
def __stop__(self):
"""
Stop i2c transmission
"""
bus.write_byte(0x44, 1)
bus.close()
self.capture_wind.destroy()
def _aquisition_(self, image_nb):
i2c_state = i2c_checker() ### Check i2c ?
leds = [0, 1, 4, 6, 8, 11, 12, 13, 14, 17, 19, 21, 23, 24, 26, 27, 30] ## 85 LEDs Mode !
### For 155 LEDs (Deleting small LEds !
leds_a_allumer = [s for s in range(160)]
for k in range(5):
leds_a_allumer.remove(31+(32*k))
default_projectname = datetime.datetime.now().strftime("%d%m%Y%H%M%S")
camera_available = settings.camera_available()
if camera_available == True and i2c_state != 0 :
self.label_aq.config(text="Camera and i2c Device Detected")
self.capture_wind.update_idletasks()
self.capture_wind.update()
##########
print("Camera is On ")
which["Light"]["Number"]= image_nb
if image_nb == 85 :
how['Modality']['Protocol']['Detail']['AcquisitionType']="RTI LEGERE"
elif image_nb == 155:
how['Modality']['Protocol']['Detail']['AcquisitionType']="RTI DENSE"
# how['Modality']['Protocol']['Detail']['LPFilename']="LP"+str(image_nb)
json_file(metadata(which=which))
json_file(metadata(how=how))
subprocess.run(["gphoto2", "--folder", camera_folder, "-R", "--delete-all-files"])
# gp(clearCMD)
############### ------
projectname = self.project_data()
if len(projectname) == 0:
try:
os.mkdir(rti_path+default_projectname+"_"+str(image_nb))
except:
pass
if os.path.exists(rti_path+default_projectname+"_"+str(image_nb)):
self.project_exists()
self.thumbnail(default_projectname+"_"+str(image_nb))
else:
try:
os.mkdir(rti_path+projectname+"_"+str(image_nb))
except:
pass
self.thumbnail(projectname+"_"+str(image_nb))
#####################################
what["Appelation"]=projectname
json_file(metadata(what=what))
print("__json__")
file_name = rti_path+projectname+"rti%Y%m%d%H%M%S%f.%C"
#### Save Json File
camera_data = save_camera_data()
which.update(camera_data)
lp_filename = how['Modality']['Protocol']['Detail']['LPFilename']
if len(projectname) == 0:
json_file(metadata(what=what, how=how, who=who, where=where, when=when, which=which, why=why),
path=str(rti_path+default_projectname+"_"+str(image_nb)+"/"))
shutil.copy(lp_path+"LP_"+str(image_nb)+".lp", str(rti_path+default_projectname+"_"+str(image_nb)+"/"))
os.rename(rti_path+default_projectname+"_"+str(image_nb)+"/"+"LP_"+str(image_nb)+".lp",
str(rti_path+default_projectname+"_"+str(image_nb)+"/"+lp_filename+".lp"))
else:
json_file(metadata(what=what, how=how, who=who, where=where, when=when, which=which,why=why),
path=str(rti_path+projectname+"_"+str(image_nb)+"/"))
shutil.copy(lp_path+"LP_"+str(image_nb)+".lp", str(rti_path+projectname+"_"+str(image_nb)+"/"))
os.rename(rti_path+projectname+"_"+str(image_nb)+"/"+"LP_"+str(image_nb)+".lp",
rti_path+projectname+"_"+str(image_nb)+"/"+lp_filename+".lp")
bus = smbus.SMBus(1)
bus.write_byte(0x44, 1)
if image_nb == 85:
for k in range(5):
print(str(k))
for s, i in enumerate(leds):
settings.killprocess()
print(str(s), i)
self.label_aq.config(text="En Cours de PDV "+str((17*k)+(s+1))+ "/85... Please Wait!")
self.progress_bar['value'] += 100/(len(leds)*5)
self.capture_wind.update_idletasks()
bus.write_block_data(0x44, 0, [3, 32*k+i])
subprocess.run(["gphoto2", "--trigger-capture"])
time.sleep(0.4)
self.capture_wind.update()
bus.write_byte(0x44, 1)
elif image_nb == 155:
for i, j in enumerate(leds_a_allumer):
settings.killprocess()
self.label_aq.config(text="En Cours de PDV "+str(i)+ "/"+str(len(leds_a_allumer))+" ... Please Wait!")
self.progress_bar['value'] += 100/len(leds_a_allumer)
self.capture_wind.update_idletasks()
bus.write_block_data(0x44, 0, [3, j])
subprocess.run(["gphoto2", "--trigger-capture"])
time.sleep(0.4)
self.capture_wind.update()
bus.write_byte(0x44, 1)
self.label_aq['text'] = "Enregistrement des images..."
self.progress_bar['value'] = 0
self.capture_wind.update()
try:
if len(projectname) == 0:
os.mkdir(rti_path+default_projectname+"_"+str(image_nb)+"/rti")
else:
os.mkdir(rti_path+projectname+"_"+str(image_nb)+"/rti")
except:
pass
#settings.get_data_from_camera(file_name)
data_getter = threading.Thread(target=settings.get_data_from_camera, args=(file_name,))
data_getter.start()
self.progress_bar['value'] = 0
nombre_img = 0
while(nombre_img<=image_nb):
nombre_img = len(glob(rti_path+"*.JPG"))
self.progress_bar['value'] = ((nombre_img-1)/image_nb)*100
self.label_aq.config(text=str(nombre_img)+"/"+str(image_nb))
self.capture_wind.update_idletasks()
self.capture_wind.update()
self.progress_bar['value'] = 0
self.capture_wind.update()
jpg_files = glob(rti_path+'*.JPG')
jpg_files.sort()
prefix_name = "0000"
for i, img in enumerate(jpg_files):
if len(str(i)) == 1:
renamed_file = rti_path+"IMG_"+prefix_name[:-1]+str(i)
elif len(str(i)) == 2:
renamed_file = rti_path+"IMG_"+prefix_name[:-2]+str(i)
elif len(str(i)) == 3:
renamed_file = rti_path+"IMG_"+prefix_name[:-3]+str(i)
os.rename(img, renamed_file+".JPG")
self.label_aq.config(text="Image "+str(i)+"/"+str(image_nb)+ " Renamed ! ")
if len(projectname) == 0:
dest = shutil.move(renamed_file+".JPG", rti_path+default_projectname+"_"+str(image_nb)+"/rti/")
self.label_aq.config(text="Image "+str(i)+"/"+str(image_nb)+ " Moved To Folder ! ")
self.progress_bar['value'] += 100/image_nb
self.capture_wind.update()
else :
dest = shutil.move(renamed_file+".JPG", rti_path+projectname+"_"+str(image_nb)+"/rti/")
self.label_aq.config(text="Image "+str(i)+"/"+str(image_nb)+" Moved To Folder ! ")
self.progress_bar['value'] += 100/image_nb
self.capture_wind.update()
self.capture_wind.destroy()
subprocess.run(["gphoto2", "--folder", camera_folder, "-R", "--delete-all-files"])
#gp(clearCMD)
if camera_available == False or i2c_state == 0:
print("No Camera Detected Or No Device !")
self.label_aq['text'] = " No Camera or i2c Device"
camera_available = settings.camera_available()
self.capture_wind.update()
def choix_aq(self):
global apply
apply = True
self.reglage_aq_win.destroy()
print("Applied!", apply)
return apply
## -----
def _reglage_metadata_(self):
self.reglage_metadata = Toplevel()
self.reglage_metadata.attributes('-fullscreen', True)
#self.reglage_metadata.geometry("800x480")
self.reglage_metadata.configure(bg="#212121")
self.reglage_frame = Frame(self.reglage_metadata, bg="#212121")
self.reglage_frame_exit = Frame(self.reglage_metadata)
self.icon_user = ImageTk.PhotoImage(Image.open(icons_path_+"utilisateur.png").resize((160, 160)), Image.BILINEAR)
self.icon_camera_info = ImageTk.PhotoImage(Image.open(icons_path_+"camera_info.png").resize((160, 160)), Image.BILINEAR)
self.icon_environdata = ImageTk.PhotoImage(Image.open(icons_path_+"environement.png").resize((160, 160)), Image.BILINEAR)
self.icon_other_data = ImageTk.PhotoImage(Image.open(icons_path_+"autres.png").resize((160, 160)), Image.BILINEAR)
self.icon_retour_ = ImageTk.PhotoImage(Image.open(icons_path_+"IconeRetour.png").resize((65, 65)), Image.BILINEAR)
self.__label_mercurio_icon__ = Label(self.reglage_metadata, image=self.icon_mercurio, bg="#212121")
self.button_exit = Button(self.reglage_frame_exit, relief="flat", compound=TOP, bg="#212121",
command=self.reglage_metadata.destroy)
self.button_user = Button(self.reglage_frame, text="Utilisateur", relief="flat", compound=TOP, bg="#212121", font=("Roboto Mono", 13 * -1, "bold"),
fg="#FFF3AE", command=self.user_data)
self.button_camera_info = Button(self.reglage_frame, text="Caméra info", relief="flat", compound=TOP, bg="#212121", font=("Roboto Mono", 13 * -1, "bold"),
fg="#FFF3AE", command=self.camera_info)
self.button_environement_data = Button(self.reglage_frame, text="Environement", relief="flat", compound=TOP, bg="#212121", font=("Roboto Mono", 13 * -1, "bold"),
fg="#FFF3AE", command=self.environement_data)
self.button_other_data = Button(self.reglage_frame, text="Autres", relief="flat", compound=TOP, bg="#212121", font=("Roboto Mono", 13 * -1, "bold"),
fg="#FFF3AE", command=self.other_data)
self.button_exit['image'] = self.icon_retour_
self.button_user['image'] = self.icon_user
self.button_camera_info['image'] = self.icon_camera_info
self.button_environement_data['image'] = self.icon_environdata
self.button_other_data['image'] = self.icon_other_data
self.reglage_metadata.rowconfigure(0, weight=1)
self.reglage_metadata.columnconfigure(0, weight=1)
self.reglage_frame.grid(row=0, column=0, sticky='n')
self.reglage_frame_exit.grid(row=0, column=0, sticky='nw')
self.button_exit.grid(row=0, column=0, sticky='news')
self.button_user.grid(row=2, column=2, padx=5, pady=20, sticky='news')
self.button_camera_info.grid(row=2, column=3, padx=5, pady=20, sticky='news')
self.button_environement_data.grid(row=3, column=2, padx=5, pady=20, sticky='news')
self.button_other_data.grid(row=3, column=3, padx=5, pady=20, sticky='news')
self.__label_mercurio_icon__.place(x=-15, y=425)
### -- --
def user_data(self):
photographer_data()
def camera_info(self):
camera_info()
def environement_data(self):
environement_data()
def other_data(self):
_camera_folder_()
## -----
def reglage_dometester(self):
self.dome_wind = Toplevel()
self.dome_wind.attributes('-fullscreen', True)
#self.dome_wind.geometry("800x480")
self.dome_wind.configure(bg="#212121")
self.frame = Frame(self.dome_wind, bg="#212121")
self.frame_scales = Frame(self.dome_wind, bg="#212121")
self.button_retour_icon = ImageTk.PhotoImage(Image.open(icons_path_+"IconeRetour.png").resize((75, 75)), Image.BILINEAR)
self.tout_allumer_icon = ImageTk.PhotoImage(Image.open(icons_path_+"toutAllumer.png").resize((120, 120)), Image.BILINEAR)
self.tout_eteindre_icon = ImageTk.PhotoImage(Image.open(icons_path_+"toutEteindre.png").resize((120, 120)), Image.BILINEAR)
self.allumer_ledX = ImageTk.PhotoImage(Image.open(icons_path_+"allumerledXon.png").resize((120, 120)), Image.BILINEAR)
self.eteindre_ledX = ImageTk.PhotoImage(Image.open(icons_path_+"eteindreLed.png").resize((120, 120)), Image.BILINEAR)
self._label_mercurio_icon_ = Label(self.dome_wind, image=self.icon_mercurio, bg="#212121")
self.button_exit = Button(self.frame, image=self.button_retour_icon, bg="#212121",
compound=TOP, command=self.destroy_)
self.button_tout_allumer = Button(self.frame, text="Tout Allumer", bg="#212121", relief='flat',
compound=TOP, fg="#FFF3AE", font=("Roboto Mono", 13 * -1, "bold"), command=self._allOn_)
self.button_tout_eteindre = Button(self.frame, text="Tout Eteindre", bg="#212121", relief='flat',
compound=TOP, fg="#FFF3AE", font=("Roboto Mono", 13 * -1, "bold"), command=self._AllOff_)
self.button_allumer_led_x = Button(self.frame, text="Allumer LED X", bg="#212121", relief='flat',
compound=TOP, fg="#FFF3AE", font=("Roboto Mono", 13 * -1, "bold"), command=self._allummer_led_x_)
self.button_tout_allumer['image'] = self.tout_allumer_icon
self.button_tout_eteindre['image'] = self.tout_eteindre_icon
self.button_allumer_led_x['image'] = self.eteindre_ledX
## --------------------- Slides --------------------------------------------------------------------
self.slider_allumer_LedNum = Scale(self.frame_scales, width=20, length=350, label="Allumer LED N° x/155", activebackground='white', from_=0, to=155,
orient="horizontal", state=DISABLED, bg="#212121", fg="#FFF3AE", font=("Roboto Mono", 13 * -1, "bold"),
troughcolor="#424035", highlightbackground="#FFF3AE", command=self._on_scale_LedN)
self.slider_intensity = Scale(self.frame_scales, width=20, length=350, label="Intensité", from_=0, to=150, orient="horizontal", state=DISABLED,
troughcolor="#424035", fg="#FFF3AE", font=("Roboto Mono", 13 * -1, "bold"), bg="#212121",
highlightbackground="#FFF3AE", command=self._on_scale_intensity)
## --------------------------- Set Positions -----------------------------------------------------
self.dome_wind.rowconfigure(0, weight=1)
self.dome_wind.columnconfigure(0, weight=1)
self.frame_scales.grid(row=1, column=0, sticky='s')
self.button_exit.pack(anchor=NW)
self.button_tout_allumer.place(x=150, y=100)
self.button_tout_eteindre.place(x=350, y=100)
self.button_allumer_led_x.place(x=550, y=100)
self.slider_allumer_LedNum.grid(row=5, column=0, pady=5, padx=5, sticky='s')
self.slider_intensity.grid(row=7, column=0, pady=5, padx=5, sticky='s')
self.frame.grid(row=0, column=0, sticky='news')
self._label_mercurio_icon_.place(x=-15, y=425)
####---------------------------------------------------------------------------------------------------------
### -------------------------------- FUNCTIONS ---------------------------------------------------------------
def _on_scale_LedN(self, value):
print(value)
bus.write_byte(0x44, 1)
time.sleep(0.1)
bus.write_block_data(0x44, 0, [3, int(value)])
value=0
def _on_scale_intensity(self, value):
print(value)
self.slider_allumer_LedNum['troughcolor'] = '#a0a0a0'
self.slider_allumer_LedNum['state'] = 'disabled'
self.slider_intensity['troughcolor'] = 'green'
self.slider_intensity['state'] = 'active'
bus.write_block_data(0x44, 0, [2, int(value)])
def destroy_(self):
try:
bus.write_byte(0x44, 1)
except:
pass
self.dome_wind.destroy()
def _allOn_(self):
self._AllOff_()
bus.write_block_data(0x44, 0, [2, 15])
self.button_allumer_led_x['image'] = self.eteindre_ledX
self.slider_intensity['troughcolor'] = 'green'
self.slider_intensity['state'] = 'active'
self.slider_intensity.set(15)
self.slider_allumer_LedNum['state'] = 'disabled'
self.slider_allumer_LedNum['troughcolor'] = '#a0a0a0'
def _AllOff_(self):
bus.write_byte(0x44, 1)
self.button_allumer_led_x['image'] = self.eteindre_ledX
self.slider_allumer_LedNum['state'] = 'disabled'
self.slider_intensity['state'] = 'disabled'
self.slider_allumer_LedNum['troughcolor'] = '#a0a0a0'
self.slider_intensity['troughcolor'] = '#a0a0a0'
def _allummer_led_x_(self):
bus.write_block_data(0x44, 0, [3, 0])
self.button_allumer_led_x['image'] = self.allumer_ledX
self.slider_allumer_LedNum['troughcolor'] = 'green'
self.slider_allumer_LedNum['state'] = 'active'
self.slider_intensity['state'] = 'disabled'
self.slider_intensity['troughcolor'] = '#a0a0a0'
## -----
def reglage_cameratester(self):
global camera
camera = {}
self.cam_wind = Toplevel()
self.cam_wind.attributes('-fullscreen', True)
#self.cam_wind.geometry("800x480")
self.cam_wind.configure(bg="#212121")
self.frame_exit = Frame(self.cam_wind, bg="#212121")
self.frame = Frame(self.cam_wind, bg="#212121")
self.camera_deconnctee_icon = ImageTk.PhotoImage(Image.open(icons_path_+"camera_deconnectee.png").resize((200, 200)), Image.BILINEAR)
self._button_retour_icon_ = ImageTk.PhotoImage(Image.open(icons_path_+"IconeRetour.png").resize((75, 75)), Image.BILINEAR)
self.label_mercurio_icon_ = Label(self.cam_wind, image=self.icon_mercurio, bg="#212121")
self.button_exit = Button(self.cam_wind, bg="#212121", command=self.cam_wind.destroy)
self.button_exit['image'] = self._button_retour_icon_
if settings.camera_available() == True :
camera_infos = []
for line in settings.about_camera():
line = str(line)[2:].split(':')
camera_infos.append(line)
aperture = int(settings.image_data("aperture")['Current'].split(':')[-1])
iso = int(settings.image_data("iso")['Current'].split(':')[-1])
whitebalance = settings.image_data("whitebalance")['Current'].split(':')[-1]
shutterspeed = settings.image_data("shutterspeed")['Current'].split(':')[-1]
_parameters_ = {'aperture':aperture,
'iso':iso, 'whitebalance':whitebalance,
'shutterspeed':shutterspeed}
display_list = list(_parameters_.keys())
self.entry_param = []
for i, param in enumerate(display_list):
self.scrollbar = Scrollbar(self.frame, orient="vertical", width=35, bg="#FFF3AE", troughcolor="#212121")
self.list_para = Listbox(self.frame, height=2, width=25, exportselection=0, font=("Roboto Mono", 20 * -1, "bold"), bg="#212121", fg="#FFF3AE",
selectmode=SINGLE, yscrollcommand=self.scrollbar.set)
para_list = settings.image_data(param)['Choices']
for j in para_list:
self.list_para.insert(END, param+" "+j.split(" ")[-1]+" "+j.split(" ")[1])
self.scrollbar.grid(row=i+1, column=2, padx=5, pady=20, sticky='news')
self.list_para.grid(row=i+1, column=1, padx=15, pady=20, sticky='news')
self.scrollbar.config(command=self.list_para.yview)
self.list_para.bind('<<ListboxSelect>>', self.select_text)
for i,d in enumerate(display_list):
self.label = Label(self.frame, text=" "+d+" ", height=2, bd=2, width=20, relief="flat", font=("Roboto Mono", 15 * -1, "bold"), fg="#FFF3AE",
bg="#212121").grid(row=i+1, column=0, padx=50, pady=20, sticky='news')
which["Camera"] = camera
else :
self.label = Label(self.frame, text=" Aucune caméra détectée, branchez la caméra SVP !", bg="#212121", width=50, font=("Roboto Mono", 16 * -1, "bold"),
fg="#FFF3AE").place(x=150, y=100)
self.label_camera_deconnectee = Label(self.cam_wind, bg="#212121", image=self.camera_deconnctee_icon)
self.label_camera_deconnectee.place(x=325, y=235)
self.button_exit = Button(self.frame, bg="#212121", command=self.cam_wind.destroy)
self.button_exit['image'] = self._button_retour_icon_
self.cam_wind.rowconfigure(0, weight=1)
self.cam_wind.columnconfigure(0, weight=1)
self.frame_exit.grid(row=0, column=0, sticky='nw')
self.frame.grid(row=0, column=0, sticky='news')
self.button_exit.place(x=0, y=0)
self.label_mercurio_icon_.place(x=-15, y=425)
def select_text(self, text):
self.selection = text.widget.curselection()
self.index = self.selection[0]
self.value = text.widget.get(self.index)
settings.set_camera_data(self.value.split(" ")[0], self.value.split(" ")[1])
print(self.value.split(" ")[0], self.value.split(" ")[-1])
def mainloop(self):
self.interface.mainloop()
class photographer_data(Tk):
def __init__(self):
Tk.__init__(self)
#self.geometry("800x480")
self.attributes("-fullscreen", True)
self.configure(bg="#212121")
self.title("Dome")
self.data = ["PRENOM NOM", "SOCIETE", "LIEU de PDV"]
self.date = ["DATE", "TIME"]
keypad_frame = Frame(self, bg="#212121")
self.label_frame = Frame(self, bg="#212121")
for i,d in enumerate(self.data+self.date):
self.label = Label(self.label_frame, text=" "+d+" ", height=2, bd=2, width=15, bg="#212121", fg="#FFF3AE",
font=("Roboto Mono", 12 * -1, "bold")).grid(row=i+1, column=0, padx=15, pady=5, sticky='news')
self.label_y = Label(self.label_frame, text=datetime.datetime.now().strftime("%d/%m/%Y"), height=1, bd=1, width=15, relief="flat",
bg="#212121", fg="#FFF3AE", font=("Roboto Mono", 12 * -1, "bold"))
self.label_y.grid(row=4, column=1, padx=15, pady=5, sticky='news')
self.label_d = Label(self.label_frame, text=datetime.datetime.now().strftime("%H:%M"), height=1, bd=1, width=15, relief="flat",
bg="#212121", fg="#FFF3AE", font=("Roboto Mono", 12 * -1, "bold"))
self.label_d.grid(row=5, column=1, padx=15, pady=5, sticky='news')
self.entries = [Entry(self.label_frame, width=30, bd=3, bg="#424035", fg="#FFF3AE", font=("Roboto Mono", 15 * -1, "bold")) for i in range(len(self.data))]
self.entry_list = []
for i,e in enumerate(self.entries):
e.grid(row=i+1, column=1, padx=5, pady=5)
self.entry_list.append(e)
self.label_frame.place(x=100, y=50)
self.image_de_retour = Image.open(icons_path_+"IconeRetour.png").resize((75, 75))
self.icone_de_retour = ImageTk.PhotoImage(master=self, image=self.image_de_retour)
self.btn_quit = Button(self, text='Sortir', bg="#212121", command=self.destroy)
self.btn_quit['image'] = self.icone_de_retour
self.btn_quit.place(x=0, y=0)
self.btn_save = Button(self, text='Enregistrer', bd=2, fg='#FFF3AE', bg='#212121', font=("Roboto Mono", 15 * -1, "bold"),
borderwidth=0, state=DISABLED, command=self.save_data)
self.btn_save.pack(anchor=NE)
cara = settings.clavier()
for car, grid_value in cara.items():
if grid_value[0] == 5:
button = Button(keypad_frame, text=str(car), bg='#424035', fg='#FFF3AE', bd=5, font=("Roboto Mono", 15 * -1, "bold"), width=3,
borderwidth=0, command=lambda x=car: self.set_text(x)).grid(row=grid_value[0], column=grid_value[1], padx=1, pady=2, sticky='news')
if grid_value[0] == 6:
button = Button(keypad_frame, text=str(car), bg='#424035', fg='#FFF3AE', bd=5, font=("Roboto Mono", 15 * -1, "bold"),
borderwidth=0, command=lambda x=car: self.set_text(x)).grid(row=grid_value[0], column=grid_value[1], pady=2, sticky='news')
if grid_value[0] == 7:
button = Button(keypad_frame, text=str(car), bg='#424035', fg='#FFF3AE', bd=5, font=("Roboto Mono", 15 * -1, "bold"),
borderwidth=0, command=lambda x=car: self.set_text(x)).grid(row=grid_value[0], column=grid_value[1], pady=2, sticky='news')
if grid_value[0] == 8:
button = Button(keypad_frame, text=str(car), bg='#424035', fg='#FFF3AE', bd=5, font=("Roboto Mono", 15 * -1, "bold"),
borderwidth=0, command=lambda x=car: self.set_text(x)).grid(row=grid_value[0], column=grid_value[1], pady=2, sticky='news')
self.btn_delete = Button(keypad_frame, text='<', bg='#424035', fg='#FFF3AE', bd=5, font=("Roboto Mono", 15 * -1, "bold"),
borderwidth=0, command=self.delete_text).grid(row=8, column=11, pady=2, sticky='news')
keypad_frame.place(x=135, y=325)
self._logo_mercurio_ = Image.open(icons_path_+"logo_mercurio.png").resize((100, 60))
self.__logo_ = ImageTk.PhotoImage(master=self, image=self._logo_mercurio_)
self.__label_logo_ = Label(self, image=self.__logo_, bg="#212121").place(x=-15, y=425)
def set_text(self, text):
self.btn_save['state'] = NORMAL
self.btn_save['bg'] = "#424035"
widget = self.focus_get()
if widget in self.entries:
widget.insert("insert", text)
def delete_text(self):
widget = self.focus_get()
widget.delete(0, END)
def save_data(self):
data_dict = {}
inside_data = {}
for s, i in enumerate(self.entry_list):
widget = i
data = widget.get()
data_dict[s] = data
data_dict[self.data[s]] = data_dict.pop(s)
print(data_dict)
who["Actor"] = data_dict["<NAME>"]
who["Company"] = data_dict["SOCIETE"]
where["Place"] = data_dict["LIEU de PDV"]
when["Date"] = datetime.datetime.now().strftime("%d/%m/%Y")
when["Time"] = datetime.datetime.now().strftime("%H:%M")
json_file(metadata(who=who, where=where, when=when))
try:
bus.write_byte(0x44, 13)
time.sleep(0.1)
bus.write_byte(0x44, 0)
except:
pass
new_wind = Toplevel(self)
#new_wind.geometry("800x480")
new_wind.attributes("-fullscreen", True)
new_wind.configure(bg="#212121")
new_wind.title("info")
new_lab = Label(new_wind, text=" Données Enregistrées avec Succès !", bg="#212121", fg="#FFF3AE", font=("Roboto Mono", 18 * -1, "bold")).place(x=150, y=100)
self.image_de_retour_ = Image.open(icons_path_+"IconeRetour.png").resize((75, 75))
self.icone_de_retour_ = ImageTk.PhotoImage(master=new_wind, image=self.image_de_retour_)
btn_quit_ = Button(new_wind, text="Sortir", bg="#212121", fg="#FFF3AE", image=self.icone_de_retour_, command=new_wind.destroy).pack(side=TOP, anchor=NW)
self._logo_mercurio_s = Image.open(icons_path_+"logo_mercurio.png").resize((100, 60))
self.__logo__ = ImageTk.PhotoImage(master=new_wind, image=self._logo_mercurio_s)
self.__label_logo__ = Label(new_wind, image=self.__logo__, bg="#212121").place(x=-15, y=425)
#################################################################################################
############################### Camera INFOs ##################################################
class camera_info(Tk):
global camera_
camera_ = {}
def __init__(self):
Tk.__init__(self)
#self.geometry("800x480")
self.attributes("-fullscreen", True)
self.configure(bg="#212121")
self.title("Dome")
keypad_frame = Frame(self, bg="#212121")
self.exit_frame = Frame(self, bg="#212121")
self.label_frame = Frame(self, bg="#212121")
if settings.camera_available() == True :
aperture = int(settings.image_data("aperture")['Current'].split(':')[-1])
iso = int(settings.image_data("iso")['Current'].split(':')[-1])
whitebalance = settings.image_data("whitebalance")['Current'].split(':')[-1]
shutterspeed = settings.image_data("shutterspeed")['Current'].split(':')[-1]
model = settings.image_data("cameramodel")['Current'].split(':')[-1]
which["Camera"]["Model"] = model
try:
which["Camera"]["Focal"] = focal
except:
pass
which["Camera"]["Iso"] = iso
which["Camera"]["Aperture"] = aperture
which["Camera"]["Whitebalance"] = whitebalance
which["Camera"]["Shutterspeed"] = shutterspeed
json_file(metadata(which=which))
try:
focal = settings.focal_length()
except:
focal="nA"
additional_parameters = {'Focal':focal, 'Aperture':aperture,
'ISO':iso, 'Whitebalance':whitebalance,
'Shutterspeed':shutterspeed, 'Model':model}
display_list = list(additional_parameters.keys())
for i,d in enumerate(display_list):
self.label = Label(self.label_frame, text=" "+d+" ", height=2, bd=2, width=15, relief="flat", bg='#424035', font=('helvetica', 12, 'bold'),
fg="#FFF3AE").grid(row=i+1, column=0, padx=40, pady=15, sticky='news')
camera_list = list(additional_parameters.values())
for i,d in enumerate(camera_list):
self.label = Label(self.label_frame, text=" "+str(d)+" ", height=2, bd=2, bg='#424035', fg="#FFF3AE", width=40, font=("Roboto Mono", 16 * -1, "bold")
).grid(row=i+1, column=1, padx=40, pady=15, sticky='news')
else :
self.label = Label(self, text=" Aucune caméra détectée, branchez la caméra SVP !", bg="#212121", width=50, font=("Roboto Mono", 16 * -1, "bold"),
fg="#FFF3AE").place(x=150, y=100)
self.image_quitter_icon = Image.open(icons_path_+"IconeRetour.png").resize((75, 75))
self._icon_quitter_ = ImageTk.PhotoImage(master=self.label_frame, image=self.image_quitter_icon)
self.btn_quit = Button(self, text='Sortir', bg="#212121", command=self.destroy)
self.btn_quit['image'] = self._icon_quitter_
self.btn_quit.pack(side=TOP, anchor=NW)
self._logo_mercurio_cam = Image.open(icons_path_+"logo_mercurio.png").resize((100, 60))
self.__logo__cam = ImageTk.PhotoImage(master=self, image=self._logo_mercurio_cam)
self.__label_logo__c = Label(self, image=self.__logo__cam, bg="#212121").place(x=-15, y=425)
self.label_frame.place(x=100, y=100)
def save_camera_data():
aperture = settings.image_data("aperture")
aperture = int(aperture['Current'].split(':')[-1])
iso = int(settings.image_data("iso")['Current'].split(':')[-1])
whitebalance = settings.image_data("whitebalance")['Current'].split(':')[-1]
shutterspeed = settings.image_data("shutterspeed")['Current'].split(':')[-1]
model = settings.image_data("cameramodel")['Current'].split(':')[-1]
try:
which["Camera"]["Focal"] = focal
except:
pass
which["Camera"]["Model"] = model
which["Camera"]["Iso"] = iso
which["Camera"]["Aperture"] = aperture
which["Camera"]["Whitebalance"] = whitebalance
which["Camera"]["Shutterspeed"] = shutterspeed
return which
class environement_data:
def __init__(self):
self.envi_wind = Tk()
#self.envi_wind.geometry("800x480")
self.envi_wind.attributes("-fullscreen", True)
self.envi_wind.title('environment')
self.envi_wind.configure(bg="#212121")
self.frame_exit = Frame(self.envi_wind, bg="#212121")
self.frame = Frame(self.envi_wind, bg="#212121")
keypad_frame = Frame(self.envi_wind, bg="#212121")
self.environment_list = ["Technique", "Diamètre du Dôme mm"]
self.environment_to_edit = ["Appelation", "Description", "Projet", "LP Filename"]
self.sortir_icone = Image.open(icons_path_+"IconeRetour.png").resize((65, 65))
self.___sortir_icn__ = ImageTk.PhotoImage(master=self.envi_wind, image=self.sortir_icone)
self.button_exit = Button(self.envi_wind, text="Sortir", bg="#212121", command=self.envi_wind.destroy)
self.button_exit['image'] = self.___sortir_icn__
for i, data in enumerate(self.environment_to_edit+self.environment_list):
label = Label(self.frame, text=data, width=30, bg='#212121', fg='#FFF3AE', font=("Roboto Mono", 13 * -1, "bold"))
label.grid(row=i, column=0, padx=10, pady=10, sticky='news')
self.entries = [Entry(self.frame, width=30, bd=2, bg='#212121', fg='#FFF3AE', font=("Roboto Mono", 13 * -1, "bold")) for i in range(len(self.environment_to_edit))]
self.entry_list = []
for i,e in enumerate(self.entries):
## e.grid(row=i, column=1, padx=5, pady=5)
self.entry_list.append(e)
self.entry_list[-4].insert(END, what["Appelation"])
self.entry_list[-1].insert(END, how["Modality"]["Protocol"]["Detail"]["LPFilename"])
for i,e in enumerate(self.entry_list):
e.grid(row=i, column=1, padx=5, pady=5)
self.label_technique = Label(self.frame, text="RTI", width=30, bg='#212121', fg='#FFF3AE', font=("Roboto Mono", 13 * -1, "bold")
).grid(row=4, column=1, padx=5, pady=5, stick='news')
self.label_diam = Label(self.frame, text="750", width=30, bg='#212121', fg='#FFF3AE', font=("Roboto Mono", 13 * -1, "bold")
).grid(row=5, column=1, padx=5, pady=5, stick='news')
self.btn_save = Button(self.envi_wind, text='Enregistrer', bd=2, fg='#FFF3AE', bg='#212121', font=("Roboto Mono", 15 * -1, "bold"),
borderwidth=0, state=DISABLED, command=self.save_data)
self.btn_save.pack(anchor=NE)
cara = settings.clavier()
for car, grid_value in cara.items():
if grid_value[0] == 5:
button = Button(keypad_frame, text=str(car), bg='#424035', fg='#FFF3AE', bd=5, font=("Roboto Mono", 15 * -1, "bold"), width=3,
borderwidth=0, command=lambda x=car: self.set_text(x)).grid(row=grid_value[0], column=grid_value[1], padx=1, pady=2, sticky='news')
if grid_value[0] == 6:
button = Button(keypad_frame, text=str(car), bg='#424035', fg='#FFF3AE', bd=5, font=("Roboto Mono", 15 * -1, "bold"),
borderwidth=0, command=lambda x=car: self.set_text(x)).grid(row=grid_value[0], column=grid_value[1], pady=2, sticky='news')
if grid_value[0] == 7:
button = Button(keypad_frame, text=str(car), bg='#424035', fg='#FFF3AE', bd=5, font=("Roboto Mono", 15 * -1, "bold"),
borderwidth=0, command=lambda x=car: self.set_text(x)).grid(row=grid_value[0], column=grid_value[1], pady=2, sticky='news')
if grid_value[0] == 8:
button = Button(keypad_frame, text=str(car), bg='#424035', fg='#FFF3AE', bd=5, font=("Roboto Mono", 15 * -1, "bold"),
borderwidth=0, command=lambda x=car: self.set_text(x)).grid(row=grid_value[0], column=grid_value[1], pady=2, sticky='news')
self.btn_delete = Button(keypad_frame, text='<', bg='#424035', fg='#FFF3AE', bd=5, font=("Roboto Mono", 15 * -1, "bold"),
borderwidth=0, command=self.delete_text).grid(row=8, column=11, pady=2, sticky='news')
self.envi_wind.rowconfigure(0, weight=1)
self.envi_wind.columnconfigure(0, weight=1)
self.frame.place(x=75, y=50)
keypad_frame.place(x=135, y=325)
self.button_exit.place(x=0, y=0)
self._logo_mercurio_env = Image.open(icons_path_+"logo_mercurio.png").resize((100, 60))
self.__logo__env = ImageTk.PhotoImage(master=self.envi_wind, image=self._logo_mercurio_env)
self.__label_logo__e = Label(self.envi_wind, image=self.__logo__env, bg="#212121").place(x=-15, y=425)
def set_text(self, text):
self.btn_save['state'] = NORMAL
self.btn_save['bg'] = "#424035"
widget = self.envi_wind.focus_get()
if widget in self.entries:
widget.insert("insert", text)
def delete_text(self):
widget = self.envi_wind.focus_get()
widget.delete(0, END)
def save_data(self):
global data
data_dict = {}
for s, i in enumerate(self.entry_list):
widget = i
data = widget.get()
data_dict[s] = data
data_dict[self.environment_to_edit[s]] = data_dict.pop(s)
what["Description"]= data_dict["Description"]
what["Appelation"]= data_dict["Appelation"]
why["Project"] = data_dict["Projet"]
how["Modality"]["Protocol"]["Detail"]["LPFilename"] = data_dict["LP Filename"]
json_file(metadata(what=what, why=why, how=how))
try:
bus.write_byte(0x44, 13)
time.sleep(0.1)
bus.write_byte(0x44, 0)
except:
pass
new_wind = Toplevel()
new_wind.title("info")
#new_wind.geometry("800x480")
new_wind.attributes("-fullscreen", True)
new_wind.configure(bg="#212121")
new_lab = Label(new_wind, text="Donneés enregistrées avec Succès !", bg="#212121", fg="#FFF3AE",
font=("Roboto Mono", 16 * -1, "bold")).place(x=150, y=100)
self.image_de_retour_ = Image.open(icons_path_+"IconeRetour.png").resize((75, 75))
self.icone_de_retour_ = ImageTk.PhotoImage(master=new_wind, image=self.image_de_retour_)
btn_quit_ = Button(new_wind, text="Sortir", bg="#212121", fg="#FFF3AE", image=self.icone_de_retour_, command=new_wind.destroy).pack(side=TOP, anchor=NW)
self._logo_mercurio_s = Image.open(icons_path_+"logo_mercurio.png").resize((100, 60))
self.__logo__ = ImageTk.PhotoImage(master=new_wind, image=self._logo_mercurio_s)
self.__label_logo__ = Label(new_wind, image=self.__logo__, bg="#212121").place(x=-15, y=425)
class others:
def __init__(self):
self.envi_wind = Tk()
self.envi_wind.attributes('-fullscreen', True)
self.envi_wind.title('Autres')
self.envi_wind.configure(bg="#212121")
#self.envi_wind.geometry("800x480")
self.frame_exit = Frame(self.envi_wind, bg="#212121")
self.frame = Frame(self.envi_wind, bg="#212121")
keypad_frame = Frame(self.envi_wind, bg="#212121")
self.environment_list = ["Version", "Contact", "A Propos"]
self.autres_data = ["1.0.0", "<EMAIL>", "Imagerie d'expertise"]
self.retour___icone = Image.open(icons_path_+"IconeRetour.png").resize((65, 65))
self.retour____ = ImageTk.PhotoImage(master=self.envi_wind, image=self.retour___icone)
self.button_exit = Button(self.frame_exit, text="Sortir", bg='#212121', command=self.envi_wind.destroy)
self.button_exit['image'] = self.retour____
for i, data in enumerate(self.environment_list):
label = Label(self.frame, text=data, bg='#212121', fg='#FFF3AE', font=("Roboto Mono", 13 * -1, "bold"), width=30)
label.grid(row=i, column=0, padx=25, pady=35, sticky='news')
for i, e in enumerate(self.autres_data):
label = Label(self.frame, text=e, bg='#212121', fg='#FFF3AE', font=("Roboto Mono", 15 * -1, "bold"), width=30)
label.grid(row=i, column=1, padx=5, pady=35, sticky='news')
web_label = Label(self.frame, text="Notre Site Web", bg='#212121', fg='#FFF3AE', font=("Roboto Mono", 13 * -1, "bold"), width=30)
web_label.grid(row=len(self.environment_list), column=0, padx=10, pady=35, sticky='news')
web_label_ = Label(self.frame, text="mercurioimaging.com", bg='#212121', fg='#FFF3AE', font=("Roboto Mono", 15 * -1, "bold"), cursor="hand2", width=30)
web_label_.grid(row=len(self.environment_list), column=1, padx=5, pady=35, sticky='news')
web_label_.bind("<Button-1>", lambda e: self.callback("https://mercurioimaging.com/"))
self.envi_wind.rowconfigure(0, weight=1)
self.envi_wind.columnconfigure(0, weight=1)
self.frame_exit.grid(row=0, column=0, sticky='nw')
self.frame.grid(row=0, column=0, sticky='n')
self.button_exit.grid(row=0, column=0, sticky='news')
self._logo_mercurio_au = Image.open(icons_path_+"logo_mercurio.png").resize((100, 60))
self.__logo__au = ImageTk.PhotoImage(master=self.envi_wind, image=self._logo_mercurio_au)
self.__label_logo__au = Label(self.envi_wind, image=self.__logo__au, bg="#212121").place(x=-15, y=425)
def callback(self, url):
webbrowser.open_new(url)
class _camera_folder_:
global camera_folder
def __init__(self):
self.envi_wind = Tk()
#self.envi_wind.attributes('-fullscreen', True)
self.envi_wind.title('environment')
self.envi_wind.geometry('800x480')
self.envi_wind.configure(bg="#212121")
self.frame = Frame(self.envi_wind, bg="#212121")
keypad_frame = Frame(self.envi_wind, bg="#212121")
self.camera_folder_label = "Dossier des images"
self.camera_folder = "/store_00020001/DCIM/100CANON"
self.icone_deRetour = Image.open(icons_path_+"IconeRetour.png").resize((65, 65))
self.icone_Ret = ImageTk.PhotoImage(master=self.envi_wind, image=self.icone_deRetour)
self.button_exit = Button(self.envi_wind, text="Sortir", bg='#212121', command=self.envi_wind.destroy)
self.button_exit['image'] = self.icone_Ret
self.button_exit.pack(side=TOP, anchor=NW)
self.button_modifier = Button(self.envi_wind, text="Modifier", bg='#212121', fg='#FFF3AE', font=("Roboto Mono", 13 * -1, "bold"),
command=self.edit_camera_folder)
self.button_modifier.pack(anchor=NE)
self.label_camera_text = Label(self.frame, text=self.camera_folder_label, bg='#212121', fg='#FFF3AE', font=("Roboto Mono", 13 * -1, "bold"), width=30)
self.label_camera_text.grid(row=1, column=0, sticky='news')
self.label_camera_folder = Label(self.frame, text=self.camera_folder, bg='#212121', fg='#FFF3AE', font=("Roboto Mono", 13 * -1, "bold"), width=30)
self.label_camera_folder.grid(row=1, column=1, sticky='news')
self.envi_wind.rowconfigure(0, weight=1)
self.envi_wind.columnconfigure(0, weight=1)
self.frame.place(x=100, y=100)
self._logo_mercurio_a = Image.open(icons_path_+"logo_mercurio.png").resize((100, 60))
self.__logo__a = ImageTk.PhotoImage(master=self.envi_wind, image=self._logo_mercurio_a)
self.__label_logo__a = Label(self.envi_wind, image=self.__logo__a, bg="#212121").place(x=-15, y=425)
def edit_camera_folder(self):
self.camera_folder_editer = Entry(self.frame, bg='#212121', fg='#FFF3AE', font=("Roboto Mono", 13 * -1, "bold"), width=4)
self.camera_folder_editer.grid(row=1, column=1, pady=10, sticky='news')
self.button_modifier['text'] = "Enregistrer"
self.button_modifier['command'] = self.save_data
keypad_frame = Frame(self.envi_wind, bg="#212121")
cara = settings.numerical_pad()
for car, grid_value in cara.items():
if grid_value[0] == 5:
button = Button(keypad_frame, text=str(car), bg='#424035', fg='#FFF3AE', font=("Roboto Mono", 16 * -1, "bold"), width=4,
borderwidth=0, command=lambda x=car: self.set_text(x)).grid(row=grid_value[0], column=grid_value[1], padx=1, pady=3, sticky='news')
if grid_value[0] == 6:
button = Button(keypad_frame, text=str(car), bg='#424035', fg='#FFF3AE', font=("Roboto Mono", 16 * -1, "bold"), width=4,
borderwidth=0, command=lambda x=car: self.set_text(x)).grid(row=grid_value[0], column=grid_value[1], padx=1, pady=3, sticky='news')
if grid_value[0] == 7:
button = Button(keypad_frame, text=str(car), bg='#424035', fg='#FFF3AE', font=("Roboto Mono", 16 * -1, "bold"), width=4,
borderwidth=0, command=lambda x=car: self.set_text(x)).grid(row=grid_value[0], column=grid_value[1], padx=1, pady=3, sticky='news')
if grid_value[0] == 8:
button = Button(keypad_frame, text=str(car), bg='#424035', fg='#FFF3AE', font=("Roboto Mono", 16 * -1, "bold"), width=4,
borderwidth=0, command=lambda x=car: self.set_text(x)).grid(row=grid_value[0], column=grid_value[1], padx=1, pady=3, sticky='news')
delete_button = Button(keypad_frame, text="<", bg='#424035', fg='#FFF3AE', font=("Roboto Mono", 16 * -1, "bold"), padx=1, width=4,
borderwidth=0, command=self.delete_text).grid(row=8, column=4, pady=2, sticky='news')
keypad_frame.place(x=300, y=300)
def set_text(self, text):
self.camera_folder_editer.insert("insert", text)
def delete_text(self):
self.camera_folder_editer.delete(0, END)
def save_data(self):
camera_folder = "/store_00020001/DCIM/"+str(self.camera_folder_editer.get())+"CANON"
print(camera_folder)
self.label_camera_text.config(text="Dossier des images")
self.label_camera_text.grid(row=2, column=0, pady=10, sticky='news')
self.label_camera_folder.config(text=camera_folder)
self.label_camera_folder.grid(row=2, column=1, pady=10, sticky='news')
self.button_modifier['text'] = "Modifier"
self.button_modifier['command'] = self.edit_camera_folder
try:
bus.write_byte(0x44, 13)
time.sleep(0.1)
bus.write_byte(0x44, 0)
except:
pass
##### -- -- ---
def main():
settings.killprocess()
return user_interface()
def copy_to_usb(folder, project_name):
mario_sound(100)
media_path = "/media/pi/"
folders_in_media = os.listdir(media_path)
blink = threading.Thread(target=flash_green)
blink.start()
if len(folders_in_media) == 0:
print("Inserérez une clé USB")
else:
usb_path = media_path+folders_in_media[0]
### Make Zip
shutil.make_archive(usb_path+"/"+project_name, 'zip', folder)
def mario_sound(frq):
try:
bus.write_block_data(0x44, 0, [8, frq])
except:
pass
def trois_colors(frq):
try:
bus.write_byte(0x44, 11)
time.sleep(frq)
bus.write_byte(0x44, 12)
time.sleep(frq)
bus.write_byte(0x44, 13)
time.sleep(frq)
except:
pass
def trois_colors_250():
try:
bus.write_byte(0x44, 11)
time.sleep(0.25)
bus.write_byte(0x44, 12)
time.sleep(0.25)
bus.write_byte(0x44, 13)
time.sleep(0.25)
except:
pass
def flash_green():
try:
for i in range(20):
bus.write_byte(0x44, 11)
time.sleep(0.1)
except:
pass
def mario_s():
mario_sound(2640)
time.sleep(0.15)
mario_sound(2640)
time.sleep(0.3)
mario_sound(2640)
time.sleep(0.3)
mario_sound(2040)
time.sleep(0.1)
mario_sound(2640)
time.sleep(0.3)
mario_sound(3080)
time.sleep(0.55)
mario_sound(1520)
time.sleep(0.575)
def led_1_ctrl(state): ### state should be 0 or 1
try:
bus.write_block_data(0x44, 0, [10, state])
except:
pass
def led_2_ctrl(state):
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(4,GPIO.OUT)
if state == 1:
GPIO.output(4, GPIO.HIGH)
elif state == 0:
GPIO.output(4, GPIO.LOW)
if __name__ == '__main__':
bus = smbus.SMBus(1) #### Enable i2c
try:
os.system("gphoto2 --set-config capturetarget=1")
except:
pass
led_1_ctrl(0)
led_2_ctrl(0)
icons_path_ = "/home/pi/grandDome/ICONES/"
### ---- Create Folders ---------------
try:
os.mkdir("/home/pi/grandDome/data")
os.mkdir("/home/pi/grandDome/images/rti")
except:
pass
data_path = "/home/pi/grandDome/data/"
image_path = "/home/pi/grandDome/images/"
rti_path = "/home/pi/grandDome/images/rti/"
lp_path = "/home/pi/grandDome/LPFiles/"
### Camera options
camera_folder = "/store_00020001/DCIM/100CANON"
try:
subprocess.run(["gphoto2", "--set-config", "eosremoterelease=4"]) #### Release = Immediate 5 --- Release Full 4
except:
pass
trigCMD = ["--trigger-capture"]
download_allCMD = ["--get-all-files"] ## download files
clearCMD = ["--folder", camera_folder, "-R", "--delete-all-files"] ### To Change if the camera is not Canon !!
shot_date = datetime.datetime.now().strftime("%Y%m%d")
shot_time = datetime.datetime.now().strftime("%Y%m%d%H%M%S%f")
picID = "RTI"
rti_folder_name = picID + shot_time
save_location = "./images/"
trois_colors(500)
mario_sound(3000)
main = main()
led_1_ctrl(0)
led_2_ctrl(0)
main.mainloop()
|
[
"os.mkdir",
"webbrowser.open_new",
"json.dumps",
"settings.killprocess",
"tkinter.ttk.Progressbar",
"i2c_devices.i2c_checker",
"glob.glob",
"RPi.GPIO.output",
"settings.numerical_pad",
"smbus.SMBus",
"shutil.make_archive",
"RPi.GPIO.setup",
"settings.clavier",
"settings.check_memory",
"datetime.datetime.now",
"sh.gphoto2",
"threading.Thread",
"RPi.GPIO.setmode",
"os.rename",
"os.system",
"time.sleep",
"subprocess.call",
"settings.camera_available",
"settings.image_data",
"RPi.GPIO.setwarnings",
"os.listdir",
"settings.focal_length",
"subprocess.run",
"PIL.ImageTk.PhotoImage",
"PIL.Image.open",
"shutil.disk_usage",
"settings.about_camera",
"shutil.move"
] |
[((74, 136), 'subprocess.call', 'call', (['[\'espeak "Welcome to granDome" 2>/dev/null\']'], {'shell': '(True)'}), '([\'espeak "Welcome to granDome" 2>/dev/null\'], shell=True)\n', (78, 136), False, 'from subprocess import call\n'), ((2281, 2299), 'settings.clavier', 'settings.clavier', ([], {}), '()\n', (2297, 2299), False, 'import settings\n'), ((1615, 1633), 'os.mkdir', 'os.mkdir', (['"""./json"""'], {}), "('./json')\n", (1623, 1633), False, 'import os, shutil, subprocess, signal\n'), ((1999, 2029), 'json.dumps', 'json.dumps', (['metadata'], {'indent': '(4)'}), '(metadata, indent=4)\n', (2009, 2029), False, 'import json\n'), ((63037, 63068), 'settings.image_data', 'settings.image_data', (['"""aperture"""'], {}), "('aperture')\n", (63056, 63068), False, 'import settings\n'), ((79327, 79349), 'settings.killprocess', 'settings.killprocess', ([], {}), '()\n', (79347, 79349), False, 'import settings\n'), ((79498, 79520), 'os.listdir', 'os.listdir', (['media_path'], {}), '(media_path)\n', (79508, 79520), False, 'import os, shutil, subprocess, signal\n'), ((79534, 79570), 'threading.Thread', 'threading.Thread', ([], {'target': 'flash_green'}), '(target=flash_green)\n', (79550, 79570), False, 'import webbrowser, threading\n'), ((79773, 79838), 'shutil.make_archive', 'shutil.make_archive', (["(usb_path + '/' + project_name)", '"""zip"""', 'folder'], {}), "(usb_path + '/' + project_name, 'zip', folder)\n", (79792, 79838), False, 'import shutil\n'), ((80631, 80647), 'time.sleep', 'time.sleep', (['(0.15)'], {}), '(0.15)\n', (80641, 80647), False, 'import smbus, time, datetime\n'), ((80676, 80691), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (80686, 80691), False, 'import smbus, time, datetime\n'), ((80720, 80735), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (80730, 80735), False, 'import smbus, time, datetime\n'), ((80764, 80779), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (80774, 80779), False, 'import smbus, time, datetime\n'), ((80808, 80823), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (80818, 80823), False, 'import smbus, time, datetime\n'), ((80852, 80868), 'time.sleep', 'time.sleep', (['(0.55)'], {}), '(0.55)\n', (80862, 80868), False, 'import smbus, time, datetime\n'), ((80897, 80914), 'time.sleep', 'time.sleep', (['(0.575)'], {}), '(0.575)\n', (80907, 80914), False, 'import smbus, time, datetime\n'), ((81088, 81110), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (81100, 81110), True, 'import RPi.GPIO as GPIO\n'), ((81116, 81139), 'RPi.GPIO.setwarnings', 'GPIO.setwarnings', (['(False)'], {}), '(False)\n', (81132, 81139), True, 'import RPi.GPIO as GPIO\n'), ((81145, 81168), 'RPi.GPIO.setup', 'GPIO.setup', (['(4)', 'GPIO.OUT'], {}), '(4, GPIO.OUT)\n', (81155, 81168), True, 'import RPi.GPIO as GPIO\n'), ((81320, 81334), 'smbus.SMBus', 'smbus.SMBus', (['(1)'], {}), '(1)\n', (81331, 81334), False, 'import smbus, time, datetime\n'), ((720, 743), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (741, 743), False, 'import smbus, time, datetime\n'), ((776, 799), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (797, 799), False, 'import smbus, time, datetime\n'), ((2200, 2233), 'shutil.move', 'shutil.move', (['json_file_name', 'path'], {}), '(json_file_name, path)\n', (2211, 2233), False, 'import shutil\n'), ((13694, 13756), 'tkinter.ttk.Progressbar', 'Progressbar', (['self.capture_frame'], {'orient': 'HORIZONTAL', 'length': '(375)'}), '(self.capture_frame, orient=HORIZONTAL, length=375)\n', (13705, 13756), False, 'from tkinter.ttk import Progressbar\n'), ((20073, 20093), 'os.listdir', 'os.listdir', (['rti_path'], {}), '(rti_path)\n', (20083, 20093), False, 'import os, shutil, subprocess, signal\n'), ((21794, 21845), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['self.previewImg', 'Image.BILINEAR'], {}), '(self.previewImg, Image.BILINEAR)\n', (21812, 21845), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((22467, 22538), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'master': 'self.message_box_usb', 'image': 'self.icon_retusb'}), '(master=self.message_box_usb, image=self.icon_retusb)\n', (22485, 22538), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((23366, 23388), 'os.listdir', 'os.listdir', (['media_path'], {}), '(media_path)\n', (23376, 23388), False, 'import os, shutil, subprocess, signal\n'), ((24741, 24819), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'master': 'self.message_box_usb', 'image': 'self._logo_mercurio_usb'}), '(master=self.message_box_usb, image=self._logo_mercurio_usb)\n', (24759, 24819), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((26464, 26516), 'subprocess.run', 'subprocess.run', (["['rm', '-rf', directotory_to_remove]"], {}), "(['rm', '-rf', directotory_to_remove])\n", (26478, 26516), False, 'import os, shutil, subprocess, signal\n'), ((26819, 26852), 'os.system', 'os.system', (['"""sudo shutdown -h now"""'], {}), "('sudo shutdown -h now')\n", (26828, 26852), False, 'import os, shutil, subprocess, signal\n'), ((27922, 27936), 'smbus.SMBus', 'smbus.SMBus', (['(1)'], {}), '(1)\n', (27933, 27936), False, 'import smbus, time, datetime\n'), ((27946, 27968), 'settings.killprocess', 'settings.killprocess', ([], {}), '()\n', (27966, 27968), False, 'import settings\n'), ((27978, 27990), 'sh.gphoto2', 'gp', (['clearCMD'], {}), '(clearCMD)\n', (27980, 27990), True, 'from sh import gphoto2 as gp\n'), ((28210, 28271), 'os.system', 'os.system', (['"""gphoto2 --trigger-capture --wait-event=FILEADDED"""'], {}), "('gphoto2 --trigger-capture --wait-event=FILEADDED')\n", (28219, 28271), False, 'import os, shutil, subprocess, signal\n'), ((28281, 28303), 'settings.killprocess', 'settings.killprocess', ([], {}), '()\n', (28301, 28303), False, 'import settings\n'), ((28360, 28426), 'os.system', 'os.system', (["('gphoto2 --filename=' + thumb_name + ' --get-all-files')"], {}), "('gphoto2 --filename=' + thumb_name + ' --get-all-files')\n", (28369, 28426), False, 'import os, shutil, subprocess, signal\n'), ((28432, 28447), 'time.sleep', 'time.sleep', (['(0.4)'], {}), '(0.4)\n', (28442, 28447), False, 'import smbus, time, datetime\n'), ((28504, 28526), 'settings.killprocess', 'settings.killprocess', ([], {}), '()\n', (28524, 28526), False, 'import settings\n'), ((28673, 28688), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (28683, 28688), False, 'import smbus, time, datetime\n'), ((29184, 29197), 'i2c_devices.i2c_checker', 'i2c_checker', ([], {}), '()\n', (29195, 29197), False, 'from i2c_devices import i2c_checker\n'), ((29609, 29636), 'settings.camera_available', 'settings.camera_available', ([], {}), '()\n', (29634, 29636), False, 'import settings\n'), ((45929, 45944), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (45939, 45944), False, 'import smbus, time, datetime\n'), ((54389, 54448), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'master': 'self', 'image': 'self.image_de_retour'}), '(master=self, image=self.image_de_retour)\n', (54407, 54448), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((54932, 54950), 'settings.clavier', 'settings.clavier', ([], {}), '()\n', (54948, 54950), False, 'import settings\n'), ((56842, 56901), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'master': 'self', 'image': 'self._logo_mercurio_'}), '(master=self, image=self._logo_mercurio_)\n', (56860, 56901), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((58657, 58721), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'master': 'new_wind', 'image': 'self.image_de_retour_'}), '(master=new_wind, image=self.image_de_retour_)\n', (58675, 58721), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((59014, 59078), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'master': 'new_wind', 'image': 'self._logo_mercurio_s'}), '(master=new_wind, image=self._logo_mercurio_s)\n', (59032, 59078), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((62353, 62427), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'master': 'self.label_frame', 'image': 'self.image_quitter_icon'}), '(master=self.label_frame, image=self.image_quitter_icon)\n', (62371, 62427), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((62755, 62817), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'master': 'self', 'image': 'self._logo_mercurio_cam'}), '(master=self, image=self._logo_mercurio_cam)\n', (62773, 62817), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((64563, 64629), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'master': 'self.envi_wind', 'image': 'self.sortir_icone'}), '(master=self.envi_wind, image=self.sortir_icone)\n', (64581, 64629), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((66519, 66537), 'settings.clavier', 'settings.clavier', ([], {}), '()\n', (66535, 66537), False, 'import settings\n'), ((68629, 68701), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'master': 'self.envi_wind', 'image': 'self._logo_mercurio_env'}), '(master=self.envi_wind, image=self._logo_mercurio_env)\n', (68647, 68701), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((70474, 70538), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'master': 'new_wind', 'image': 'self.image_de_retour_'}), '(master=new_wind, image=self.image_de_retour_)\n', (70492, 70538), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((70831, 70895), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'master': 'new_wind', 'image': 'self._logo_mercurio_s'}), '(master=new_wind, image=self._logo_mercurio_s)\n', (70849, 70895), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((71745, 71813), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'master': 'self.envi_wind', 'image': 'self.retour___icone'}), '(master=self.envi_wind, image=self.retour___icone)\n', (71763, 71813), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((73576, 73647), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'master': 'self.envi_wind', 'image': 'self._logo_mercurio_au'}), '(master=self.envi_wind, image=self._logo_mercurio_au)\n', (73594, 73647), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((73817, 73841), 'webbrowser.open_new', 'webbrowser.open_new', (['url'], {}), '(url)\n', (73836, 73841), False, 'import webbrowser, threading\n'), ((74537, 74605), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'master': 'self.envi_wind', 'image': 'self.icone_deRetour'}), '(master=self.envi_wind, image=self.icone_deRetour)\n', (74555, 74605), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((75860, 75930), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'master': 'self.envi_wind', 'image': 'self._logo_mercurio_a'}), '(master=self.envi_wind, image=self._logo_mercurio_a)\n', (75878, 75930), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((76534, 76558), 'settings.numerical_pad', 'settings.numerical_pad', ([], {}), '()\n', (76556, 76558), False, 'import settings\n'), ((80025, 80040), 'time.sleep', 'time.sleep', (['frq'], {}), '(frq)\n', (80035, 80040), False, 'import smbus, time, datetime\n'), ((80084, 80099), 'time.sleep', 'time.sleep', (['frq'], {}), '(frq)\n', (80094, 80099), False, 'import smbus, time, datetime\n'), ((80143, 80158), 'time.sleep', 'time.sleep', (['frq'], {}), '(frq)\n', (80153, 80158), False, 'import smbus, time, datetime\n'), ((80266, 80282), 'time.sleep', 'time.sleep', (['(0.25)'], {}), '(0.25)\n', (80276, 80282), False, 'import smbus, time, datetime\n'), ((80326, 80342), 'time.sleep', 'time.sleep', (['(0.25)'], {}), '(0.25)\n', (80336, 80342), False, 'import smbus, time, datetime\n'), ((80386, 80402), 'time.sleep', 'time.sleep', (['(0.25)'], {}), '(0.25)\n', (80396, 80402), False, 'import smbus, time, datetime\n'), ((81197, 81222), 'RPi.GPIO.output', 'GPIO.output', (['(4)', 'GPIO.HIGH'], {}), '(4, GPIO.HIGH)\n', (81208, 81222), True, 'import RPi.GPIO as GPIO\n'), ((81370, 81419), 'os.system', 'os.system', (['"""gphoto2 --set-config capturetarget=1"""'], {}), "('gphoto2 --set-config capturetarget=1')\n", (81379, 81419), False, 'import os, shutil, subprocess, signal\n'), ((81597, 81632), 'os.mkdir', 'os.mkdir', (['"""/home/pi/grandDome/data"""'], {}), "('/home/pi/grandDome/data')\n", (81605, 81632), False, 'import os, shutil, subprocess, signal\n'), ((81642, 81683), 'os.mkdir', 'os.mkdir', (['"""/home/pi/grandDome/images/rti"""'], {}), "('/home/pi/grandDome/images/rti')\n", (81650, 81683), False, 'import os, shutil, subprocess, signal\n'), ((81996, 82061), 'subprocess.run', 'subprocess.run', (["['gphoto2', '--set-config', 'eosremoterelease=4']"], {}), "(['gphoto2', '--set-config', 'eosremoterelease=4'])\n", (82010, 82061), False, 'import os, shutil, subprocess, signal\n'), ((7107, 7121), 'smbus.SMBus', 'smbus.SMBus', (['(1)'], {}), '(1)\n', (7118, 7121), False, 'import smbus, time, datetime\n'), ((23645, 23672), 'shutil.disk_usage', 'shutil.disk_usage', (['usb_path'], {}), '(usb_path)\n', (23662, 23672), False, 'import shutil\n'), ((30404, 30490), 'subprocess.run', 'subprocess.run', (["['gphoto2', '--folder', camera_folder, '-R', '--delete-all-files']"], {}), "(['gphoto2', '--folder', camera_folder, '-R',\n '--delete-all-files'])\n", (30418, 30490), False, 'import os, shutil, subprocess, signal\n'), ((32908, 32922), 'smbus.SMBus', 'smbus.SMBus', (['(1)'], {}), '(1)\n', (32919, 32922), False, 'import smbus, time, datetime\n'), ((34968, 35041), 'threading.Thread', 'threading.Thread', ([], {'target': 'settings.get_data_from_camera', 'args': '(file_name,)'}), '(target=settings.get_data_from_camera, args=(file_name,))\n', (34984, 35041), False, 'import webbrowser, threading\n'), ((35644, 35668), 'glob.glob', 'glob', (["(rti_path + '*.JPG')"], {}), "(rti_path + '*.JPG')\n", (35648, 35668), False, 'from glob import glob\n'), ((37178, 37264), 'subprocess.run', 'subprocess.run', (["['gphoto2', '--folder', camera_folder, '-R', '--delete-all-files']"], {}), "(['gphoto2', '--folder', camera_folder, '-R',\n '--delete-all-files'])\n", (37192, 37264), False, 'import os, shutil, subprocess, signal\n'), ((37512, 37539), 'settings.camera_available', 'settings.camera_available', ([], {}), '()\n', (37537, 37539), False, 'import settings\n'), ((48809, 48836), 'settings.camera_available', 'settings.camera_available', ([], {}), '()\n', (48834, 48836), False, 'import settings\n'), ((48913, 48936), 'settings.about_camera', 'settings.about_camera', ([], {}), '()\n', (48934, 48936), False, 'import settings\n'), ((58070, 58085), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (58080, 58085), False, 'import smbus, time, datetime\n'), ((59841, 59868), 'settings.camera_available', 'settings.camera_available', ([], {}), '()\n', (59866, 59868), False, 'import settings\n'), ((69867, 69882), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (69877, 69882), False, 'import smbus, time, datetime\n'), ((79156, 79171), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (79166, 79171), False, 'import smbus, time, datetime\n'), ((80542, 80557), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (80552, 80557), False, 'import smbus, time, datetime\n'), ((81254, 81278), 'RPi.GPIO.output', 'GPIO.output', (['(4)', 'GPIO.LOW'], {}), '(4, GPIO.LOW)\n', (81265, 81278), True, 'import RPi.GPIO as GPIO\n'), ((82373, 82396), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (82394, 82396), False, 'import smbus, time, datetime\n'), ((82433, 82456), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (82454, 82456), False, 'import smbus, time, datetime\n'), ((22384, 22427), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'IconeRetour.png')"], {}), "(icons_path_ + 'IconeRetour.png')\n", (22394, 22427), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((24651, 24696), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'logo_mercurio.png')"], {}), "(icons_path_ + 'logo_mercurio.png')\n", (24661, 24696), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((29532, 29555), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (29553, 29555), False, 'import smbus, time, datetime\n'), ((36195, 36232), 'os.rename', 'os.rename', (['img', "(renamed_file + '.JPG')"], {}), "(img, renamed_file + '.JPG')\n", (36204, 36232), False, 'import os, shutil, subprocess, signal\n'), ((54298, 54341), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'IconeRetour.png')"], {}), "(icons_path_ + 'IconeRetour.png')\n", (54308, 54341), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((56756, 56801), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'logo_mercurio.png')"], {}), "(icons_path_ + 'logo_mercurio.png')\n", (56766, 56801), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((57832, 57855), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (57853, 57855), False, 'import smbus, time, datetime\n'), ((57901, 57924), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (57922, 57924), False, 'import smbus, time, datetime\n'), ((58565, 58608), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'IconeRetour.png')"], {}), "(icons_path_ + 'IconeRetour.png')\n", (58575, 58608), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((58927, 58972), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'logo_mercurio.png')"], {}), "(icons_path_ + 'logo_mercurio.png')\n", (58937, 58972), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((60805, 60828), 'settings.focal_length', 'settings.focal_length', ([], {}), '()\n', (60826, 60828), False, 'import settings\n'), ((62263, 62306), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'IconeRetour.png')"], {}), "(icons_path_ + 'IconeRetour.png')\n", (62273, 62306), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((62665, 62710), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'logo_mercurio.png')"], {}), "(icons_path_ + 'logo_mercurio.png')\n", (62675, 62710), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((64472, 64515), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'IconeRetour.png')"], {}), "(icons_path_ + 'IconeRetour.png')\n", (64482, 64515), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((68539, 68584), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'logo_mercurio.png')"], {}), "(icons_path_ + 'logo_mercurio.png')\n", (68549, 68584), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((70382, 70425), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'IconeRetour.png')"], {}), "(icons_path_ + 'IconeRetour.png')\n", (70392, 70425), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((70744, 70789), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'logo_mercurio.png')"], {}), "(icons_path_ + 'logo_mercurio.png')\n", (70754, 70789), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((71659, 71702), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'IconeRetour.png')"], {}), "(icons_path_ + 'IconeRetour.png')\n", (71669, 71702), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((73487, 73532), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'logo_mercurio.png')"], {}), "(icons_path_ + 'logo_mercurio.png')\n", (73497, 73532), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((74452, 74495), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'IconeRetour.png')"], {}), "(icons_path_ + 'IconeRetour.png')\n", (74462, 74495), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((75772, 75817), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'logo_mercurio.png')"], {}), "(icons_path_ + 'logo_mercurio.png')\n", (75782, 75817), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((1867, 1890), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1888, 1890), False, 'import smbus, time, datetime\n'), ((3232, 3276), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'IconeAnnuler.png')"], {}), "(icons_path_ + 'IconeAnnuler.png')\n", (3242, 3276), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((3358, 3403), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'IconeSettings.png')"], {}), "(icons_path_ + 'IconeSettings.png')\n", (3368, 3403), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((3489, 3533), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'menu_capture.png')"], {}), "(icons_path_ + 'menu_capture.png')\n", (3499, 3533), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((3622, 3666), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'menu_projets.png')"], {}), "(icons_path_ + 'menu_projets.png')\n", (3632, 3666), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((3750, 3795), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'IconeEteindre.png')"], {}), "(icons_path_ + 'IconeEteindre.png')\n", (3760, 3795), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((3877, 3922), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'logo_mercurio.png')"], {}), "(icons_path_ + 'logo_mercurio.png')\n", (3887, 3922), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((7885, 7925), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'IconeFaq.png')"], {}), "(icons_path_ + 'IconeFaq.png')\n", (7895, 7925), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((8009, 8057), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'reglage_metadata.png')"], {}), "(icons_path_ + 'reglage_metadata.png')\n", (8019, 8057), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((8143, 8194), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'reglage_dome_tester.png')"], {}), "(icons_path_ + 'reglage_dome_tester.png')\n", (8153, 8194), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((8282, 8335), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'reglage_camera_tester.png')"], {}), "(icons_path_ + 'reglage_camera_tester.png')\n", (8292, 8335), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((8417, 8460), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'IconeRetour.png')"], {}), "(icons_path_ + 'IconeRetour.png')\n", (8427, 8460), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((8544, 8589), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'logo_mercurio.png')"], {}), "(icons_path_ + 'logo_mercurio.png')\n", (8554, 8589), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((12004, 12049), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'logo_mercurio.png')"], {}), "(icons_path_ + 'logo_mercurio.png')\n", (12014, 12049), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((12248, 12291), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'IconeRetour.png')"], {}), "(icons_path_ + 'IconeRetour.png')\n", (12258, 12291), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((12673, 12714), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'allumeLed.png')"], {}), "(icons_path_ + 'allumeLed.png')\n", (12683, 12714), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((12802, 12848), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'aq_rapide_icon.png')"], {}), "(icons_path_ + 'aq_rapide_icon.png')\n", (12812, 12848), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((18565, 18608), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'IconeRetour.png')"], {}), "(icons_path_ + 'IconeRetour.png')\n", (18575, 18608), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((18885, 18930), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'logo_mercurio.png')"], {}), "(icons_path_ + 'logo_mercurio.png')\n", (18895, 18930), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((35237, 35261), 'glob.glob', 'glob', (["(rti_path + '*.JPG')"], {}), "(rti_path + '*.JPG')\n", (35241, 35261), False, 'from glob import glob\n'), ((38253, 38296), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'utilisateur.png')"], {}), "(icons_path_ + 'utilisateur.png')\n", (38263, 38296), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((38383, 38426), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'camera_info.png')"], {}), "(icons_path_ + 'camera_info.png')\n", (38393, 38426), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((38513, 38557), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'environement.png')"], {}), "(icons_path_ + 'environement.png')\n", (38523, 38557), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((38644, 38682), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'autres.png')"], {}), "(icons_path_ + 'autres.png')\n", (38654, 38682), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((38765, 38808), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'IconeRetour.png')"], {}), "(icons_path_ + 'IconeRetour.png')\n", (38775, 38808), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((41971, 42014), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'IconeRetour.png')"], {}), "(icons_path_ + 'IconeRetour.png')\n", (41981, 42014), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((42100, 42143), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'toutAllumer.png')"], {}), "(icons_path_ + 'toutAllumer.png')\n", (42110, 42143), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((42232, 42276), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'toutEteindre.png')"], {}), "(icons_path_ + 'toutEteindre.png')\n", (42242, 42276), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((42359, 42404), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'allumerledXon.png')"], {}), "(icons_path_ + 'allumerledXon.png')\n", (42369, 42404), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((42488, 42531), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'eteindreLed.png')"], {}), "(icons_path_ + 'eteindreLed.png')\n", (42498, 42531), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((48298, 48348), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'camera_deconnectee.png')"], {}), "(icons_path_ + 'camera_deconnectee.png')\n", (48308, 48348), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((48439, 48482), 'PIL.Image.open', 'Image.open', (["(icons_path_ + 'IconeRetour.png')"], {}), "(icons_path_ + 'IconeRetour.png')\n", (48449, 48482), False, 'from PIL import ImageTk, Image, ImageGrab\n'), ((50188, 50214), 'settings.image_data', 'settings.image_data', (['param'], {}), '(param)\n', (50207, 50214), False, 'import settings\n'), ((63214, 63249), 'settings.image_data', 'settings.image_data', (['"""whitebalance"""'], {}), "('whitebalance')\n", (63233, 63249), False, 'import settings\n'), ((63296, 63331), 'settings.image_data', 'settings.image_data', (['"""shutterspeed"""'], {}), "('shutterspeed')\n", (63315, 63331), False, 'import settings\n'), ((63371, 63405), 'settings.image_data', 'settings.image_data', (['"""cameramodel"""'], {}), "('cameramodel')\n", (63390, 63405), False, 'import settings\n'), ((33138, 33160), 'settings.killprocess', 'settings.killprocess', ([], {}), '()\n', (33158, 33160), False, 'import settings\n'), ((33543, 33591), 'subprocess.run', 'subprocess.run', (["['gphoto2', '--trigger-capture']"], {}), "(['gphoto2', '--trigger-capture'])\n", (33557, 33591), False, 'import os, shutil, subprocess, signal\n'), ((33617, 33632), 'time.sleep', 'time.sleep', (['(0.4)'], {}), '(0.4)\n', (33627, 33632), False, 'import smbus, time, datetime\n'), ((33856, 33878), 'settings.killprocess', 'settings.killprocess', ([], {}), '()\n', (33876, 33878), False, 'import settings\n'), ((34216, 34264), 'subprocess.run', 'subprocess.run', (["['gphoto2', '--trigger-capture']"], {}), "(['gphoto2', '--trigger-capture'])\n", (34230, 34264), False, 'import os, shutil, subprocess, signal\n'), ((34286, 34301), 'time.sleep', 'time.sleep', (['(0.4)'], {}), '(0.4)\n', (34296, 34301), False, 'import smbus, time, datetime\n'), ((53299, 53322), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (53320, 53322), False, 'import smbus, time, datetime\n'), ((53612, 53635), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (53633, 53635), False, 'import smbus, time, datetime\n'), ((63140, 63166), 'settings.image_data', 'settings.image_data', (['"""iso"""'], {}), "('iso')\n", (63159, 63166), False, 'import settings\n'), ((1710, 1733), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1731, 1733), False, 'import smbus, time, datetime\n'), ((49236, 49271), 'settings.image_data', 'settings.image_data', (['"""whitebalance"""'], {}), "('whitebalance')\n", (49255, 49271), False, 'import settings\n'), ((49326, 49361), 'settings.image_data', 'settings.image_data', (['"""shutterspeed"""'], {}), "('shutterspeed')\n", (49345, 49361), False, 'import settings\n'), ((60077, 60112), 'settings.image_data', 'settings.image_data', (['"""whitebalance"""'], {}), "('whitebalance')\n", (60096, 60112), False, 'import settings\n'), ((60167, 60202), 'settings.image_data', 'settings.image_data', (['"""shutterspeed"""'], {}), "('shutterspeed')\n", (60186, 60202), False, 'import settings\n'), ((60250, 60284), 'settings.image_data', 'settings.image_data', (['"""cameramodel"""'], {}), "('cameramodel')\n", (60269, 60284), False, 'import settings\n'), ((4237, 4260), 'settings.check_memory', 'settings.check_memory', ([], {}), '()\n', (4258, 4260), False, 'import settings\n'), ((49072, 49103), 'settings.image_data', 'settings.image_data', (['"""aperture"""'], {}), "('aperture')\n", (49091, 49103), False, 'import settings\n'), ((49154, 49180), 'settings.image_data', 'settings.image_data', (['"""iso"""'], {}), "('iso')\n", (49173, 49180), False, 'import settings\n'), ((59913, 59944), 'settings.image_data', 'settings.image_data', (['"""aperture"""'], {}), "('aperture')\n", (59932, 59944), False, 'import settings\n'), ((59995, 60021), 'settings.image_data', 'settings.image_data', (['"""iso"""'], {}), "('iso')\n", (60014, 60021), False, 'import settings\n')]
|
# using the requests library to access internet data
#import the requests library
import requests
import json
def main():
# Use requests to issue a standard HTTP GET request
url = "http://httpbin.org/json"
result = requests.get(url)
# Use the built-in JSON function to return parsed data
dataobj = result.json()
print(json.dumps(dataobj,indent=4))
# Access data in the python object
print(list(dataobj.keys()))
print(dataobj['slideshow']['title'])
print("There are {0} slides".format(len(dataobj['slideshow']['slides'])))
if __name__ == "__main__":
main()
|
[
"requests.get",
"json.dumps"
] |
[((230, 247), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (242, 247), False, 'import requests\n'), ((346, 375), 'json.dumps', 'json.dumps', (['dataobj'], {'indent': '(4)'}), '(dataobj, indent=4)\n', (356, 375), False, 'import json\n')]
|
#%%
import numpy as np
import sys
sys.path.append('..')
from utils.tester import Tester
import pickle
import os
import matplotlib.pyplot as plt
import math
import networkx as nx
import random
city_name = 'Phoenix'
save_file_name = '2021-04-23_14-02-29'
seed = 45
# city_name = 'Seattle'
# save_file_name = '2021-03-21_23-18-05'
# seed = 10
# city_name = 'Dallas'
# save_file_name = '2021-04-09_21-11-28'
fontsize = 20
legend_fontsize = 20
# %%
# Load results data
base_directory = os.getcwd()
base_directory = base_directory[0:base_directory.find('src')+3]
file_path = os.path.join(base_directory, 'optimization', 'save', save_file_name)
with open(file_path,'rb') as f:
tester = pickle.load(f)
if city_name == 'Phoenix':
data_folder_name = 'Phoenix'
if city_name == 'Seattle':
data_folder_name = 'IntercityFlow_Seattle'
if city_name == 'Dallas':
data_folder_name = 'Intercity_Dallas'
# Load city data
city_data_file_path = os.path.join(base_directory, '..', 'data', data_folder_name, 'data_processing_outputs', 'city_data.p')
with open(city_data_file_path,'rb') as f:
city_data = pickle.load(f)
city_list = list(city_data.keys())
num_cities = len(city_list)
num_city = tester.params['m']
num_time = tester.params['n']
num_entity = tester.params['num_entity']
phi_val = np.array(tester.results['phi_best'])
scale_frac = tester.params['scale_frac']
phi_average = np.zeros((num_city, num_city), dtype=np.float)
for i in range(num_city):
for j in range(num_city):
if not (i == j) and np.average(phi_val[:,i,j]) > 0.0:
phi_average[i,j] = np.average(phi_val[:, i, j])
for city_ind in range(num_city):
phi_average[city_ind,:] = phi_average[city_ind,:] * tester.problem_data['Ntot'][city_ind] * scale_frac
phi_average[:,:] = np.log(phi_average[:,:]+1e-3)
max_val = np.max(phi_average[:,:])
phi_average = phi_average / max_val
# print(phi_average)
# %%
edge_weight_list = []
# Visualize the resulting adjacency matrix
G = nx.DiGraph()
for i in range(num_cities):
G.add_node(city_list[i])
for j in range(num_cities):
if phi_average[i,j] > 0.0:
G.add_edge(city_list[i], city_list[j], weight=phi_average[i,j])
edge_weight_list.append(phi_average[i,j])
if city_name == 'Dallas':
city_data['Johnson']['y_loc'] = 32.385655
city_data['Johnson']['x_loc'] = -97.335191
city_data['Ellis']['y_loc'] = 32.362181
city_data['Ellis']['x_loc'] = -96.803901
city_data['Kaufman']['y_loc'] = 32.613997
city_data['Kaufman']['x_loc'] = -96.283543
city_data['Parker']['y_loc'] = 32.783855
city_data['Parker']['x_loc'] = -97.802077
city_data['Rockwall']['y_loc'] = 32.900920
city_data['Rockwall']['x_loc'] = -96.404271
city_data['Collin']['y_loc'] = 33.20671
city_data['Collin']['x_loc'] = -96.587485
city_data['Denton']['y_loc'] = 33.199884
city_data['Denton']['x_loc'] = -97.089478
city_data['Wise']['y_loc'] = 33.219515
city_data['Wise']['x_loc'] = -97.647529
city_data['Tarrant']['y_loc'] = 32.770195
city_data['Tarrant']['x_loc'] = -97.264026
city_data['Dallas']['y_loc'] = 32.77
city_data['Dallas']['x_loc'] = -96.79
pos = dict()
for i in range(num_cities):
city = city_list[i]
x_loc = city_data[city]['x_loc']
y_loc = city_data[city]['y_loc']
pos[city] = np.array([x_loc, y_loc])
edge_width_list = np.array(edge_weight_list)
edge_width_list = np.exp(edge_width_list)
edge_width_list = edge_width_list / np.max(edge_width_list)
edge_width_list = edge_width_list * 5
options = {
"node_color": "#A0CBE2",
"edge_color": edge_weight_list,
"node_size": tester.problem_data['Ntot'],
"width": edge_width_list,
"edge_cmap": plt.cm.Blues,
"with_labels": False,
"edge_vmin": 0.0,
# "edge_vmax": 100.0
}
print(phi_average[1,:])
print(edge_weight_list)
random.seed(seed)
np.random.seed(seed=seed)
pos = nx.spring_layout(G)
# pos = nx.spectral_layout(G)
#print(city_data['Dallas']['population'])
# %%
plt.figure(figsize=(20,10))
# nx.draw_networkx_nodes(G, pos)
nx.draw_networkx_labels(G, pos)
# nx.draw_networkx_edges(G_fully_connected, pos, edge_color='red')
nx.draw(G, pos, **options)
save_location = os.path.join(base_directory, 'plotting', city_name, 'saved_plots')
filename = os.path.join(save_location, '{}scale_cost_by_pop_phi_graph.png'.format(save_file_name))
plt.savefig(filename, bbox_inches='tight')
plt.show()
#plt.title('Adjacency Matrix with Scaled Demand Threshold {},\n Total Number of Edges: {}'.format(0.02, np.sum(adj_mat)), fontsize=15)
# %%
|
[
"numpy.random.seed",
"matplotlib.pyplot.figure",
"pickle.load",
"numpy.exp",
"networkx.draw_networkx_labels",
"os.path.join",
"sys.path.append",
"numpy.max",
"random.seed",
"matplotlib.pyplot.show",
"numpy.average",
"networkx.draw",
"networkx.DiGraph",
"numpy.log",
"os.getcwd",
"numpy.zeros",
"networkx.spring_layout",
"numpy.array",
"matplotlib.pyplot.savefig"
] |
[((34, 55), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (49, 55), False, 'import sys\n'), ((488, 499), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (497, 499), False, 'import os\n'), ((577, 645), 'os.path.join', 'os.path.join', (['base_directory', '"""optimization"""', '"""save"""', 'save_file_name'], {}), "(base_directory, 'optimization', 'save', save_file_name)\n", (589, 645), False, 'import os\n'), ((949, 1055), 'os.path.join', 'os.path.join', (['base_directory', '""".."""', '"""data"""', 'data_folder_name', '"""data_processing_outputs"""', '"""city_data.p"""'], {}), "(base_directory, '..', 'data', data_folder_name,\n 'data_processing_outputs', 'city_data.p')\n", (961, 1055), False, 'import os\n'), ((1300, 1336), 'numpy.array', 'np.array', (["tester.results['phi_best']"], {}), "(tester.results['phi_best'])\n", (1308, 1336), True, 'import numpy as np\n'), ((1393, 1439), 'numpy.zeros', 'np.zeros', (['(num_city, num_city)'], {'dtype': 'np.float'}), '((num_city, num_city), dtype=np.float)\n', (1401, 1439), True, 'import numpy as np\n'), ((1779, 1812), 'numpy.log', 'np.log', (['(phi_average[:, :] + 0.001)'], {}), '(phi_average[:, :] + 0.001)\n', (1785, 1812), True, 'import numpy as np\n'), ((1820, 1845), 'numpy.max', 'np.max', (['phi_average[:, :]'], {}), '(phi_average[:, :])\n', (1826, 1845), True, 'import numpy as np\n'), ((1978, 1990), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (1988, 1990), True, 'import networkx as nx\n'), ((3377, 3403), 'numpy.array', 'np.array', (['edge_weight_list'], {}), '(edge_weight_list)\n', (3385, 3403), True, 'import numpy as np\n'), ((3422, 3445), 'numpy.exp', 'np.exp', (['edge_width_list'], {}), '(edge_width_list)\n', (3428, 3445), True, 'import numpy as np\n'), ((3855, 3872), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (3866, 3872), False, 'import random\n'), ((3873, 3898), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'seed'}), '(seed=seed)\n', (3887, 3898), True, 'import numpy as np\n'), ((3905, 3924), 'networkx.spring_layout', 'nx.spring_layout', (['G'], {}), '(G)\n', (3921, 3924), True, 'import networkx as nx\n'), ((4005, 4033), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (4015, 4033), True, 'import matplotlib.pyplot as plt\n'), ((4066, 4097), 'networkx.draw_networkx_labels', 'nx.draw_networkx_labels', (['G', 'pos'], {}), '(G, pos)\n', (4089, 4097), True, 'import networkx as nx\n'), ((4165, 4191), 'networkx.draw', 'nx.draw', (['G', 'pos'], {}), '(G, pos, **options)\n', (4172, 4191), True, 'import networkx as nx\n'), ((4209, 4275), 'os.path.join', 'os.path.join', (['base_directory', '"""plotting"""', 'city_name', '"""saved_plots"""'], {}), "(base_directory, 'plotting', city_name, 'saved_plots')\n", (4221, 4275), False, 'import os\n'), ((4375, 4417), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {'bbox_inches': '"""tight"""'}), "(filename, bbox_inches='tight')\n", (4386, 4417), True, 'import matplotlib.pyplot as plt\n'), ((4418, 4428), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4426, 4428), True, 'import matplotlib.pyplot as plt\n'), ((691, 705), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (702, 705), False, 'import pickle\n'), ((1110, 1124), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1121, 1124), False, 'import pickle\n'), ((3333, 3357), 'numpy.array', 'np.array', (['[x_loc, y_loc]'], {}), '([x_loc, y_loc])\n', (3341, 3357), True, 'import numpy as np\n'), ((3482, 3505), 'numpy.max', 'np.max', (['edge_width_list'], {}), '(edge_width_list)\n', (3488, 3505), True, 'import numpy as np\n'), ((1589, 1617), 'numpy.average', 'np.average', (['phi_val[:, i, j]'], {}), '(phi_val[:, i, j])\n', (1599, 1617), True, 'import numpy as np\n'), ((1524, 1552), 'numpy.average', 'np.average', (['phi_val[:, i, j]'], {}), '(phi_val[:, i, j])\n', (1534, 1552), True, 'import numpy as np\n')]
|
import twint
c = twint.Config()
c.Since = "2019-02-01"
c.Until = "2019-03-14"
c.Search = "(mujer OR mujeres OR niña OR niñas OR chica OR chicas) AND \
((ingeniera OR científica OR arquitecta OR programadora OR bióloga) OR \
(ingeniería OR ciencia OR stem)) OR \
(tecnología OR software OR metalurgía OR minería OR agronomía OR automotriz)"
c.Lang = "es"
c.Store_csv = True
c.Output = "./Query3.2_2019.csv"
twint.run.Search(c)
|
[
"twint.run.Search",
"twint.Config"
] |
[((18, 32), 'twint.Config', 'twint.Config', ([], {}), '()\n', (30, 32), False, 'import twint\n'), ((408, 427), 'twint.run.Search', 'twint.run.Search', (['c'], {}), '(c)\n', (424, 427), False, 'import twint\n')]
|
#!/usr/bin/env python3
import sys
import os
MCELL_PATH = os.environ.get('MCELL_PATH', '')
if MCELL_PATH:
sys.path.append(os.path.join(MCELL_PATH, 'lib'))
else:
print("Error: variable MCELL_PATH that is used to find the mcell library was not set.")
sys.exit(1)
import mcell as m
if len(sys.argv) == 3 and sys.argv[1] == '-seed':
# overwrite value SEED defined in module parameters
SEED = int(sys.argv[2])
else:
SEED = 1
if len(sys.argv) == 5 and sys.argv[3] == '-bngl':
# overwrite value SEED defined in module parameters
bngl_file = sys.argv[4]
else:
bngl_file = 'test.bngl'
params = m.bngl_utils.load_bngl_parameters(bngl_file)
ITERATIONS = int(params['ITERATIONS'])
if 'MCELL_TIME_STEP' in params:
TIME_STEP = float(params['MCELL_TIME_STEP'])
else:
TIME_STEP = 1e-6
DUMP = True
EXPORT_DATA_MODEL = True
# ---- load bngl file ----
model = m.Model()
if 'MCELL_DEFAULT_COMPARTMENT_VOLUME' in params:
MCELL_DEFAULT_COMPARTMENT_VOLUME = params['MCELL_DEFAULT_COMPARTMENT_VOLUME']
MCELL_DEFAULT_COMPARTMENT_EDGE_LENGTH = MCELL_DEFAULT_COMPARTMENT_VOLUME**(1.0/3.0)
default_compartment = m.geometry_utils.create_box(
'default_compartment', MCELL_DEFAULT_COMPARTMENT_EDGE_LENGTH
)
model.add_geometry_object(default_compartment)
else:
MCELL_DEFAULT_COMPARTMENT_EDGE_LENGTH = 1
default_compartment = None
viz_output = m.VizOutput(
mode = m.VizMode.ASCII,
output_files_prefix = './viz_data/seed_' + str(SEED).zfill(5) + '/Scene',
every_n_timesteps = 1
)
model.add_viz_output(viz_output)
model.load_bngl(bngl_file, './react_data/seed_' + str(SEED).zfill(5) + '/', default_compartment)
cp = model.find_geometry_object('CP')
transp = m.SurfaceClass(
name = 'transp',
type = m.SurfacePropertyType.TRANSPARENT,
affected_complex_pattern = m.AllMolecules
)
model.add_surface_class(transp)
cp.surface_class = transp
# ---- configuration ----
model.config.time_step = TIME_STEP
model.config.seed = SEED
model.config.total_iterations = ITERATIONS
model.config.partition_dimension = MCELL_DEFAULT_COMPARTMENT_EDGE_LENGTH
model.config.subpartition_dimension = MCELL_DEFAULT_COMPARTMENT_EDGE_LENGTH
model.initialize()
if DUMP:
model.dump_internal_state()
if EXPORT_DATA_MODEL and model.viz_outputs:
model.export_data_model()
model.run_iterations(ITERATIONS)
model.end_simulation()
|
[
"os.path.join",
"mcell.bngl_utils.load_bngl_parameters",
"mcell.Model",
"os.environ.get",
"mcell.SurfaceClass",
"mcell.geometry_utils.create_box",
"sys.exit"
] |
[((59, 91), 'os.environ.get', 'os.environ.get', (['"""MCELL_PATH"""', '""""""'], {}), "('MCELL_PATH', '')\n", (73, 91), False, 'import os\n'), ((627, 671), 'mcell.bngl_utils.load_bngl_parameters', 'm.bngl_utils.load_bngl_parameters', (['bngl_file'], {}), '(bngl_file)\n', (660, 671), True, 'import mcell as m\n'), ((901, 910), 'mcell.Model', 'm.Model', ([], {}), '()\n', (908, 910), True, 'import mcell as m\n'), ((1742, 1856), 'mcell.SurfaceClass', 'm.SurfaceClass', ([], {'name': '"""transp"""', 'type': 'm.SurfacePropertyType.TRANSPARENT', 'affected_complex_pattern': 'm.AllMolecules'}), "(name='transp', type=m.SurfacePropertyType.TRANSPARENT,\n affected_complex_pattern=m.AllMolecules)\n", (1756, 1856), True, 'import mcell as m\n'), ((262, 273), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (270, 273), False, 'import sys\n'), ((1158, 1251), 'mcell.geometry_utils.create_box', 'm.geometry_utils.create_box', (['"""default_compartment"""', 'MCELL_DEFAULT_COMPARTMENT_EDGE_LENGTH'], {}), "('default_compartment',\n MCELL_DEFAULT_COMPARTMENT_EDGE_LENGTH)\n", (1185, 1251), True, 'import mcell as m\n'), ((127, 158), 'os.path.join', 'os.path.join', (['MCELL_PATH', '"""lib"""'], {}), "(MCELL_PATH, 'lib')\n", (139, 158), False, 'import os\n')]
|
#!/usr/bin/env python
import os
import re
from glob import glob
from os.path import basename, splitext
from setuptools import find_packages, setup # type: ignore
NAME = "contaxy"
MAIN_PACKAGE = NAME # Change if main package != NAME
DESCRIPTION = "Python package template."
URL = "https://github.com/ml-tooling/contaxy"
EMAIL = "<EMAIL>"
AUTHOR = "ML Tool<NAME>"
LICENSE = "MIT"
REQUIRES_PYTHON = ">=3.8"
VERSION = None # Only set version if you like to overwrite the version in _about.py
PWD = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
try:
with open(os.path.join(PWD, "README.md"), encoding="utf-8") as f:
long_description = f.read()
except FileNotFoundError:
long_description = ""
# Extract the version from the _about.py module.
if not VERSION:
try:
with open(os.path.join(PWD, "src", MAIN_PACKAGE, "_about.py")) as f: # type: ignore
VERSION = re.findall(r"__version__\s*=\s*\"(.+)\"", f.read())[0]
except FileNotFoundError:
VERSION = "0.0.0"
# Where the magic happens:
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=long_description,
long_description_content_type="text/markdown",
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
license=LICENSE,
packages=find_packages(where="src", exclude=("tests", "test", "examples", "docs")),
package_dir={"": "src"} if os.path.exists("src") else {},
py_modules=[splitext(basename(path))[0] for path in glob("src/*.py")],
zip_safe=False,
install_requires=[
"typer", # TODO: remove typer?
"pydantic[dotenv,email]",
"fastapi",
"requests",
# Better logging
"loguru",
# Used for multipart stream parsing in file manager
"streaming_form_data",
"filetype",
"addict",
],
# deprecated: dependency_links=dependency_links,
extras_require={
"server": [
# Add all the runtime requirements here:
"kubernetes",
"docker",
# TODO: Dev only - timing
"fastapi-utils",
# Required by fastapi.security OAuth2PasswordBearer & fastapi.UploadFile for example
"python-multipart",
"psutil",
"uvicorn",
"sqlalchemy>=1.4.0",
# Postgres Driver
"psycopg2",
# Generates concise, unambiguous, URL-safe UUIDs.
"shortuuid",
# Create slugs from unicode strings
"python-slugify",
# Used in MinioFileManager
"minio",
# Used in AzureBlobFileManager
"azure-storage-blob",
# Used for jwt handling
"python-jose[cryptography]",
# Used for password hashing
"passlib[bcrypt]",
# TODO: FOR in-memory dict db: Merge dictionaries via json merge patch
"json-merge-patch",
# TODO: FOR in-memory dict db: Merge dictionaries via json merge patch
"jsonpath-ng",
# TODO: Improve
"jinja2",
# Used for OIDC handling
"requests_oauthlib",
# Create fake data for testing
"faker",
],
"dev": [
"setuptools",
"wheel",
"twine",
"flake8",
"pytest",
"pytest-mock",
"pytest-cov",
"mypy",
"types-python-slugify",
"types-requests",
"types-cachetools",
"black",
"pydocstyle",
"isort",
"lazydocs",
"locust",
# Test profiling
"pyinstrument",
# Export profiling information about the tests
"pytest-profiling",
# For better print debugging via debug
"devtools[pygments]",
# For Jupyter Kernel support
"ipykernel",
# TODO: Move to required when necessary
"universal-build",
"requests",
],
},
include_package_data=True,
package_data={
# If there are data files included in your packages that need to be
# 'sample': ['package_data.dat'],
"contaxy.api.endpoints": ["templates/*"]
},
classifiers=[
# TODO: Update based on https://pypi.org/classifiers/
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: PyPy",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering",
"Topic :: Utilities",
],
project_urls={
"Changelog": URL + "/releases",
"Issue Tracker": URL + "/issues",
"Documentation": URL + "#documentation",
"Source": URL,
},
entry_points={"console_scripts": [f"{NAME}={MAIN_PACKAGE}._cli:cli"]},
keywords=[
# eg: 'keyword1', 'keyword2', 'keyword3',
],
)
|
[
"os.path.basename",
"os.path.dirname",
"os.path.exists",
"glob.glob",
"os.path.join",
"setuptools.find_packages"
] |
[((517, 542), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (532, 542), False, 'import os\n'), ((1382, 1455), 'setuptools.find_packages', 'find_packages', ([], {'where': '"""src"""', 'exclude': "('tests', 'test', 'examples', 'docs')"}), "(where='src', exclude=('tests', 'test', 'examples', 'docs'))\n", (1395, 1455), False, 'from setuptools import find_packages, setup\n'), ((620, 650), 'os.path.join', 'os.path.join', (['PWD', '"""README.md"""'], {}), "(PWD, 'README.md')\n", (632, 650), False, 'import os\n'), ((1488, 1509), 'os.path.exists', 'os.path.exists', (['"""src"""'], {}), "('src')\n", (1502, 1509), False, 'import os\n'), ((857, 908), 'os.path.join', 'os.path.join', (['PWD', '"""src"""', 'MAIN_PACKAGE', '"""_about.py"""'], {}), "(PWD, 'src', MAIN_PACKAGE, '_about.py')\n", (869, 908), False, 'import os\n'), ((1575, 1591), 'glob.glob', 'glob', (['"""src/*.py"""'], {}), "('src/*.py')\n", (1579, 1591), False, 'from glob import glob\n'), ((1544, 1558), 'os.path.basename', 'basename', (['path'], {}), '(path)\n', (1552, 1558), False, 'from os.path import basename, splitext\n')]
|
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
"""
This module exposes an Optimizer wrapper to get regular tf.train.Optimizers to
allow for selecting the slots FP precision independently of the variable type.
Currently only supports Adam
"""
import os
import tensorflow.compat.v1 as tf
from tensorflow.python.ops import math_ops
from tensorflow.python.training.optimizer import _var_key
from tensorflow.python.training import slot_creator
from tensorflow.python.training.adam import AdamOptimizer
from typing import Type
from logging import getLogger
tf.disable_v2_behavior()
tf.disable_eager_execution()
logger = getLogger(os.path.basename(__file__))
def SelectableSlotFPFormatOptimizer(cls: Type[tf.train.Optimizer]) -> Type[tf.train.Optimizer]:
if not issubclass(cls, AdamOptimizer):
raise ValueError(f'Class {cls} does not inherit from tf.python.training.adam.AdamOptimizer')
class Wrapped(cls):
def __init__(self, slots_dtype, force_fp32_weight_update=True, use_nesterov=False, *args, **kwargs):
self.slots_dtype = tf.as_dtype(slots_dtype)
self.use_nesterov = use_nesterov
self.force_fp32_weight_update = force_fp32_weight_update
super(Wrapped, self).__init__(*args, **kwargs)
def _zeros_slot(self, var, slot_name, op_name):
"""Find or create a slot initialized with 0.0.
This is effectively a copy of the original TF optimizer method
excepts this one allows to pass a dtype to `create_zeros_slot`.
Args:
var: A `Variable` object.
slot_name: Name for the slot.
op_name: Name to use when scoping the Variable that
needs to be created for the slot.
Returns:
A `Variable` object.
"""
named_slots = self._slot_dict(slot_name)
if _var_key(var) not in named_slots:
new_slot_variable = slot_creator.create_zeros_slot(var, op_name,
dtype=self.slots_dtype)
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=new_slot_variable)
named_slots[_var_key(var)] = new_slot_variable
return tf.cast(named_slots[_var_key(var)], var.dtype)
def _apply_weight_update(self, grad, var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, use_nesterov):
if self.force_fp32_weight_update:
# Cast to fp32 for extra precision
weight_update_dtype = tf.float32
else:
weight_update_dtype = var.dtype
# cast all variables to the same desired dtype for the update
m_c = tf.convert_to_tensor(tf.cast(m, weight_update_dtype))
v_c = tf.convert_to_tensor(tf.cast(v, weight_update_dtype))
var_c = tf.cast(var, weight_update_dtype)
lr_c = tf.cast(lr, weight_update_dtype)
beta1_power_c = tf.cast(beta1_power, weight_update_dtype)
beta2_power_c = tf.cast(beta2_power, weight_update_dtype)
beta1_c = tf.cast(beta1, weight_update_dtype)
beta2_c = tf.cast(beta2, weight_update_dtype)
epsilon_c = tf.cast(epsilon, weight_update_dtype)
grad_c = tf.cast(grad, weight_update_dtype)
# correct for the bias of the first and second order moments
alpha = lr_c * math_ops.sqrt(1 - beta2_power_c) / (1 - beta1_power_c)
# update the first order moment
m_t = beta1_c * m_c + (1.0 - beta1_c) * grad_c
# update the second order moment
v_t = beta2_c * v_c + (1.0 - beta2_c) * grad_c * grad_c
# store the moments in the right dtype
assign_m = tf.assign(m, tf.cast(m_t, self.slots_dtype))
assign_v = tf.assign(v, tf.cast(v_t, self.slots_dtype))
# update the variable
with tf.control_dependencies([assign_m, assign_v]):
if use_nesterov:
return tf.cast(var_c - ((grad_c * (1.0 - beta1_c) + beta1_c * m_t) * alpha) / (math_ops.sqrt(v_t) + epsilon_c), var.dtype)
else:
return tf.cast(var_c - (m_t * alpha) / (math_ops.sqrt(v_t) + epsilon_c), var.dtype)
def _resource_apply_dense(self, grad, var):
m = self.get_slot(var, "m")
v = self.get_slot(var, "v")
beta1_power, beta2_power = self._get_beta_accumulators()
return var.assign(
self._apply_weight_update(
grad=grad,
var=var,
m=m,
v=v,
beta1_power=beta1_power,
beta2_power=beta2_power,
lr=self._lr_t,
beta1=self._beta1_t,
beta2=self._beta2_t,
epsilon=self._epsilon_t,
use_nesterov=self.use_nesterov))
return Wrapped
|
[
"tensorflow.compat.v1.cast",
"tensorflow.python.training.optimizer._var_key",
"os.path.basename",
"tensorflow.python.training.slot_creator.create_zeros_slot",
"tensorflow.compat.v1.control_dependencies",
"tensorflow.python.ops.math_ops.sqrt",
"tensorflow.compat.v1.disable_eager_execution",
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.compat.v1.as_dtype"
] |
[((565, 589), 'tensorflow.compat.v1.disable_v2_behavior', 'tf.disable_v2_behavior', ([], {}), '()\n', (587, 589), True, 'import tensorflow.compat.v1 as tf\n'), ((590, 618), 'tensorflow.compat.v1.disable_eager_execution', 'tf.disable_eager_execution', ([], {}), '()\n', (616, 618), True, 'import tensorflow.compat.v1 as tf\n'), ((639, 665), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (655, 665), False, 'import os\n'), ((1074, 1098), 'tensorflow.compat.v1.as_dtype', 'tf.as_dtype', (['slots_dtype'], {}), '(slots_dtype)\n', (1085, 1098), True, 'import tensorflow.compat.v1 as tf\n'), ((2962, 2995), 'tensorflow.compat.v1.cast', 'tf.cast', (['var', 'weight_update_dtype'], {}), '(var, weight_update_dtype)\n', (2969, 2995), True, 'import tensorflow.compat.v1 as tf\n'), ((3015, 3047), 'tensorflow.compat.v1.cast', 'tf.cast', (['lr', 'weight_update_dtype'], {}), '(lr, weight_update_dtype)\n', (3022, 3047), True, 'import tensorflow.compat.v1 as tf\n'), ((3076, 3117), 'tensorflow.compat.v1.cast', 'tf.cast', (['beta1_power', 'weight_update_dtype'], {}), '(beta1_power, weight_update_dtype)\n', (3083, 3117), True, 'import tensorflow.compat.v1 as tf\n'), ((3146, 3187), 'tensorflow.compat.v1.cast', 'tf.cast', (['beta2_power', 'weight_update_dtype'], {}), '(beta2_power, weight_update_dtype)\n', (3153, 3187), True, 'import tensorflow.compat.v1 as tf\n'), ((3210, 3245), 'tensorflow.compat.v1.cast', 'tf.cast', (['beta1', 'weight_update_dtype'], {}), '(beta1, weight_update_dtype)\n', (3217, 3245), True, 'import tensorflow.compat.v1 as tf\n'), ((3268, 3303), 'tensorflow.compat.v1.cast', 'tf.cast', (['beta2', 'weight_update_dtype'], {}), '(beta2, weight_update_dtype)\n', (3275, 3303), True, 'import tensorflow.compat.v1 as tf\n'), ((3328, 3365), 'tensorflow.compat.v1.cast', 'tf.cast', (['epsilon', 'weight_update_dtype'], {}), '(epsilon, weight_update_dtype)\n', (3335, 3365), True, 'import tensorflow.compat.v1 as tf\n'), ((3387, 3421), 'tensorflow.compat.v1.cast', 'tf.cast', (['grad', 'weight_update_dtype'], {}), '(grad, weight_update_dtype)\n', (3394, 3421), True, 'import tensorflow.compat.v1 as tf\n'), ((1897, 1910), 'tensorflow.python.training.optimizer._var_key', '_var_key', (['var'], {}), '(var)\n', (1905, 1910), False, 'from tensorflow.python.training.optimizer import _var_key\n'), ((1967, 2035), 'tensorflow.python.training.slot_creator.create_zeros_slot', 'slot_creator.create_zeros_slot', (['var', 'op_name'], {'dtype': 'self.slots_dtype'}), '(var, op_name, dtype=self.slots_dtype)\n', (1997, 2035), False, 'from tensorflow.python.training import slot_creator\n'), ((2837, 2868), 'tensorflow.compat.v1.cast', 'tf.cast', (['m', 'weight_update_dtype'], {}), '(m, weight_update_dtype)\n', (2844, 2868), True, 'import tensorflow.compat.v1 as tf\n'), ((2909, 2940), 'tensorflow.compat.v1.cast', 'tf.cast', (['v', 'weight_update_dtype'], {}), '(v, weight_update_dtype)\n', (2916, 2940), True, 'import tensorflow.compat.v1 as tf\n'), ((3882, 3912), 'tensorflow.compat.v1.cast', 'tf.cast', (['m_t', 'self.slots_dtype'], {}), '(m_t, self.slots_dtype)\n', (3889, 3912), True, 'import tensorflow.compat.v1 as tf\n'), ((3950, 3980), 'tensorflow.compat.v1.cast', 'tf.cast', (['v_t', 'self.slots_dtype'], {}), '(v_t, self.slots_dtype)\n', (3957, 3980), True, 'import tensorflow.compat.v1 as tf\n'), ((4034, 4079), 'tensorflow.compat.v1.control_dependencies', 'tf.control_dependencies', (['[assign_m, assign_v]'], {}), '([assign_m, assign_v])\n', (4057, 4079), True, 'import tensorflow.compat.v1 as tf\n'), ((2284, 2297), 'tensorflow.python.training.optimizer._var_key', '_var_key', (['var'], {}), '(var)\n', (2292, 2297), False, 'from tensorflow.python.training.optimizer import _var_key\n'), ((2359, 2372), 'tensorflow.python.training.optimizer._var_key', '_var_key', (['var'], {}), '(var)\n', (2367, 2372), False, 'from tensorflow.python.training.optimizer import _var_key\n'), ((3523, 3555), 'tensorflow.python.ops.math_ops.sqrt', 'math_ops.sqrt', (['(1 - beta2_power_c)'], {}), '(1 - beta2_power_c)\n', (3536, 3555), False, 'from tensorflow.python.ops import math_ops\n'), ((4213, 4231), 'tensorflow.python.ops.math_ops.sqrt', 'math_ops.sqrt', (['v_t'], {}), '(v_t)\n', (4226, 4231), False, 'from tensorflow.python.ops import math_ops\n'), ((4339, 4357), 'tensorflow.python.ops.math_ops.sqrt', 'math_ops.sqrt', (['v_t'], {}), '(v_t)\n', (4352, 4357), False, 'from tensorflow.python.ops import math_ops\n')]
|
from contextlib import contextmanager
from pathlib import Path
import os
from typing import Callable, NamedTuple, Type
@contextmanager
def work_dir(dir_path: Path):
"""
Path('.') will change.
"""
org_dir_path = Path(os.getcwd())
os.chdir(dir_path)
try:
yield
finally:
os.chdir(org_dir_path)
@contextmanager
def after_end(cb_fun: Callable):
"""
with after_end(cb_fun) as cb_fun:
...
with after_end(cb_fun=lambda: shutil.rmtree(temp_dir)) as _: # make sure the temp_dir will remove after finished.
...
with after_end(cb_fun=lambda: [os.remove(file) for file in [_ for _ in work_dir.glob('*.*') if _.suffix[1:] in ('html',)]]) as _
...
"""
try:
yield cb_fun
finally:
cb_fun()
def init_namedtuple(init_func_name):
"""
Run the job when the class is born.
USAGE::
@init_namedtuple('init_xxx')
class MyClass(NamedTuple):
def init_xxx(self):
...
"""
def wrap(class_obj: Type[NamedTuple]):
def new_instance(*args, **kwargs):
instance_obj = class_obj(*args, **kwargs)
init_func = getattr(instance_obj, init_func_name)
if init_func:
init_func()
return instance_obj
return new_instance
return wrap
|
[
"os.getcwd",
"os.chdir"
] |
[((251, 269), 'os.chdir', 'os.chdir', (['dir_path'], {}), '(dir_path)\n', (259, 269), False, 'import os\n'), ((234, 245), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (243, 245), False, 'import os\n'), ((314, 336), 'os.chdir', 'os.chdir', (['org_dir_path'], {}), '(org_dir_path)\n', (322, 336), False, 'import os\n')]
|
# coding=utf-8
#python 3getdataEveryGroup.py ./data/data1234.xlsx ./data/data5.xlsx ./data_11_4/
from langconv import Converter
import pandas as pd
import csv
import math
import re
import argparse
def rmSymbol(sent):
return re.sub("|/\n", "", sent)
'''
input : 4個同組的question
output : [[][]
[][]
[][]] 6個question pair
'''
class Combination:
def combine(self, text, n, k):
res = []
self.backtrack(text, n, k, res, [], 1)
return res
def backtrack(self, text, n, k, res, path, index):
if len(path) == k:
res.append(path)
return
for i in range(index, n + 1):
self.backtrack(text, n, k, res, path + [text[i - 1]], i + 1)
if __name__ == '__main__':
#give number of negative sample 1 : 1000 => 6 * 1000 * 999 = 5994000
#取1/3 = 1998000
#取1/3 = 666000 這樣是6000 : 666000 = 1 : 100
numOfNegSample = 666000
parser = argparse.ArgumentParser()
parser.add_argument("input_file")
parser.add_argument("input_file2")
parser.add_argument("output_directory")
args = parser.parse_args()
data = pd.read_excel(args.input_file, sheet_name='Sheet1', header=0)
data2 = pd.read_excel(args.input_file2, sheet_name='Sheet1', header=0)
n = len(data)
m = len(data2)
#n = 4000
#5% 會生成neg數量:60,0000 == (4000 * 0.05 = 200) * 3 * 1000
#2% 會生成neg數量:24,0000 == (4000 * 0.02 = 80) * 3 * 1000. 但dev為 * 4 * 1000 = 32000
#0.2% 會生成neg數量:2,4000 == (4000 * 0.002 = 8) * 3 * 1000
#0.2% 會生成neg數量:1,2000
#0.00025 總共生成4000(每一個問題類一個)
perc = min(3996, math.floor(n * 0.001))
print("negative sample number %d" % perc)
allGroup = [] #for negative sample
with open(args.output_directory + '/train.csv','w', newline='', encoding="utf8") as tCSV, \
open(args.output_directory + '/dev.csv','w', newline='', encoding="utf8") as dCSV:
tCSVW = csv.writer(tCSV, lineterminator='\n')
dCSVW = csv.writer(dCSV, lineterminator='\n')
for i in range(0, n, 4):
curGroup = []
for j in range(4):
index = i + j
curGroup.append(data['測試題'][index])
allGroup.append(curGroup)
#create QuestionPair
combination = Combination()
curQuestionPairs = combination.combine(curGroup, 3, 2)
#add postitive Question pair to .csv
for index, QuestionPair in enumerate(curQuestionPairs):
tCSVW.writerow(
[1,
rmSymbol(QuestionPair[0]),
rmSymbol(QuestionPair[1])])
#add postitive Question pair to .dev
for k in range(0, 3, 1):
dCSVW.writerow(
[1, rmSymbol(curGroup[3]),
rmSymbol(curGroup[k])])
#At last, add negative Question pair to .csv
if i == n - 4:
for firstIndex in range(len(allGroup)):
otherData = data[
data['正確標準問題'] != data['正確標準問題'][firstIndex * 4]]
otherQuestion = otherData['測試題']
for firstEle in range(3):
otherQuestion = otherQuestion.sample(n=perc)
for negativeQuestion in otherQuestion.values:
tCSVW.writerow([
0,
rmSymbol(allGroup[firstIndex][firstEle]),
rmSymbol(negativeQuestion)
])
for firstEleForDev in range(4):
otherQuestionForDev = otherQuestion.sample(n=perc)
for negativeQuestionForDev in otherQuestionForDev.values:
dCSVW.writerow([
0,
rmSymbol(allGroup[firstIndex][firstEleForDev]),
rmSymbol(negativeQuestionForDev)
])
##write to test.csv
# s = 0
# with open(args.output_directory + '/test.csv',
# 'w',
# newline='',
# encoding="utf8") as testCSV:
# testCSVW = csv.writer(testCSV, lineterminator='\n')
# for i in range(m):
# for j in range(n):
# testCSVW.writerow(
# [0, rmSymbol(data2['測試題'][i]),
# rmSymbol(data['測試題'][j])])
##for 查看
# s = s + 1
# if s > 4002:
# break
# if s > 4002:
# break
|
[
"csv.writer",
"argparse.ArgumentParser",
"math.floor",
"pandas.read_excel",
"re.sub"
] |
[((232, 256), 're.sub', 're.sub', (['"""|/\n"""', '""""""', 'sent'], {}), "('|/\\n', '', sent)\n", (238, 256), False, 'import re\n'), ((946, 971), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (969, 971), False, 'import argparse\n'), ((1136, 1197), 'pandas.read_excel', 'pd.read_excel', (['args.input_file'], {'sheet_name': '"""Sheet1"""', 'header': '(0)'}), "(args.input_file, sheet_name='Sheet1', header=0)\n", (1149, 1197), True, 'import pandas as pd\n'), ((1210, 1272), 'pandas.read_excel', 'pd.read_excel', (['args.input_file2'], {'sheet_name': '"""Sheet1"""', 'header': '(0)'}), "(args.input_file2, sheet_name='Sheet1', header=0)\n", (1223, 1272), True, 'import pandas as pd\n'), ((1607, 1628), 'math.floor', 'math.floor', (['(n * 0.001)'], {}), '(n * 0.001)\n', (1617, 1628), False, 'import math\n'), ((1922, 1959), 'csv.writer', 'csv.writer', (['tCSV'], {'lineterminator': '"""\n"""'}), "(tCSV, lineterminator='\\n')\n", (1932, 1959), False, 'import csv\n'), ((1976, 2013), 'csv.writer', 'csv.writer', (['dCSV'], {'lineterminator': '"""\n"""'}), "(dCSV, lineterminator='\\n')\n", (1986, 2013), False, 'import csv\n')]
|
# -*- coding: utf-8 -*-
from flask import Flask, request, jsonify, render_template, json, redirect, url_for
from flask_cors import CORS
from pymongo import MongoClient # 몽고디비
import requests # 서버 요청 패키지
import os
from pprint import pprint
import hashlib
import jwt
import datetime
from urllib.parse import parse_qsl
KAKAO_REDIRECT_URI = 'https://www.mysmallmeal.shop/redirect'
application = Flask(__name__)
application.config['TEMPLATES_AUTO_RELOAD'] = True
cors = CORS(application, resources={r"/*": {"origins": "*"}})
if application.env == 'development':
os.popen('mongod')
KAKAO_REDIRECT_URI = 'http://localhost:5000/redirect'
# 배포 전에 원격 db로 교체!
client = MongoClient(os.environ.get("DB_PATH"))
os.environ['JWT_KEY'] = 'JARYOGOOJO'
SECRET_KEY = os.environ.get("JWT_KEY")
client_id = 'b702be3ada9cbd8f018e7545d0eb4a8d'
db = client.dbGoojo
restaurant_col = db.restaurant
bookmarked_col = db.bookmark
users = db.users
members = db.members
print(client.address)
# sort_list = 기본 정렬(랭킹순), 별점 순, 리뷰 수, 최소 주문 금액순, 거리 순, 배달 보증 시간순
sort_list = ["rank", "review_avg", "review_count", "min_order_value", "distance"]
order = sort_list[0]
headers = {'accept': 'application/json', 'accept-encoding': 'gzip, deflate, br',
'accept-language': 'ko-KR,ko;q=0.9,en-US;q=0.8,en;q=0.7',
'content-type': 'application/x-www-form-urlencoded',
'referer': 'https://www.yogiyo.co.kr/mobile/',
'sec-ch-ua': '"Chromium";v="94", "Google Chrome";v="94", ";Not A Brand";v="99"',
'sec-ch-ua-mobile': '?0', 'sec-ch-ua-platform': '"Windows"', 'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors', 'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/94.0.4606.71 Safari/537.36',
'x-apikey': 'iphoneap', 'x-apisecret': X_API_SECRET}
@application.route('/')
def hello_world(): # put application's code here
return render_template("index.html")
@application.route('/login')
def login():
msg = request.args.get("msg")
return render_template('login.html', ID=client_id, URI=KAKAO_REDIRECT_URI, msg=msg)
@application.route('/register')
def register():
return render_template('register.html')
@application.route('/kakao_login')
def kakao_login():
return render_template('kakao_login.html')
@application.route('/api/login', methods=['POST'])
def api_login():
request.form = json.loads(request.data)
pprint(request.form)
email_receive = request.form['email']
password = request.form['pw']
# 회원가입 때와 같은 방법으로 pw를 암호화합니다.
hashed_pw = hashlib.sha256(password.encode('utf-8')).hexdigest()
# id, 암호화된 pw 을 가지고 해당 유저를 찾습니다.
result = members.find_one({'email': email_receive, 'pw': hashed_pw}, {"_id": False})
# 찾으면 JWT 토큰을 만들어 발급합니다.
if result:
pprint(result)
# JWT 토큰에는, payload 와 시크릿키가 필요합니다.
# 시크릿키가 있어야 토큰을 디코딩(=풀기) 해서 payload 값을 볼 수 있습니다.
# 아래에선 id와 exp 를 담았습니다. 즉, JWT 토큰을 풀면 유저 ID 값을 알 수 있습니다.
# exp 에는 만료시간을 넣어줍니다. 만료시간이 지나면, 시크릿키로 토큰을 풀 때 만료되었다고 에러가 납니다.
nickname_receive = result['nick']
payload = {
'email': email_receive,
'nick': nickname_receive,
'exp': datetime.datetime.utcnow() + datetime.timedelta(days=3)
}
token = jwt.encode(payload=payload, key=SECRET_KEY, algorithm='HS256')
pprint(payload)
# token 을 줍니다.
return jsonify({'result': 'success', 'token': token})
# 찾지 못하면
else:
return jsonify({'result': 'fail', 'msg': '아이디/비밀번호가 일치하지 않습니다.'})
@application.route('/api/register', methods=['POST'])
def api_register():
request.form = json.loads(request.data)
email_receive = request.form['email']
password = request.form['pw']
nickname = request.form['nickname']
uuid = request.form['uuid']
pprint(request.form)
print('api_register uuid', uuid)
hashed_pw = hashlib.sha256(password.encode('utf-8')).hexdigest()
user_exists = bool(members.find_one({"email": email_receive}))
if user_exists:
return jsonify({'result': 'fail', 'msg': '같은 이메일의 유저가 존재합니다.'})
find_member = members.find_one({"email": email_receive}, {"_id": False})
if not find_member:
user = {
'provider': 'mysmallmeal',
'email': email_receive,
'pw': hashed_pw,
'nick': nickname,
'uuid': uuid,
}
pprint(user)
members.update_one({"email": email_receive}, {"$set": user}, upsert=True)
return jsonify({'result': 'success', 'user': nickname, 'msg': '가입이 완료되었습니다.'})
return jsonify({'result': 'fail', 'msg': '가입에 실패했습니다.'})
@application.route('/api/valid', methods=['GET'])
def api_valid():
"""
try 아래를 실행했다가, 에러가 있으면 except 구분으로 가란 얘기입니다.
token 을 시크릿키로 디코딩합니다.
보실 수 있도록 payload 를 print 해두었습니다. 우리가 로그인 시 넣은 그 payload 와 같은 것이 나옵니다.
payload 안에 id가 들어있습니다. 이 id로 유저정보를 찾습니다.
여기에선 그 예로 닉네임을 보내주겠습니다.
:return:
"""
token_receive = request.args.get('token')
try:
payload = jwt.decode(token_receive, key=SECRET_KEY, algorithms=['HS256'])
pprint(payload)
# find_member = members.find_one({'email': payload['email']}, {'_id': 0})
return jsonify({'result': 'success', 'nickname': payload['nick']})
except jwt.ExpiredSignatureError:
print("ExpiredSignatureError:: 로그인 시간이 만료되었습니다!")
return redirect(url_for("login", msg="login timeout"))
except jwt.exceptions.DecodeError:
print("DecodeError:: 로그인 정보가 없습니다!")
return redirect(url_for("login", msg="Cannot Login!"))
@application.route('/redirect')
def kakao_redirect():
# code 가져 오기
qs = dict(parse_qsl(request.query_string))
code = qs.get(b'code').decode('utf-8')
# 토큰요청
url = 'https://kauth.kakao.com/oauth/token'
body = {
"grant_type": "authorization_code",
"client_id": client_id,
"redirect_uri": KAKAO_REDIRECT_URI,
"code": code
}
token_header = {'Content-Type': 'application/x-www-form-urlencoded;charset=urf-8'}
req = requests.post(url=url, headers=token_header, data=body).json()
pprint(req)
# 사용자 정보
url = 'https://kapi.kakao.com/v2/user/me'
info_header = {'Authorization': f'Bearer {req["access_token"]}',
'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8'}
user_info = requests.post(url, headers=info_header).json()
print(user_info)
kakao_account = user_info.get('kakao_account')
email = kakao_account.get('email')
user_id = user_info.get('id')
prop = user_info.get('properties')
nickname = "Guest"
if prop:
nickname = prop.get('nickname')
profile = prop.get("thumbnail_image")
print(nickname, profile)
user = {
'providerId': user_id,
'nick': nickname,
'provider': 'kakao',
'age': kakao_account.get('age_range')
}
print(user)
# db에 저장
members.update({'email': email},
{"$set": user}, upsert=True)
# jwt 토큰 발급
payload = {
'id': user_id,
'nick': nickname,
'exp': datetime.datetime.utcnow() + datetime.timedelta(days=3)
}
token = jwt.encode(payload=payload, key=SECRET_KEY, algorithm='HS256')
# kakaoLogin 리다이렉트
return redirect(url_for("kakao_login",
token=token, providerId=user_id, email=email, nickname=nickname))
@application.route('/api/like', methods=['POST'])
def like():
"""
메인 로직 중 하나입니다. 웬만하면 건드리지 말기..!
사용자의 id와 점포의 id를 POST 요청의 바디에 담아와,
db에 해당하는 유저가 존재하는 지 확인하고, 있을 경우,
1. 좋아요를 클릭한 경우 점포를 restaurants DB 에도 등록하고, 사용자의 점포 리스트에도 등록한다.
2. 싫어요를 클릭한 경우 점포를 restaurants DB 에서 제외한다.\n
:return: Response(json)
"""
request.form = json.loads(request.data)
uuid = request.form.get('uuid') # uuid
_id = request.form.get('_id') # ssid
action = request.form.get('action')
min_order = request.form.get('min_order')
user = users.find_one({"uuid": uuid})
pprint(request.form)
put_restaurant(_id, min_order)
if action == 'like':
if not user:
good_list = [_id]
users.insert_one({"_id": uuid, "uuid": uuid, "like_list": good_list})
elif _id in user['like_list']:
pass
else:
good_list = user['like_list']
good_list.append(_id)
users.update_one({"_id": uuid, "uuid": uuid}, {"$set": {"like_list": good_list}}, upsert=True)
elif user and _id in user['like_list']:
good_list = user['like_list']
good_list.remove(_id)
users.update_one({"_id": uuid, "uuid": uuid}, {"$set": {"like_list": good_list}}, upsert=True)
return jsonify(user)
@application.route('/api/like', methods=['GET'])
def show_bookmark():
"""
사용자의 uuid 를 조회해 좋아요한 상품들의 리스트를 불러온다.
* 추가할 내용 restaurants DB 에서 해당 상품들 조회해 오기\n
:return: Response(json)
"""
uuid = request.args.get('uuid')
user = users.find_one({"uuid": uuid})
good_list = []
if user:
good_list = user['like_list']
restaurants = []
for restaurant in good_list:
rest = list(bookmarked_col.find({"_id": restaurant}))
if len(rest) > 0:
restaurants.extend(rest)
return jsonify({"user": user, "restaurants": restaurants})
@application.route('/api/shop', methods=['GET'])
def get_restaurant():
"""
위치 권한 허용 시 셋팅되는 기본 메소드. 요기요 서버에 사용자의 위도와 경도를 보내 주변 배달 점포를 조회해서
필요한 데이터만 가공해서 리스트 형태로 프론트 엔드에 넘긴다.\n
:return: Response(json)
"""
lat = request.args.get('lat')
long = request.args.get('lng')
global order
order = request.args.get('order')
if not order:
order = "rank"
url = f'https://www.yogiyo.co.kr/api/v1/restaurants-geo/?category=1인분주문&items=99&lat={lat}&lng={long}&order={order}'
res = requests.get(url, headers=headers).json()
shops = res.get('restaurants')
restaurants = list()
for shop in shops:
rest = dict()
if not bool(int(shop["phone"])):
continue
rest['_id'] = shop.get('id')
rest['name'] = shop.get('name')
rest['reviews'] = shop.get('review_count')
rest['owner'] = shop.get('owner_reply_count')
rest['categories'] = shop.get('categories')
rest['image'] = shop.get('thumbnail_url')
rest['logo'] = shop.get('logo_url')
rest['address'] = shop.get('address')
rest['rating'] = shop.get('review_avg')
rest['time'] = f"{shop.get('begin')[:5]} - {shop.get('end')[:5]}"
rest['min_order'] = shop.get('min_order_amount')
rest['lng'] = shop.get('lng')
rest['lat'] = shop.get('lat')
rest['phone'] = shop.get('phone')
restaurants.append(rest)
restaurant_col.update_one({"_id": shop['id']}, {"$set": rest}, upsert=True)
pprint(restaurants[0])
return jsonify(restaurants)
@application.route('/api/detail', methods=["GET"])
def show_modal():
_id = request.args.get('_id')
restaurant = bookmarked_col.find_one({"_id": int(_id)})
return jsonify(restaurant)
@application.route('/api/address', methods=["POST"])
def search_add():
data = request.get_data()
query = json.loads(data, encoding='utf-8')['query']
# query = request.json.get('query')
return jsonify(search_address(query))
#
#
# @application.route('api/weather', methods=["GET"])
# def declare_weather():
# weather_code = request.args.get('code')
# image_format = request.args.get('size')
# # result = weather.get_weather(code=weather_code, size=image_format)
# return jsonify({'result': result})
def put_restaurant(_id, min_order):
"""
즐겨찾기 버튼을 클릭한 점포를 데이터베이스에 저장합니다.
:param _id: 요기요 데이터베이스 상점 id
:param min_order: 최소 주문금액
:return: None
"""
if list(bookmarked_col.find({"_id": _id})):
return
url = 'https://www.yogiyo.co.kr/api/v1/restaurants/' + str(_id)
req = requests.post(url, headers=headers)
result = req.json()
print(result)
doc = {
"_id": _id,
"time": result.get("open_time_description"),
"phone": result.get("phone"),
"name": result.get("name"),
"categories": result.get("categories"),
"delivery": result.get("estimated_delivery_time"),
"address": result.get("address"),
"image": result.get("background_url"),
"min_order": min_order,
'lat': result.get("lat"),
'lng': result.get("lng"),
}
bookmarked_col.update_one({"_id": _id}, {"$set": doc}, upsert=True)
def search_address(query):
"""
사용자가 검색 창에 직접 주소를 입력했을 때, 카카오맵 api 를 통해 주소를 위도경도로 변환합니다.\n
:param query: 찾고자 하는 주소
:return: doc(dict) {
address: 찾고자 하는 주소 도로명 주소,
lat: 찾고자 하는 지역의 x좌표,
long: 찾고자 하는 지역의 y 좌표
}
"""
url = 'https://dapi.kakao.com/v2/local/search/address.json?query=' + query
_header = {
'Host': 'dapi.kakao.com',
'Authorization': 'KakaoAK <KEY>'}
req = requests.get(url, headers=_header)
result = req.json()
pprint(result)
documents = result['documents'][0]
address = documents['address_name']
lat = documents['y']
lng = documents['x']
doc = {
"address": address,
"lat": lat,
"long": lng
}
return doc
if __name__ == '__main__':
application.debug = True
application.run(port=8000, debug=True)
|
[
"flask.request.form.get",
"flask_cors.CORS",
"os.popen",
"jwt.encode",
"datetime.datetime.utcnow",
"flask.jsonify",
"flask.url_for",
"pprint.pprint",
"requests.post",
"jwt.decode",
"flask.request.args.get",
"datetime.timedelta",
"flask.render_template",
"requests.get",
"flask.request.get_data",
"flask.json.loads",
"urllib.parse.parse_qsl",
"flask.Flask",
"os.environ.get"
] |
[((394, 409), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (399, 409), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((468, 521), 'flask_cors.CORS', 'CORS', (['application'], {'resources': "{'/*': {'origins': '*'}}"}), "(application, resources={'/*': {'origins': '*'}})\n", (472, 521), False, 'from flask_cors import CORS\n'), ((759, 784), 'os.environ.get', 'os.environ.get', (['"""JWT_KEY"""'], {}), "('JWT_KEY')\n", (773, 784), False, 'import os\n'), ((564, 582), 'os.popen', 'os.popen', (['"""mongod"""'], {}), "('mongod')\n", (572, 582), False, 'import os\n'), ((682, 707), 'os.environ.get', 'os.environ.get', (['"""DB_PATH"""'], {}), "('DB_PATH')\n", (696, 707), False, 'import os\n'), ((1997, 2026), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (2012, 2026), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((2081, 2104), 'flask.request.args.get', 'request.args.get', (['"""msg"""'], {}), "('msg')\n", (2097, 2104), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((2116, 2192), 'flask.render_template', 'render_template', (['"""login.html"""'], {'ID': 'client_id', 'URI': 'KAKAO_REDIRECT_URI', 'msg': 'msg'}), "('login.html', ID=client_id, URI=KAKAO_REDIRECT_URI, msg=msg)\n", (2131, 2192), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((2254, 2286), 'flask.render_template', 'render_template', (['"""register.html"""'], {}), "('register.html')\n", (2269, 2286), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((2354, 2389), 'flask.render_template', 'render_template', (['"""kakao_login.html"""'], {}), "('kakao_login.html')\n", (2369, 2389), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((2479, 2503), 'flask.json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (2489, 2503), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((2508, 2528), 'pprint.pprint', 'pprint', (['request.form'], {}), '(request.form)\n', (2514, 2528), False, 'from pprint import pprint\n'), ((3738, 3762), 'flask.json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (3748, 3762), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((3915, 3935), 'pprint.pprint', 'pprint', (['request.form'], {}), '(request.form)\n', (3921, 3935), False, 'from pprint import pprint\n'), ((4690, 4739), 'flask.jsonify', 'jsonify', (["{'result': 'fail', 'msg': '가입에 실패했습니다.'}"], {}), "({'result': 'fail', 'msg': '가입에 실패했습니다.'})\n", (4697, 4739), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((5080, 5105), 'flask.request.args.get', 'request.args.get', (['"""token"""'], {}), "('token')\n", (5096, 5105), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((6231, 6242), 'pprint.pprint', 'pprint', (['req'], {}), '(req)\n', (6237, 6242), False, 'from pprint import pprint\n'), ((7294, 7356), 'jwt.encode', 'jwt.encode', ([], {'payload': 'payload', 'key': 'SECRET_KEY', 'algorithm': '"""HS256"""'}), "(payload=payload, key=SECRET_KEY, algorithm='HS256')\n", (7304, 7356), False, 'import jwt\n'), ((7871, 7895), 'flask.json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (7881, 7895), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((7907, 7931), 'flask.request.form.get', 'request.form.get', (['"""uuid"""'], {}), "('uuid')\n", (7923, 7931), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((7950, 7973), 'flask.request.form.get', 'request.form.get', (['"""_id"""'], {}), "('_id')\n", (7966, 7973), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((7995, 8021), 'flask.request.form.get', 'request.form.get', (['"""action"""'], {}), "('action')\n", (8011, 8021), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((8038, 8067), 'flask.request.form.get', 'request.form.get', (['"""min_order"""'], {}), "('min_order')\n", (8054, 8067), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((8114, 8134), 'pprint.pprint', 'pprint', (['request.form'], {}), '(request.form)\n', (8120, 8134), False, 'from pprint import pprint\n'), ((8807, 8820), 'flask.jsonify', 'jsonify', (['user'], {}), '(user)\n', (8814, 8820), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((9036, 9060), 'flask.request.args.get', 'request.args.get', (['"""uuid"""'], {}), "('uuid')\n", (9052, 9060), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((9363, 9414), 'flask.jsonify', 'jsonify', (["{'user': user, 'restaurants': restaurants}"], {}), "({'user': user, 'restaurants': restaurants})\n", (9370, 9414), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((9650, 9673), 'flask.request.args.get', 'request.args.get', (['"""lat"""'], {}), "('lat')\n", (9666, 9673), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((9685, 9708), 'flask.request.args.get', 'request.args.get', (['"""lng"""'], {}), "('lng')\n", (9701, 9708), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((9738, 9763), 'flask.request.args.get', 'request.args.get', (['"""order"""'], {}), "('order')\n", (9754, 9763), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((10937, 10959), 'pprint.pprint', 'pprint', (['restaurants[0]'], {}), '(restaurants[0])\n', (10943, 10959), False, 'from pprint import pprint\n'), ((10971, 10991), 'flask.jsonify', 'jsonify', (['restaurants'], {}), '(restaurants)\n', (10978, 10991), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((11073, 11096), 'flask.request.args.get', 'request.args.get', (['"""_id"""'], {}), "('_id')\n", (11089, 11096), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((11168, 11187), 'flask.jsonify', 'jsonify', (['restaurant'], {}), '(restaurant)\n', (11175, 11187), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((11272, 11290), 'flask.request.get_data', 'request.get_data', ([], {}), '()\n', (11288, 11290), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((12031, 12066), 'requests.post', 'requests.post', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (12044, 12066), False, 'import requests\n'), ((13084, 13118), 'requests.get', 'requests.get', (['url'], {'headers': '_header'}), '(url, headers=_header)\n', (13096, 13118), False, 'import requests\n'), ((13147, 13161), 'pprint.pprint', 'pprint', (['result'], {}), '(result)\n', (13153, 13161), False, 'from pprint import pprint\n'), ((2886, 2900), 'pprint.pprint', 'pprint', (['result'], {}), '(result)\n', (2892, 2900), False, 'from pprint import pprint\n'), ((3374, 3436), 'jwt.encode', 'jwt.encode', ([], {'payload': 'payload', 'key': 'SECRET_KEY', 'algorithm': '"""HS256"""'}), "(payload=payload, key=SECRET_KEY, algorithm='HS256')\n", (3384, 3436), False, 'import jwt\n'), ((3445, 3460), 'pprint.pprint', 'pprint', (['payload'], {}), '(payload)\n', (3451, 3460), False, 'from pprint import pprint\n'), ((3499, 3545), 'flask.jsonify', 'jsonify', (["{'result': 'success', 'token': token}"], {}), "({'result': 'success', 'token': token})\n", (3506, 3545), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((3584, 3642), 'flask.jsonify', 'jsonify', (["{'result': 'fail', 'msg': '아이디/비밀번호가 일치하지 않습니다.'}"], {}), "({'result': 'fail', 'msg': '아이디/비밀번호가 일치하지 않습니다.'})\n", (3591, 3642), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((4144, 4200), 'flask.jsonify', 'jsonify', (["{'result': 'fail', 'msg': '같은 이메일의 유저가 존재합니다.'}"], {}), "({'result': 'fail', 'msg': '같은 이메일의 유저가 존재합니다.'})\n", (4151, 4200), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((4497, 4509), 'pprint.pprint', 'pprint', (['user'], {}), '(user)\n', (4503, 4509), False, 'from pprint import pprint\n'), ((4607, 4678), 'flask.jsonify', 'jsonify', (["{'result': 'success', 'user': nickname, 'msg': '가입이 완료되었습니다.'}"], {}), "({'result': 'success', 'user': nickname, 'msg': '가입이 완료되었습니다.'})\n", (4614, 4678), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((5133, 5196), 'jwt.decode', 'jwt.decode', (['token_receive'], {'key': 'SECRET_KEY', 'algorithms': "['HS256']"}), "(token_receive, key=SECRET_KEY, algorithms=['HS256'])\n", (5143, 5196), False, 'import jwt\n'), ((5205, 5220), 'pprint.pprint', 'pprint', (['payload'], {}), '(payload)\n', (5211, 5220), False, 'from pprint import pprint\n'), ((5318, 5377), 'flask.jsonify', 'jsonify', (["{'result': 'success', 'nickname': payload['nick']}"], {}), "({'result': 'success', 'nickname': payload['nick']})\n", (5325, 5377), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((5771, 5802), 'urllib.parse.parse_qsl', 'parse_qsl', (['request.query_string'], {}), '(request.query_string)\n', (5780, 5802), False, 'from urllib.parse import parse_qsl\n'), ((7400, 7491), 'flask.url_for', 'url_for', (['"""kakao_login"""'], {'token': 'token', 'providerId': 'user_id', 'email': 'email', 'nickname': 'nickname'}), "('kakao_login', token=token, providerId=user_id, email=email,\n nickname=nickname)\n", (7407, 7491), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((11303, 11337), 'flask.json.loads', 'json.loads', (['data'], {'encoding': '"""utf-8"""'}), "(data, encoding='utf-8')\n", (11313, 11337), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((6164, 6219), 'requests.post', 'requests.post', ([], {'url': 'url', 'headers': 'token_header', 'data': 'body'}), '(url=url, headers=token_header, data=body)\n', (6177, 6219), False, 'import requests\n'), ((6473, 6512), 'requests.post', 'requests.post', (['url'], {'headers': 'info_header'}), '(url, headers=info_header)\n', (6486, 6512), False, 'import requests\n'), ((7220, 7246), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (7244, 7246), False, 'import datetime\n'), ((7249, 7275), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(3)'}), '(days=3)\n', (7267, 7275), False, 'import datetime\n'), ((9936, 9970), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (9948, 9970), False, 'import requests\n'), ((3292, 3318), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (3316, 3318), False, 'import datetime\n'), ((3321, 3347), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(3)'}), '(days=3)\n', (3339, 3347), False, 'import datetime\n'), ((5498, 5535), 'flask.url_for', 'url_for', (['"""login"""'], {'msg': '"""login timeout"""'}), "('login', msg='login timeout')\n", (5505, 5535), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n'), ((5645, 5682), 'flask.url_for', 'url_for', (['"""login"""'], {'msg': '"""Cannot Login!"""'}), "('login', msg='Cannot Login!')\n", (5652, 5682), False, 'from flask import Flask, request, jsonify, render_template, json, redirect, url_for\n')]
|
import sys
def myinput():
return sys.stdin.readline()
N = int(myinput())
data = myinput().split()
dict = {
'L' : [0, -1],
'R' : [0, +1],
'U' : [-1, 0],
'D' : [+1, 0],
}
start = [1, 1]
for cmd in data:
next = [start[i] + dict[cmd][i] for i in range(2)]
# print(f'next is {next}')
if next[0] >= 1 and next[1] >= 1:
start = next
# print(f'start is {start}')
else: # out of map
pass
# print(f'start is {start}')
print(start)
'''
<Lesson learned>
list 길이 구하기 len(list)
size()나 length 같은거 아니다!
list 각 요소간의 합은 list comprehension 사용
list + list는 리스트 확장이다.
python의 비교 연산자는 &&이 아니라 and 이다
<Answer>
n = int(input())
x, y = 1, 1
plans = input().split()
dx = [0, 0, -1, 1]
dy = [-1, 1, 0, 0]
move_types = ['L', 'R', 'U', 'D']
for plan in plans:
for i in range(len(move_types)):
if plan == move_types[i]:
nx = x + dx[i]
ny = y + dy[i]
if nx < 1 or ny < 1 or nx > n or ny > n:
continue
x, y = nx, ny
print(x, y)
'''
|
[
"sys.stdin.readline"
] |
[((38, 58), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (56, 58), False, 'import sys\n')]
|
import numpy as np
import pandas as pd
import pickle
import tensorflow as tf
import random
import math
import os
import time
from sklearn.metrics import average_precision_score
# ------------------------------------------------------ loading libraries ----
# --- setting random seed -----------------------------------------------------
seed_n = 42
np.random.seed(seed_n)
random.seed(seed_n)
tf.random.set_seed(seed_n)
combination = 3057
# loading model
model = tf.keras.models.load_model('/project/M-ABeICU176709/delirium/data/outputs/models/{:06d}/model.hdf5'.format(combination))
# loading data
X_adm_val = pickle.load(open('/project/M-ABeICU176709/delirium/data/inputs/preprocessed/X_adm5y_validation.pickle', 'rb'))
X_temp_val = pickle.load(open('/project/M-ABeICU176709/delirium/data/inputs/preprocessed/X_temp_validation.pickle', 'rb'))
y_12h_val = pickle.load(open('/project/M-ABeICU176709/delirium/data/inputs/preprocessed/y_12h_validation.pickle', 'rb'))
y_24h_val = pickle.load(open('/project/M-ABeICU176709/delirium/data/inputs/preprocessed/y_24h_validation.pickle', 'rb'))
# loading data
X_adm_train = pickle.load(open('/project/M-ABeICU176709/delirium/data/inputs/preprocessed/X_adm5y_train.pickle', 'rb'))
X_temp_train = pickle.load(open('/project/M-ABeICU176709/delirium/data/inputs/preprocessed/X_temp_train.pickle', 'rb'))
y_12h_train = pickle.load(open('/project/M-ABeICU176709/delirium/data/inputs/preprocessed/y_12h_train.pickle', 'rb'))
y_24h_train = pickle.load(open('/project/M-ABeICU176709/delirium/data/inputs/preprocessed/y_24h_train.pickle', 'rb'))
# -----------------------------------------------------------------------------
for set in [('train', X_adm_train, X_temp_train, y_12h_train, y_24h_train), ('validation', X_adm_val, X_temp_val, y_12h_val, y_24h_val)]:
# Predicting y_12h and y_24h
results = model.predict(x = [set[1], set[2]],
verbose = 0)
y_12h_hat = results[0]
y_24h_hat = results[1]
AUPRC_12h = average_precision_score(set[3], y_12h_hat)
AUPRC_24h = average_precision_score(set[4], y_24h_hat)
AUPRC_mean = (AUPRC_12h + AUPRC_24h) / 2
print(f'set: {set[0]}, AUPRC_12h: {AUPRC_12h}, AUPRC_24h: {AUPRC_24h}, AUPRC_mean: {AUPRC_mean}')
|
[
"tensorflow.random.set_seed",
"sklearn.metrics.average_precision_score",
"random.seed",
"numpy.random.seed"
] |
[((353, 375), 'numpy.random.seed', 'np.random.seed', (['seed_n'], {}), '(seed_n)\n', (367, 375), True, 'import numpy as np\n'), ((376, 395), 'random.seed', 'random.seed', (['seed_n'], {}), '(seed_n)\n', (387, 395), False, 'import random\n'), ((396, 422), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed_n'], {}), '(seed_n)\n', (414, 422), True, 'import tensorflow as tf\n'), ((2012, 2054), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['set[3]', 'y_12h_hat'], {}), '(set[3], y_12h_hat)\n', (2035, 2054), False, 'from sklearn.metrics import average_precision_score\n'), ((2071, 2113), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['set[4]', 'y_24h_hat'], {}), '(set[4], y_24h_hat)\n', (2094, 2113), False, 'from sklearn.metrics import average_precision_score\n')]
|
import pytest
from django.apps import apps
@pytest.mark.django_db
def test_models_passthrough(settings):
MyModel = apps.get_model("test_app.MyModel")
entered = "c++"
expected = "c"
m = MyModel(title=entered)
m.save()
assert m.django_extensions_slug == expected
@pytest.mark.django_db
def test_models(settings):
settings.SLUGIFY_PROCESSORS = ["test_app.coding.slugify_programming"]
MyModel = apps.get_model("test_app.MyModel")
entered = "c++"
expected = "cpp"
m = MyModel(title=entered)
m.save()
assert m.django_extensions_slug == expected
|
[
"django.apps.apps.get_model"
] |
[((122, 156), 'django.apps.apps.get_model', 'apps.get_model', (['"""test_app.MyModel"""'], {}), "('test_app.MyModel')\n", (136, 156), False, 'from django.apps import apps\n'), ((430, 464), 'django.apps.apps.get_model', 'apps.get_model', (['"""test_app.MyModel"""'], {}), "('test_app.MyModel')\n", (444, 464), False, 'from django.apps import apps\n')]
|
"""
Settings specific to development environments
"""
from os import path
from settings.base import PROJECT_DIR, MIDDLEWARE_CLASSES, INSTALLED_APPS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': path.join(PROJECT_DIR, 'data', 'data.db'),
}
}
DEBUG = True
TEMPLATE_DEBUG = True
SITE_ID = 1
INCLUDE_DOMAIN = 'localhost:8000'
INCLUDE_URL = INCLUDE_DOMAIN + '/include/'
STATIC_URL = '/static/'
def show_toolbar(request):
return True
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
'SHOW_TOOLBAR_CALLBACK': show_toolbar,
}
INTERNAL_IPS = ('127.0.0.1', '10.0.1.3',)
MIDDLEWARE_CLASSES = MIDDLEWARE_CLASSES + [
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
INSTALLED_APPS = INSTALLED_APPS + [
'debug_toolbar',
]
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': path.join(PROJECT_DIR, 'cache'),
'TIMEOUT': 60 * 60 * 24 * 365
}
}
COMPRESS_ENABLED = True
|
[
"os.path.join"
] |
[((246, 287), 'os.path.join', 'path.join', (['PROJECT_DIR', '"""data"""', '"""data.db"""'], {}), "(PROJECT_DIR, 'data', 'data.db')\n", (255, 287), False, 'from os import path\n'), ((924, 955), 'os.path.join', 'path.join', (['PROJECT_DIR', '"""cache"""'], {}), "(PROJECT_DIR, 'cache')\n", (933, 955), False, 'from os import path\n')]
|
import carla
import random
from carla_painter import CarlaPainter
def do_something(data):
pass
def main():
try:
# initialize one painter
painter = CarlaPainter('localhost', 8089)
client = carla.Client('localhost', 2000)
client.set_timeout(10.0)
world = client.get_world()
# set synchronous mode
previous_settings = world.get_settings()
world.apply_settings(carla.WorldSettings(
synchronous_mode=True,
fixed_delta_seconds=1.0 / 30.0))
# randomly spawn an ego vehicle and several other vehicles
spawn_points = world.get_map().get_spawn_points()
blueprints_vehicles = world.get_blueprint_library().filter("vehicle.*")
ego_transform = spawn_points[random.randint(0, len(spawn_points) - 1)]
other_vehicles_transforms = []
for _ in range(3):
other_vehicles_transforms.append(spawn_points[random.randint(0, len(spawn_points) - 1)])
blueprints_vehicles = [x for x in blueprints_vehicles if int(x.get_attribute('number_of_wheels')) == 4]
# set ego vehicle's role name to let CarlaViz know this vehicle is the ego vehicle
blueprints_vehicles[0].set_attribute('role_name', 'ego') # or set to 'hero'
batch = [carla.command.SpawnActor(blueprints_vehicles[0], ego_transform).then(carla.command.SetAutopilot(carla.command.FutureActor, True))]
results = client.apply_batch_sync(batch, True)
if not results[0].error:
ego_vehicle = world.get_actor(results[0].actor_id)
else:
print('spawn ego error, exit')
ego_vehicle = None
return
other_vehicles = []
batch = []
for i in range(3):
batch.append(carla.command.SpawnActor(blueprints_vehicles[i + 1], other_vehicles_transforms[i]).then(carla.command.SetAutopilot(carla.command.FutureActor, True)))
# set autopilot for all these actors
ego_vehicle.set_autopilot(True)
results = client.apply_batch_sync(batch, True)
for result in results:
if not result.error:
other_vehicles.append(result.actor_id)
# attach a camera and a lidar to the ego vehicle
camera = None
blueprint_camera = world.get_blueprint_library().find('sensor.camera.rgb')
blueprint_camera.set_attribute('image_size_x', '640')
blueprint_camera.set_attribute('image_size_y', '480')
blueprint_camera.set_attribute('fov', '110')
blueprint_camera.set_attribute('sensor_tick', '0.1')
transform_camera = carla.Transform(carla.Location(y=+3.0, z=5.0))
camera = world.spawn_actor(blueprint_camera, transform_camera, attach_to=ego_vehicle)
camera.listen(lambda data: do_something(data))
lidar = None
blueprint_lidar = world.get_blueprint_library().find('sensor.lidar.ray_cast')
blueprint_lidar.set_attribute('range', '30')
blueprint_lidar.set_attribute('rotation_frequency', '10')
blueprint_lidar.set_attribute('channels', '32')
blueprint_lidar.set_attribute('lower_fov', '-30')
blueprint_lidar.set_attribute('upper_fov', '30')
blueprint_lidar.set_attribute('points_per_second', '56000')
transform_lidar = carla.Transform(carla.Location(x=0.0, z=5.0))
lidar = world.spawn_actor(blueprint_lidar, transform_lidar, attach_to=ego_vehicle)
lidar.listen(lambda data: do_something(data))
# tick to generate these actors in the game world
world.tick()
# save vehicles' trajectories to draw in the frontend
trajectories = [[]]
while (True):
world.tick()
ego_location = ego_vehicle.get_location()
trajectories[0].append([ego_location.x, ego_location.y, ego_location.z])
# draw trajectories
painter.draw_polylines(trajectories)
# draw ego vehicle's velocity just above the ego vehicle
ego_velocity = ego_vehicle.get_velocity()
velocity_str = "{:.2f}, ".format(ego_velocity.x) + "{:.2f}".format(ego_velocity.y) \
+ ", {:.2f}".format(ego_velocity.z)
painter.draw_texts([velocity_str],
[[ego_location.x, ego_location.y, ego_location.z + 10.0]], size=20)
finally:
if previous_settings is not None:
world.apply_settings(previous_settings)
if lidar is not None:
lidar.stop()
lidar.destroy()
if camera is not None:
camera.stop()
camera.destroy()
if ego_vehicle is not None:
ego_vehicle.destroy()
if other_vehicles is not None:
client.apply_batch([carla.command.DestroyActor(x) for x in other_vehicles])
if __name__ == "__main__":
main()
|
[
"carla_painter.CarlaPainter",
"carla.command.DestroyActor",
"carla.command.SpawnActor",
"carla.command.SetAutopilot",
"carla.WorldSettings",
"carla.Client",
"carla.Location"
] |
[((174, 205), 'carla_painter.CarlaPainter', 'CarlaPainter', (['"""localhost"""', '(8089)'], {}), "('localhost', 8089)\n", (186, 205), False, 'from carla_painter import CarlaPainter\n'), ((224, 255), 'carla.Client', 'carla.Client', (['"""localhost"""', '(2000)'], {}), "('localhost', 2000)\n", (236, 255), False, 'import carla\n'), ((434, 508), 'carla.WorldSettings', 'carla.WorldSettings', ([], {'synchronous_mode': '(True)', 'fixed_delta_seconds': '(1.0 / 30.0)'}), '(synchronous_mode=True, fixed_delta_seconds=1.0 / 30.0)\n', (453, 508), False, 'import carla\n'), ((2636, 2665), 'carla.Location', 'carla.Location', ([], {'y': '(+3.0)', 'z': '(5.0)'}), '(y=+3.0, z=5.0)\n', (2650, 2665), False, 'import carla\n'), ((3324, 3352), 'carla.Location', 'carla.Location', ([], {'x': '(0.0)', 'z': '(5.0)'}), '(x=0.0, z=5.0)\n', (3338, 3352), False, 'import carla\n'), ((1362, 1421), 'carla.command.SetAutopilot', 'carla.command.SetAutopilot', (['carla.command.FutureActor', '(True)'], {}), '(carla.command.FutureActor, True)\n', (1388, 1421), False, 'import carla\n'), ((1293, 1356), 'carla.command.SpawnActor', 'carla.command.SpawnActor', (['blueprints_vehicles[0]', 'ego_transform'], {}), '(blueprints_vehicles[0], ego_transform)\n', (1317, 1356), False, 'import carla\n'), ((1870, 1929), 'carla.command.SetAutopilot', 'carla.command.SetAutopilot', (['carla.command.FutureActor', '(True)'], {}), '(carla.command.FutureActor, True)\n', (1896, 1929), False, 'import carla\n'), ((4774, 4803), 'carla.command.DestroyActor', 'carla.command.DestroyActor', (['x'], {}), '(x)\n', (4800, 4803), False, 'import carla\n'), ((1782, 1868), 'carla.command.SpawnActor', 'carla.command.SpawnActor', (['blueprints_vehicles[i + 1]', 'other_vehicles_transforms[i]'], {}), '(blueprints_vehicles[i + 1],\n other_vehicles_transforms[i])\n', (1806, 1868), False, 'import carla\n')]
|
import numpy as np
def quotient(rri):
rri = np.array(rri)
L = len(rri) - 1
indices = np.where((rri[:L - 1] / rri[1:L] < 0.8) |
(rri[:L - 1] / rri[1:L] > 1.2) |
(rri[1:L] / rri[:L - 1] < 0.8) |
(rri[1:L] / rri[:L - 1] > 1.2))
return np.delete(rri, indices)
def moving_average(rri, order=3):
return _moving_function(rri, order, np.mean)
def moving_median(rri, order=3):
return _moving_function(rri, order, np.median)
def _moving_function(rri, order, func):
offset = int(order / 2)
filt_rri = np.array(rri.copy(), dtype=np.float64)
for i in range(offset, len(rri) - offset, 1):
filt_rri[i] = func(rri[i - offset:i + offset + 1])
return filt_rri
|
[
"numpy.where",
"numpy.array",
"numpy.delete"
] |
[((50, 63), 'numpy.array', 'np.array', (['rri'], {}), '(rri)\n', (58, 63), True, 'import numpy as np\n'), ((100, 243), 'numpy.where', 'np.where', (['((rri[:L - 1] / rri[1:L] < 0.8) | (rri[:L - 1] / rri[1:L] > 1.2) | (rri[1:L\n ] / rri[:L - 1] < 0.8) | (rri[1:L] / rri[:L - 1] > 1.2))'], {}), '((rri[:L - 1] / rri[1:L] < 0.8) | (rri[:L - 1] / rri[1:L] > 1.2) |\n (rri[1:L] / rri[:L - 1] < 0.8) | (rri[1:L] / rri[:L - 1] > 1.2))\n', (108, 243), True, 'import numpy as np\n'), ((321, 344), 'numpy.delete', 'np.delete', (['rri', 'indices'], {}), '(rri, indices)\n', (330, 344), True, 'import numpy as np\n')]
|
#Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#PDX-License-Identifier: MIT-0 (For details, see https://github.com/awsdocs/amazon-rekognition-developer-guide/blob/master/LICENSE-SAMPLECODE.)
import boto3
from botocore.exceptions import ClientError
from os import environ
if __name__ == "__main__":
collectionId='MyCollection'
print('Attempting to delete collection ' + collectionId)
client=boto3.client('rekognition')
statusCode=''
try:
response=client.delete_collection(CollectionId=collectionId)
statusCode=response['StatusCode']
except ClientError as e:
if e.response['Error']['Code'] == 'ResourceNotFoundException':
print ('The collection ' + collectionId + ' was not found ')
else:
print ('Error other than Not Found occurred: ' + e.response['Error']['Message'])
statusCode=e.response['ResponseMetadata']['HTTPStatusCode']
print('Operation returned Status Code: ' + str(statusCode))
print('Done...')
|
[
"boto3.client"
] |
[((431, 458), 'boto3.client', 'boto3.client', (['"""rekognition"""'], {}), "('rekognition')\n", (443, 458), False, 'import boto3\n')]
|
import itertools
import os
import re
import sys
# scans files to construct an empirical prior
from bifs import BIFS
# numpy >= 1.17
from numpy.random import Generator, PCG64
import numpy as np
class RunningMean:
"""Accepts values one at a time and computes the mean and sd of all values seen so far.
The inputs are arrays, which must all have the same shape. Mean and sd are accumulated
separately for each cell in the array.
"""
def __init__(self, sd=True):
"""If sd is false do not accumulate second moment.
Clients should not request information related to the sd in that case.
"""
self.n = 0
self._second = sd # as in second moment
def observation(self, x):
"x is an array-like object which is considered a single observation"
self.n += 1
if self.n == 1:
self._mns = x
if self._second:
# ss will turn into a matrix later
self._ss = 0.0
else:
lastdelta = x-self._mns
self._mns += (lastdelta)/self.n
if self._second:
# element by element multiplication in next line
self._ss += lastdelta*(x-self._mns)
def mean(self):
"return array of means so far"
return self._mns
def sd(self):
"return array of sd so far"
# element by element square root
return np.sqrt(self._ss/(self.n-1))
class AbstractEmpiricalScanner:
""" This class consumes a list of images and computes statistics on them. Each statistic is computed separately for
each voxel, i.e. the result in the (2, 5) cell refers to all the (2, 5) cells in all the images (or their Fourier counterparts).
All images must have the same dimensions, and they should be aligned with each other
for the results to be meaningful.
The mean and sd of the modulus is always accumulated; values for the phase can be requested as well, as can the correlations between the
phase and modulus (again, at each point in Fourier space).
Finally, one can request a sample of the original voxels in image space.
Concrete classes provide particular ways to get images. They then pass the images to _statsAccumulate and,
optionally, _voxAccumulate (possibly different images for each) and call
_post when done. At that point, and only that point, are results available from self.modulus() and, if requested,
self.phase(), self.corr(), and self.voxels().
For backward compatility, self.mns, self.sds, and self.vox accessor return the mean and sd of self.modulus() and the voxels.
Don't rely on that in new code.
image_mask optionally indicates which areas of the image to ignore.
It must be a boolean array with the same shape as image files.
All voxels selected by image_mask are set to zero before doing BIFS processing.
The mask applies to the original image NOT to the fourier space version, which will
generally have non-0 values in the image_mask region.
It is the subclass responsibility to implement these semantics.
Note the "mask" here is not a mask in the numpy sense of a masked array, which
concerns missing values.
voxel sampling only considers the non-ignored regions, but the number sampled will be based on
the total voxel count before masking.
"""
def __init__(self, sampleFraction=0, seed=85792359, image_mask=None, phase=False, corr=False):
"""Setup for scan of images
if sampleFraction is >0 (and it should be <=1) then that fraction of the image voxels will be retained.
In that case, seed is used to set the random number generator.
If phase is true, accumulate statistics on the phase as well as the modulus.
If corr is true, accumulate statistics on the phase and its covariance with the modulus.
Covariance is on a cell by cell basis.
"""
self.sampleFraction = sampleFraction
self._modulus = RunningMean()
if phase or corr:
self._getPhase = True
self._phase = RunningMean()
if corr:
self._getcorr = True
self._xy = RunningMean(sd=False)
if sampleFraction>0:
self._voxels = []
self._rg = Generator(PCG64(seed))
self.masking = (image_mask is not None)
if self.masking:
self.image_mask = image_mask
self.image_keep = np.logical_not(image_mask)
self._benchmarkHdr = None # checks for consistent headers
self._mismatch = set() # holds keys that had a mismatch
self._bifs = BIFS()
def modulus(self)->RunningMean:
return self._modulus
def phase(self)->RunningMean:
return self._phase
def corr(self):
"Note we return the correlation matrix itself, not an accumulator"
return (self._xy.mean()-self._modulus.mean()*self._phase.mean())/ \
(self._modulus.sd()*self._phase.sd())
def voxels(self):
"return 1-d array sorted by intensity"
return self._voxels
def __getattr__(self, name):
## backward compatibility only
if name == "mns":
return self.modulus().mean()
if name == "sds":
return self.modulus().sd()
if name == "vox":
return self.voxels()
raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, name))
def _do_one(self, file):
"file is a path-like object. Read it in and accumulate information"
self._bifs.load_image_file(file)
if self.masking:
# dirty trick. But doesn't invalidate anything else in _bifs.
self._bifs._init_image[self.image_mask] = 0.0
self._modulus.observation(self._bifs.mod_image())
if self._getPhase:
self._phase.observation(self._bifs.phase_image())
if self._getcorr:
# next multiplication is element by element
self._xy.observation(self._bifs.phase_image()*self._bifs.mod_image())
if self.sampleFraction>0:
self._voxAccumulate(self._bifs.init_image())
hdr = self._bifs.read_imfile.header
if not self._benchmarkHdr:
# first header encountered
self._benchmarkHdr = hdr
# could not delete the following key
# it actually doesn't appear in the objects attributes
#del benchmarkHdr.__dict__['db_name'] # differences expected and no concern
else:
for key in self._benchmarkHdr:
if key == 'db_name':
continue
if key.startswith("scl_"):
# values were array(nan, dtype=float32) and I had no luck testing for them
# in various ways
continue
v1 = self._benchmarkHdr[key]
v2 = hdr[key]
if (v1 != v2).any():
self._mismatch.add(key)
def _voxAccumulate(self, m):
"""accumulate voxel values.
In the most likely case, the voxels are from image space while the empirical prior
is from k-space. So we provide seperate functions for the 2 values.
Calling this is pointless unless sampleFraction>0.
"""
# always base number sampled on the complete image size
nSamp = int(m.size*self.sampleFraction)
if self.masking:
self._voxels.append(self._rg.choice(m[self.image_keep], nSamp))
else:
# m.ravel is not an acceptable first argument to choice
# actually, it should have been np.ravel(m)
# m.flatten and the mask selection above both create copies, unfortunately
self._voxels.append(self._rg.choice(m.flatten(), nSamp))
def _statsPost(self):
"""
Finalize computation of voxel by voxel statistics for all images.
Call after all images have been seen.
Results returned as arrays self.mns and self.sds.
"""
# currently handled by RunningMean instances automatically
pass
def _voxPost(self):
"""
Finalize accumulated voxels.
"""
if self.sampleFraction>0:
self._voxels = np.concatenate(self._voxels)
self._voxels.sort()
def _post(self):
"wrap up all processing"
self._statsPost()
self._voxPost()
def nImages(self) -> int:
"number of images processed so far = number of files read unless error"
return self._modulus.n
class EmpiricalScanner(AbstractEmpiricalScanner):
"""Scan selected images on disk, ensuring they are alll compatible.
topDir path like object indicating where in the file system the scan should start
all subdirectories will be scanned recursively unless they are excluded.
matchFile <String> regular expression for the file name of image files we want.
Matching is on the file name only, not its full path.
exclude <String> optional regular expression. Any directory matching this pattern is excluded.
Any file that satisfies matchFile is excluded if it also matches exclude.
ostr A stream-like object that will receive routines notices of skipped files and statistics.
See AbstractEmpiricalScanner for sampleFraction, seed and image_mask.
The files are read in and converted to k-space. We compute the mean and sd of the k-space images,
and optionally accumulate voxels from the original image.
We also check that the headers are consistent. This works for .nii files, and may or may not for others.
"""
def __init__(self, sampleFraction=0, seed=85792359, topDir=".", matchFile="", exclude=None, image_mask=None, phase=False, corr=False, ostr=sys.stdout):
super().__init__(sampleFraction, seed, image_mask, phase, corr)
self._topDir = topDir
self._matchRE = re.compile(matchFile, re.I)
if exclude:
self._excludeRE = re.compile(exclude, re.I)
else:
self._excludeRE = None
self.go(ostr=ostr)
def go(self, ostr=sys.stdout):
"""Actually perform the scan.
Note this is triggered by object initialization.
Repeated calls may not work.
ostr is an output stream
"""
for root, dirs, files in os.walk(self._topDir):
if self._excludeRE:
# avoid directories with our target case for whom we are trying to predict
iKill = [ i for i, d in zip(itertools.count(), dirs) if self._excludeRE.search(d)]
if iKill:
nKill = 0
for i in iKill:
i -= nKill
print("Skipping {}".format(dirs[i]), file=ostr)
del dirs[i-nKill]
nKill += 1
# look for files to import
if files:
for f in files:
if not self._matchRE.search(f):
continue
if self._excludeRE:
if self._excludeRE.search(f):
print("Skipping {}".format(f), file=ostr)
continue
self._do_one(os.path.join(root, f))
self._post()
class FeedScanner(AbstractEmpiricalScanner):
"""A scanner that accepts anything iterable as a list of file names to scan"""
def __init__(self, files, sampleFraction=0, seed=85792359, image_mask=None, phase=False, corr=False, ostr=sys.stdout):
super().__init__(sampleFraction, seed, image_mask, phase, corr)
self._files = files
self.go(ostr=ostr)
def go(self):
for f in self._files:
self._do_one(f)
self._post()
|
[
"numpy.random.PCG64",
"numpy.logical_not",
"os.walk",
"itertools.count",
"bifs.BIFS",
"numpy.sqrt",
"os.path.join",
"numpy.concatenate",
"re.compile"
] |
[((1426, 1458), 'numpy.sqrt', 'np.sqrt', (['(self._ss / (self.n - 1))'], {}), '(self._ss / (self.n - 1))\n', (1433, 1458), True, 'import numpy as np\n'), ((4677, 4683), 'bifs.BIFS', 'BIFS', ([], {}), '()\n', (4681, 4683), False, 'from bifs import BIFS\n'), ((10015, 10042), 're.compile', 're.compile', (['matchFile', 're.I'], {}), '(matchFile, re.I)\n', (10025, 10042), False, 'import re\n'), ((10442, 10463), 'os.walk', 'os.walk', (['self._topDir'], {}), '(self._topDir)\n', (10449, 10463), False, 'import os\n'), ((4497, 4523), 'numpy.logical_not', 'np.logical_not', (['image_mask'], {}), '(image_mask)\n', (4511, 4523), True, 'import numpy as np\n'), ((8336, 8364), 'numpy.concatenate', 'np.concatenate', (['self._voxels'], {}), '(self._voxels)\n', (8350, 8364), True, 'import numpy as np\n'), ((10093, 10118), 're.compile', 're.compile', (['exclude', 're.I'], {}), '(exclude, re.I)\n', (10103, 10118), False, 'import re\n'), ((4340, 4351), 'numpy.random.PCG64', 'PCG64', (['seed'], {}), '(seed)\n', (4345, 4351), False, 'from numpy.random import Generator, PCG64\n'), ((11376, 11397), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (11388, 11397), False, 'import os\n'), ((10632, 10649), 'itertools.count', 'itertools.count', ([], {}), '()\n', (10647, 10649), False, 'import itertools\n')]
|
from django.core.management.base import BaseCommand
from django.utils import timezone
from datetime import timedelta, date
from django.contrib import messages
from skeleton.utils import get_current_season, get_site_season_start_end
from skeleton.models import Reading, Site, Farm, WeatherStation, Season
import os
import json
import requests
import re
# Get an instance of a logger
import logging
logger = logging.getLogger(__name__)
'''
From command line can just run 'python manage.py request_to_hortplus --stations=HAV'
'''
class Command(BaseCommand):
help = 'Requests data from hortplus'
def add_arguments(self, parser):
parser.add_argument('-P', '--purpose', type=str, help='One of process_readings or generate_eoy_data')
parser.add_argument('-s', '--serial', type=str, help='Hortplus serial number generated individually for a user')
parser.add_argument('-p', '--period', type=int, help='The number of records for the specified interval, counting backwards from now (unless a startdate provided)')
parser.add_argument('-d', '--startdate', type=str, help='The date to start providing data from. This forces the period to count forwards from this date. Format YYYY-MMDD')
parser.add_argument('-f', '--format', type=str, help='The format the resulting data should be provided as')
parser.add_argument('-i', '--interval', type=str, help='The type of weather data. H for hourly and D for daily.')
parser.add_argument('-t', '--stations', type=str, help='The list of weather station ids separated by a comma.')
parser.add_argument('-m', '--metvars', type=str, help='The list of weather variable and measurement type TD_M,RN_T combined with an underscore, separated by a comma.')
parser.add_argument('--sites', type=open, help='A list of sites to get request rainfall for.')
def handle(self, *args, **kwargs):
response_text = None
# get arguments from command line or use ones that will be done autoamtically
serial = kwargs['serial'] if kwargs['serial'] else os.getenv('HORTPLUS_JACK_KEY')
if kwargs['purpose'] is None:
data = {
'period': kwargs['period'], # 7
'format': kwargs['format'], # csv
'interval': kwargs['interval'], # D
'stations': kwargs['stations'], # HAV
'metvars' : kwargs['metvars'] # RN_T
}
# startdate is optional
if kwargs['startdate']:
data['startdate'] = kwargs['startdate']
response_text = post_request(data, serial)
elif kwargs['purpose'] == 'process_readings':
logger.info('Start processing of readings')
readings = None
if kwargs['sites']:
sites = kwargs['sites']
logger.info('Starting update of rainfall for sites that have just been uploaded and have a null rain reading.' + str(sites))
readings = Reading.objects.select_related('site__farm__weatherstation').filter(site__in=sites, rain__isnull=True, type=1)
else:
logger.info('Starting update of rainfall for all sites that have a null rain reading')
readings = Reading.objects.select_related('site__farm__weatherstation').filter(rain__isnull=True, type=1)
for reading in readings:
logger.debug('Reading object to process: ' + str(reading))
season = get_current_season()
dates = get_site_season_start_end(reading.site, season)
# If a site has only one reading we cannot calculate the previous reading date. A try block is the only way to catch this
try:
previous_reading = reading.get_previous_by_date(site=reading.site, type=1, date__range=(dates.period_from, dates.period_to))
except:
previous_reading = None
if previous_reading:
site = reading.site
farm = site.farm
weatherstation = farm.weatherstation
days = (reading.date - previous_reading.date).days - 1
logger.debug('Previous Reading:' + str(previous_reading))
logger.debug(days)
startdate = previous_reading.date + timedelta(days=1)
logger.debug('startdate' + str(startdate))
data = {
'period': days,
'startdate' : str(startdate),
'format' : 'csv',
'interval': 'D',
'stations': weatherstation.code,
'metvars' : 'RN_T'
}
response_text = post_request(data, serial)
lines = response_text.split("\n")
del lines[0]
rainfall = 0
for line in lines:
valid = re.search("^\w.*", line) # make sure we have a valid line to split
if valid:
fields = line.split(",")
if fields[3] != '-' and fields[3] != '.':
rainfall += float(fields[3])
logger.debug(str(rainfall))
reading.rain = round(rainfall, 1)
reading.save()
else:
logger.debug('No previous reading for site so cannot calculate a rain reading')
elif kwargs['purpose'] == 'generate_eoy_data':
rain_data = {} # Keyed by the weatherstation code and the value will be the sum of rain / 10 years
#current_rain_data = {}
start_dates = [] # 10 start dates starting at the 1st October of current year. Actually 2nd cause of the way API works
season = Season.objects.get(current_flag=True)
current_year = season.formatted_season_start_year
current_year_date = str(current_year) + '-10-02'
start_dates.append(current_year_date)
station = kwargs['stations']
logger.debug("Generating average rainfall for last 10 years back from " + current_year_date + " for station " + station)
for month in ['10','11','12','01','02','03','04','05','06']:
rain_data[month] = {
'avg' : 0,
'cur' : 0
}
x = 0
while x < 10:
year = (int(current_year) -1) - x
# Start Date will always be 1st of October of year we got for current.
date = str(year) + '-10-02'
start_dates.append(date)
x = x + 1
logger.debug('We will be getting rainfall data for ' + str(start_dates) + ' + 272 days')
# We will have the current year, and the previous 10 years in array
for start_date in start_dates:
data = {
'period': 272, # 272 days will take us to 30th of June (except leap years but don't need to be exact)
'startdate' : start_date,
'format' : 'csv',
'interval': 'D',
'stations': station,
'metvars' : 'RN_T'
}
response_text = post_request(data, serial)
lines = response_text.split("\n")
del lines[0]
for line in lines:
valid = re.search("^\w.*", line) # make sure we have a valid line to split
if valid:
fields = line.split(",")
station = fields[0]
start = fields[1]
split_start = start.split("-") # Split from date "2019-10-17 08:00:00"
month = split_start[1] # Month which is the key to our rain_data dict is the second part of date
rain = fields[3]
if rain != '-' and rain != '.':
if start_date == current_year_date:
rain_data[month]['cur'] += float(rain)
else:
rain_data[month]['avg'] += float(rain)
else:
logger.error("Unidentifiable value for rain of:" + rain)
return json.dumps(rain_data)
else:
logger.error('Unidentified purpose of requesting hortplus data')
'''
post_request
'''
def post_request(data, serial):
try:
r = requests.post('https://hortplus.metwatch.nz/index.php?pageID=wxn_wget_post&serial=' + serial, data=data)
logger.debug('data in request ' + str(data))
if r.status_code == 200:
logger.debug('response ' + str(r.text))
return r.text
else:
raise Exception("Error processing request:" + str(r.text))
except Exception as e:
messages.error(request, "Error: " + str(e))
|
[
"skeleton.utils.get_site_season_start_end",
"skeleton.utils.get_current_season",
"skeleton.models.Season.objects.get",
"json.dumps",
"skeleton.models.Reading.objects.select_related",
"datetime.timedelta",
"requests.post",
"re.search",
"os.getenv",
"logging.getLogger"
] |
[((408, 435), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (425, 435), False, 'import logging\n'), ((8725, 8838), 'requests.post', 'requests.post', (["('https://hortplus.metwatch.nz/index.php?pageID=wxn_wget_post&serial=' + serial\n )"], {'data': 'data'}), "(\n 'https://hortplus.metwatch.nz/index.php?pageID=wxn_wget_post&serial=' +\n serial, data=data)\n", (8738, 8838), False, 'import requests\n'), ((2076, 2106), 'os.getenv', 'os.getenv', (['"""HORTPLUS_JACK_KEY"""'], {}), "('HORTPLUS_JACK_KEY')\n", (2085, 2106), False, 'import os\n'), ((3489, 3509), 'skeleton.utils.get_current_season', 'get_current_season', ([], {}), '()\n', (3507, 3509), False, 'from skeleton.utils import get_current_season, get_site_season_start_end\n'), ((3534, 3581), 'skeleton.utils.get_site_season_start_end', 'get_site_season_start_end', (['reading.site', 'season'], {}), '(reading.site, season)\n', (3559, 3581), False, 'from skeleton.utils import get_current_season, get_site_season_start_end\n'), ((5949, 5986), 'skeleton.models.Season.objects.get', 'Season.objects.get', ([], {'current_flag': '(True)'}), '(current_flag=True)\n', (5967, 5986), False, 'from skeleton.models import Reading, Site, Farm, WeatherStation, Season\n'), ((8532, 8553), 'json.dumps', 'json.dumps', (['rain_data'], {}), '(rain_data)\n', (8542, 8553), False, 'import json\n'), ((2998, 3058), 'skeleton.models.Reading.objects.select_related', 'Reading.objects.select_related', (['"""site__farm__weatherstation"""'], {}), "('site__farm__weatherstation')\n", (3028, 3058), False, 'from skeleton.models import Reading, Site, Farm, WeatherStation, Season\n'), ((3257, 3317), 'skeleton.models.Reading.objects.select_related', 'Reading.objects.select_related', (['"""site__farm__weatherstation"""'], {}), "('site__farm__weatherstation')\n", (3287, 3317), False, 'from skeleton.models import Reading, Site, Farm, WeatherStation, Season\n'), ((4376, 4393), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (4385, 4393), False, 'from datetime import timedelta, date\n'), ((5042, 5067), 're.search', 're.search', (['"""^\\\\w.*"""', 'line'], {}), "('^\\\\w.*', line)\n", (5051, 5067), False, 'import re\n'), ((7612, 7637), 're.search', 're.search', (['"""^\\\\w.*"""', 'line'], {}), "('^\\\\w.*', line)\n", (7621, 7637), False, 'import re\n')]
|
import argparse
import datetime
import mock
import pytest
from batch.etrade_csv_ingestor import EtradeIngestor
from batch.etrade_csv_ingestor import RowParserException
from stock_analysis.logic import order_history
class TestEtradeCsvIngestor(object):
def test_init(self):
batch = EtradeIngestor()
args = batch.arg_parser.parse_args(['--csv-path', './path/to/data.csv'])
assert args == argparse.Namespace(csv_path='./path/to/data.csv')
assert type(batch.order_logic) == order_history.OrderHistoryLogic
def test_run(self):
batch = EtradeIngestor()
csv_path = './path/to/data.csv'
mock_args = mock.Mock()
mock_args.csv_path = csv_path
mock_parsed_orders = mock.Mock()
with \
mock.patch.object(
batch.arg_parser, 'parse_args', return_value=mock_args
) as patch_parse_args, \
mock.patch.object(
batch, 'parse_orders_from_csv', return_value=mock_parsed_orders
) as patch_parse_orders, \
mock.patch.object(batch.order_logic, 'add_orders') as patch_add_orders:
batch.run()
assert patch_parse_args.called
assert patch_parse_orders.call_args_list == [mock.call(csv_path)]
assert patch_add_orders.call_args_list == [mock.call(mock_parsed_orders)]
def test_parse_orders_from_csv(self):
csv_path = '/path/to/csv'
mock_reader = mock.Mock()
mock_orders = mock.Mock()
batch = EtradeIngestor()
with \
mock.patch('builtins.open') as patch_open,\
mock.patch('csv.reader', return_value=mock_reader) as patch_reader,\
mock.patch.object(
batch, 'parse_orders_from_csv_reader', return_value=mock_orders
) as patch_parse_orders:
orders = batch.parse_orders_from_csv(csv_path)
assert patch_open.call_args_list == [mock.call(csv_path)]
assert patch_reader.called
assert patch_parse_orders.call_args_list == [mock.call(mock_reader)]
assert orders == mock_orders
def test_parse_orders_from_csv_reader(self):
batch = EtradeIngestor()
# the reader is reader in a loop (i.e., for row in reader:)
reader = [
'06/12/17,Bought,EQ,NFLX,19,-2924.87,153.6799,4.95,NETFLIX COM INC'.split(','),
'06/08/17,Bought,EQ,NFLX,39,-2924.46,151.9,4.95,NETFLIX COM INC'.split(','),
]
orders = batch.parse_orders_from_csv_reader(reader)
assert set(orders) == set([
order_history.Order(
batch.user_id,
order_history.BUY_ORDER_TYPE,
'NFLX',
datetime.datetime(2017, 6, 12).date(),
19,
153.6799
),
order_history.Order(
batch.user_id,
order_history.BUY_ORDER_TYPE,
'NFLX',
datetime.datetime(2017, 6, 8).date(),
39, 151.9
),
])
def test_extract_order_from_row_skips_malformed_row(self):
batch = EtradeIngestor()
row = 'MALFORMED_DATE,Bought,EQ,NFLX,39,-2924.46,151.9,4.95,NETFLIX COM INC'.split(',')
with pytest.raises(RowParserException):
batch.extract_order_from_row(row)
def test_extract_order_from_row_ignores_non_bought_txn_type(self):
batch = EtradeIngestor()
row = '06/12/17,UKNOWN_TXN_TYPE,EQ,NFLX,39,-2924.46,151.9,4.95,NETFLIX COM INC'.split(',')
order = batch.extract_order_from_row(row)
assert order is None
def test_parse_orders_from_csv_reader_skips_malformed_lines(self):
batch = EtradeIngestor()
# the reader is reader in a loop (i.e., for row in reader:)
reader = [
'06/12/17,Bought,EQ,NFLX,19,-2924.87,153.6799,4.95,NETFLIX COM INC'.split(','),
'06/08/17,Bought,EQ,NFLX,39,-2924.46,151.9,4.95,NETFLIX COM INC'.split(','),
'MALFORMED_DATE,Bought,EQ,NFLX,39,-2924.46,151.9,4.95,NETFLIX COM INC'.split(','),
]
orders = batch.parse_orders_from_csv_reader(reader)
assert set(orders) == set([
order_history.Order(
batch.user_id,
order_history.BUY_ORDER_TYPE,
'NFLX',
datetime.datetime(2017, 6, 12).date(),
19,
153.6799
),
order_history.Order(
batch.user_id,
order_history.BUY_ORDER_TYPE,
'NFLX',
datetime.datetime(2017, 6, 8).date(),
39, 151.9
),
])
|
[
"argparse.Namespace",
"mock.patch.object",
"batch.etrade_csv_ingestor.EtradeIngestor",
"mock.call",
"mock.patch",
"datetime.datetime",
"pytest.raises",
"mock.Mock"
] |
[((297, 313), 'batch.etrade_csv_ingestor.EtradeIngestor', 'EtradeIngestor', ([], {}), '()\n', (311, 313), False, 'from batch.etrade_csv_ingestor import EtradeIngestor\n'), ((583, 599), 'batch.etrade_csv_ingestor.EtradeIngestor', 'EtradeIngestor', ([], {}), '()\n', (597, 599), False, 'from batch.etrade_csv_ingestor import EtradeIngestor\n'), ((660, 671), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (669, 671), False, 'import mock\n'), ((739, 750), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (748, 750), False, 'import mock\n'), ((1498, 1509), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1507, 1509), False, 'import mock\n'), ((1532, 1543), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1541, 1543), False, 'import mock\n'), ((1560, 1576), 'batch.etrade_csv_ingestor.EtradeIngestor', 'EtradeIngestor', ([], {}), '()\n', (1574, 1576), False, 'from batch.etrade_csv_ingestor import EtradeIngestor\n'), ((2254, 2270), 'batch.etrade_csv_ingestor.EtradeIngestor', 'EtradeIngestor', ([], {}), '()\n', (2268, 2270), False, 'from batch.etrade_csv_ingestor import EtradeIngestor\n'), ((3215, 3231), 'batch.etrade_csv_ingestor.EtradeIngestor', 'EtradeIngestor', ([], {}), '()\n', (3229, 3231), False, 'from batch.etrade_csv_ingestor import EtradeIngestor\n'), ((3511, 3527), 'batch.etrade_csv_ingestor.EtradeIngestor', 'EtradeIngestor', ([], {}), '()\n', (3525, 3527), False, 'from batch.etrade_csv_ingestor import EtradeIngestor\n'), ((3795, 3811), 'batch.etrade_csv_ingestor.EtradeIngestor', 'EtradeIngestor', ([], {}), '()\n', (3809, 3811), False, 'from batch.etrade_csv_ingestor import EtradeIngestor\n'), ((418, 467), 'argparse.Namespace', 'argparse.Namespace', ([], {'csv_path': '"""./path/to/data.csv"""'}), "(csv_path='./path/to/data.csv')\n", (436, 467), False, 'import argparse\n'), ((782, 855), 'mock.patch.object', 'mock.patch.object', (['batch.arg_parser', '"""parse_args"""'], {'return_value': 'mock_args'}), "(batch.arg_parser, 'parse_args', return_value=mock_args)\n", (799, 855), False, 'import mock\n'), ((933, 1020), 'mock.patch.object', 'mock.patch.object', (['batch', '"""parse_orders_from_csv"""'], {'return_value': 'mock_parsed_orders'}), "(batch, 'parse_orders_from_csv', return_value=\n mock_parsed_orders)\n", (950, 1020), False, 'import mock\n'), ((1095, 1145), 'mock.patch.object', 'mock.patch.object', (['batch.order_logic', '"""add_orders"""'], {}), "(batch.order_logic, 'add_orders')\n", (1112, 1145), False, 'import mock\n'), ((1608, 1635), 'mock.patch', 'mock.patch', (['"""builtins.open"""'], {}), "('builtins.open')\n", (1618, 1635), False, 'import mock\n'), ((1668, 1718), 'mock.patch', 'mock.patch', (['"""csv.reader"""'], {'return_value': 'mock_reader'}), "('csv.reader', return_value=mock_reader)\n", (1678, 1718), False, 'import mock\n'), ((1753, 1840), 'mock.patch.object', 'mock.patch.object', (['batch', '"""parse_orders_from_csv_reader"""'], {'return_value': 'mock_orders'}), "(batch, 'parse_orders_from_csv_reader', return_value=\n mock_orders)\n", (1770, 1840), False, 'import mock\n'), ((3342, 3375), 'pytest.raises', 'pytest.raises', (['RowParserException'], {}), '(RowParserException)\n', (3355, 3375), False, 'import pytest\n'), ((1292, 1311), 'mock.call', 'mock.call', (['csv_path'], {}), '(csv_path)\n', (1301, 1311), False, 'import mock\n'), ((1368, 1397), 'mock.call', 'mock.call', (['mock_parsed_orders'], {}), '(mock_parsed_orders)\n', (1377, 1397), False, 'import mock\n'), ((2006, 2025), 'mock.call', 'mock.call', (['csv_path'], {}), '(csv_path)\n', (2015, 2025), False, 'import mock\n'), ((2123, 2145), 'mock.call', 'mock.call', (['mock_reader'], {}), '(mock_reader)\n', (2132, 2145), False, 'import mock\n'), ((2796, 2826), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(6)', '(12)'], {}), '(2017, 6, 12)\n', (2813, 2826), False, 'import datetime\n'), ((3045, 3074), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(6)', '(8)'], {}), '(2017, 6, 8)\n', (3062, 3074), False, 'import datetime\n'), ((4432, 4462), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(6)', '(12)'], {}), '(2017, 6, 12)\n', (4449, 4462), False, 'import datetime\n'), ((4681, 4710), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(6)', '(8)'], {}), '(2017, 6, 8)\n', (4698, 4710), False, 'import datetime\n')]
|
#!/usr/bin/env python3
"""Sends out a message to a selected group of Google Hangouts contacts."""
import time
import pyautogui
def auto_message(name, message):
"""Searches for friend on Google Hangouts and messages them."""
print("Make sure the Google Hangout 'Conversations' page is visible and "
"your cursor is not currently on the page.")
time.sleep(3)
search_bar = pyautogui.locateOnScreen('search.png')
pyautogui.click(search_bar)
pyautogui.typewrite(name)
time.sleep(1)
online_select = pyautogui.locateOnScreen('online-friend.png')
if online_select is None:
print('Friend not found or currently offline.')
return
else:
pyautogui.doubleClick(online_select)
attempts = 3
while attempts > 0:
message_box = pyautogui.locateOnScreen('message.png')
pyautogui.click(message_box)
pyautogui.typewrite(message)
# If it can no longer be found it is because the message was entered.
if pyautogui.locateOnScreen('message.png') is None:
pyautogui.press('enter')
pyautogui.press('esc')
print('Message sent to {}'.format(name))
break
else:
if attempts == 1:
print('Unable to send message to {}.'.format(name))
pyautogui.press('esc')
else:
print('Sending message to {} failed. Another {} attempts will '
'be made before moving on.'.format(name, attempts))
attempts -= 1
print('Enter the contacts you wish to send a message to (e.g. Bob, Bill):')
send_to = input().split(',')
print('Enter the message you wish to send out to them:')
to_send = input()
for contact in send_to:
user = contact.strip()
auto_message(user, to_send)
|
[
"pyautogui.typewrite",
"pyautogui.press",
"time.sleep",
"pyautogui.locateOnScreen",
"pyautogui.click",
"pyautogui.doubleClick"
] |
[((369, 382), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (379, 382), False, 'import time\n'), ((401, 439), 'pyautogui.locateOnScreen', 'pyautogui.locateOnScreen', (['"""search.png"""'], {}), "('search.png')\n", (425, 439), False, 'import pyautogui\n'), ((444, 471), 'pyautogui.click', 'pyautogui.click', (['search_bar'], {}), '(search_bar)\n', (459, 471), False, 'import pyautogui\n'), ((476, 501), 'pyautogui.typewrite', 'pyautogui.typewrite', (['name'], {}), '(name)\n', (495, 501), False, 'import pyautogui\n'), ((506, 519), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (516, 519), False, 'import time\n'), ((541, 586), 'pyautogui.locateOnScreen', 'pyautogui.locateOnScreen', (['"""online-friend.png"""'], {}), "('online-friend.png')\n", (565, 586), False, 'import pyautogui\n'), ((706, 742), 'pyautogui.doubleClick', 'pyautogui.doubleClick', (['online_select'], {}), '(online_select)\n', (727, 742), False, 'import pyautogui\n'), ((807, 846), 'pyautogui.locateOnScreen', 'pyautogui.locateOnScreen', (['"""message.png"""'], {}), "('message.png')\n", (831, 846), False, 'import pyautogui\n'), ((855, 883), 'pyautogui.click', 'pyautogui.click', (['message_box'], {}), '(message_box)\n', (870, 883), False, 'import pyautogui\n'), ((892, 920), 'pyautogui.typewrite', 'pyautogui.typewrite', (['message'], {}), '(message)\n', (911, 920), False, 'import pyautogui\n'), ((1011, 1050), 'pyautogui.locateOnScreen', 'pyautogui.locateOnScreen', (['"""message.png"""'], {}), "('message.png')\n", (1035, 1050), False, 'import pyautogui\n'), ((1072, 1096), 'pyautogui.press', 'pyautogui.press', (['"""enter"""'], {}), "('enter')\n", (1087, 1096), False, 'import pyautogui\n'), ((1109, 1131), 'pyautogui.press', 'pyautogui.press', (['"""esc"""'], {}), "('esc')\n", (1124, 1131), False, 'import pyautogui\n'), ((1331, 1353), 'pyautogui.press', 'pyautogui.press', (['"""esc"""'], {}), "('esc')\n", (1346, 1353), False, 'import pyautogui\n')]
|
from dependency.status import Status
from subprocess import run, Popen, PIPE
class Installer:
"""Installer class which chooses from the package manager,
then installs package.
"""
def __init__(self):
self._stat = Status()
def _apt(self, pkg):
"""Installs the required package with apt package manager
:param pkg: str package name
"""
self._stat.status(pkg, 'install')
run(['sudo', 'apt', 'install', pkg])
def _snap(self, pkg, oth=None):
"""Installs the required package with snap manager.
:param pkg: str package name
:param oth: str OPTIONAL classic
"""
if oth == 'classic':
self._stat.status(pkg, 'install')
run(['sudo', 'snap', 'install', pkg, '--' + oth])
else:
self._stat.status(pkg, 'install')
run(['sudo', 'snap', 'install', pkg])
def _deb(self, oth):
"""Checks if gDebi is installed, and installs
the required package with gDebi manager
:param oth: str .deb package link
"""
installer = Popen('dpkg -l gdebi', shell=True, stdout=PIPE)
installer.wait()
if installer.returncode == 1:
self._stat.status('gdebi', 'install')
self.install('gdebi')
pkg_name = oth.rsplit('/', 1)[-1]
self._stat.status(pkg_name, 'install')
run(['wget', oth])
run(['sudo', 'gdebi', pkg_name, '-n'])
run(['rm', '-rf', pkg_name])
def _repo(self, pkg, oth):
"""Adds the required repository and installs the package.
:param pkg: str package name
:param oth: str repository
"""
name = oth.rsplit(':', 1)[-1]
self._stat.status(name, 'add')
run(['sudo', 'add-apt-repository', '-y', oth])
self.update()
self.install(['apt', pkg])
def update(self):
"""Method that updates the system.
"""
self._stat.status('system', 'update')
run(['sudo', 'apt', 'update', '-y'])
def install(self, packages):
"""Calls the required package manager method.
:param packages: list package properties
"""
mgr = packages[0].lower()
pkg = packages[1].lower() if len(packages) >= 2 else None
oth = packages[2] if len(packages) == 3 else None
if mgr == 'apt':
self._apt(pkg)
elif mgr == 'snap':
self._snap(pkg, oth)
elif mgr == 'deb':
self._deb(oth)
elif mgr == 'repo':
self._repo(pkg, oth)
|
[
"dependency.status.Status",
"subprocess.run",
"subprocess.Popen"
] |
[((239, 247), 'dependency.status.Status', 'Status', ([], {}), '()\n', (245, 247), False, 'from dependency.status import Status\n'), ((440, 476), 'subprocess.run', 'run', (["['sudo', 'apt', 'install', pkg]"], {}), "(['sudo', 'apt', 'install', pkg])\n", (443, 476), False, 'from subprocess import run, Popen, PIPE\n'), ((1115, 1162), 'subprocess.Popen', 'Popen', (['"""dpkg -l gdebi"""'], {'shell': '(True)', 'stdout': 'PIPE'}), "('dpkg -l gdebi', shell=True, stdout=PIPE)\n", (1120, 1162), False, 'from subprocess import run, Popen, PIPE\n'), ((1408, 1426), 'subprocess.run', 'run', (["['wget', oth]"], {}), "(['wget', oth])\n", (1411, 1426), False, 'from subprocess import run, Popen, PIPE\n'), ((1435, 1473), 'subprocess.run', 'run', (["['sudo', 'gdebi', pkg_name, '-n']"], {}), "(['sudo', 'gdebi', pkg_name, '-n'])\n", (1438, 1473), False, 'from subprocess import run, Popen, PIPE\n'), ((1482, 1510), 'subprocess.run', 'run', (["['rm', '-rf', pkg_name]"], {}), "(['rm', '-rf', pkg_name])\n", (1485, 1510), False, 'from subprocess import run, Popen, PIPE\n'), ((1779, 1825), 'subprocess.run', 'run', (["['sudo', 'add-apt-repository', '-y', oth]"], {}), "(['sudo', 'add-apt-repository', '-y', oth])\n", (1782, 1825), False, 'from subprocess import run, Popen, PIPE\n'), ((2015, 2051), 'subprocess.run', 'run', (["['sudo', 'apt', 'update', '-y']"], {}), "(['sudo', 'apt', 'update', '-y'])\n", (2018, 2051), False, 'from subprocess import run, Popen, PIPE\n'), ((752, 801), 'subprocess.run', 'run', (["['sudo', 'snap', 'install', pkg, '--' + oth]"], {}), "(['sudo', 'snap', 'install', pkg, '--' + oth])\n", (755, 801), False, 'from subprocess import run, Popen, PIPE\n'), ((874, 911), 'subprocess.run', 'run', (["['sudo', 'snap', 'install', pkg]"], {}), "(['sudo', 'snap', 'install', pkg])\n", (877, 911), False, 'from subprocess import run, Popen, PIPE\n')]
|
import math
import time
import numpy
import scipy.stats as stats
import utils as u
# Easy part : calculate number of gifts from house number
# part 1 -'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,_
def get_number_of_gifts(house_number):
result = 0
return 10 * sigma(house_number)
# store a little bunch of sigmas: for every prime number under 100
# sigma(x) = x + 1 (a prime number only divides itself and 1)
sigmas = {prime: prime + 1 for prime in u.PRIME_NUMBERS}
def sigma(number):
"""sum of divisors of number"""
# smartness #1: if we know it already, return it
if number in sigmas:
# print(f"smartness # we know already sigma({number})")
return sigmas[number]
# preparation for smartness #3: prime numbers are nice for sigma
is_probably_prime = True
# smartness #2: sigma is a multiplicative function:
# if math.gcd(x,y) == 1 then sigma(x * y) = sigma(x) * sigma(y)
# so we try to use memoization to avoid a huge loop when possible
for x in range(2, number // 2):
# if x does not divide number, continue:
if number % x != 0:
continue
# if we are here, we found a divider to number, it's not a prime
is_probably_prime = False
y = number // x
# if we cannot use the multiplicativeness of sigma, continue:
if math.gcd(x, y) > 1:
continue
# if we are here, great!
# print(f"hey! sigma({number}) = sigma({x})sigma({y})")
sigmas[number] = sigma(x) * sigma(y)
return sigmas[number]
# smartness 3: if we did not find any divider to number,
# calculate sigma as if it was a prime number. It is very difficult,
# as you can see:
if is_probably_prime:
sigmas[number] = number + 1
return sigmas[number]
# no smartness worked, just sum all the dividers already
sigmas[number] = sum(
divisor for divisor in range(1, number + 1) if number % divisor == 0
)
return sigmas[number]
u.assert_equals(get_number_of_gifts(1), 10)
u.assert_equals(get_number_of_gifts(2), 30)
u.assert_equals(get_number_of_gifts(3), 40)
u.assert_equals(get_number_of_gifts(4), 70)
u.assert_equals(get_number_of_gifts(5), 60)
u.assert_equals(get_number_of_gifts(6), 120)
u.assert_equals(get_number_of_gifts(7), 80)
u.assert_equals(get_number_of_gifts(8), 150)
u.assert_equals(get_number_of_gifts(9), 130)
TARGET = 36000000
top_gifts = 0
house_number = 0
init_time = time.time()
for i in range(2, 1000000):
if i % 1000 == 0:
print(f"------ {i} - {time.time() - init_time} -------")
nb_of_gifts = get_number_of_gifts(i)
if nb_of_gifts > top_gifts:
top_gifts = nb_of_gifts
house_number = i
print(f"{nb_of_gifts} gifts in house {i}")
if nb_of_gifts > TARGET:
u.answer_part_1(i)
break
# 36902400 gifts in house 831600
# [PART 1] 831600
# the code need to run for 883 seconds for that ><
|
[
"math.gcd",
"utils.answer_part_1",
"time.time"
] |
[((2502, 2513), 'time.time', 'time.time', ([], {}), '()\n', (2511, 2513), False, 'import time\n'), ((1380, 1394), 'math.gcd', 'math.gcd', (['x', 'y'], {}), '(x, y)\n', (1388, 1394), False, 'import math\n'), ((2856, 2874), 'utils.answer_part_1', 'u.answer_part_1', (['i'], {}), '(i)\n', (2871, 2874), True, 'import utils as u\n'), ((2595, 2606), 'time.time', 'time.time', ([], {}), '()\n', (2604, 2606), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
import cv2
import numpy as np
import sys
import os
from autoaim import helpers
class Camera():
def __init__(self, source):
self.source = source
self.capture = cv2.VideoCapture(source)
if type(source) is int:
self.__camera = True
def snapshot(self, start, stop, interval, save_to, width=1024, height=768):
'''
start: "hour:minute:second"
stop : "hour:minute:second"
interval: 1000(ms)
save_to: url
'''
capture = self.capture
if self.__camera:
capture.set(cv2.CAP_PROP_FPS, 30)
capture.set(3, width)
capture.set(4, height)
start = self.__parse_time(start)
stop = self.__parse_time(stop)
for i in range(int((stop-start)*1000/interval)):
success, img = capture.read()
if success:
helpers.showoff(img, timeout=interval, update=True)
cv2.imwrite(save_to+str(i)+'.jpeg', img)
else:
fps = round(capture.get(cv2.CAP_PROP_FPS))
start = self.__parse_time(start) * fps
stop = self.__parse_time(stop) * fps
step = int(interval / 1000 * fps)
for i in range(start, stop, step):
capture.set(cv2.CAP_PROP_POS_FRAMES, i)
success, img = capture.read()
if success:
helpers.showoff(img, timeout=interval, update=True)
cv2.imwrite(save_to+str(i)+'.jpeg', img)
def release(self):
self.capture.release()
def __parse_time(self, str):
t = np.array([int(x) for x in str.split(':')])
w = np.array([3600, 60, 1])
return t.dot(w).item(0)
if __name__ == '__main__':
cam = Camera(0)
cam.snapshot('00:00:00', '00:01:00', 200, 'data/capture/')
|
[
"cv2.VideoCapture",
"autoaim.helpers.showoff",
"numpy.array"
] |
[((205, 229), 'cv2.VideoCapture', 'cv2.VideoCapture', (['source'], {}), '(source)\n', (221, 229), False, 'import cv2\n'), ((1730, 1753), 'numpy.array', 'np.array', (['[3600, 60, 1]'], {}), '([3600, 60, 1])\n', (1738, 1753), True, 'import numpy as np\n'), ((936, 987), 'autoaim.helpers.showoff', 'helpers.showoff', (['img'], {'timeout': 'interval', 'update': '(True)'}), '(img, timeout=interval, update=True)\n', (951, 987), False, 'from autoaim import helpers\n'), ((1461, 1512), 'autoaim.helpers.showoff', 'helpers.showoff', (['img'], {'timeout': 'interval', 'update': '(True)'}), '(img, timeout=interval, update=True)\n', (1476, 1512), False, 'from autoaim import helpers\n')]
|
import psycopg2
from psycopg2 import sql, extras
# If there is no venv, run schrodinger_virtualenv.py schrodinger.ve to install pycopg2
# In win powershell as admin:
# >Set-Location -Path "C:\Program Files\Schrodinger2020-3"
# >Set-ExecutionPolicy RemoteSigned
# >schrodinger.ve\Scripts\activate
# or source schrodinger.ve/bin/activate on unix
# Set-Location -Path "C:\Program Files\Schrodinger2020-3\myscripts_sequential"
# This file creates and populates DB, pipeline.py is worker test
# CREATE DATABASE sh_db0
def single_connection_query(check_sql, fetch=True, queue_concurrent=False, dbname='sh_db0_dev', values=[], user='shworker', password='<PASSWORD>'):
conn = psycopg2.connect(dbname=dbname, user=user,
password=password, host='localhost')
cursor = conn.cursor()
if queue_concurrent:
conn.set_isolation_level(extensions.ISOLATION_LEVEL_SERIALIZABLE)
if len(values) > 0:
extras.execute_values (
cursor, insert_query, values, template=None, page_size=100
)
conn.commit()
else:
cursor.execute(check_sql)
conn.commit()
if isinstance(fetch, bool):
if fetch is True:
res = cursor.fetchall()
return res
else:
if fetch == "rowcount":
res = cursor.rowcount
return res
'''
query = "select * from files.files;"
res = single_connection_query(query, True, dbname="sh_db0_dev", user='shworker', password='<PASSWORD>')
print("Fetched!", len(res))
res = [list(r) for r in res]
print("converted")
# Test fill DB
ids = [rec[0] for rec in res]
import random
random.shuffle(ids)
to_insert = []
for idx, item in enumerate(res):
item1 = item
item1[0] = ids[idx]
to_insert.append(item1)
'''
inserts = []
ids = 1602
with open("/home/ubuntu/pres_sub/preserved_substructure_6","r") as f:
for line in f:
line = line.strip()
if len(line) > 3:
inserts.append((line, None,None,None,None,0,1))
ids += 1
to_insert = inserts
insert_query = 'insert into files.files (smiles,num_conformers,docking_score,start_ts,stop_ts,status,priority) values %s'
single_connection_query(insert_query, fetch=False, dbname='sh_db0_dev', values = to_insert)
print(ids)
|
[
"psycopg2.extras.execute_values",
"psycopg2.connect"
] |
[((679, 758), 'psycopg2.connect', 'psycopg2.connect', ([], {'dbname': 'dbname', 'user': 'user', 'password': 'password', 'host': '"""localhost"""'}), "(dbname=dbname, user=user, password=password, host='localhost')\n", (695, 758), False, 'import psycopg2\n'), ((953, 1038), 'psycopg2.extras.execute_values', 'extras.execute_values', (['cursor', 'insert_query', 'values'], {'template': 'None', 'page_size': '(100)'}), '(cursor, insert_query, values, template=None,\n page_size=100)\n', (974, 1038), False, 'from psycopg2 import sql, extras\n')]
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
""" model.py: A custom model for CityPersons. """
import numpy as np
import torch
import torch.utils.data
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from engine import train_one_epoch, evaluate
import utils
import transforms as T
import data
def get_model():
''' Returns the model a pretrained model for finetunning on CityPersons. '''
# load a model pre-trained pre-trained on COCO
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
# replace the classifier with a new one, that has
# num_classes which is user-defined
num_classes = 2 # 1 class (person) + background
# get number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the pre-trained head with a new one
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
return model
def get_transform(train):
''' Converts a PIL image into PyTorch tensor. '''
transforms = []
transforms.append(T.ToTensor())
if train:
# during training, randomly flip the training images
# and ground-truth for data augmentation
transforms.append(T.RandomHorizontalFlip(0.5))
return T.Compose(transforms)
def save_model(model, path="./models/entire_model.pt"):
torch.save(model, path)
print('Model saved to ' + path)
def load_model(path="./models/entire_model.pt"):
if torch.cuda.is_available():
return torch.load(path)
else:
return torch.load(path, map_location=torch.device('cpu'))
def convert(img, img_raw):
'''
Converts the image from dataset back to the raw format:
* rescales it from [0,1] back to [0,255] range;
* flips the channels back to [height,width,3] format;
* converts from tensor to numpy array;
* converts from numpy array to PIL Image;
* checks if the image was augmented - flipped horizontally
'''
img = Image.fromarray(img.mul(255).permute(1, 2, 0).byte().numpy())
img = np.array(img)
print('img shape: %d x %d x %d' % img.shape)
img = Image.fromarray(np.uint8(img)).convert('RGB')
img_flipped = np.array(img.transpose(Image.FLIP_LEFT_RIGHT))
img_raw = np.array(img_raw)
img_was_flipped = np.sum(img_flipped.flatten() == img_raw.flatten()) == img_flipped.shape[0] * img_flipped.shape[1] * img_flipped.shape[2]
print('Image was flipped: %r' % img_was_flipped)
return img
## testing on images from Hambrug
if __name__ == "__main__":
img_path = './datasets/citypersons/hamburg/'
anno_path = './datasets/citypersons/CityPersons/annotations/'
# split dataset into train and test
dataset = data.HamburgDataset(img_path, anno_dict, get_transform(train=True))
dataset_test = data.HamburgDataset(img_path, anno_dict, get_transform(train=False))
# permute the indices
torch.manual_seed(1)
indices = torch.randperm(len(dataset)).tolist()
# train: 248 - 50 examples
# test: 50 examples
dataset = torch.utils.data.Subset(dataset, indices[:-50])
dataset_test = torch.utils.data.Subset(dataset_test, indices[-50:])
if train:
# define training and validation data loaders
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=2, shuffle=True, num_workers=4,
collate_fn=utils.collate_fn)
data_loader_test = torch.utils.data.DataLoader(
dataset_test, batch_size=1, shuffle=False, num_workers=4,
collate_fn=utils.collate_fn)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
print(device)
model = get_model()
model.to(device)
# construct an optimizer
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=0.005,
momentum=0.9, weight_decay=0.0005)
# and a learning rate scheduler which decreases the learning rate by
# 10x every 3 epochs
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
step_size=3,
gamma=0.1)
# Let's train the model for 10 epochs, evaluating at the end of every epoch.
for epoch in range(num_epochs):
# train for one epoch, printing every 10 iterations
train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq=10)
# update the learning rate
lr_scheduler.step()
# evaluate on the test dataset
evaluate(model, data_loader_test, device=device)
save(model)
else:
model = load_model()
## error analysis
# raw image
img_raw = Image.open(img_path + imgs[0])
anno_raw = anno_dict[imgs[0]]
# same image from the dataset
idx = indices.index(0)
img, anno = dataset[idx]
img = convert_back(img, img_raw)
# put the model in evaluation mode
model.eval()
with torch.no_grad():
prediction = model([img.to(device)])[0]
preds = prediction['boxes'] # predicted bboxes
preds = preds.cpu().data.numpy() # to numpy array
scores = prediction['scores'] # scores of predicted bboxes
scores = scores.cpu().data.numpy()
# keep only bboxes where score > threshold:
threshold = .3
highs = list(np.where(scores > threshold)[0])
# transform the bboxes from tensor to list and back to [x, y, w, h] format
bboxes_x0x1y0y1 = []
for high in highs:
bboxes_x0x1y0y1.append(list(preds[high]))
bboxes = []
for bbox in bboxes_x0x1y0y1:
bbox = list(bbox)
x0, y0 = bbox[0], bbox[1]
x1, y1 = bbox[2], bbox[3]
bboxes.append([x0, y0, x1 - x0, y1 - y0])
# draw the predicted bounding boxes
# TODO: add ground truth bboxes in green
plt.rcParams['figure.figsize'] = [12, 8]
fig, ax = plt.subplots()
ax.imshow(img);
for bbox in bboxes:
rect = patches.Rectangle(
(bbox[0], bbox[1]), bbox[2], bbox[3],
linewidth=1, edgecolor='r', facecolor='none')
ax.add_patch(rect)
plt.title(img_name)
plt.show()
|
[
"torchvision.models.detection.faster_rcnn.FastRCNNPredictor",
"torch.optim.lr_scheduler.StepLR",
"torch.device",
"torch.no_grad",
"torch.utils.data.DataLoader",
"torch.load",
"torchvision.models.detection.fasterrcnn_resnet50_fpn",
"engine.train_one_epoch",
"numpy.uint8",
"torch.manual_seed",
"transforms.RandomHorizontalFlip",
"torch.cuda.is_available",
"torch.utils.data.Subset",
"torch.save",
"engine.evaluate",
"transforms.Compose",
"numpy.where",
"numpy.array",
"transforms.ToTensor",
"torch.optim.SGD"
] |
[((496, 565), 'torchvision.models.detection.fasterrcnn_resnet50_fpn', 'torchvision.models.detection.fasterrcnn_resnet50_fpn', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (548, 565), False, 'import torchvision\n'), ((926, 969), 'torchvision.models.detection.faster_rcnn.FastRCNNPredictor', 'FastRCNNPredictor', (['in_features', 'num_classes'], {}), '(in_features, num_classes)\n', (943, 969), False, 'from torchvision.models.detection.faster_rcnn import FastRCNNPredictor\n'), ((1317, 1338), 'transforms.Compose', 'T.Compose', (['transforms'], {}), '(transforms)\n', (1326, 1338), True, 'import transforms as T\n'), ((1400, 1423), 'torch.save', 'torch.save', (['model', 'path'], {}), '(model, path)\n', (1410, 1423), False, 'import torch\n'), ((1517, 1542), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1540, 1542), False, 'import torch\n'), ((2107, 2120), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (2115, 2120), True, 'import numpy as np\n'), ((2306, 2323), 'numpy.array', 'np.array', (['img_raw'], {}), '(img_raw)\n', (2314, 2323), True, 'import numpy as np\n'), ((2955, 2975), 'torch.manual_seed', 'torch.manual_seed', (['(1)'], {}), '(1)\n', (2972, 2975), False, 'import torch\n'), ((3098, 3145), 'torch.utils.data.Subset', 'torch.utils.data.Subset', (['dataset', 'indices[:-50]'], {}), '(dataset, indices[:-50])\n', (3121, 3145), False, 'import torch\n'), ((3165, 3217), 'torch.utils.data.Subset', 'torch.utils.data.Subset', (['dataset_test', 'indices[-50:]'], {}), '(dataset_test, indices[-50:])\n', (3188, 3217), False, 'import torch\n'), ((1112, 1124), 'transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (1122, 1124), True, 'import transforms as T\n'), ((1559, 1575), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (1569, 1575), False, 'import torch\n'), ((3309, 3421), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': '(2)', 'shuffle': '(True)', 'num_workers': '(4)', 'collate_fn': 'utils.collate_fn'}), '(dataset, batch_size=2, shuffle=True,\n num_workers=4, collate_fn=utils.collate_fn)\n', (3336, 3421), False, 'import torch\n'), ((3471, 3589), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset_test'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': '(4)', 'collate_fn': 'utils.collate_fn'}), '(dataset_test, batch_size=1, shuffle=False,\n num_workers=4, collate_fn=utils.collate_fn)\n', (3498, 3589), False, 'import torch\n'), ((3901, 3969), 'torch.optim.SGD', 'torch.optim.SGD', (['params'], {'lr': '(0.005)', 'momentum': '(0.9)', 'weight_decay': '(0.0005)'}), '(params, lr=0.005, momentum=0.9, weight_decay=0.0005)\n', (3916, 3969), False, 'import torch\n'), ((4135, 4201), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': '(3)', 'gamma': '(0.1)'}), '(optimizer, step_size=3, gamma=0.1)\n', (4166, 4201), False, 'import torch\n'), ((5138, 5153), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5151, 5153), False, 'import torch\n'), ((1276, 1303), 'transforms.RandomHorizontalFlip', 'T.RandomHorizontalFlip', (['(0.5)'], {}), '(0.5)\n', (1298, 1303), True, 'import transforms as T\n'), ((3653, 3678), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3676, 3678), False, 'import torch\n'), ((3629, 3649), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (3641, 3649), False, 'import torch\n'), ((3684, 3703), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3696, 3703), False, 'import torch\n'), ((4514, 4590), 'engine.train_one_epoch', 'train_one_epoch', (['model', 'optimizer', 'data_loader', 'device', 'epoch'], {'print_freq': '(10)'}), '(model, optimizer, data_loader, device, epoch, print_freq=10)\n', (4529, 4590), False, 'from engine import train_one_epoch, evaluate\n'), ((4717, 4765), 'engine.evaluate', 'evaluate', (['model', 'data_loader_test'], {'device': 'device'}), '(model, data_loader_test, device=device)\n', (4725, 4765), False, 'from engine import train_one_epoch, evaluate\n'), ((5497, 5525), 'numpy.where', 'np.where', (['(scores > threshold)'], {}), '(scores > threshold)\n', (5505, 5525), True, 'import numpy as np\n'), ((1631, 1650), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1643, 1650), False, 'import torch\n'), ((2196, 2209), 'numpy.uint8', 'np.uint8', (['img'], {}), '(img)\n', (2204, 2209), True, 'import numpy as np\n')]
|
import requests
def reportTemperature(cookie, token):
url = 'http://yiban.gxnu.edu.cn/v4/affairs/health-report/create'
headers = {
'Host': 'yiban.gxnu.edu.cn',
'Content-Type': 'application/json;charset=utf-8',
'X-Requested-With': 'XMLHttpRequest',
'X-Access-Token': token,
'Origin': 'http://yiban.gxnu.edu.cn',
'Cookie': cookie
}
body = {'data': {
'temperature': '36.8',
'remark': ''
}
}
response = requests.post(url=url, headers=headers,
json=body)
return response
|
[
"requests.post"
] |
[((511, 561), 'requests.post', 'requests.post', ([], {'url': 'url', 'headers': 'headers', 'json': 'body'}), '(url=url, headers=headers, json=body)\n', (524, 561), False, 'import requests\n')]
|
# Code adapted from https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py
from __future__ import division
from pytorch3d.ops.knn import knn_points
import torch
import torch.nn.functional as F
import cv2
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import helper_functions
FLOAT_EPS = np.finfo(np.float).eps
pixel_coords = None
import kornia
from scipy.spatial.transform import Rotation as R
def preprocess_depth_output_2_point_cloud_all(depth_maps, masks, intrinsics):
'''
Pre process data for pose network
Function mean subtracts the point cloud to bring it to origin and downsamples it to 2048 points
'''
batch_size, num_views, height, width = depth_maps.size()
depth_maps = helper_functions.sigmoid_2_depth(depth_maps)
point_cloud_list_all_views = []
rotated_point_cloud_list_all_views = []
for view in range(num_views):
src_camera_coords = pixel2cam(depth_maps[:, view].unsqueeze(0), intrinsics.inverse())
src_camera_coords = src_camera_coords.reshape(batch_size, 3, height*width) # [B 3 H*W]
if torch.cuda.is_available():
random_rotation = torch.from_numpy(R.random(batch_size, random_state=1024).as_matrix()).cuda().float() # [B 3 3]
else:
random_rotation = torch.from_numpy(R.random(batch_size, random_state=1024).as_matrix()).float() # [B 3 3]
point_cloud_list = []
rotated_point_cloud_list = []
masks_batch = masks[:, view]
for i in range(batch_size):
src_camera_coords_view = src_camera_coords[i] # [3 H*W]
mask = masks_batch[i] # [H W]
mask = mask.reshape(1, -1).squeeze() # [H*W]
# Extracting the points only within mask region
src_camera_coords_view = src_camera_coords_view[:, (mask == 1.0)]
# Mean center value
src_camera_coords_view = src_camera_coords_view - src_camera_coords_view.mean(axis = 1).unsqueeze(1).repeat(1, src_camera_coords_view.size(1)) #[3 masksize]
# Downsample to 2048 points
src_camera_coords_view = torch.nn.functional.interpolate(src_camera_coords_view.unsqueeze(0), size = 2048).squeeze(0)
point_cloud_list.append(src_camera_coords_view)
src_camera_coords_downsampled = torch.stack(point_cloud_list) # [B 3 2048]
rot_src_camera_coords = random_rotation @ src_camera_coords_downsampled # [B 3 2048]
point_cloud_list_all_views.append(src_camera_coords_downsampled)
rotated_point_cloud_list_all_views.append(rot_src_camera_coords)
camera_point_clouds_downsampled = torch.stack(point_cloud_list_all_views, dim = 1) # [B views 2048]
rotated_camera_point_clouds_downsampled = torch.stack(rotated_point_cloud_list_all_views, dim = 1) # [B views 2048]
return camera_point_clouds_downsampled, rotated_camera_point_clouds_downsampled
def preprocess_depth_output_2_point_cloud(depth_maps, masks_batch, intrinsics):
'''
Pre process data for pose network
Function mean subtracts the point cloud to bring it to origin and downsamples it to 2048 points
'''
batch_size, _, height, width = depth_maps.size()
depth_maps = helper_functions.sigmoid_2_depth(depth_maps)
src_camera_coords = pixel2cam(depth_maps[:, 0].unsqueeze(0), intrinsics.inverse())
src_camera_coords = src_camera_coords.reshape(batch_size, 3, height*width) # [B 3 H*W]
if torch.cuda.is_available():
random_rotation = torch.from_numpy(R.random(batch_size, random_state=1024).as_matrix()).cuda().float() # [B 3 3]
else:
random_rotation = torch.from_numpy(R.random(batch_size, random_state=1024).as_matrix()).float() # [B 3 3]
point_cloud_list = []
rotated_point_cloud_list = []
for i in range(batch_size):
src_camera_coords_view = src_camera_coords[i] # [3 H*W]
mask = masks_batch[i] # [H W]
mask = mask.reshape(1, -1).squeeze() # [H*W]
# Extracting the points only within mask region
src_camera_coords_view = src_camera_coords_view[:, (mask == 1.0)]
# mean center value
src_camera_coords_view = src_camera_coords_view - src_camera_coords_view.mean(axis = 1).unsqueeze(1).repeat(1, src_camera_coords_view.size(1)) #[3 masksize]
# Downsample to 2048 points
src_camera_coords_view = torch.nn.functional.interpolate(src_camera_coords_view.unsqueeze(0), size = 2048).squeeze(0)
point_cloud_list.append(src_camera_coords_view)
src_camera_coords_downsampled = torch.stack(point_cloud_list) # [B 3 2048]
rot_src_camera_coords = random_rotation @ src_camera_coords_downsampled # [B 3 2048]
return src_camera_coords_downsampled, rot_src_camera_coords
def depth_decode(depth_image):
# # first 16 bits (first 2 channels) are 16-bit depth
# R is the 8 LSB and G are the others
depth_image_16 = depth_image[:,:,[1, 0]]
# B are 8-bit version
depth_image_8 = depth_image[:,:,2]
# last 8 are empty
depth_single_channel = np.zeros((depth_image_16.shape[0], depth_image_16.shape[1]))
# convert 16 bit to actual depth values
for i in range(depth_single_channel.shape[0]):
for j in range(depth_single_channel.shape[1]):
bit_str = '{0:08b}'.format(depth_image_16[i, j, 0]) + '{0:08b}'.format(depth_image_16[i, j, 1])
depth_single_channel[i, j] = int(bit_str, 2)
return depth_single_channel
def set_id_grid(depth):
global pixel_coords
b, _, h, w = depth.size()
i_range = torch.arange(0, h).view(1, h, 1).expand(1,h,w).type_as(depth) # [1, H, W]
j_range = torch.arange(0, w).view(1, 1, w).expand(1,h,w).type_as(depth) # [1, H, W]
ones = torch.ones(1,h,w).type_as(depth)
#print("i_range",i_range.device)
#print("j_range",j_range.device)
#print("ones",ones.device)
pixel_coords = torch.stack((j_range, i_range, ones), dim=1).type_as(depth) # [1, 3, H, W]
pixel_coords.to(depth.device)
def cam2pixel(cam_coords, proj_c2p_rot, proj_c2p_tr):
b, _, h, w = cam_coords.size()
cam_coords_flat = cam_coords.reshape(b, 3, -1) # [B, 3, H*W]
if proj_c2p_rot is not None:
pcoords = proj_c2p_rot.float() @ cam_coords_flat
else:
pcoords = cam_coords_flat
if proj_c2p_tr is not None:
pcoords = pcoords + proj_c2p_tr.float() # [B, 3, H*W]
X = pcoords[:, 0]
Y = pcoords[:, 1]
Z = pcoords[:, 2].clamp(min=1e-4)
X_norm = 2*(X / Z)/(w-1) - 1 # Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1) [B, H*W]
Y_norm = 2*(Y / Z)/(h-1) - 1 # Idem [B, H*W]
pixel_coords = torch.stack([X_norm, Y_norm], dim=2) # [B, H*W, 2]
# print(pixel_coords.reshape(b,h,w,2).shape)
return pixel_coords.reshape(b,h,w,2)
def pixel2cam(depth, intrinsics_inv):
global pixel_coords
b, _, h, w = depth.size()
if (pixel_coords is None) or pixel_coords.size(2) < h:
set_id_grid(depth)
pixel_coords = pixel_coords.to(depth.device)
current_pixel_coords = pixel_coords[:,:,:h,:w].expand(b,3,h,w).reshape(b, 3, -1) # [B, 3, H*W]
#print("-"*10)
#print("Pixel", pixel_coords.device)
#print("Depth", depth.device)
#print("intrinsics_inv",intrinsics_inv.device)
#print("current_pixel_coords",current_pixel_coords.device)
#print("-"*10)
cam_coords = (intrinsics_inv.float() @ current_pixel_coords.float())
cam_coords = cam_coords.reshape(b, 3, h, w)
return cam_coords * depth.clamp(min=1e-1)
def quat2mat(quat):
x, y, z, w = quat[:,0], quat[:,1], quat[:,2], quat[:,3]
B = quat.size(0)
w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
n = w2 + x2 + y2 + z2
x = x / n
y = y / n
z = z / n
w = w / n
w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
wx, wy, wz = w*x, w*y, w*z
xy, xz, yz = x*y, x*z, y*z
rotMat = torch.stack([1 - 2*y2 - 2*z2, 2*xy - 2*wz, 2*wy + 2*xz,
2*wz + 2*xy, 1 - 2*x2 - 2*z2, 2*yz - 2*wx,
2*xz - 2*wy, 2*wx + 2*yz, 1 - 2*x2 - 2*y2], dim=1).reshape(B, 3, 3)
return rotMat
def pose_vec2mat(vec):
size_list = list(vec.size())
if len(size_list) == 3:
# if dimension is [B 4 4] for multiview blender dataset
return vec
else:
# If dimension is [B 7] for multiview nocs dataset
b = vec.size(0)
translation = vec[:, :3].unsqueeze(-1) # [B, 3, 1]
rot = vec[:,3:]
rot_mat = quat2mat(rot) # [B, 3, 3]
invert_mat = torch.eye(4)
invert_mat[0, 0] *= -1
invert_mat[1, 1] *= -1
# Adding 0.5 offset for dataset
transform_mat = torch.cat([rot_mat, (translation) + 0.5], dim=2) # [B, 3, 4]
transform_mat = torch.cat([transform_mat, torch.tensor([[0,0,0,1]]).unsqueeze(0).expand(1,1,4).type_as(transform_mat).repeat(b, 1, 1)], dim=1) # [B, 4, 4]
return transform_mat @ invert_mat.type_as(transform_mat)
def inverse_warp(tgt_image, depth, intrinsics, src_pose, tgt_pose):
src_camera_coords = pixel2cam(depth, intrinsics.inverse())
src_pose_mat = pose_vec2mat(src_pose)
tgt_pose_mat = pose_vec2mat(tgt_pose)
src_cam_to_tgt_cam = tgt_pose_mat.inverse() @ src_pose_mat
tgt_cam_2_proj = intrinsics @ src_cam_to_tgt_cam[:, :3, :] # Bx3x3 Bx3x4
rot, tr = tgt_cam_2_proj[:,:,:3], tgt_cam_2_proj[:,:,-1:]
tgt_pix_coords = cam2pixel(src_camera_coords, rot, tr)
tgt_image = tgt_image.type_as(tgt_pix_coords)
projected_img = F.grid_sample(tgt_image, tgt_pix_coords, padding_mode='zeros', align_corners=False)
valid_points = tgt_pix_coords.abs().max(dim=-1)[0] <= 1
return projected_img, valid_points
def inverse_warp_2(tgt_image, depth, intrinsics, src_pose, tgt_pose):
'''
Inverse warp function using Kornia
'''
src_pose_mat = pose_vec2mat(src_pose)
tgt_pose_mat = pose_vec2mat(tgt_pose)
b = tgt_image.size(0)
h = torch.tensor(tgt_image.size(2)).repeat(b)
w = torch.tensor(tgt_image.size(3)).repeat(b)
intrinsics = torch.cat([intrinsics.float(), torch.tensor([[0, 0, 0]]).unsqueeze(2).expand(1, 3, 1).type_as(intrinsics).repeat(b, 1, 1).float()], dim = 2)
intrinsics = torch.cat([intrinsics, torch.tensor([[0, 0, 0, 1]]).expand(1, 1, 4).type_as(intrinsics).repeat(b, 1, 1).float() ], dim = 1)
pinhole_tgt = kornia.geometry.PinholeCamera(intrinsics, tgt_pose_mat.float(), h, w)
pinhole_src = kornia.geometry.PinholeCamera(intrinsics, src_pose_mat.float(), h, w)
image_src = kornia.geometry.depth_warp(pinhole_tgt, pinhole_src, depth.float(), tgt_image.float(), tgt_image.size(2), tgt_image.size(3))
return image_src, image_src
def project_depth_point_cloud(depth, intrinsics, src_pose, tgt_pose):
'''
Project point cloud from src to tgt pose
'''
src_camera_coords = pixel2cam(depth, intrinsics.inverse()) # [B, 3, H, W]
b, _, h, w = src_camera_coords.size()
src_pose_mat = pose_vec2mat(src_pose)
tgt_pose_mat = pose_vec2mat(tgt_pose)
# source camera coordinates
src_camera_coords = src_camera_coords.reshape(b, 3, h*w)
src_cam_to_tgt_cam = tgt_pose_mat.inverse() @ src_pose_mat
ones = torch.ones((b, 1, h*w), device=src_camera_coords.device)
#print("ones",ones.device)
#print("src_camera_coords",src_camera_coords.device)
src_camera_coords_homogeneous = torch.cat([src_camera_coords, ones], dim = 1) # [B, 4, H*W]
# destination camera coordinates
projected_coords = src_cam_to_tgt_cam.float() @ src_camera_coords_homogeneous.float() # [B, 4, H*W]
projected_coords = projected_coords[:, :3, :]
return src_camera_coords, projected_coords
def NOCS_map_2_point_cloud(nocs_image_tensor, mask):
'''
Convert NOCS maps to point cloud
Input:
nocs_image_tensor - [B, 3, H, W] - torch tensor
mask - [B, H, W] - torch tensor
Returns:
nocs_point_cloud_list - B element list - [3, masked dims]
indices_list - B element list - [2, masked dims]
'''
indices_list = []
nocs_point_cloud_list = []
B, views, H, W = nocs_image_tensor.shape
for i in range(nocs_image_tensor.shape[0]):
ind = torch.from_numpy(((mask[i, :, :] > 0.5).nonzero().cpu()).numpy())
h = ind[:, 0]
w = ind[:, 1]
#torch.sigmoid((mask[i, :, :] - 0.5)* 100)
#h = h.detach()
#w = w.detach()
#print(h.max(), w.max(), h.min(), w.min())
nocs_point_cloud = nocs_image_tensor[i, :, h, w] # [3, mask]
nocs_point_cloud.detach_()
nocs_point_cloud_list.append(nocs_point_cloud)
indices_list.append(torch.stack([h, w]).detach()) # [2, mask]
return nocs_point_cloud_list, indices_list
def get_NOCS_correspondences(nocs_image_tensor_source, mask_source, nocs_image_tensor_target, mask_target):
'''
Get NOCS correspondences
Input:
nocs_image_tensor_source - [B, 3, H, W]
mask_source - [B, H, W]
nocs_image_tensor_target - [B, 3, H, W]
mask_target - [B, H, W]
Returns:
indices_depth_list - list of tensors with indices of shape [2, masked_dim]
'''
B, views, H, W = nocs_image_tensor_source.shape
indices_depth_list_target = []
indices_depth_list_source = []
for i in range(B):
nocs_point_cloud_list_source, indices_list_source = NOCS_map_2_point_cloud(nocs_image_tensor_source[i, :, :, :].unsqueeze(0), mask_source[i, 0, :, :].unsqueeze(0))
nocs_point_cloud_list_target, indices_list_target = NOCS_map_2_point_cloud(nocs_image_tensor_target[i, :, :, :].unsqueeze(0), mask_target[i, 0, :, :].unsqueeze(0))
pc_1, ind_1 = nocs_point_cloud_list_source[0], indices_list_source[0] # [3, mask_size], [2, mask_size]
pc_2, ind_2 = nocs_point_cloud_list_target[0], indices_list_target[0] # [3, mask_size]
# Perform NOCS KNN matching
out = knn_points(pc_1.transpose(0, 1).unsqueeze(0), pc_2.transpose(0, 1).unsqueeze(0)) # [1, masked_dim, 3]
corresponding_idx = out.idx[0, :, 0] # [masked_dim]
corresponding_idx = ind_2[:, corresponding_idx]
indices_depth_list_source.append(ind_1)
indices_depth_list_target.append(corresponding_idx)
return indices_depth_list_source, indices_depth_list_target
if __name__ == "__main__":
src_pose = torch.tensor([[1663.45703125, 46.258087158203128, -2127.346435546875, 0.008096654899418354, -0.3257482051849365, 0.0027897413820028307, 0.9454177618026733]])
tgt_pose = torch.tensor([[1889.214599609375, 221.49795532226563, -1699.667724609375, 0.039696164429187778, -0.4065377712249756, 0.01768353208899498, 0.9125999212265015]])
src_pose_2 = torch.tensor([[2011.62060546875, 374.8108215332031, -1255.8643798828125,0.06847226619720459, -0.48349833488464358, 0.03797297552227974, 0.8718366026878357]])
depth = Image.open('./test-images/depth.png')
depth = np.array(depth)
depth = depth_decode(depth)
depth = torch.tensor(depth).unsqueeze(0).unsqueeze(1).float()
# print(depth)
# plt.imshow(depth[0][0])
# plt.show()
tgt_image = cv2.imread('./test-images/rgb.png')
tgt_image = torch.tensor(tgt_image).unsqueeze(0).permute(0, 3, 1, 2).float() / 255.0
intrinsics = torch.tensor([
[617.1,0.0,320.0],
[0.0,617.1,240.0],
[0.0,0.0,1.0],
])
scale_factor = 1
src_pose[0, :3] = src_pose[0, :3] / scale_factor
tgt_pose[0, :3] = tgt_pose[0, :3] / scale_factor
src_pose_2[0, :3] = src_pose_2[0, :3] / scale_factor
x_factor = -1
src_pose[0, 0] = src_pose[0, 0] * x_factor
tgt_pose[0, 0] = tgt_pose[0, 0] * x_factor
src_pose_2[0, 0] = src_pose_2[0, 0] * x_factor
src_pose[0, 4:6] = src_pose[0, 4:6] * -1
tgt_pose[0, 4:6] = tgt_pose[0, 4:6] * -1
src_pose_2[0, 4:6] = src_pose_2[0, 4:6] * -1
intrinsics = intrinsics.unsqueeze(0)
warp=inverse_warp(tgt_image, depth, intrinsics, tgt_pose, src_pose)
warp=warp[0].permute(0,2,3,1)
plt.imshow(warp[0])
plt.show()
|
[
"torch.ones",
"matplotlib.pyplot.show",
"torch.stack",
"torch.nn.functional.grid_sample",
"torch.eye",
"matplotlib.pyplot.imshow",
"numpy.zeros",
"torch.cat",
"scipy.spatial.transform.Rotation.random",
"PIL.Image.open",
"helper_functions.sigmoid_2_depth",
"numpy.finfo",
"cv2.imread",
"torch.cuda.is_available",
"numpy.array",
"torch.arange",
"torch.tensor"
] |
[((339, 357), 'numpy.finfo', 'np.finfo', (['np.float'], {}), '(np.float)\n', (347, 357), True, 'import numpy as np\n'), ((762, 806), 'helper_functions.sigmoid_2_depth', 'helper_functions.sigmoid_2_depth', (['depth_maps'], {}), '(depth_maps)\n', (794, 806), False, 'import helper_functions\n'), ((2657, 2703), 'torch.stack', 'torch.stack', (['point_cloud_list_all_views'], {'dim': '(1)'}), '(point_cloud_list_all_views, dim=1)\n', (2668, 2703), False, 'import torch\n'), ((2769, 2823), 'torch.stack', 'torch.stack', (['rotated_point_cloud_list_all_views'], {'dim': '(1)'}), '(rotated_point_cloud_list_all_views, dim=1)\n', (2780, 2823), False, 'import torch\n'), ((3238, 3282), 'helper_functions.sigmoid_2_depth', 'helper_functions.sigmoid_2_depth', (['depth_maps'], {}), '(depth_maps)\n', (3270, 3282), False, 'import helper_functions\n'), ((3469, 3494), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3492, 3494), False, 'import torch\n'), ((4573, 4602), 'torch.stack', 'torch.stack', (['point_cloud_list'], {}), '(point_cloud_list)\n', (4584, 4602), False, 'import torch\n'), ((5065, 5125), 'numpy.zeros', 'np.zeros', (['(depth_image_16.shape[0], depth_image_16.shape[1])'], {}), '((depth_image_16.shape[0], depth_image_16.shape[1]))\n', (5073, 5125), True, 'import numpy as np\n'), ((6664, 6700), 'torch.stack', 'torch.stack', (['[X_norm, Y_norm]'], {'dim': '(2)'}), '([X_norm, Y_norm], dim=2)\n', (6675, 6700), False, 'import torch\n'), ((9546, 9633), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['tgt_image', 'tgt_pix_coords'], {'padding_mode': '"""zeros"""', 'align_corners': '(False)'}), "(tgt_image, tgt_pix_coords, padding_mode='zeros',\n align_corners=False)\n", (9559, 9633), True, 'import torch.nn.functional as F\n'), ((11230, 11288), 'torch.ones', 'torch.ones', (['(b, 1, h * w)'], {'device': 'src_camera_coords.device'}), '((b, 1, h * w), device=src_camera_coords.device)\n', (11240, 11288), False, 'import torch\n'), ((11411, 11454), 'torch.cat', 'torch.cat', (['[src_camera_coords, ones]'], {'dim': '(1)'}), '([src_camera_coords, ones], dim=1)\n', (11420, 11454), False, 'import torch\n'), ((14435, 14602), 'torch.tensor', 'torch.tensor', (['[[1663.45703125, 46.258087158203125, -2127.346435546875, \n 0.008096654899418354, -0.3257482051849365, 0.0027897413820028305, \n 0.9454177618026733]]'], {}), '([[1663.45703125, 46.258087158203125, -2127.346435546875, \n 0.008096654899418354, -0.3257482051849365, 0.0027897413820028305, \n 0.9454177618026733]])\n', (14447, 14602), False, 'import torch\n'), ((14608, 14777), 'torch.tensor', 'torch.tensor', (['[[1889.214599609375, 221.49795532226562, -1699.667724609375, \n 0.039696164429187775, -0.4065377712249756, 0.01768353208899498, \n 0.9125999212265015]]'], {}), '([[1889.214599609375, 221.49795532226562, -1699.667724609375, \n 0.039696164429187775, -0.4065377712249756, 0.01768353208899498, \n 0.9125999212265015]])\n', (14620, 14777), False, 'import torch\n'), ((14785, 14953), 'torch.tensor', 'torch.tensor', (['[[2011.62060546875, 374.8108215332031, -1255.8643798828125, \n 0.06847226619720459, -0.48349833488464355, 0.03797297552227974, \n 0.8718366026878357]]'], {}), '([[2011.62060546875, 374.8108215332031, -1255.8643798828125, \n 0.06847226619720459, -0.48349833488464355, 0.03797297552227974, \n 0.8718366026878357]])\n', (14797, 14953), False, 'import torch\n'), ((14957, 14994), 'PIL.Image.open', 'Image.open', (['"""./test-images/depth.png"""'], {}), "('./test-images/depth.png')\n", (14967, 14994), False, 'from PIL import Image\n'), ((15007, 15022), 'numpy.array', 'np.array', (['depth'], {}), '(depth)\n', (15015, 15022), True, 'import numpy as np\n'), ((15205, 15240), 'cv2.imread', 'cv2.imread', (['"""./test-images/rgb.png"""'], {}), "('./test-images/rgb.png')\n", (15215, 15240), False, 'import cv2\n'), ((15347, 15420), 'torch.tensor', 'torch.tensor', (['[[617.1, 0.0, 320.0], [0.0, 617.1, 240.0], [0.0, 0.0, 1.0]]'], {}), '([[617.1, 0.0, 320.0], [0.0, 617.1, 240.0], [0.0, 0.0, 1.0]])\n', (15359, 15420), False, 'import torch\n'), ((16089, 16108), 'matplotlib.pyplot.imshow', 'plt.imshow', (['warp[0]'], {}), '(warp[0])\n', (16099, 16108), True, 'import matplotlib.pyplot as plt\n'), ((16113, 16123), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16121, 16123), True, 'import matplotlib.pyplot as plt\n'), ((1125, 1150), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1148, 1150), False, 'import torch\n'), ((2335, 2364), 'torch.stack', 'torch.stack', (['point_cloud_list'], {}), '(point_cloud_list)\n', (2346, 2364), False, 'import torch\n'), ((8564, 8576), 'torch.eye', 'torch.eye', (['(4)'], {}), '(4)\n', (8573, 8576), False, 'import torch\n'), ((8704, 8750), 'torch.cat', 'torch.cat', (['[rot_mat, translation + 0.5]'], {'dim': '(2)'}), '([rot_mat, translation + 0.5], dim=2)\n', (8713, 8750), False, 'import torch\n'), ((5742, 5761), 'torch.ones', 'torch.ones', (['(1)', 'h', 'w'], {}), '(1, h, w)\n', (5752, 5761), False, 'import torch\n'), ((5900, 5944), 'torch.stack', 'torch.stack', (['(j_range, i_range, ones)'], {'dim': '(1)'}), '((j_range, i_range, ones), dim=1)\n', (5911, 5944), False, 'import torch\n'), ((7911, 8104), 'torch.stack', 'torch.stack', (['[1 - 2 * y2 - 2 * z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz, 2 * wz + 2 * xy, 1 -\n 2 * x2 - 2 * z2, 2 * yz - 2 * wx, 2 * xz - 2 * wy, 2 * wx + 2 * yz, 1 -\n 2 * x2 - 2 * y2]'], {'dim': '(1)'}), '([1 - 2 * y2 - 2 * z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz, 2 * wz +\n 2 * xy, 1 - 2 * x2 - 2 * z2, 2 * yz - 2 * wx, 2 * xz - 2 * wy, 2 * wx +\n 2 * yz, 1 - 2 * x2 - 2 * y2], dim=1)\n', (7922, 8104), False, 'import torch\n'), ((12701, 12720), 'torch.stack', 'torch.stack', (['[h, w]'], {}), '([h, w])\n', (12712, 12720), False, 'import torch\n'), ((3670, 3709), 'scipy.spatial.transform.Rotation.random', 'R.random', (['batch_size'], {'random_state': '(1024)'}), '(batch_size, random_state=1024)\n', (3678, 3709), True, 'from scipy.spatial.transform import Rotation as R\n'), ((5567, 5585), 'torch.arange', 'torch.arange', (['(0)', 'h'], {}), '(0, h)\n', (5579, 5585), False, 'import torch\n'), ((5656, 5674), 'torch.arange', 'torch.arange', (['(0)', 'w'], {}), '(0, w)\n', (5668, 5674), False, 'import torch\n'), ((15068, 15087), 'torch.tensor', 'torch.tensor', (['depth'], {}), '(depth)\n', (15080, 15087), False, 'import torch\n'), ((1338, 1377), 'scipy.spatial.transform.Rotation.random', 'R.random', (['batch_size'], {'random_state': '(1024)'}), '(batch_size, random_state=1024)\n', (1346, 1377), True, 'from scipy.spatial.transform import Rotation as R\n'), ((15257, 15280), 'torch.tensor', 'torch.tensor', (['tgt_image'], {}), '(tgt_image)\n', (15269, 15280), False, 'import torch\n'), ((3539, 3578), 'scipy.spatial.transform.Rotation.random', 'R.random', (['batch_size'], {'random_state': '(1024)'}), '(batch_size, random_state=1024)\n', (3547, 3578), True, 'from scipy.spatial.transform import Rotation as R\n'), ((1199, 1238), 'scipy.spatial.transform.Rotation.random', 'R.random', (['batch_size'], {'random_state': '(1024)'}), '(batch_size, random_state=1024)\n', (1207, 1238), True, 'from scipy.spatial.transform import Rotation as R\n'), ((10268, 10296), 'torch.tensor', 'torch.tensor', (['[[0, 0, 0, 1]]'], {}), '([[0, 0, 0, 1]])\n', (10280, 10296), False, 'import torch\n'), ((8818, 8846), 'torch.tensor', 'torch.tensor', (['[[0, 0, 0, 1]]'], {}), '([[0, 0, 0, 1]])\n', (8830, 8846), False, 'import torch\n'), ((10118, 10143), 'torch.tensor', 'torch.tensor', (['[[0, 0, 0]]'], {}), '([[0, 0, 0]])\n', (10130, 10143), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
#
# Copyright © 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission
# of Red Hat, Inc.
#
'''
MirrorManager2 login forms.
'''
# # pylint cannot import flask extension correctly
# pylint: disable=E0611,F0401
# # The forms here don't have specific methods, they just inherit them.
# pylint: disable=R0903
# # We apparently use old style super in our __init__
# pylint: disable=E1002
# # Couple of our forms do not even have __init__
# pylint: disable=W0232
from flask.ext import wtf
import wtforms
def same_password(form, field):
''' Check if the data in the field is the same as in the password field.
'''
if field.data != form.password.data:
raise wtf.ValidationError('Both password fields should be equal')
class LostPasswordForm(wtf.Form):
""" Form to ask for a password change. """
username = wtforms.TextField(
'username <span class="error">*</span>',
[wtforms.validators.Required()]
)
class ResetPasswordForm(wtf.Form):
""" Form to reset one's password in the local database. """
password = wtforms.PasswordField(
'Password <span class="error">*</span>',
[wtforms.validators.Required()]
)
confirm_password = wtforms.PasswordField(
'Confirm password <span class="error">*</span>',
[wtforms.validators.Required(), same_password]
)
class LoginForm(wtf.Form):
""" Form to login via the local database. """
username = wtforms.TextField(
'username <span class="error">*</span>',
[wtforms.validators.Required()]
)
password = wtforms.PasswordField(
'Password <span class="error">*</span>',
[wtforms.validators.Required()]
)
class NewUserForm(wtf.Form):
""" Form to add a new user to the local database. """
user_name = wtforms.TextField(
'username <span class="error">*</span>',
[wtforms.validators.Required()]
)
display_name = wtforms.TextField(
'Full name',
[wtforms.validators.Optional()]
)
email_address = wtforms.TextField(
'Email address <span class="error">*</span>',
[wtforms.validators.Required(), wtforms.validators.Email()]
)
password = wtforms.PasswordField(
'Password <span class="error">*</span>',
[wtforms.validators.Required()]
)
confirm_password = wtforms.PasswordField(
'Confirm password <span class="error">*</span>',
[wtforms.validators.Required(), same_password]
)
|
[
"wtforms.validators.Email",
"flask.ext.wtf.ValidationError",
"wtforms.validators.Required",
"wtforms.validators.Optional"
] |
[((1567, 1626), 'flask.ext.wtf.ValidationError', 'wtf.ValidationError', (['"""Both password fields should be equal"""'], {}), "('Both password fields should be equal')\n", (1586, 1626), False, 'from flask.ext import wtf\n'), ((1803, 1832), 'wtforms.validators.Required', 'wtforms.validators.Required', ([], {}), '()\n', (1830, 1832), False, 'import wtforms\n'), ((2038, 2067), 'wtforms.validators.Required', 'wtforms.validators.Required', ([], {}), '()\n', (2065, 2067), False, 'import wtforms\n'), ((2188, 2217), 'wtforms.validators.Required', 'wtforms.validators.Required', ([], {}), '()\n', (2215, 2217), False, 'import wtforms\n'), ((2412, 2441), 'wtforms.validators.Required', 'wtforms.validators.Required', ([], {}), '()\n', (2439, 2441), False, 'import wtforms\n'), ((2546, 2575), 'wtforms.validators.Required', 'wtforms.validators.Required', ([], {}), '()\n', (2573, 2575), False, 'import wtforms\n'), ((2766, 2795), 'wtforms.validators.Required', 'wtforms.validators.Required', ([], {}), '()\n', (2793, 2795), False, 'import wtforms\n'), ((2871, 2900), 'wtforms.validators.Optional', 'wtforms.validators.Optional', ([], {}), '()\n', (2898, 2900), False, 'import wtforms\n'), ((3011, 3040), 'wtforms.validators.Required', 'wtforms.validators.Required', ([], {}), '()\n', (3038, 3040), False, 'import wtforms\n'), ((3042, 3068), 'wtforms.validators.Email', 'wtforms.validators.Email', ([], {}), '()\n', (3066, 3068), False, 'import wtforms\n'), ((3173, 3202), 'wtforms.validators.Required', 'wtforms.validators.Required', ([], {}), '()\n', (3200, 3202), False, 'import wtforms\n'), ((3323, 3352), 'wtforms.validators.Required', 'wtforms.validators.Required', ([], {}), '()\n', (3350, 3352), False, 'import wtforms\n')]
|
from glob import glob
import re
import operator
import os
import textwrap
import util
WEEKLY_METRICS_VERSION = "0.1"
ORG_WEEKLY_METRICS_VERSION = "0.1"
MONTHLY_METRICS_VERSION = "0.1"
ORG_MONTHLY_METRICS_VERSION = "0.1"
PATH_TO_METRICS_POSTS = "_posts"
PATH_TO_GRAPHS = "graphs"
WEEKLY_PROJECT_POST = """\
---
layout: weekly-metrics-v{version}
title: Metrics report for {owner}/{repo} | {reportID}
permalink: /{owner}/{repo}/{link}/
owner: {owner}
repo: {repo}
reportID: {reportID}
datestampThisWeek: {datestampThisWeek}
datestampLastWeek: {datestampLastWeek}
---
"""
MONTHLY_PROJECT_POST = """\
---
layout: monthly-metrics-v{version}
title: Metrics report for {owner}/{repo} | {reportID} | {datestampThisMonth}
permalink: /{owner}/{repo}/{link}/
owner: {owner}
repo: {repo}
reportID: {reportID}
datestampThisMonth: {datestampThisMonth}
datestampLastMonth: {datestampLastMonth}
---
"""
# {{% for item in site.data["{owner_in_data}"]["{repo_in_data}"]["{reportID}"]["data"] %}}
WEEKLY_ORG_POST = """\
---
layout: org-weekly-metrics-v{version}
title: TwiterOSS Metrics Report for {owner} | {reportID}
permalink: /{owner}/{link}/
org: {owner}
reportID: {reportID}
datestampThisWeek: {datestampThisWeek}
datestampLastWeek: {datestampLastWeek}
---
"""
# {{% for item in site.data["{owner_in_data}"]["{reportID}"]["data"] %}}
MONTHLY_ORG_POST = """\
---
layout: org-monthly-metrics-v{version}
title: TwiterOSS Metrics Report for {owner} | {reportID}
permalink: /{owner}/{link}/
org: {owner}
reportID: {reportID}
datestampThisMonth: {datestampThisMonth}
datestampLastMonth: {datestampLastMonth}
---
"""
# {{% for item in site.data["{owner_in_data}"]["{reportID}"]["data"] %}}
def add_table_of_metrics(post_text, REPORT_JSON, data_source, ID, add_breakdown=False):
# data_source is not used in the function
# It can be used to create a jekyll loop like below but is being avoided
# {{% for item in data_source %}}
post_text += textwrap.dedent("""
<table class="table table-condensed" style="border-collapse:collapse;">
<thead>
<tr>
<th>Metric</th>
<th>Latest</th>
<th>Previous</th>
<th colspan="2" style="text-align: center;">Difference</th>
</tr>
</thead>
<tbody>
""")
for metric in REPORT_JSON['data']:
color = util.get_metrics_color(metric, REPORT_JSON['data'][metric]['diff'])
if REPORT_JSON['data'][metric]['previous'] != 0:
percentage_change = str(round(REPORT_JSON['data'][metric]['diff']/REPORT_JSON['data'][metric]['previous']*100, 2))
elif REPORT_JSON['data'][metric]['latest'] != 0:
percentage_change = '∞'
else:
percentage_change = '0.0'
post_text += """
<tr data-toggle="collapse" data-target="#col-{5}" class="accordion-toggle" style="cursor: pointer;">
<td>{0:}</td>
<td>{1:,}</td>
<td>{2:,}</td>
<td style="color: {4}" >{3:,}</td>
<td style="color: {4}" >{6}%</td>
</tr>
""".format(util.get_metrics_name(metric),
REPORT_JSON['data'][metric]['latest'],
REPORT_JSON['data'][metric]['previous'],
REPORT_JSON['data'][metric]['diff'],
color,
metric,
percentage_change)
# Add diff breakdown
if add_breakdown and len(REPORT_JSON['data'][metric]['diff_breakdown'].items()):
post_text += """
<td class="hiddenRow" colspan="2"></td>
<td class="hiddenRow" colspan="3" style="padding: 0" ><div class="accordian-body collapse" id="col-{0}">
""".format(metric)
items = list(REPORT_JSON['data'][metric]['diff_breakdown'].items())
items.sort(key=operator.itemgetter(1), reverse=True)
for item, value in items:
href = "/metrics/{}/{}/{}".format(REPORT_JSON['name'], item, ID)
post_text += """<a target="_blank" href="{2}">{0} : {1}</a><br>""".format(item, value, href)
post_text += """</div> </td>"""
post_text += textwrap.dedent("""
</tbody>
</table>
""")
return post_text
def add_augur_metrics(post_text, REPORT_JSON, AUGUR_METRICS, ID, is_project=False):
if is_project:
nameWithOwner = REPORT_JSON["nameWithOwner"]
"""
Bus Factor
"""
try:
bus_factor = AUGUR_METRICS["bus_factor"][nameWithOwner]
except KeyError:
bus_factor = {"best": "N/A", "worst": "N/A"}
post_text += '<br>\n<h4><a target="_blank" href="https://chaoss.community/">CHAOSS</a> Metrics</h4>' + '\n'
post_text += textwrap.dedent(f"""
<table class="table table-condensed" style="border-collapse:collapse;">
<tbody>
<td>Bus Factor</td>
<td>Best: {bus_factor["best"]}</td>
<td>Worst: {bus_factor["worst"]}</td>
</tbody>
</table>
""")
graph_text = ""
# """
# Timeseries of new watchers
# """
# if ID == 'WEEKLY':
# graph_path = f"{PATH_TO_GRAPHS}/{nameWithOwner}/timeseries_new_watchers_per_week.svg"
# elif ID == 'MONTHLY':
# graph_path = f"{PATH_TO_GRAPHS}/{nameWithOwner}/timeseries_new_watchers_per_month.svg"
# if os.path.exists(graph_path):
# graph_text += f'\t<object class="cell" type="image/svg+xml" data="/metrics/{graph_path}">\n'
# graph_text += '\t\tYour browser does not support SVG\n'
# graph_text += '\t</object>\n'
# Add more chaoss graphs here
# After all the graphs
if graph_text:
post_text += '<div class="row">\n'
post_text += graph_text
post_text += '</div>\n'
else: # ORG
pass
return post_text
def add_highlights(post_text, REPORT_JSON, ID):
org = REPORT_JSON["name"]
if REPORT_JSON["highlights"]:
post_text += '<br>\n<h4>Highlights</h4>' + '\n'
post_text += '<ul>' + '\n'
highlights = REPORT_JSON["highlights"]
# Sort based on the number of zeroes!
highlights.sort(key=lambda item: str(item[1]).count('0'), reverse=True)
for highlight in highlights:
repo, number, metric = highlight
post_text += '\t' + f'<li><a href="/metrics/{org}/{repo}/{ID}">{repo}</a>'
post_text += f' crossed {number:,} {util.get_metrics_name(metric)}</li>' + '\n'
post_text += '</ul>' + '\n'
return post_text
def add_github_metrics_graphs(post_text, REPORT_JSON, ID):
"""
Add graphs for orgs' weekly reports
"""
org = REPORT_JSON["name"]
# Treemap graphs
all_treemap_graphs = glob(PATH_TO_GRAPHS + "/" + org + f"/treemap_{ID.lower()}_*.svg")
post_text += '<div class="graph-container">\n'
post_text += '<br>\n<h4>Binary Treemap graphs</h4>\n'
post_text += '<div class="row">\n'
for graph in all_treemap_graphs:
post_text += f'\t<object class="cell" type="image/svg+xml" data="/metrics/{graph}">\n'
post_text += '\t\tYour browser does not support SVG\n'
post_text += '\t</object>\n'
post_text += '</div>\n'
post_text += '</div>\n'
return post_text
def _create_post(REPORT_JSON, AUGUR_METRICS, latest=False, is_project=True):
"""
latest: If True, create a post with permalink /owner/repo/{ID}
project: If False, it means the post is for an org, else for a project
"""
ID = REPORT_JSON['reportID'].split('-')[0]
if is_project:
org, repo = REPORT_JSON['nameWithOwner'].split("/")
else: # org
org, repo = REPORT_JSON['name'], ''
# Create directory for the post, if it does not exist
path_to_post = os.path.join(PATH_TO_METRICS_POSTS, org, repo)
os.makedirs(path_to_post, exist_ok=True)
if latest:
"""
Create file for latest report
"""
# Delete already existing latest posts
re_latest_report = re.compile(r"\d{{4}}-\d{{2}}-\d{{2}}-{}-LATEST.md".format(ID))
for filename in os.listdir(path_to_post):
if re_latest_report.match(filename):
print("LOG: Removing existing latest post", os.path.join(path_to_post, filename))
os.unlink(os.path.join(path_to_post, filename))
# Create latest report file in _posts as well
post_file = "{}/{}-{}-LATEST.md".format(path_to_post, REPORT_JSON["datestamp"]["latest"], ID)
else:
"""
Create file for dated report
"""
# This is a weird filename for sure. But I think I have an explanation for it -
# posts need to start with %Y-%m-%d and the later is sent to page.title variable
# Without the later date, title did not make much sense.
post_file = "{}/{}-{}.md".format(path_to_post, REPORT_JSON["datestamp"]["latest"], REPORT_JSON["reportID"])
if latest:
link = ID
else:
link = REPORT_JSON["reportID"]
if ID == "WEEKLY":
if is_project:
"""
WEEKLY - PROJECT
"""
data_source = 'site.data["{owner_in_data}"]["{repo_in_data}"]["{reportID}"]["data"]'
post_text = add_table_of_metrics(WEEKLY_PROJECT_POST, REPORT_JSON, data_source, 'WEEKLY')
# post_text = add_augur_metrics(post_text, REPORT_JSON, AUGUR_METRICS, 'WEEKLY', is_project=True)
else:
"""
WEEKLY - ORG
"""
data_source = 'site.data["{owner_in_data}"]["{reportID}"]["data"]'
post_text = add_table_of_metrics(WEEKLY_ORG_POST, REPORT_JSON, data_source, 'WEEKLY', add_breakdown=True)
post_text = add_highlights(post_text, REPORT_JSON, 'WEEKLY')
post_text = add_github_metrics_graphs(post_text, REPORT_JSON, 'WEEKLY')
post_text = post_text.format(
version=WEEKLY_METRICS_VERSION,
owner=org,
owner_in_data=org.replace('.', ''), # Dots confused jekyll
repo=repo,
repo_in_data=repo.replace('.', ''),
reportID=REPORT_JSON["reportID"],
datestampThisWeek=REPORT_JSON["datestamp"]["latest"],
datestampLastWeek=REPORT_JSON["datestamp"]["previous"],
link=link)
elif ID == "MONTHLY":
if is_project:
"""
MONTHLY - PROJECT
"""
data_source = 'site.data["{owner_in_data}"]["{repo_in_data}"]["{reportID}"]["data"]'
post_text = add_table_of_metrics(MONTHLY_PROJECT_POST, REPORT_JSON, data_source, 'MONTHLY')
# post_text = add_augur_metrics(post_text, REPORT_JSON, AUGUR_METRICS, 'MONTHLY', is_project=True)
else:
"""
MONTHLY - ORG
"""
data_source = 'site.data["{owner_in_data}"]["{reportID}"]["data"]'
post_text = add_table_of_metrics(MONTHLY_ORG_POST, REPORT_JSON, data_source, 'MONTHLY', add_breakdown=True)
post_text = add_highlights(post_text, REPORT_JSON, 'MONTHLY')
post_text = add_github_metrics_graphs(post_text, REPORT_JSON, 'MONTHLY')
post_text = post_text.format(
version=MONTHLY_METRICS_VERSION,
owner=org,
owner_in_data=org.replace('.', ''), # Dots confused jekyll
repo=repo,
repo_in_data=repo.replace('.', ''),
reportID=REPORT_JSON["reportID"],
datestampThisMonth=REPORT_JSON["datestamp"]["latest"],
datestampLastMonth=REPORT_JSON["datestamp"]["previous"],
link=link)
with open(post_file, "w+") as f:
f.write(post_text)
if latest:
print("LOG: Created the latest POST", post_file)
else:
print("LOG: Created the POST", post_file)
def create_posts(REPORT_JSON, AUGUR_METRICS, is_project=True):
_create_post(REPORT_JSON, AUGUR_METRICS, latest=False, is_project=is_project)
_create_post(REPORT_JSON, AUGUR_METRICS, latest=True, is_project=is_project)
|
[
"textwrap.dedent",
"util.get_metrics_color",
"util.get_metrics_name",
"os.makedirs",
"operator.itemgetter",
"os.path.join",
"os.listdir"
] |
[((1955, 2303), 'textwrap.dedent', 'textwrap.dedent', (['"""\n <table class="table table-condensed" style="border-collapse:collapse;">\n <thead>\n <tr>\n <th>Metric</th>\n <th>Latest</th>\n <th>Previous</th>\n <th colspan="2" style="text-align: center;">Difference</th>\n </tr>\n </thead>\n <tbody>\n """'], {}), '(\n """\n <table class="table table-condensed" style="border-collapse:collapse;">\n <thead>\n <tr>\n <th>Metric</th>\n <th>Latest</th>\n <th>Previous</th>\n <th colspan="2" style="text-align: center;">Difference</th>\n </tr>\n </thead>\n <tbody>\n """\n )\n', (1970, 2303), False, 'import textwrap\n'), ((4163, 4221), 'textwrap.dedent', 'textwrap.dedent', (['"""\n </tbody>\n </table>\n """'], {}), '("""\n </tbody>\n </table>\n """)\n', (4178, 4221), False, 'import textwrap\n'), ((7856, 7902), 'os.path.join', 'os.path.join', (['PATH_TO_METRICS_POSTS', 'org', 'repo'], {}), '(PATH_TO_METRICS_POSTS, org, repo)\n', (7868, 7902), False, 'import os\n'), ((7907, 7947), 'os.makedirs', 'os.makedirs', (['path_to_post'], {'exist_ok': '(True)'}), '(path_to_post, exist_ok=True)\n', (7918, 7947), False, 'import os\n'), ((2349, 2416), 'util.get_metrics_color', 'util.get_metrics_color', (['metric', "REPORT_JSON['data'][metric]['diff']"], {}), "(metric, REPORT_JSON['data'][metric]['diff'])\n", (2371, 2416), False, 'import util\n'), ((4747, 5070), 'textwrap.dedent', 'textwrap.dedent', (['f"""\n <table class="table table-condensed" style="border-collapse:collapse;">\n <tbody>\n <td>Bus Factor</td>\n <td>Best: {bus_factor[\'best\']}</td>\n <td>Worst: {bus_factor[\'worst\']}</td>\n </tbody>\n </table>\n """'], {}), '(\n f"""\n <table class="table table-condensed" style="border-collapse:collapse;">\n <tbody>\n <td>Bus Factor</td>\n <td>Best: {bus_factor[\'best\']}</td>\n <td>Worst: {bus_factor[\'worst\']}</td>\n </tbody>\n </table>\n """\n )\n', (4762, 5070), False, 'import textwrap\n'), ((8187, 8211), 'os.listdir', 'os.listdir', (['path_to_post'], {}), '(path_to_post)\n', (8197, 8211), False, 'import os\n'), ((3086, 3115), 'util.get_metrics_name', 'util.get_metrics_name', (['metric'], {}), '(metric)\n', (3107, 3115), False, 'import util\n'), ((3836, 3858), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (3855, 3858), False, 'import operator\n'), ((8322, 8358), 'os.path.join', 'os.path.join', (['path_to_post', 'filename'], {}), '(path_to_post, filename)\n', (8334, 8358), False, 'import os\n'), ((8386, 8422), 'os.path.join', 'os.path.join', (['path_to_post', 'filename'], {}), '(path_to_post, filename)\n', (8398, 8422), False, 'import os\n'), ((6535, 6564), 'util.get_metrics_name', 'util.get_metrics_name', (['metric'], {}), '(metric)\n', (6556, 6564), False, 'import util\n')]
|
import h5py
import numpy as np
fname = "/home/stark/.keras/models/vgg16_weights_tf_dim_ordering_tf_kernels.h5"
dfname = 'vgg16_owl.hdf5'
f = h5py.File(fname, 'r')
data_file = h5py.File(dfname, 'w')
# conv nodes
k = 1
for i in range(1, 6): # 5 blocks in total
for j in range(1, 4):
# This is how the author of keras network want to name each node
if (j == 3 and (i == 1 or i == 2)): continue
node_name_origin = 'block' + str(i) + '_conv' + str(j)
conv_w = f[node_name_origin][node_name_origin + '_W_1:0'].value.tolist()
conv_b = f[node_name_origin][node_name_origin + '_b_1:0'].value.tolist()
node_name = 'conv2d_' + str(k)
k += 1
data_file.create_dataset(node_name + '_w', data=conv_w)
data_file.create_dataset(node_name + '_b', data=conv_b)
assert(k == 14)
# fc nodes
for i in range(1, 3):
node_name = 'fc' + str(i)
fc_w = f[node_name][node_name + '_W_1:0'].value.tolist()
fc_b = f[node_name][node_name + '_b_1:0'].value.tolist()
data_file.create_dataset(node_name + '_w', data=fc_w)
data_file.create_dataset(node_name + '_b', data=fc_b)
# prediction node
node_name = 'predictions'
p_w = f[node_name][node_name + '_W_1:0'].value.tolist()
p_b = f[node_name][node_name + '_b_1:0'].value.tolist()
data_file.create_dataset('fc3_w', data=p_w) # since the last node is also a fc node
data_file.create_dataset('fc3_b', data=p_b)
data_file.close()
f.close()
# Read file
# f = h5py.File(dfname)
|
[
"h5py.File"
] |
[((143, 164), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (152, 164), False, 'import h5py\n'), ((177, 199), 'h5py.File', 'h5py.File', (['dfname', '"""w"""'], {}), "(dfname, 'w')\n", (186, 199), False, 'import h5py\n')]
|
import shutil
import tempfile
import os
import networkx as nx
from .generate_output import *
from .isvalid import *
from .__init__ import __version__
def get_options():
import argparse
description = 'Generate multiple sequence alignments after running Panaroo'
parser = argparse.ArgumentParser(description=description,
prog='generate_panaroo_msa')
io_opts = parser.add_argument_group('Input/output')
io_opts.add_argument("-o",
"--out_dir",
dest="output_dir",
required=True,
help="location of the Panaroo output directory",
type=lambda x: is_valid_folder(parser, x))
# alignment
core = parser.add_argument_group('Gene alignment')
core.add_argument(
"-a",
"--alignment",
dest="aln",
help=("Output alignments of core genes or all genes. Options are" +
" 'core' and 'pan'. Default: 'None'"),
type=str,
choices={'core', 'pan'},
default='core')
core.add_argument(
"--aligner",
dest="alr",
help=
"Specify an aligner. Options:'prank', 'clustal', and default: 'mafft'",
type=str,
choices={'prank', 'clustal', 'mafft'},
default="mafft")
core.add_argument("--core_threshold",
dest="core",
help="Core-genome sample threshold (default=0.95)",
type=float,
default=0.95)
# Other options
parser.add_argument("-t",
"--threads",
dest="n_cpu",
help="number of threads to use (default=1)",
type=int,
default=1)
parser.add_argument("--verbose",
dest="verbose",
help="print additional output",
action='store_true',
default=False)
parser.add_argument('--version',
action='version',
version='%(prog)s ' + __version__)
args = parser.parse_args()
return (args)
def main():
args = get_options()
# make sure trailing forward slash is present
args.output_dir = os.path.join(args.output_dir, "")
# Create temporary directory
temp_dir = os.path.join(tempfile.mkdtemp(dir=args.output_dir), "")
# Load isolate names
seen = set()
isolate_names = []
with open(args.output_dir + "gene_data.csv", 'r') as infile:
next(infile)
for line in infile:
iso = line.split(",")[0]
if iso not in seen:
isolate_names.append(iso)
seen.add(iso)
# Load graph
G = nx.read_gml(args.output_dir + "final_graph.gml")
#Write out core/pan-genome alignments
if args.aln == "pan":
if args.verbose: print("generating pan genome MSAs...")
generate_pan_genome_alignment(G, temp_dir, args.output_dir, args.n_cpu,
args.alr, isolate_names)
core_nodes = get_core_gene_nodes(G, args.core, len(isolate_names))
concatenate_core_genome_alignments(core_nodes, args.output_dir)
elif args.aln == "core":
if args.verbose: print("generating core genome MSAs...")
generate_core_genome_alignment(G, temp_dir, args.output_dir,
args.n_cpu, args.alr, isolate_names,
args.core, len(isolate_names))
# remove temporary directory
shutil.rmtree(temp_dir)
return
if __name__ == '__main__':
main()
|
[
"argparse.ArgumentParser",
"tempfile.mkdtemp",
"shutil.rmtree",
"networkx.read_gml",
"os.path.join"
] |
[((286, 363), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description', 'prog': '"""generate_panaroo_msa"""'}), "(description=description, prog='generate_panaroo_msa')\n", (309, 363), False, 'import argparse\n'), ((2360, 2393), 'os.path.join', 'os.path.join', (['args.output_dir', '""""""'], {}), "(args.output_dir, '')\n", (2372, 2393), False, 'import os\n'), ((2846, 2894), 'networkx.read_gml', 'nx.read_gml', (["(args.output_dir + 'final_graph.gml')"], {}), "(args.output_dir + 'final_graph.gml')\n", (2857, 2894), True, 'import networkx as nx\n'), ((3665, 3688), 'shutil.rmtree', 'shutil.rmtree', (['temp_dir'], {}), '(temp_dir)\n', (3678, 3688), False, 'import shutil\n'), ((2456, 2493), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'dir': 'args.output_dir'}), '(dir=args.output_dir)\n', (2472, 2493), False, 'import tempfile\n')]
|
# -*- coding: utf-8 -*-
#
# test_errors.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for error handling
"""
import unittest
import nest
@nest.check_stack
class ErrorTestCase(unittest.TestCase):
"""Tests if errors are handled correctly"""
def test_Raise(self):
"""Error raising"""
def raise_custom_exception(exc, msg):
raise exc(msg)
message = "test"
exception = nest.NESTError
self.assertRaisesRegex(
exception, message, raise_custom_exception, exception, message)
def test_StackUnderFlow(self):
"""Stack underflow"""
nest.ResetKernel()
self.assertRaisesRegex(
nest.NESTError, "StackUnderflow", nest.sli_run, 'clear ;')
def test_DivisionByZero(self):
"""Division by zero"""
nest.ResetKernel()
self.assertRaisesRegex(
nest.NESTError, "DivisionByZero", nest.sli_run, '1 0 div')
def test_UnknownNode(self):
"""Unknown node"""
nest.ResetKernel()
self.assertRaisesRegex(
nest.NESTError, "UnknownNode", nest.Connect, (99, ), (99, ))
def test_UnknownModel(self):
"""Unknown model name"""
nest.ResetKernel()
self.assertRaisesRegex(
nest.NESTError, "UnknownModelName", nest.Create, -1)
def suite():
suite = unittest.makeSuite(ErrorTestCase, 'test')
return suite
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
run()
|
[
"unittest.makeSuite",
"unittest.TextTestRunner",
"nest.ResetKernel"
] |
[((2010, 2051), 'unittest.makeSuite', 'unittest.makeSuite', (['ErrorTestCase', '"""test"""'], {}), "(ErrorTestCase, 'test')\n", (2028, 2051), False, 'import unittest\n'), ((2095, 2131), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (2118, 2131), False, 'import unittest\n'), ((1274, 1292), 'nest.ResetKernel', 'nest.ResetKernel', ([], {}), '()\n', (1290, 1292), False, 'import nest\n'), ((1473, 1491), 'nest.ResetKernel', 'nest.ResetKernel', ([], {}), '()\n', (1489, 1491), False, 'import nest\n'), ((1665, 1683), 'nest.ResetKernel', 'nest.ResetKernel', ([], {}), '()\n', (1681, 1683), False, 'import nest\n'), ((1866, 1884), 'nest.ResetKernel', 'nest.ResetKernel', ([], {}), '()\n', (1882, 1884), False, 'import nest\n')]
|
import pybullet_envs
from stable_baselines3 import SAC_GER
model = SAC_GER('MlpPolicy', 'MinitaurBulletEnv-v0', verbose=1, tensorboard_log="results/long_SAC_GER_MinitaurBullet/")
model.learn(total_timesteps=3000000)
|
[
"stable_baselines3.SAC_GER"
] |
[((68, 184), 'stable_baselines3.SAC_GER', 'SAC_GER', (['"""MlpPolicy"""', '"""MinitaurBulletEnv-v0"""'], {'verbose': '(1)', 'tensorboard_log': '"""results/long_SAC_GER_MinitaurBullet/"""'}), "('MlpPolicy', 'MinitaurBulletEnv-v0', verbose=1, tensorboard_log=\n 'results/long_SAC_GER_MinitaurBullet/')\n", (75, 184), False, 'from stable_baselines3 import SAC_GER\n')]
|