# -*- coding: utf-8 -*-
"""Functional tests using WebTest.
See: http://webtest.readthedocs.org/
"""
import pytest
import responses
import json
from datetime import datetime
from comport.department.models import Department, Extractor
from comport.data.models import OfficerInvolvedShootingIMPD, UseOfForceIncidentIMPD, CitizenComplaintIMPD, AssaultOnOfficerIMPD
from testclient.JSON_test_client import JSONTestClient
from comport.data.cleaners import Cleaners
from flask import current_app
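# These functional tests exercise the extractor-facing /data/* endpoints end to end,
# using the 'testapp' WebTest fixture and the 'db' database fixture, with the JSON
# test client generating pre-baked incident data to post.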
@pytest.mark.usefixtures('db')
class TestHeartbeat:
def test_reject_nonexistent_extractor_post(self, testapp):
''' An extractor login that doesn't exist is rejected.
'''
testapp.authorization = ('Basic', ('extractor', 'nonexistent'))
response = testapp.post("/data/heartbeat", expect_errors=True)
assert response.status_code == 401
assert response.text == 'No extractor with that username!'
def test_reject_extractor_post_with_wrong_password(self, testapp):
''' An extractor login with the wrong password is rejected.
'''
Extractor.create(username='extractor', email='<EMAIL>', password="password")
testapp.authorization = ('Basic', ('extractor', 'drowssap'))
response = testapp.post("/data/heartbeat", expect_errors=True)
assert response.status_code == 401
assert response.text == 'Extractor authorization failed!'
def test_successful_extractor_post(self, testapp):
''' Send a valid heartbeat post, get a valid response.
'''
# set up the extractor
department = Department.create(name="IM Police Department", short_name="IMPD", load_defaults=False)
extractor, _ = Extractor.from_department_and_password(department=department, password="password")
extractor.update(email='<EMAIL>', next_month=10, next_year=2006)
# set the correct authorization
testapp.authorization = ('Basic', (extractor.username, 'password'))
# post a sample json object to the heartbeat URL
response = testapp.post_json("/data/heartbeat", params={"heartbeat": "heartbeat"})
# assert that we got the expected response
assert response.status_code == 200
assert response.json_body['nextMonth'] == 10
assert response.json_body['nextYear'] == 2006
assert response.json_body['received'] == {'heartbeat': 'heartbeat'}
def test_current_mmyy_on_no_setdate(self, testapp):
''' When there is no fixed date, it should send the current month and current year '''
# set up the extractor
department = Department.create(name="IM Police Department", short_name="IMPD", load_defaults=False)
extractor, _ = Extractor.from_department_and_password(department=department, password="password")
# set the correct authorization
testapp.authorization = ('Basic', (extractor.username, 'password'))
# post a sample json object to the heartbeat URL
response = testapp.post_json("/data/heartbeat", params={"heartbeat": "heartbeat"})
# current month and year
now = datetime.now()
# assert that we got the expected response
assert response.status_code == 200
assert response.json_body['nextMonth'] == now.month
assert response.json_body['nextYear'] == now.year
assert response.json_body['received'] == {'heartbeat': 'heartbeat'}
@responses.activate
def test_extractor_post_triggers_slack_notification(self, testapp):
''' A valid heartbeat post triggers a Slack notification
'''
# set up the extractor
department = Department.create(name="IM Police Department", short_name="IMPD", load_defaults=False)
extractor, _ = Extractor.from_department_and_password(department=department, password="password")
# set the correct authorization
testapp.authorization = ('Basic', (extractor.username, 'password'))
# set a fake Slack webhook URL
fake_webhook_url = 'http://webhook.example.com/'
current_app.config['SLACK_WEBHOOK_URL'] = fake_webhook_url
# create a mock to receive POST requests to that URL
responses.add(responses.POST, fake_webhook_url, status=200)
# post a sample json object to the heartbeat URL
testapp.post_json("/data/heartbeat", params={"heartbeat": "heartbeat"})
# test the captured post payload
post_body = json.loads(responses.calls[0].request.body)
assert 'Comport Pinged by Extractor!' in post_body['text']
# delete the fake Slack webhook URL
del current_app.config['SLACK_WEBHOOK_URL']
# reset the mock
responses.reset()
def test_post_assaults_data(self, testapp):
''' New assaults data from the extractor is processed as expected.
'''
# Set up the extractor
department = Department.create(name="IM Police Department", short_name="IMPD", load_defaults=False)
extractor, envs = Extractor.from_department_and_password(department=department, password="password")
# Set the correct authorization
testapp.authorization = ('Basic', (extractor.username, 'password'))
# Get a generated list of assault descriptions from the JSON test client
test_client = JSONTestClient()
assault_count = 1
assault_data = test_client.get_prebaked_assaults(last=assault_count)
# post the json to the assault URL
response = testapp.post_json("/data/assaults", params={'month': 0, 'year': 0, 'data': assault_data})
# assert that we got the expected response
assert response.status_code == 200
assert response.json_body['updated'] == 0
assert response.json_body['added'] == assault_count
# check the assault incident in the database against the data that was sent
cleaner = Cleaners()
sent_assault = cleaner.capitalize_incident(assault_data[0])
check_assault = AssaultOnOfficerIMPD.query.filter_by(opaque_id=sent_assault['opaqueId']).first()
assert check_assault.service_type == sent_assault['serviceType']
assert check_assault.force_type == sent_assault['forceType']
assert check_assault.assignment == sent_assault['assignment']
assert check_assault.arrest_made == sent_assault['arrestMade']
assert check_assault.officer_injured == sent_assault['officerInjured']
assert check_assault.officer_killed == sent_assault['officerKilled']
assert check_assault.report_filed == sent_assault['reportFiled']
def test_post_complaint_data(self, testapp):
''' New complaint data from the extractor is processed as expected.
'''
# Set up the extractor
department = Department.create(name="IM Police Department", short_name="IMPD", load_defaults=False)
extractor, envs = Extractor.from_department_and_password(department=department, password="password")
# Set the correct authorization
testapp.authorization = ('Basic', (extractor.username, 'password'))
# Get a generated list of complaint descriptions from the JSON test client
test_client = JSONTestClient()
complaint_count = 1
complaint_data = test_client.get_prebaked_complaints(last=complaint_count)
# post the json to the complaint URL
response = testapp.post_json("/data/complaints", params={'month': 0, 'year': 0, 'data': complaint_data})
# assert that we got the expected response
assert response.status_code == 200
assert response.json_body['updated'] == 0
assert response.json_body['added'] == complaint_count
# check the complaint incident in the database against the data that was sent
cleaner = Cleaners()
sent_complaint = cleaner.capitalize_incident(complaint_data[0])
check_complaint = CitizenComplaintIMPD.query.filter_by(opaque_id=sent_complaint['opaqueId']).first()
assert check_complaint.occured_date.strftime('%Y-%m-%d %-H:%-M:%S') == sent_complaint['occuredDate']
assert check_complaint.division == sent_complaint['division']
assert check_complaint.precinct == sent_complaint['precinct']
assert check_complaint.shift == sent_complaint['shift']
assert check_complaint.beat == sent_complaint['beat']
assert check_complaint.disposition == sent_complaint['disposition']
assert check_complaint.service_type == sent_complaint['serviceType']
assert check_complaint.source == sent_complaint['source']
assert check_complaint.allegation_type == sent_complaint['allegationType']
assert check_complaint.allegation == sent_complaint['allegation']
assert check_complaint.resident_race == cleaner.race(sent_complaint['residentRace'])
assert check_complaint.resident_sex == cleaner.sex(sent_complaint['residentSex'])
assert check_complaint.resident_age == cleaner.number_to_string(sent_complaint['residentAge'])
assert check_complaint.officer_identifier == sent_complaint['officerIdentifier']
assert check_complaint.officer_race == cleaner.race(sent_complaint['officerRace'])
assert check_complaint.officer_sex == cleaner.sex(sent_complaint['officerSex'])
assert check_complaint.officer_age == cleaner.number_to_string(sent_complaint['officerAge'])
assert check_complaint.officer_years_of_service == cleaner.number_to_string(sent_complaint['officerYearsOfService'])
def test_correct_complaint_cap(self, testapp):
''' Complaint allegation text from the extractor is capitalized as expected.
'''
# Set up the extractor
department = Department.create(name="IM Police Department", short_name="IMPD", load_defaults=False)
extractor, envs = Extractor.from_department_and_password(department=department, password="password")
# Set the correct authorization
testapp.authorization = ('Basic', (extractor.username, 'password'))
# Get a generated list of complaint descriptions from the JSON test client
test_client = JSONTestClient()
complaint_count = 1
complaint_data = test_client.get_prebaked_complaints(last=complaint_count)
complaint_data[0]["allegation"] = "Rude, demeaning, or affronting language"
# post the json to the complaint URL
response = testapp.post_json("/data/complaints", params={'month': 0, 'year': 0, 'data': complaint_data})
# assert that we got the expected response
assert response.status_code == 200
assert response.json_body['updated'] == 0
assert response.json_body['added'] == complaint_count
# check the complaint incident in the database against the data that was sent
cleaner = Cleaners()
sent_complaint = cleaner.capitalize_incident(complaint_data[0])
check_complaint = CitizenComplaintIMPD.query.filter_by(opaque_id=sent_complaint['opaqueId']).first()
assert check_complaint.allegation == "Rude, Demeaning, or Affronting Language"
def test_post_mistyped_complaint_data(self, testapp):
''' New complaint data from the extractor with wrongly typed data is processed as expected.
'''
# Set up the extractor
department = Department.create(name="IM Police Department", short_name="IMPD", load_defaults=False)
extractor, envs = Extractor.from_department_and_password(department=department, password="password")
# Set the correct authorization
testapp.authorization = ('Basic', (extractor.username, 'password'))
# Get a generated list of complaint descriptions from the JSON test client
test_client = JSONTestClient()
complaint_count = 1
complaint_data = test_client.get_prebaked_complaints(last=complaint_count)
# The app expects number values to be transmitted as strings. Let's change them to integers.
complaint_data[0]['residentAge'] = 28
complaint_data[0]['officerAge'] = 46
complaint_data[0]['officerYearsOfService'] = 17
# post the json to the complaint URL
response = testapp.post_json("/data/complaints", params={'month': 0, 'year': 0, 'data': complaint_data})
# assert that we got the expected response
assert response.status_code == 200
assert response.json_body['updated'] == 0
assert response.json_body['added'] == complaint_count
# check the complaint incident in the database against the data that was sent
cleaner = Cleaners()
sent_complaint = cleaner.capitalize_incident(complaint_data[0])
check_complaint = CitizenComplaintIMPD.query.filter_by(opaque_id=sent_complaint['opaqueId']).first()
assert check_complaint.occured_date.strftime('%Y-%m-%d %-H:%-M:%S') == sent_complaint['occuredDate']
assert check_complaint.division == sent_complaint['division']
assert check_complaint.precinct == sent_complaint['precinct']
assert check_complaint.shift == sent_complaint['shift']
assert check_complaint.beat == sent_complaint['beat']
assert check_complaint.disposition == sent_complaint['disposition']
assert check_complaint.service_type == sent_complaint['serviceType']
assert check_complaint.source == sent_complaint['source']
assert check_complaint.allegation_type == sent_complaint['allegationType']
assert check_complaint.allegation == sent_complaint['allegation']
assert check_complaint.resident_race == cleaner.race(sent_complaint['residentRace'])
assert check_complaint.resident_sex == cleaner.sex(sent_complaint['residentSex'])
assert check_complaint.resident_age == cleaner.number_to_string(sent_complaint['residentAge'])
assert check_complaint.officer_identifier == sent_complaint['officerIdentifier']
assert check_complaint.officer_race == cleaner.race(sent_complaint['officerRace'])
assert check_complaint.officer_sex == cleaner.sex(sent_complaint['officerSex'])
assert check_complaint.officer_age == cleaner.number_to_string(sent_complaint['officerAge'])
assert check_complaint.officer_years_of_service == cleaner.number_to_string(sent_complaint['officerYearsOfService'])
def test_update_complaint_data(self, testapp):
''' Updated complaint data from the extractor is processed as expected.
'''
# Set up the extractor
department = Department.create(name="IM Police Department", short_name="IMPD", load_defaults=False)
extractor, envs = Extractor.from_department_and_password(department=department, password="password")
# Set the correct authorization
testapp.authorization = ('Basic', (extractor.username, 'password'))
# Get a generated list of complaint descriptions from the JSON test client
test_client = JSONTestClient()
complaint_data = test_client.get_prebaked_complaints(last=1)
# post the json to the complaint URL
response = testapp.post_json("/data/complaints", params={'month': 0, 'year': 0, 'data': complaint_data})
# assert that we got the expected response
assert response.status_code == 200
assert response.json_body['updated'] == 0
assert response.json_body['added'] == 1
# Get the second pre-baked complaint
updated_complaint_data = test_client.get_prebaked_complaints(first=1, last=2)
# Swap in the opaque ID from the first complaint
updated_complaint_data[0]["opaqueId"] = complaint_data[0]["opaqueId"]
# The complaint won't be a match unless these fields are the same
updated_complaint_data[0]["allegationType"] = complaint_data[0]["allegationType"]
updated_complaint_data[0]["allegation"] = complaint_data[0]["allegation"]
updated_complaint_data[0]["officerIdentifier"] = complaint_data[0]["officerIdentifier"]
updated_complaint_data[0]["residentRace"] = complaint_data[0]["residentRace"]
updated_complaint_data[0]["residentSex"] = complaint_data[0]["residentSex"]
updated_complaint_data[0]["residentAge"] = complaint_data[0]["residentAge"]
# post the json to the complaint URL
response = testapp.post_json("/data/complaints", params={'month': 0, 'year': 0, 'data': updated_complaint_data})
# assert that we got the expected response
assert response.status_code == 200
assert response.json_body['updated'] == 1
assert response.json_body['added'] == 0
# There's only one complaint in the database.
all_complaints = CitizenComplaintIMPD.query.all()
assert len(all_complaints) == 1
# check the complaint incident in the database against the updated data that was sent
cleaner = Cleaners()
sent_complaint = cleaner.capitalize_incident(updated_complaint_data[0])
check_complaint = CitizenComplaintIMPD.query.filter_by(opaque_id=sent_complaint['opaqueId']).first()
assert check_complaint.occured_date.strftime('%Y-%m-%d %-H:%-M:%S') == sent_complaint['occuredDate']
assert check_complaint.division == sent_complaint['division']
assert check_complaint.precinct == sent_complaint['precinct']
assert check_complaint.shift == sent_complaint['shift']
assert check_complaint.beat == sent_complaint['beat']
assert check_complaint.disposition == sent_complaint['disposition']
assert check_complaint.service_type == sent_complaint['serviceType']
assert check_complaint.source == sent_complaint['source']
assert check_complaint.allegation_type == sent_complaint['allegationType']
assert check_complaint.allegation == sent_complaint['allegation']
assert check_complaint.resident_race == cleaner.race(sent_complaint['residentRace'])
assert check_complaint.resident_sex == cleaner.sex(sent_complaint['residentSex'])
assert check_complaint.resident_age == cleaner.number_to_string(sent_complaint['residentAge'])
assert check_complaint.officer_identifier == sent_complaint['officerIdentifier']
assert check_complaint.officer_race == cleaner.race(sent_complaint['officerRace'])
assert check_complaint.officer_sex == cleaner.sex(sent_complaint['officerSex'])
assert check_complaint.officer_age == cleaner.number_to_string(sent_complaint['officerAge'])
assert check_complaint.officer_years_of_service == cleaner.number_to_string(sent_complaint['officerYearsOfService'])
def test_skip_multiple_complaint_data(self, testapp):
''' Complaint data that duplicates an existing record is skipped as a 'multiple'.
'''
# Set up the extractor
department = Department.create(name="IM Police Department", short_name="IMPD", load_defaults=False)
extractor, envs = Extractor.from_department_and_password(department=department, password="password")
# Set the correct authorization
testapp.authorization = ('Basic', (extractor.username, 'password'))
# Get a generated list of complaint descriptions from the JSON test client
test_client = JSONTestClient()
complaint_data = test_client.get_prebaked_complaints(last=1)
# post the json to the complaint URL
response = testapp.post_json("/data/complaints", params={'month': 0, 'year': 0, 'data': complaint_data})
# assert that we got the expected response
assert response.status_code == 200
assert response.json_body['updated'] == 0
assert response.json_body['added'] == 1
# Get the second pre-baked complaint
multiple_complaint_data = test_client.get_prebaked_complaints(first=1, last=2)
# Swap in the opaque ID from the first complaint
multiple_complaint_data[0]["opaqueId"] = complaint_data[0]["opaqueId"]
# The complaint will be skipped as a 'multiple' if these fields are the same
multiple_complaint_data[0]["allegationType"] = complaint_data[0]["allegationType"]
multiple_complaint_data[0]["allegation"] = complaint_data[0]["allegation"]
multiple_complaint_data[0]["officerIdentifier"] = complaint_data[0]["officerIdentifier"]
# post the json to the complaint URL
response = testapp.post_json("/data/complaints", params={'month': 0, 'year': 0, 'data': multiple_complaint_data})
# assert that we got the expected response
assert response.status_code == 200
assert response.json_body['updated'] == 0
assert response.json_body['added'] == 0
# There is one complaint in the database.
all_complaints = CitizenComplaintIMPD.query.all()
assert len(all_complaints) == 1
def test_post_complaint_data_near_match_does_not_update(self, testapp):
''' Complaint data with the same ID but different details creates a new record.
'''
# Set up the extractor
department = Department.create(name="IM Police Department", short_name="IMPD", load_defaults=False)
extractor, envs = Extractor.from_department_and_password(department=department, password="password")
# Set the correct authorization
testapp.authorization = ('Basic', (extractor.username, 'password'))
# Get a generated list of complaint descriptions from the JSON test client
test_client = JSONTestClient()
complaint_data = test_client.get_prebaked_complaints(last=1)
# post the json to the complaint URL
response = testapp.post_json("/data/complaints", params={'month': 0, 'year': 0, 'data': complaint_data})
# assert that we got the expected response
assert response.status_code == 200
assert response.json_body['updated'] == 0
assert response.json_body['added'] == 1
# Get the second pre-baked complaint
updated_complaint_data = test_client.get_prebaked_complaints(first=1, last=2)
# Swap in the opaque ID from the first complaint
updated_complaint_data[0]["opaqueId"] = complaint_data[0]["opaqueId"]
# post the json to the complaint URL
response = testapp.post_json("/data/complaints", params={'month': 0, 'year': 0, 'data': updated_complaint_data})
# assert that we got the expected response
assert response.status_code == 200
assert response.json_body['updated'] == 0
assert response.json_body['added'] == 1
# There are two complaints in the database.
all_complaints = CitizenComplaintIMPD.query.all()
assert len(all_complaints) == 2
def test_post_uof_data(self, testapp):
''' New UOF data from the extractor is processed as expected.
'''
# Set up the extractor
department = Department.create(name="IM Police Department", short_name="IMPD", load_defaults=False)
extractor, envs = Extractor.from_department_and_password(department=department, password="password")
# Set the correct authorization
testapp.authorization = ('Basic', (extractor.username, 'password'))
# Get a generated list of UOF descriptions from the JSON test client
test_client = JSONTestClient()
uof_count = 1
uof_data = test_client.get_prebaked_uof(last=uof_count)
# post the json to the UOF URL
response = testapp.post_json("/data/UOF", params={'month': 0, 'year': 0, 'data': uof_data})
# assert that we got the expected response
assert response.status_code == 200
assert response.json_body['updated'] == 0
assert response.json_body['added'] == uof_count
# check the uof incident in the database against the data that was sent
cleaner = Cleaners()
sent_uof = uof_data[0]
check_uof = UseOfForceIncidentIMPD.query.filter_by(opaque_id=sent_uof['opaqueId']).first()
assert check_uof.occured_date.strftime('%Y-%m-%d %-H:%-M:%S') == sent_uof['occuredDate']
assert check_uof.division == cleaner.capitalize(sent_uof['division'])
assert check_uof.precinct == cleaner.capitalize(sent_uof['precinct'])
assert check_uof.shift == cleaner.capitalize(sent_uof['shift'])
assert check_uof.beat == cleaner.capitalize(sent_uof['beat'])
assert check_uof.disposition == sent_uof['disposition']
assert check_uof.officer_force_type == cleaner.officer_force_type(sent_uof['officerForceType'])
assert check_uof.use_of_force_reason == sent_uof['useOfForceReason']
assert check_uof.service_type == sent_uof['serviceType']
assert check_uof.arrest_made == sent_uof['arrestMade']
assert check_uof.arrest_charges == sent_uof['arrestCharges']
assert check_uof.resident_weapon_used == sent_uof['residentWeaponUsed']
assert check_uof.resident_injured == sent_uof['residentInjured']
assert check_uof.resident_hospitalized == sent_uof['residentHospitalized']
assert check_uof.officer_injured == sent_uof['officerInjured']
assert check_uof.officer_hospitalized == sent_uof['officerHospitalized']
assert check_uof.resident_race == cleaner.race(sent_uof['residentRace'])
assert check_uof.resident_sex == cleaner.sex(sent_uof['residentSex'])
assert check_uof.resident_age == cleaner.number_to_string(sent_uof['residentAge'])
assert check_uof.resident_condition == sent_uof['residentCondition']
assert check_uof.officer_identifier == sent_uof['officerIdentifier']
assert check_uof.officer_race == cleaner.race(sent_uof['officerRace'])
assert check_uof.officer_sex == cleaner.sex(sent_uof['officerSex'])
assert check_uof.officer_age == cleaner.number_to_string(sent_uof['officerAge'])
assert check_uof.officer_years_of_service == cleaner.number_to_string(sent_uof['officerYearsOfService'])
assert check_uof.officer_condition == sent_uof['officerCondition']
def test_post_mistyped_uof_data(self, testapp):
''' New UOF data from the extractor with wrongly typed data is processed as expected.
'''
# Set up the extractor
department = Department.create(name="IM Police Department", short_name="IMPD", load_defaults=False)
extractor, envs = Extractor.from_department_and_password(department=department, password="password")
# Set the correct authorization
testapp.authorization = ('Basic', (extractor.username, 'password'))
# Get a generated list of UOF descriptions from the JSON test client
test_client = JSONTestClient()
uof_count = 1
uof_data = test_client.get_prebaked_uof(last=uof_count)
# The app expects number values to be transmitted as strings. Let's change them to integers.
uof_data[0]['residentAge'] = 28
uof_data[0]['officerAge'] = 46
uof_data[0]['officerYearsOfService'] = 17
# post the json to the UOF URL
response = testapp.post_json("/data/UOF", params={'month': 0, 'year': 0, 'data': uof_data})
# assert that we got the expected response
assert response.status_code == 200
assert response.json_body['updated'] == 0
assert response.json_body['added'] == uof_count
# check the uof incident in the database against the data that was sent
cleaner = Cleaners()
sent_uof = uof_data[0]
check_uof = UseOfForceIncidentIMPD.query.filter_by(opaque_id=sent_uof['opaqueId']).first()
assert check_uof.occured_date.strftime('%Y-%m-%d %-H:%-M:%S') == sent_uof['occuredDate']
assert check_uof.division == cleaner.capitalize(sent_uof['division'])
assert check_uof.precinct == cleaner.capitalize(sent_uof['precinct'])
assert check_uof.shift == cleaner.capitalize(sent_uof['shift'])
assert check_uof.beat == cleaner.capitalize(sent_uof['beat'])
assert check_uof.disposition == sent_uof['disposition']
assert check_uof.officer_force_type == cleaner.officer_force_type(sent_uof['officerForceType'])
assert check_uof.use_of_force_reason == sent_uof['useOfForceReason']
assert check_uof.service_type == sent_uof['serviceType']
assert check_uof.arrest_made == sent_uof['arrestMade']
assert check_uof.arrest_charges == sent_uof['arrestCharges']
assert check_uof.resident_weapon_used == sent_uof['residentWeaponUsed']
assert check_uof.resident_injured == sent_uof['residentInjured']
assert check_uof.resident_hospitalized == sent_uof['residentHospitalized']
assert check_uof.officer_injured == sent_uof['officerInjured']
assert check_uof.officer_hospitalized == sent_uof['officerHospitalized']
assert check_uof.resident_race == cleaner.race(sent_uof['residentRace'])
assert check_uof.resident_sex == cleaner.sex(sent_uof['residentSex'])
assert check_uof.resident_age == cleaner.number_to_string(sent_uof['residentAge'])
assert check_uof.resident_condition == sent_uof['residentCondition']
assert check_uof.officer_identifier == sent_uof['officerIdentifier']
assert check_uof.officer_race == cleaner.race(sent_uof['officerRace'])
assert check_uof.officer_sex == cleaner.sex(sent_uof['officerSex'])
assert check_uof.officer_age == cleaner.number_to_string(sent_uof['officerAge'])
assert check_uof.officer_years_of_service == cleaner.number_to_string(sent_uof['officerYearsOfService'])
assert check_uof.officer_condition == sent_uof['officerCondition']
def test_update_uof_data(self, testapp):
''' Updated UOF data from the extractor is processed as expected.
'''
# Set up the extractor
department = Department.create(name="IM Police Department", short_name="IMPD", load_defaults=False)
extractor, envs = Extractor.from_department_and_password(department=department, password="password")
# Set the correct authorization
testapp.authorization = ('Basic', (extractor.username, 'password'))
# Get a generated list of UOF descriptions from the JSON test client
test_client = JSONTestClient()
uof_data = test_client.get_prebaked_uof(last=1)
# post the json to the UOF URL
response = testapp.post_json("/data/UOF", params={'month': 0, 'year': 0, 'data': uof_data})
# assert that we got the expected response
assert response.status_code == 200
assert response.json_body['updated'] == 0
assert response.json_body['added'] == 1
# Get the second pre-baked uof incident
updated_uof_data = test_client.get_prebaked_uof(first=1, last=2)
# Swap in the opaque ID from the first uof incident
updated_uof_data[0]["opaqueId"] = uof_data[0]["opaqueId"]
# The uof incident won't be a match unless these fields are the same
updated_uof_data[0]["officerIdentifier"] = uof_data[0]["officerIdentifier"]
updated_uof_data[0]["officerForceType"] = uof_data[0]["officerForceType"]
# post the json to the uof URL
response = testapp.post_json("/data/UOF", params={'month': 0, 'year': 0, 'data': updated_uof_data})
# assert that we got the expected response
assert response.status_code == 200
assert response.json_body['updated'] == 1
assert response.json_body['added'] == 0
# There's only one UOF incident in the database.
all_uof = UseOfForceIncidentIMPD.query.all()
assert len(all_uof) == 1
# check the uof incident in the database against the updated data that was sent
cleaner = Cleaners()
sent_uof = updated_uof_data[0]
check_uof = UseOfForceIncidentIMPD.query.filter_by(opaque_id=sent_uof['opaqueId']).first()
assert check_uof.occured_date.strftime('%Y-%m-%d %-H:%-M:%S') == sent_uof['occuredDate']
assert check_uof.division == cleaner.capitalize(sent_uof['division'])
assert check_uof.precinct == cleaner.capitalize(sent_uof['precinct'])
assert check_uof.shift == cleaner.capitalize(sent_uof['shift'])
assert check_uof.beat == cleaner.capitalize(sent_uof['beat'])
assert check_uof.disposition == sent_uof['disposition']
assert check_uof.officer_force_type == cleaner.officer_force_type(sent_uof['officerForceType'])
assert check_uof.use_of_force_reason == sent_uof['useOfForceReason']
assert check_uof.service_type == sent_uof['serviceType']
assert check_uof.arrest_made == sent_uof['arrestMade']
assert check_uof.arrest_charges == sent_uof['arrestCharges']
assert check_uof.resident_weapon_used == sent_uof['residentWeaponUsed']
assert check_uof.resident_injured == sent_uof['residentInjured']
assert check_uof.resident_hospitalized == sent_uof['residentHospitalized']
assert check_uof.officer_injured == sent_uof['officerInjured']
assert check_uof.officer_hospitalized == sent_uof['officerHospitalized']
assert check_uof.resident_race == cleaner.race(sent_uof['residentRace'])
assert check_uof.resident_sex == cleaner.sex(sent_uof['residentSex'])
assert check_uof.resident_age == cleaner.number_to_string(sent_uof['residentAge'])
assert check_uof.resident_condition == sent_uof['residentCondition']
assert check_uof.officer_identifier == sent_uof['officerIdentifier']
assert check_uof.officer_race == cleaner.race(sent_uof['officerRace'])
assert check_uof.officer_sex == cleaner.sex(sent_uof['officerSex'])
assert check_uof.officer_age == cleaner.number_to_string(sent_uof['officerAge'])
assert check_uof.officer_years_of_service == cleaner.number_to_string(sent_uof['officerYearsOfService'])
assert check_uof.officer_condition == sent_uof['officerCondition']
def test_post_uof_data_near_match_does_not_update(self, testapp):
''' UOF data with the same ID but different details creates a new record.
'''
# Set up the extractor
department = Department.create(name="IM Police Department", short_name="IMPD", load_defaults=False)
extractor, envs = Extractor.from_department_and_password(department=department, password="password")
# Set the correct authorization
testapp.authorization = ('Basic', (extractor.username, 'password'))
# Get a generated list of UOF descriptions from the JSON test client
test_client = JSONTestClient()
uof_data = test_client.get_prebaked_uof(last=1)
# post the json to the UOF URL
response = testapp.post_json("/data/UOF", params={'month': 0, 'year': 0, 'data': uof_data})
# assert that we got the expected response
assert response.status_code == 200
assert response.json_body['updated'] == 0
assert response.json_body['added'] == 1
# Get the second pre-baked uof incident
updated_uof_data = test_client.get_prebaked_uof(first=1, last=2)
# Swap in the opaque ID from the first uof incident
updated_uof_data[0]["opaqueId"] = uof_data[0]["opaqueId"]
# post the json to the uof URL
response = testapp.post_json("/data/UOF", params={'month': 0, 'year': 0, 'data': updated_uof_data})
# assert that we got the expected response
assert response.status_code == 200
assert response.json_body['updated'] == 0
assert response.json_body['added'] == 1
# There are two UOF incidents in the database.
all_uof = UseOfForceIncidentIMPD.query.all()
assert len(all_uof) == 2
def test_post_ois_data(self, testapp):
''' New OIS data from the extractor is processed as expected.
'''
# Set up the extractor
department = Department.create(name="IM Police Department", short_name="IMPD", load_defaults=False)
extractor, envs = Extractor.from_department_and_password(department=department, password="password")
# Set the correct authorization
testapp.authorization = ('Basic', (extractor.username, 'password'))
# Get a generated list of OIS descriptions from the JSON test client
test_client = JSONTestClient()
ois_count = 1
ois_data = test_client.get_prebaked_ois(last=ois_count)
# post the json to the OIS URL
response = testapp.post_json("/data/OIS", params={'month': 0, 'year': 0, 'data': ois_data})
# assert that we got the expected response
assert response.status_code == 200
assert response.json_body['updated'] == 0
assert response.json_body['added'] == ois_count
# check the ois incident in the database against the data that was sent
cleaner = Cleaners()
sent_ois = ois_data[0]
check_ois = OfficerInvolvedShootingIMPD.query.filter_by(opaque_id=sent_ois['opaqueId']).first()
assert check_ois.occured_date.strftime('%Y-%m-%d %-H:%-M:%S') == sent_ois['occuredDate']
assert check_ois.division == cleaner.capitalize(sent_ois['division'])
assert check_ois.precinct == cleaner.capitalize(sent_ois['precinct'])
assert check_ois.shift == cleaner.capitalize(sent_ois['shift'])
assert check_ois.beat == cleaner.capitalize(sent_ois['beat'])
assert check_ois.disposition == sent_ois['disposition']
assert check_ois.resident_race == cleaner.race(sent_ois['residentRace'])
assert check_ois.resident_sex == cleaner.sex(sent_ois['residentSex'])
assert check_ois.resident_age == cleaner.number_to_string(sent_ois['residentAge'])
assert check_ois.resident_weapon_used == cleaner.resident_weapon_used(sent_ois['residentWeaponUsed'])
assert check_ois.resident_condition == sent_ois['residentCondition']
assert check_ois.officer_identifier == sent_ois['officerIdentifier']
assert check_ois.officer_weapon_used == sent_ois['officerForceType']
assert check_ois.officer_race == cleaner.race(sent_ois['officerRace'])
assert check_ois.officer_sex == cleaner.sex(sent_ois['officerSex'])
assert check_ois.officer_age == cleaner.number_to_string(sent_ois['officerAge'])
assert check_ois.officer_years_of_service == cleaner.string_to_integer(sent_ois['officerYearsOfService'])
assert check_ois.officer_condition == sent_ois['officerCondition']
def test_post_mistyped_ois_data(self, testapp):
''' New OIS data from the extractor with wrongly typed data is processed as expected.
'''
# Set up the extractor
department = Department.create(name="IM Police Department", short_name="IMPD", load_defaults=False)
extractor, envs = Extractor.from_department_and_password(department=department, password="password")
# Set the correct authorization
testapp.authorization = ('Basic', (extractor.username, 'password'))
# Get a generated list of OIS descriptions from the JSON test client
test_client = JSONTestClient()
ois_count = 1
ois_data = test_client.get_prebaked_ois(last=ois_count)
# The app expects number values to be transmitted as strings. Let's change them to integers.
ois_data[0]['residentAge'] = 28
ois_data[0]['officerAge'] = 46
# And it expects this number value to be transmitted as a number, so let's make it a string.
ois_data[0]['officerYearsOfService'] = "17"
# post the json to the OIS URL
response = testapp.post_json("/data/OIS", params={'month': 0, 'year': 0, 'data': ois_data})
# assert that we got the expected response
assert response.status_code == 200
assert response.json_body['updated'] == 0
assert response.json_body['added'] == ois_count
# check the ois incident in the database against the data that was sent
cleaner = Cleaners()
sent_ois = ois_data[0]
check_ois = OfficerInvolvedShootingIMPD.query.filter_by(opaque_id=sent_ois['opaqueId']).first()
assert check_ois.occured_date.strftime('%Y-%m-%d %-H:%-M:%S') == sent_ois['occuredDate']
assert check_ois.division == cleaner.capitalize(sent_ois['division'])
assert check_ois.precinct == cleaner.capitalize(sent_ois['precinct'])
assert check_ois.shift == cleaner.capitalize(sent_ois['shift'])
assert check_ois.beat == cleaner.capitalize(sent_ois['beat'])
assert check_ois.disposition == sent_ois['disposition']
assert check_ois.resident_race == cleaner.race(sent_ois['residentRace'])
assert check_ois.resident_sex == cleaner.sex(sent_ois['residentSex'])
assert check_ois.resident_age == cleaner.number_to_string(sent_ois['residentAge'])
assert check_ois.resident_weapon_used == cleaner.resident_weapon_used(sent_ois['residentWeaponUsed'])
assert check_ois.resident_condition == sent_ois['residentCondition']
assert check_ois.officer_identifier == sent_ois['officerIdentifier']
assert check_ois.officer_weapon_used == sent_ois['officerForceType']
assert check_ois.officer_race == cleaner.race(sent_ois['officerRace'])
assert check_ois.officer_sex == cleaner.sex(sent_ois['officerSex'])
assert check_ois.officer_age == cleaner.number_to_string(sent_ois['officerAge'])
assert check_ois.officer_years_of_service == cleaner.string_to_integer(sent_ois['officerYearsOfService'])
assert check_ois.officer_condition == sent_ois['officerCondition']
def test_update_ois_data(self, testapp):
''' Updated OIS data from the extractor is processed as expected.
'''
# Set up the extractor
department = Department.create(name="IM Police Department", short_name="IMPD", load_defaults=False)
extractor, envs = Extractor.from_department_and_password(department=department, password="password")
# Set the correct authorization
testapp.authorization = ('Basic', (extractor.username, 'password'))
# Get a generated list of OIS descriptions from the JSON test client
test_client = JSONTestClient()
ois_data = test_client.get_prebaked_ois(last=1)
# post the json to the OIS URL
response = testapp.post_json("/data/OIS", params={'month': 0, 'year': 0, 'data': ois_data})
# assert that we got the expected response
assert response.status_code == 200
assert response.json_body['updated'] == 0
assert response.json_body['added'] == 1
# Get the second pre-baked ois incident
updated_ois_data = test_client.get_prebaked_ois(first=1, last=2)
# Swap in the opaque ID from the first ois incident
updated_ois_data[0]["opaqueId"] = ois_data[0]["opaqueId"]
# The ois incident won't be a match unless this field is the same
updated_ois_data[0]["officerIdentifier"] = ois_data[0]["officerIdentifier"]
# post the json to the ois URL
response = testapp.post_json("/data/OIS", params={'month': 0, 'year': 0, 'data': updated_ois_data})
# assert that we got the expected response
assert response.status_code == 200
assert response.json_body['updated'] == 1
assert response.json_body['added'] == 0
# There's only one OIS incident in the database.
all_ois = OfficerInvolvedShootingIMPD.query.all()
assert len(all_ois) == 1
# check the ois incident in the database against the updated data that was sent
cleaner = Cleaners()
sent_ois = updated_ois_data[0]
check_ois = OfficerInvolvedShootingIMPD.query.filter_by(opaque_id=sent_ois['opaqueId']).first()
assert check_ois.occured_date.strftime('%Y-%m-%d %-H:%-M:%S') == sent_ois['occuredDate']
assert check_ois.division == cleaner.capitalize(sent_ois['division'])
assert check_ois.precinct == cleaner.capitalize(sent_ois['precinct'])
assert check_ois.shift == cleaner.capitalize(sent_ois['shift'])
assert check_ois.beat == cleaner.capitalize(sent_ois['beat'])
assert check_ois.disposition == sent_ois['disposition']
assert check_ois.resident_race == cleaner.race(sent_ois['residentRace'])
assert check_ois.resident_sex == cleaner.sex(sent_ois['residentSex'])
assert check_ois.resident_age == cleaner.number_to_string(sent_ois['residentAge'])
assert check_ois.resident_weapon_used == cleaner.resident_weapon_used(sent_ois['residentWeaponUsed'])
assert check_ois.resident_condition == sent_ois['residentCondition']
assert check_ois.officer_identifier == sent_ois['officerIdentifier']
assert check_ois.officer_weapon_used == sent_ois['officerForceType']
assert check_ois.officer_race == cleaner.race(sent_ois['officerRace'])
assert check_ois.officer_sex == cleaner.sex(sent_ois['officerSex'])
assert check_ois.officer_age == cleaner.number_to_string(sent_ois['officerAge'])
assert check_ois.officer_years_of_service == cleaner.string_to_integer(sent_ois['officerYearsOfService'])
assert check_ois.officer_condition == sent_ois['officerCondition']
def test_post_ois_data_near_match_does_not_update(self, testapp):
''' OIS data with the same ID but different details creates a new record.
'''
# Set up the extractor
department = Department.create(name="IM Police Department", short_name="IMPD", load_defaults=False)
extractor, envs = Extractor.from_department_and_password(department=department, password="password")
# Set the correct authorization
testapp.authorization = ('Basic', (extractor.username, 'password'))
# Get a generated list of OIS descriptions from the JSON test client
test_client = JSONTestClient()
ois_data = test_client.get_prebaked_ois(last=1)
# post the json to the OIS URL
response = testapp.post_json("/data/OIS", params={'month': 0, 'year': 0, 'data': ois_data})
# assert that we got the expected response
assert response.status_code == 200
assert response.json_body['updated'] == 0
assert response.json_body['added'] == 1
# Get the second pre-baked ois incident
updated_ois_data = test_client.get_prebaked_ois(first=1, last=2)
# Swap in the opaque ID from the first ois incident
updated_ois_data[0]["opaqueId"] = ois_data[0]["opaqueId"]
# post the json to the ois URL
response = testapp.post_json("/data/OIS", params={'month': 0, 'year': 0, 'data': updated_ois_data})
# assert that we got the expected response
assert response.status_code == 200
assert response.json_body['updated'] == 0
assert response.json_body['added'] == 1
# There are two OIS incidents in the database.
all_ois = OfficerInvolvedShootingIMPD.query.all()
assert len(all_ois) == 2
# -*- coding: utf-8 -*-
"""
/***************************************************************************
iso4app
A QGIS plugin
iso4app nuovo path
-------------------
begin : 2018-02-07
git sha : $Format:%H$
copyright : (C) 2018 by <NAME>
email : <EMAIL>
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from __future__ import absolute_import
from time import sleep
from builtins import str
from builtins import range
from builtins import object
import os
import sys
import tempfile
import gettext
from . import resources
import datetime
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import QAction,QMenu,QApplication,QTableWidget,QTableWidgetItem,QProgressBar,QProgressDialog
from qgis.core import *
from qgis.utils import *
from qgis.gui import *
from .iso4app_dialog import iso4appDialog
from .iso4appApi import isoline
from .iso4appApi import massiveIsoline
from .iso4appService import iso4CallService
from . import iso4app_dialog
from . import iso4app_massive_dialog
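# Plugin entry point: QGIS instantiates this class with the QgisInterface. The constructor
# creates the single-isoline and massive dialogs, a toolbar, and a map tool that emits the
# canvas point clicked by the user.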
class MainPlugin(object):
def __init__(self, iface):
self.iface = iface
self.dlg=iso4app_dialog.iso4appDialog()
self.massiveDlg=iso4app_massive_dialog.iso4appMassiveDialog()
self.toolbar = self.iface.addToolBar(u'wfsOutputExtension')
self.canvas=iface.mapCanvas()
self.isoTool = QgsMapToolEmitPoint(self.canvas)
self.selectedLayer=None
self.isoDescr=''
self.stopRunning=0
self.timeStampLastMassiveRunning=datetime.datetime.now()
def name(self):
return "Iso4App"
def initGui(self):
QgsMessageLog.logMessage('initGui start', 'iso4app')
self.isoline = isoline(self.iface,self.dlg)
self.massiveIsoline = massiveIsoline(self.iface,self.massiveDlg)
self.requestAK = QAction("Request Api Key",self.iface.mainWindow())
self.requestAK.triggered.connect(self.clickRequestApiKey)
self.creditsAndPlans = QAction("Credits and Plans",self.iface.mainWindow())
self.creditsAndPlans.triggered.connect(self.clickCreditsAndPlans)
icon_path = ':/plugins/iso4app/icon.png'
self.action = QAction(QIcon(icon_path), "Iso4App", self.iface.mainWindow())
self.action.triggered.connect(self.run)
result = self.isoTool.canvasClicked.connect(self.place_iso)
self.iface.addToolBarIcon(self.action)
# connect the menu to the isoline actions
self.menu=QMenu("Iso4App")
self.menu.addActions([self.isoline])
self.menu.addActions([self.massiveIsoline])
menuBar = self.iface.mainWindow().menuBar()
menuBar.addMenu(self.menu)
self.menu.addSeparator()
self.menu.addAction(self.requestAK)
self.menu.addAction(self.creditsAndPlans)
self.isoline.triggered.connect(self.clickParameters)
self.massiveIsoline.triggered.connect(self.clickMassiveIsolines)
self.dlg.radioButtonIsochrone.toggled.connect(self.eventRbIsocrone)
self.dlg.radioButtonIsodistance.toggled.connect(self.eventRbIsodistance)
self.dlg.radioButtonIsodistanceAir.toggled.connect(self.eventRbIsodistanceAir)
self.dlg.comboTravelType.currentIndexChanged.connect(self.eventCbTravelType)
self.dlg.comboSpeedType.currentIndexChanged.connect(self.eventCbSpeedType)
self.dlg.button_box.clicked.connect(self.eventOkButton)
self.dlg.radioButtonPolygon.toggled.connect(self.eventRbPolygon)
self.dlg.radioButtonStreetNetwork.toggled.connect(self.eventRbStreetNetwork)
self.dlg.lnkAvailableCountries.clicked.connect(self.eventLnkAvailableCountries)
#MASSIVE
self.massiveDlg.pushButtonClose.clicked.connect(self.eventButtonCloseMassive)
self.massiveDlg.comboBoxLayers.currentIndexChanged.connect(self.eventCbBoxLayers)
self.massiveDlg.comboBoxAttributeAsDistance.currentIndexChanged.connect(self.eventCbBoxAttributesAsDistance)
self.massiveDlg.comboBoxAttributes.currentIndexChanged.connect(self.eventCbBoxAttributes)
self.massiveDlg.pushButtonCalculate.pressed.connect(self.disableButtonGroup)
self.massiveDlg.pushButtonCalculate.released.connect(self.calculate_massive_isolines)
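# Populate the static combo boxes: travel types, approximation, concavity, buffering,
# speed types, and the minute / meter / kilometre choices.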
self.dlg.comboTravelType.addItem('Motor vehicle')
self.dlg.comboTravelType.addItem('Bicycle')
self.dlg.comboTravelType.addItem('Pedestrian')
#
self.dlg.comboApprox.addItem('50 m')
self.dlg.comboApprox.addItem('100 m')
self.dlg.comboApprox.addItem('200 m')
self.dlg.comboApprox.addItem('300 m')
self.dlg.comboApprox.addItem('400 m')
self.dlg.comboApprox.addItem('500 m')
self.dlg.comboApprox.addItem('600 m')
self.dlg.comboApprox.addItem('700 m')
self.dlg.comboApprox.addItem('800 m')
self.dlg.comboApprox.addItem('900 m')
self.dlg.comboApprox.addItem('1000 m')
#
self.dlg.comboConcavity.addItem('0 (convex)')
self.dlg.comboConcavity.addItem('1 ')
self.dlg.comboConcavity.addItem('2 ')
self.dlg.comboConcavity.addItem('3 ')
self.dlg.comboConcavity.addItem('4 ')
self.dlg.comboConcavity.addItem('5 ')
self.dlg.comboConcavity.addItem('6 ')
self.dlg.comboConcavity.addItem('7 ')
self.dlg.comboConcavity.addItem('8 ')
self.dlg.comboBuffering.addItem('0')
self.dlg.comboBuffering.addItem('1')
self.dlg.comboBuffering.addItem('2')
self.dlg.comboBuffering.addItem('3')
self.dlg.comboBuffering.addItem('4')
self.dlg.comboBuffering.addItem('5')
self.dlg.comboBuffering.addItem('6')
self.dlg.comboBuffering.addItem('7')
self.dlg.comboBuffering.addItem('8')
self.dlg.comboSpeedType.addItem('Very low')
self.dlg.comboSpeedType.addItem('Low')
self.dlg.comboSpeedType.addItem('Normal')
self.dlg.comboSpeedType.addItem('Fast')
self.dlg.comboSeconds.addItem(repr(1)+' min')
for minutes in range(2,301):
self.dlg.comboSeconds.addItem(repr(minutes)+' mins')
for meters in range(50,1000):
remM=meters % 10
if remM == 0:
self.dlg.comboMeters.addItem(repr(meters)+' meters')
for km in range(1,51):
self.dlg.comboMeters.addItem(repr(km)+' km')
for km in range(55,101):
remK=km % 5
if remK == 0:
self.dlg.comboMeters.addItem(repr(km)+' km')
for km in range(101,501):
remK=km % 25
if remK == 0:
self.dlg.comboMeters.addItem(repr(km)+' km')
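# Restore the user's saved preferences (iso4app/* keys in QSettings) and apply them
# to the dialog widgets, falling back to the defaults given here.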
s = QSettings()
apiKey=s.value("iso4app/apy-key", "<KEY>")
if apiKey=='':
apiKey='<KEY>'
self.dlg.lineApiKey.setText(apiKey)
rbIsochrone=s.value("iso4app/rbIsochrone", True)
rbIsodistanceType=s.value("iso4app/rbIsodistanceType", "BYROAD")
if rbIsochrone:
self.dlg.radioButtonIsochrone.setChecked(True)
self.dlg.radioButtonIsodistance.setChecked(False)
self.dlg.comboMeters.setEnabled(False)
self.dlg.comboSeconds.setEnabled(True)
else:
if rbIsodistanceType=='BYROAD':
self.dlg.radioButtonIsodistance.setChecked(True)
self.dlg.radioButtonIsodistanceAir.setChecked(False)
QgsMessageLog.logMessage('BYROAD:'+str(self.dlg.radioButtonIsodistance.isChecked()), 'iso4app')
else:
self.dlg.radioButtonIsodistance.setChecked(False)
self.dlg.radioButtonIsodistanceAir.setChecked(True)
self.dlg.radioButtonIsochrone.setChecked(False)
self.dlg.comboMeters.setEnabled(True)
self.dlg.comboSeconds.setEnabled(False)
rbPolygon=s.value("iso4app/rbPolygon", True)
if rbPolygon:
self.dlg.radioButtonPolygon.setChecked(True)
self.dlg.radioButtonStreetNetwork.setChecked(False)
self.dlg.chkPopulation.setEnabled(True)
else:
self.dlg.radioButtonStreetNetwork.setChecked(True)
self.dlg.radioButtonPolygon.setChecked(False)
self.dlg.chkPopulation.setEnabled(False)
comboMeters=s.value("iso4app/comboMeters", 104)
comboSeconds=s.value("iso4app/comboSeconds", 9)
comboApprox=s.value("iso4app/comboApprox", 2)
comboConcavity=s.value("iso4app/comboConcavity", 5)
comboBuffering=s.value("iso4app/comboBuffering", 0)
comboSpeedType=s.value("iso4app/comboSpeedType", 2)
comboTravelType=s.value("iso4app/comboTravelType", 0)
self.dlg.comboMeters.setCurrentIndex(int(comboMeters))
self.dlg.comboSeconds.setCurrentIndex(int(comboSeconds))
self.dlg.comboApprox.setCurrentIndex(int(comboApprox))
self.dlg.comboConcavity.setCurrentIndex(int(comboConcavity))
self.dlg.comboBuffering.setCurrentIndex(int(comboBuffering))
self.dlg.comboSpeedType.setCurrentIndex(int(comboSpeedType))
self.dlg.comboTravelType.setCurrentIndex(int(comboTravelType))
checkBoxAllowBikeOnPedestrian=s.value("iso4app/checkBoxAllowBikeOnPedestrian", True)
if checkBoxAllowBikeOnPedestrian:
self.dlg.checkBoxAllowBikeOnPedestrian.setChecked(True)
else:
self.dlg.checkBoxAllowBikeOnPedestrian.setChecked(False)
checkBoxAllowPedBikeOnTrunk=s.value("iso4app/checkBoxAllowPedBikeOnTrunk", True)
if checkBoxAllowPedBikeOnTrunk:
self.dlg.checkBoxAllowPedBikeOnTrunk.setChecked(True)
else:
self.dlg.checkBoxAllowPedBikeOnTrunk.setChecked(False)
checkBoxAvoidTolls=s.value("iso4app/checkBoxAvoidTolls", True)
if checkBoxAvoidTolls:
self.dlg.checkBoxAvoidTolls.setChecked(True)
else:
self.dlg.checkBoxAvoidTolls.setChecked(False)
checkBoxRestrictedArea=s.value("iso4app/checkBoxRestrictedArea", True)
if checkBoxRestrictedArea:
self.dlg.checkBoxRestrictedArea.setChecked(True)
else:
self.dlg.checkBoxRestrictedArea.setChecked(False)
checkBoxReduceQueueTime=s.value("iso4app/checkBoxReduceQueueTime", True)
if checkBoxReduceQueueTime:
self.dlg.checkBoxReduceQueueTime.setChecked(True)
else:
self.dlg.checkBoxReduceQueueTime.setChecked(False)
checkBoxFastestRoute=s.value("iso4app/checkBoxFastestRoute", False)
if checkBoxFastestRoute:
self.dlg.checkBoxFastestRoute.setChecked(True)
else:
self.dlg.checkBoxFastestRoute.setChecked(False)
checkPopulation=s.value("iso4app/chkPopulation", False)
if isinstance(checkPopulation, bool):
if checkPopulation:
self.dlg.chkPopulation.setChecked(True)
else:
self.dlg.chkPopulation.setChecked(False)
else:
if checkPopulation=="true":
self.dlg.chkPopulation.setChecked(True)
else:
self.dlg.chkPopulation.setChecked(False)
checkBoxLogging=s.value("iso4app/checkBoxLogging", False)
if checkBoxLogging:
self.dlg.checkBoxLogging.setChecked(True)
else:
self.dlg.checkBoxLogging.setChecked(False)
if rbIsochrone:
self.dlg.checkBoxFastestRoute.setEnabled(False)
else:
idx=self.dlg.comboTravelType.currentIndex()
if idx==0:
self.dlg.checkBoxFastestRoute.setEnabled(True)
else:
self.dlg.checkBoxFastestRoute.setEnabled(False)
if self.dlg.checkBoxLogging.isChecked():
QgsMessageLog.logMessage('apiKey:'+apiKey, 'iso4app')
QgsMessageLog.logMessage('rbIsochrone:'+str(self.dlg.radioButtonIsochrone.isChecked()), 'iso4app')
QgsMessageLog.logMessage('rbIsodistanceAir:'+str(self.dlg.radioButtonIsodistanceAir.isChecked()), 'iso4app')
QgsMessageLog.logMessage('radioButtonIsodistance:'+str(self.dlg.radioButtonIsodistance.isChecked()), 'iso4app')
QgsMessageLog.logMessage('comboMeters:'+repr(comboMeters), 'iso4app')
QgsMessageLog.logMessage('comboSeconds:'+repr(comboSeconds), 'iso4app')
QgsMessageLog.logMessage('comboApprox:'+repr(comboApprox), 'iso4app')
QgsMessageLog.logMessage('comboConcavity:'+repr(comboConcavity), 'iso4app')
QgsMessageLog.logMessage('comboBuffering:'+repr(comboBuffering), 'iso4app')
QgsMessageLog.logMessage('comboSpeedType:'+repr(comboSpeedType), 'iso4app')
QgsMessageLog.logMessage('comboTravelType:'+repr(comboTravelType), 'iso4app')
QgsMessageLog.logMessage('checkBoxAllowBikeOnPedestrian:'+str(checkBoxAllowBikeOnPedestrian), 'iso4app')
QgsMessageLog.logMessage('checkBoxAllowPedBikeOnTrunk:'+str(checkBoxAllowPedBikeOnTrunk), 'iso4app')
QgsMessageLog.logMessage('checkBoxAvoidTolls:'+str(checkBoxAvoidTolls), 'iso4app')
QgsMessageLog.logMessage('checkBoxRestrictedArea:'+str(checkBoxRestrictedArea), 'iso4app')
QgsMessageLog.logMessage('checkBoxFastestRoute:'+str(checkBoxFastestRoute), 'iso4app')
QgsMessageLog.logMessage('checkBoxReduceQueueTime:'+str(checkBoxReduceQueueTime), 'iso4app')
QgsMessageLog.logMessage('chkPopulation:'+str(checkPopulation), 'iso4app')
QgsMessageLog.logMessage('rbStreetNetwork:'+str(self.dlg.radioButtonStreetNetwork.isChecked()), 'iso4app')
QgsMessageLog.logMessage('rbPolygon:'+str(self.dlg.radioButtonPolygon.isChecked()), 'iso4app')
QgsMessageLog.logMessage('initGui end', 'iso4app')
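# Menu actions that open the iso4app web pages in the default browser.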
def clickRequestApiKey(self):
QDesktopServices.openUrl(QUrl('http://www.iso4app.com#getapikey'))
def clickCreditsAndPlans(self):
QDesktopServices.openUrl(QUrl('http://www.iso4app.com#creditusage'))
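# Slot for the map tool's canvasClicked signal: builds in-memory pin and polygon (or
# street-network line) layers in the canvas CRS, calls iso4CallService to fetch the
# isoline for the clicked point, and adds the resulting layers to the project.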
def place_iso(self, pointTriggered, button):
try:
epsgCodeInput=self.canvas.mapSettings().destinationCrs().authid()
epsgCodeCanvas=self.canvas.mapSettings().destinationCrs().authid()
except:
epsgCodeInput=self.canvas.mapRenderer().destinationCrs().authid()
epsgCodeCanvas=self.canvas.mapRenderer().destinationCrs().authid()
layernamePoly='tmp polygon layer'
layernamePin='tmp point layer'
if self.dlg.radioButtonStreetNetwork.isChecked()==True:
vlyrPoly = QgsVectorLayer("multilinestring?crs="+epsgCodeCanvas, layernamePoly, "memory")
if self.dlg.radioButtonPolygon.isChecked()==True:
vlyrPoly = QgsVectorLayer("Polygon?crs="+epsgCodeCanvas, layernamePoly, "memory")
vlyrPin = QgsVectorLayer("Point?crs="+epsgCodeCanvas+"&field=id:integer&field=description:string(120)&field=x:double&field=y:double&index=yes",layernamePin,"memory")
QApplication.setOverrideCursor(Qt.WaitCursor)
instancei4a=None
try:
instancei4a=iso4CallService(self.iface,self.canvas,self.dlg,pointTriggered,epsgCodeInput,epsgCodeCanvas,vlyrPin,vlyrPoly,'','',None)
vlyrPoly.setName(instancei4a.layernamePoly)
vlyrPin.setName(instancei4a.layernamePin)
vlyrPoly.setOpacity(0.5)
if self.dlg.radioButtonStreetNetwork.isChecked()==True:
renderer=vlyrPoly.renderer()
rendererPin=vlyrPin.renderer()
pinColor=rendererPin.symbol().color().name()
symbolTmp=QgsLineSymbol.createSimple({'name':'LINE SYMBOL','width':'1', 'color':pinColor})
renderer.setSymbol(symbolTmp)
QgsProject.instance().addMapLayers([vlyrPin,vlyrPoly])
except Exception as inst:
QgsMessageLog.logMessage('Error:'+str(inst), 'iso4app')
QApplication.restoreOverrideCursor()
self.canvas.refresh()
return None
def clickParameters(self):
self.dlg.exec_()
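# Build a description of the currently configured isoline, list the project's layers
# in the massive dialog's layer combo box, and open the dialog.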
def clickMassiveIsolines(self):
# build the isoline description from the current dialog settings
rbIsodistanceAir=self.dlg.radioButtonIsodistanceAir.isChecked()
if rbIsodistanceAir:
isoDescr="ISODISTANCE BY AIR "
isoDescr += self.dlg.comboMeters.currentText()
else:
isoDescr=getParamDescription(self)
self.massiveDlg.labelIsolineDescription.setText(isoDescr)
self.isoDescr=isoDescr
layersNames = []
self.massiveDlg.comboBoxLayers.clear()
self.massiveDlg.tableWidgetPoints.clear()
self.massiveDlg.tableWidgetPoints.setRowCount(0)
self.massiveDlg.comboBoxLayers.addItem('Select a layer...')
mapLayers=QgsProject.instance().mapLayers()
#for i in list(QgsProject.instance().mapLayers().values()):
for i in mapLayers.values():
#lName=i.name().encode('utf-8')
lName=i.name()
layersNames.append(lName)
#QgsMessageLog.logMessage('calculate_massive_isolines lName:'+repr(lName), 'iso4app')
self.massiveDlg.comboBoxLayers.addItem(lName)
self.massiveDlg.exec_()
def unload(self):
pass
def eventCbSpeedType(self):
manageSpeed(self)
def eventRbPolygon(self):
managePolygonOption(self)
def eventRbStreetNetwork(self):
managePolygonOption(self)
def eventRbIsocrone(self):
isChecked=self.dlg.radioButtonIsochrone.isChecked()
self.dlg.checkBoxFastestRoute.setEnabled(False)
self.dlg.comboTravelType.setEnabled(True)
self.dlg.comboApprox.setEnabled(True)
self.dlg.checkBoxAvoidTolls.setEnabled(True)
self.dlg.checkBoxRestrictedArea.setEnabled(True)
self.dlg.radioButtonStreetNetwork.setEnabled(True)
manageTravelType(self)
managePolygonOption(self)
def eventRbIsodistance(self):
self.dlg.comboMeters.setEnabled(True)
idx=self.dlg.comboTravelType.currentIndex()
self.dlg.comboTravelType.setEnabled(True)
self.dlg.comboApprox.setEnabled(True)
self.dlg.checkBoxAvoidTolls.setEnabled(True)
self.dlg.checkBoxRestrictedArea.setEnabled(True)
self.dlg.radioButtonStreetNetwork.setEnabled(True)
if idx==0:
self.dlg.checkBoxFastestRoute.setEnabled(True)
manageTravelType(self)
managePolygonOption(self)
def eventRbIsodistanceAir(self):
self.dlg.comboMeters.setEnabled(True)
self.dlg.checkBoxFastestRoute.setEnabled(False)
self.dlg.comboTravelType.setEnabled(False)
self.dlg.comboApprox.setEnabled(False)
self.dlg.checkBoxAvoidTolls.setEnabled(False)
self.dlg.checkBoxRestrictedArea.setEnabled(False)
self.dlg.radioButtonStreetNetwork.setEnabled(False)
self.dlg.radioButtonPolygon.setChecked(True)
self.dlg.comboConcavity.setEnabled(False)
self.dlg.comboBuffering.setEnabled(False)
self.dlg.chkPopulation.setEnabled(False)
self.dlg.checkBoxAllowBikeOnPedestrian.setEnabled(False)
self.dlg.checkBoxAllowPedBikeOnTrunk.setEnabled(False)
def eventCbTravelType(self):
manageTravelType(self)
def eventLnkAvailableCountries(self):
QDesktopServices.openUrl(QUrl('http://www.iso4app.com/thematicMap.jsp'))
def calculate_massive_isolines_test(self):
rowCount=10
for row in range(0,rowCount):
progress=(float(row)/float(rowCount))
iface.statusBarIface().showMessage("Processed:"+str(row)+ ' of:'+str(rowCount))
sleep(1)
self.iface.mainWindow().repaint()
def calculate_massive_isolines(self):
self.stopRunning=0
timeStampNow=datetime.datetime.now()
self.massiveDlg.labelCriticalMsg.setText('')
lastTimeRunning=diffMillis(self.timeStampLastMassiveRunning,timeStampNow)
if lastTimeRunning>1000:
if self.massiveDlg.lineEditLayerName.text()!='':
if self.selectedLayer is not None:
epsgCodeInput=self.selectedLayer.crs().authid()
QgsMessageLog.logMessage('calculate_massive_isolines epsgCodeInput:'+epsgCodeInput, 'iso4app')
epsgCodeCanvas=self.canvas.mapSettings().destinationCrs().authid()
layernamePoly=self.massiveDlg.lineEditLayerName.text()
layernamePin='test pin'
if self.dlg.radioButtonStreetNetwork.isChecked()==True:
vlyrPoly = QgsVectorLayer("multilinestring?crs="+epsgCodeCanvas, layernamePoly, "memory")
if self.dlg.radioButtonPolygon.isChecked()==True:
vlyrPoly = QgsVectorLayer("Polygon?crs="+epsgCodeCanvas, layernamePoly, "memory")
vlyrPin = None
# handle the attributes on each feature
idxAttrbute4Layer=self.massiveDlg.comboBoxAttributes4Layer.currentIndex()
attributeName4Layer=''
attributeValue4Layer=''
if idxAttrbute4Layer>0:
attributeName4Layer=self.massiveDlg.comboBoxAttributes4Layer.currentText()
idxAttributeAsDistance=self.massiveDlg.comboBoxAttributeAsDistance.currentIndex()
okIso=0
errIso=0
QApplication.setOverrideCursor(Qt.WaitCursor)
try:
rowCount=self.massiveDlg.tableWidgetPoints.rowCount()
for row in range(0,rowCount):
if self.stopRunning==1: break
coordWgsX = self.massiveDlg.tableWidgetPoints.item(row,1)
coordWgsY = self.massiveDlg.tableWidgetPoints.item(row,2)
pointData=self.massiveDlg.tableWidgetPoints.item(row,4).data(0)
colorLayer=self.massiveDlg.tableWidgetPoints.item(row,5)
if idxAttrbute4Layer>0:
attributeValue4Layer = self.massiveDlg.tableWidgetPoints.item(row,idxAttrbute4Layer+5).text()
QgsMessageLog.logMessage('calculate_massive_isolines selected attribute index:'+repr(idxAttrbute4Layer)+' value:'+attributeValue4Layer, 'iso4app')
overWrittenDistance=None
goCalculation=False
if idxAttributeAsDistance>0:
valueDst=self.massiveDlg.tableWidgetPoints.item(row,idxAttributeAsDistance+5).text()
if valueDst.isdigit():
QgsMessageLog.logMessage('calculate_massive_isolines valueDst:'+valueDst, 'iso4app')
overWrittenDistance=int(valueDst)
QgsMessageLog.logMessage('calculate_massive_isolines overWrittenDistance:'+repr(overWrittenDistance)+ ' valueDst:'+valueDst, 'iso4app')
goCalculation=True
else:
goCalculation=True
if goCalculation:
QgsMessageLog.logMessage('calculate_massive_isolines X:'+coordWgsX.text()+ ' Y:'+coordWgsY.text()+ ' type:'+str(type(pointData))+ ' overWrittenDistance:'+repr(overWrittenDistance), 'iso4app')
instancei4a=iso4CallService(self.iface,self.canvas,self.dlg,pointData,epsgCodeInput,epsgCodeCanvas,vlyrPin,vlyrPoly,attributeName4Layer,attributeValue4Layer, overWrittenDistance)
rc=instancei4a.rc
rcMessageCritical=instancei4a.rcMessageCritical
if rc==0:
self.massiveDlg.tableWidgetPoints.item(row,3).setText('OK')
okIso=okIso+1
else:
self.massiveDlg.tableWidgetPoints.item(row,3).setText('ERR')
errIso=errIso+1
if len(rcMessageCritical)>0:
self.massiveDlg.labelCriticalMsg.setText('Massive elaboration STOPPED:'+rcMessageCritical)
break
self.massiveDlg.lineEditTotaPointOK.setText(repr(okIso))
self.massiveDlg.lineEditTotaPointError.setText(repr(errIso))
firstT = datetime.datetime.now()
sleep(1)
iface.statusBarIface().showMessage("Processed:"+str(row+1)+ ' of:'+str(rowCount))
self.iface.mainWindow().repaint()
else:
self.iface.mainWindow().repaint()
iface.statusBarIface().clearMessage()
if okIso>0:
vlyrPoly.setOpacity(0.5)
if self.dlg.radioButtonStreetNetwork.isChecked()==True:
renderer=vlyrPoly.renderer()
symbolTmp=QgsLineSymbol.createSimple({'name':'LINE SYMBOL','width':'1', 'color':colorLayer.text()})
renderer.setSymbol(symbolTmp)
QgsProject.instance().addMapLayers([vlyrPin,vlyrPoly])
except Exception as inst:
QgsMessageLog.logMessage('Error:'+str(inst), 'iso4app')
QApplication.restoreOverrideCursor()
self.canvas.refresh()
else:
self.iface.messageBar().pushMessage("Iso4App", 'Selected Layer has not any points!', level=2)
else:
self.iface.messageBar().pushMessage("Iso4App", 'Layer name required!', level=2)
# re-enable the close button in any case
self.massiveDlg.pushButtonClose.setEnabled(True)
self.timeStampLastMassiveRunning=datetime.datetime.now()
else:
QgsMessageLog.logMessage('click pressed in massive running: SKIP:', 'iso4app')
self.massiveDlg.pushButtonClose.setEnabled(True)
def disableButtonGroup(self):
QgsMessageLog.logMessage('disableButtonGroup triggered', 'iso4app')
self.massiveDlg.pushButtonClose.setEnabled(False)
def eventCbBoxAttributesAsDistance(self):
if self.selectedLayer is not None:
idx=self.massiveDlg.comboBoxAttributeAsDistance.currentIndex()
self.massiveDlg.labelCriticalMsg.setText('')
if idx>0:
rowCount=self.massiveDlg.tableWidgetPoints.rowCount()
attributeValuesNotNumeric=0
attributeValuesNumeric=0
for row in range(0,rowCount):
value = self.massiveDlg.tableWidgetPoints.item(row,idx+5)
if value.text().isdigit():
attributeValuesNumeric=attributeValuesNumeric+1
else:
attributeValuesNotNumeric=attributeValuesNotNumeric+1
if attributeValuesNotNumeric>0:
self.massiveDlg.labelCriticalMsg.setText('Warning: you have chosen an attribute value as the isoline distance, but non-numeric values are present; those points will be skipped during the massive isoline calculation')
self.massiveDlg.labelIsolineDescription.setText(self.isoDescr+ ' --> WARNING: DEFAULT DISTANCE WILL BE OVERWRITTEN BY THE ATTRIBUTE VALUE NAMED: '+ self.massiveDlg.comboBoxAttributeAsDistance.currentText())
def eventCbBoxAttributes(self):
suggestedLayerName=''
if self.selectedLayer is not None:
idx=self.massiveDlg.comboBoxAttributes.currentIndex()
if idx>0:
rowCount=self.massiveDlg.tableWidgetPoints.rowCount()
for row in range(0,rowCount):
value = self.massiveDlg.tableWidgetPoints.item(row,idx+5)
if len(value.text())>0:
suggestedLayerName+=value.text()+'_'
if len(suggestedLayerName)>200:
break
self.massiveDlg.lineEditLayerName.setText(suggestedLayerName)
def eventCbBoxLayers(self):
idx=self.massiveDlg.comboBoxLayers.currentIndex()
self.selectedLayer=None
self.massiveDlg.tableWidgetPoints.clear()
self.massiveDlg.comboBoxAttributes.clear()
self.massiveDlg.comboBoxAttributes4Layer.clear()
self.massiveDlg.comboBoxAttributeAsDistance.clear()
self.massiveDlg.tableWidgetPoints.setRowCount(0)
self.massiveDlg.lineEditLayerName.setText('')
self.massiveDlg.lineEditTotaPoint.setText('')
self.massiveDlg.lineEditTotaPointOK.setText('')
self.massiveDlg.lineEditTotaPointError.setText('')
if idx>0:
selectedLayer = list(QgsProject.instance().mapLayers().values())[idx-1]
epsgCodeInput=selectedLayer.crs().authid()
currentCoordSystem=QgsCoordinateReferenceSystem(epsgCodeInput)
gpsCoordSystem=QgsCoordinateReferenceSystem("EPSG:4326")
transformer = QgsCoordinateTransform(currentCoordSystem,gpsCoordSystem,QgsProject.instance())
attrNames=''
numAttr=0
self.massiveDlg.comboBoxAttributes.addItem('Select an attribute...')
self.massiveDlg.comboBoxAttributes4Layer.addItem('Select an attribute...')
self.massiveDlg.comboBoxAttributeAsDistance.addItem('Select an attribute...')
try:
for field in selectedLayer.fields():
attributeName=field.name() #.encode('utf-8')
attrNames+=attributeName+';'
numAttr=numAttr+1
self.massiveDlg.comboBoxAttributes.addItem(attributeName)
self.massiveDlg.comboBoxAttributes4Layer.addItem(attributeName)
self.massiveDlg.comboBoxAttributeAsDistance.addItem(attributeName)
except Exception as ex:
QgsMessageLog.logMessage(str(ex),'iso4app')
self.massiveDlg.tableWidgetPoints.setColumnCount(6+numAttr)
self.massiveDlg.tableWidgetPoints.setColumnWidth(0,40)
self.massiveDlg.tableWidgetPoints.setColumnWidth(3,50)
self.massiveDlg.tableWidgetPoints.setColumnWidth(4,60)
self.massiveDlg.tableWidgetPoints.setHorizontalHeaderLabels(('FID;LNG;LAT;STATUS;RESERVED;COLOR;'+attrNames).split(";"))
if self.dlg.checkBoxLogging.isChecked():
QgsMessageLog.logMessage('Info Layer:'+repr(idx)+ ' '+selectedLayer.name()+' epsg:'+repr(epsgCodeInput), 'iso4app')
try:
iter = selectedLayer.getFeatures()
idxRow=0
newSuggestedLayerName='isoline_'
QgsMessageLog.logMessage('before :', 'iso4app')
loopNum=0
colorLayer="#93604e"
for feature in iter:
geom = feature.geometry()
#geomTypeString=self.iface.QgsWKBTypes.displayString(int(geom.wkbType()))
QgsMessageLog.logMessage('geom:', 'iso4app')
QgsMessageLog.logMessage('geom.type()'+repr(geom.type()), 'iso4app')
#if geom.type() == QGis.WKBPoint:
if geom.type() == 0:
if loopNum==0:
rendererMassive=selectedLayer.renderer()
colorLayer=rendererMassive.symbol().color().name()
QgsMessageLog.logMessage('eventCbBoxLayers:'+colorLayer ,'iso4app')
loopNum=loopNum+1
pointOnLayer = geom.asPoint()
pt = transformer.transform(pointOnLayer)
QgsMessageLog.logMessage('point:'+str(pointOnLayer)+ ' '+str(pt), 'iso4app')
itemPointX = QTableWidgetItem(str(pt.x()))
itemPointY = QTableWidgetItem(str(pt.y()))
itemId = QTableWidgetItem(str(feature.id()))
itemStatus = QTableWidgetItem(' ')
itemQgisPoint = QTableWidgetItem(str(feature.id()))
itemQgisPoint.setData(0,geom.asPoint())
itemQgisLayerColor = QTableWidgetItem(str(colorLayer))
QgsMessageLog.logMessage('after itemQgisLayerColor' , 'iso4app')
self.massiveDlg.tableWidgetPoints.insertRow(idxRow)
self.massiveDlg.tableWidgetPoints.setItem(idxRow,0,itemId)
self.massiveDlg.tableWidgetPoints.setItem(idxRow,1,itemPointX)
self.massiveDlg.tableWidgetPoints.setItem(idxRow,2,itemPointY)
self.massiveDlg.tableWidgetPoints.setItem(idxRow,3,itemStatus)
self.massiveDlg.tableWidgetPoints.setItem(idxRow,4,itemQgisPoint)
self.massiveDlg.tableWidgetPoints.setItem(idxRow,5,itemQgisLayerColor)
QgsMessageLog.logMessage('after tableWidgetPoints itemQgisLayerColor' , 'iso4app')
if numAttr>0:
prgTable=6
valueAttr=''
for field in selectedLayer.fields():
if type(feature[field.name()])==int:
valueAttr=repr(feature[field.name()])
if type(feature[field.name()])==bool:
valueAttr=str(feature[field.name()])
if type(feature[field.name()])==float:
valueAttr=str(feature[field.name()])
if type(feature[field.name()])==str:
valueAttr=feature[field.name()]
#QgsMessageLog.logMessage('type feature:'+type(feature[field.name()]) , 'iso4app')
self.massiveDlg.tableWidgetPoints.setItem(idxRow,prgTable,QTableWidgetItem(valueAttr))
prgTable=prgTable+1
idxRow=idxRow+1
else:
QgsMessageLog.logMessage('no point', 'iso4app')
if idxRow>0:
self.selectedLayer=selectedLayer
self.massiveDlg.lineEditTotaPoint.setText(repr(idxRow))
except Exception as ex:
QgsMessageLog.logMessage('Warning: selected layer does not contain any features.'+str(ex) ,'iso4app')
def eventButtonCloseMassive(self):
QgsMessageLog.logMessage('eventButtonCloseMassive' ,'iso4app')
self.massiveDlg.close()
self.canvas.setMapTool(self.isoTool)
def eventOkButton(self):
QgsMessageLog.logMessage('eventOkButton' ,'iso4app')
s = QSettings()
if len(self.dlg.lineApiKey.text())!=36 :
self.dlg.lineApiKey.setText('<KEY>')
s.setValue("iso4app/apy-key", self.dlg.lineApiKey.text())
s.setValue("iso4app/rbIsochrone", self.dlg.radioButtonIsochrone.isChecked())
s.setValue("iso4app/comboMeters", self.dlg.comboMeters.currentIndex())
s.setValue("iso4app/comboSeconds", self.dlg.comboSeconds.currentIndex())
s.setValue("iso4app/comboApprox", self.dlg.comboApprox.currentIndex())
s.setValue("iso4app/comboConcavity", self.dlg.comboConcavity.currentIndex())
s.setValue("iso4app/comboBuffering", self.dlg.comboBuffering.currentIndex())
s.setValue("iso4app/comboSpeedType", self.dlg.comboSpeedType.currentIndex())
s.setValue("iso4app/comboTravelType", self.dlg.comboTravelType.currentIndex())
s.setValue("iso4app/checkBoxAllowBikeOnPedestrian", self.dlg.checkBoxAllowBikeOnPedestrian.isChecked())
s.setValue("iso4app/checkBoxAllowPedBikeOnTrunk", self.dlg.checkBoxAllowPedBikeOnTrunk.isChecked())
s.setValue("iso4app/checkBoxAvoidTolls", self.dlg.checkBoxAvoidTolls.isChecked())
s.setValue("iso4app/checkBoxRestrictedArea", self.dlg.checkBoxRestrictedArea.isChecked())
s.setValue("iso4app/checkBoxReduceQueueTime", self.dlg.checkBoxReduceQueueTime.isChecked())
s.setValue("iso4app/checkBoxLogging", self.dlg.checkBoxLogging.isChecked())
s.setValue("iso4app/checkBoxFastestRoute", self.dlg.checkBoxFastestRoute.isChecked())
s.setValue("iso4app/rbPolygon", self.dlg.radioButtonPolygon.isChecked())
s.setValue("iso4app/chkPopulation", self.dlg.chkPopulation.isChecked())
s.setValue("iso4app/rbStreetNetwork", self.dlg.radioButtonStreetNetwork.isChecked())
self.dlg.close()
self.canvas.setMapTool(self.isoTool)
def run(self):
self.canvas.setMapTool(self.isoTool)
def unload(self):
# Remove the plugin menu item and icon
self.iface.removePluginMenu("Iso4App",self.action)
self.iface.removeToolBarIcon(self.action)
def diffMillis(firstT,currT):
diff = currT - firstT
millis = diff.days * 24 * 60 * 60 * 1000
millis += diff.seconds * 1000
millis += diff.microseconds / 1000
return millis
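# Illustrative example (not part of the original plugin), assuming the module-level
# "import datetime" already used elsewhere in this file:
#   t0 = datetime.datetime(2020, 1, 1, 12, 0, 0)
#   t1 = datetime.datetime(2020, 1, 1, 12, 0, 1, 500000)
#   diffMillis(t0, t1)  # -> 1500.0 (days, seconds and microseconds folded into milliseconds)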
def getParamDescription(self):
rbIsochrone=self.dlg.radioButtonIsochrone.isChecked()
rbStreetNetwork=self.dlg.radioButtonStreetNetwork.isChecked()
rbPolygon=self.dlg.radioButtonPolygon.isChecked()
comboMeters=self.dlg.comboMeters.currentIndex()
comboSeconds=self.dlg.comboSeconds.currentIndex()
comboApprox=self.dlg.comboApprox.currentIndex()
comboConcavity=self.dlg.comboConcavity.currentIndex()
comboBuffering=self.dlg.comboBuffering.currentIndex()
comboSpeedType=self.dlg.comboSpeedType.currentIndex()
comboTravelType=self.dlg.comboTravelType.currentIndex()
checkBoxAvoidTolls=self.dlg.checkBoxAvoidTolls.isChecked()
checkBoxRestrictedArea=self.dlg.checkBoxRestrictedArea.isChecked()
checkBoxReduceQueueTime=self.dlg.checkBoxReduceQueueTime.isChecked()
checkBoxAllowBikeOnPedestrian=self.dlg.checkBoxAllowBikeOnPedestrian.isChecked()
checkBoxAllowPedBikeOnTrunk=self.dlg.checkBoxAllowPedBikeOnTrunk.isChecked()
checkBoxFastestRoute=self.dlg.checkBoxFastestRoute.isChecked()
checkBoxPopulation=self.dlg.chkPopulation.isChecked()
speedLimit=self.dlg.lineSpeed.text()
self.massiveDlg.labelCriticalMsg.setText('')
otherParam=''
if checkBoxAvoidTolls:
otherParam+=' Avoid Tolls: YES. '
else:
otherParam+=' Avoid Tolls: NO. '
if checkBoxRestrictedArea:
otherParam+=' Include Restricted Area: YES. '
else:
otherParam+=' Include Restricted Area: NO. '
isoDescr=''
fastestRoutingText=''
if rbIsochrone:
isoDescr+='ISOCHRONE'
valueIsoline = self.dlg.comboSeconds.currentText()
if comboSpeedType==0:
speedType=' Speed:Very Low'+'.'
if comboSpeedType==1:
speedType=' Speed:Low'+'.'
if comboSpeedType==2:
speedType=' Speed:Normal'+'.'
if comboSpeedType==3:
speedType=' Speed:Fast'+'.'
if checkBoxReduceQueueTime:
otherParam+=' Reduce queue time: YES. '
else:
otherParam+=' Reduce queue time: NO. '
if speedLimit!='':
otherParam+=' Speed Limit:'+speedLimit+'.'
else:
isoDescr+='ISODISTANCE'
speedType=''
valueIsoline = self.dlg.comboMeters.currentText()
if checkBoxFastestRoute:
fastestRoutingText=' Fastest Routing: YES. '
if rbStreetNetwork:
isoDescr+='(StreetNetwork)'
else:
isoDescr+='(Polygon)'
isoDescr+=' '+valueIsoline+'.'
if comboTravelType==0:
mobility='Motor Vehicle'
if comboTravelType==1:
mobility='Bicycle'
if checkBoxAllowPedBikeOnTrunk:
otherParam+=' Bicycle on Trunk: YES. '
else:
otherParam+=' Bicycle on Trunk: NO. '
if checkBoxAllowBikeOnPedestrian:
otherParam+=' Bicycle on Pedestrian path: YES. '
else:
otherParam+=' Bicycle on Pedestrian path: NO. '
if comboTravelType==2:
mobility='Pedestrian'
if checkBoxAllowPedBikeOnTrunk:
otherParam+=' Pedestrian on Trunk: YES. '
else:
otherParam+=' Pedestrian on Trunk: NO. '
otherParam+=' Concavity:'+repr(comboConcavity)+'. '
otherParam+=' Buffering:'+repr(comboBuffering)+'. '
isoDescr+=' Mobility:'+mobility+'.'
isoDescr+=fastestRoutingText
approxValue=self.dlg.comboApprox.currentText()
isoDescr+=' Start Point Approximation:'+approxValue+'.'
isoDescr+=speedType
isoDescr+=otherParam
if checkBoxPopulation:
isoDescr+=' Population=YES '
else:
isoDescr+=' Population=NO '
return isoDescr
def managePolygonOption(self):
isChecked=self.dlg.radioButtonPolygon.isChecked()
if isChecked==True :
self.dlg.comboConcavity.setEnabled(True)
self.dlg.comboBuffering.setEnabled(True)
self.dlg.chkPopulation.setEnabled(True)
else:
self.dlg.comboConcavity.setEnabled(False)
self.dlg.comboBuffering.setEnabled(False)
self.dlg.chkPopulation.setEnabled(False)
def manageTravelType(self):
idx=self.dlg.comboTravelType.currentIndex()
self.dlg.checkBoxAllowBikeOnPedestrian.setEnabled(False)
self.dlg.checkBoxAllowPedBikeOnTrunk.setEnabled(False)
self.dlg.checkBoxFastestRoute.setEnabled(True)
if idx==1:
self.dlg.checkBoxAllowBikeOnPedestrian.setEnabled(True)
self.dlg.checkBoxAllowPedBikeOnTrunk.setEnabled(True)
self.dlg.checkBoxFastestRoute.setEnabled(False)
if idx==2:
self.dlg.checkBoxAllowPedBikeOnTrunk.setEnabled(True)
self.dlg.checkBoxFastestRoute.setEnabled(False)
if self.dlg.radioButtonIsochrone.isChecked():
self.dlg.checkBoxFastestRoute.setEnabled(False)
manageSpeed(self)
def manageSpeed(self):
idxTT=self.dlg.comboTravelType.currentIndex()
idxST=self.dlg.comboSpeedType.currentIndex()
isChecked=self.dlg.radioButtonIsochrone.isChecked()
self.dlg.labelInfo.setText('')
self.dlg.lineSpeed.setText('')
if isChecked==1:
self.dlg.comboSpeedType.setEnabled(True)
self.dlg.checkBoxReduceQueueTime.setEnabled(True)
self.dlg.lineSpeed.setEnabled(True)
if idxTT==1:
if idxST==0:
self.dlg.lineSpeed.setText('8')
if idxST==1:
self.dlg.lineSpeed.setText('12')
if idxST==2:
self.dlg.lineSpeed.setText('16')
if idxST==3:
self.dlg.lineSpeed.setText('40')
self.dlg.labelInfo.setText('High default speed value! Please adjust it')
if idxTT==2:
if idxST==0:
self.dlg.lineSpeed.setText('3')
if idxST==1:
self.dlg.lineSpeed.setText('4.4')
if idxST==2:
self.dlg.lineSpeed.setText('5.4')
if idxST==3:
self.dlg.lineSpeed.setText('20')
self.dlg.labelInfo.setText('High default speed value! Please adjust it')
else:
self.dlg.comboSpeedType.setEnabled(False)
self.dlg.checkBoxReduceQueueTime.setEnabled(False)
self.dlg.lineSpeed.setEnabled(False)
return None
def progdialog(progress):
dialog = QProgressDialog()
dialog.setWindowTitle("Progress")
dialog.setLabelText("text")
bar = QProgressBar(dialog)
bar.setTextVisible(True)
bar.setValue(progress)
dialog.setBar(bar)
dialog.setMinimumWidth(300)
dialog.show()
return dialog, bar
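# Hedged usage sketch (not from the original plugin): progdialog returns both the dialog
# and its bar so the caller can keep updating the progress value. QProgressDialog,
# QProgressBar and QApplication are assumed to be imported at the top of this module.
#   dialog, bar = progdialog(0)
#   for done in range(0, 101, 10):
#       bar.setValue(done)
#       QApplication.processEvents()
#   dialog.close()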
|
<filename>util/MatrixLDPC.py
#
# This file is used to construct the sparse matrices needed for the MatrixLDPC
# implementation, from the specifications in the 802.11n-2009 standard.
#
import numpy
def outputCpp(rowPtrs, cols, vals, n, rateNumerator, rateDenominator):
print "#define CODE%d_RATE_%d_%d_NUM_ROWS %d" % (n, rateNumerator, rateDenominator, len(rowPtrs))
print "#define CODE%d_RATE_%d_%d_NUM_VALUES %d" % (n, rateNumerator, rateDenominator, len(vals))
print "const unsigned int code%d_rate_%d_%d_rowPtrs[CODE%d_RATE_%d_%d_NUM_ROWS] = { %s };" % \
(n, rateNumerator, rateDenominator, n, rateNumerator, rateDenominator, ','.join(map(str,rowPtrs)))
print "const unsigned int code%d_rate_%d_%d_cols[CODE%d_RATE_%d_%d_NUM_VALUES] = { %s };" % \
(n, rateNumerator, rateDenominator, n, rateNumerator, rateDenominator, ','.join(map(str,cols)))
print "const unsigned char code%d_rate_%d_%d_vals[CODE%d_RATE_%d_%d_NUM_VALUES] = { %s };" % \
(n, rateNumerator, rateDenominator, n, rateNumerator, rateDenominator, ','.join(map(str,vals)))
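# Illustration only (these values are hypothetical, not taken from the 802.11n tables):
# outputCpp([0, 2], [1, 3, 0], [5, 7, 9], 648, 1, 2) would print C declarations such as
#   #define CODE648_RATE_1_2_NUM_ROWS 2
#   #define CODE648_RATE_1_2_NUM_VALUES 3
#   const unsigned int code648_rate_1_2_rowPtrs[CODE648_RATE_1_2_NUM_ROWS] = { 0,2 };
#   const unsigned int code648_rate_1_2_cols[CODE648_RATE_1_2_NUM_VALUES] = { 1,3,0 };
#   const unsigned char code648_rate_1_2_vals[CODE648_RATE_1_2_NUM_VALUES] = { 5,7,9 };
# i.e. a compressed-sparse-row (CSR) description of the non-empty prototype entries.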
def getMatrixRepresentation(values, shouldTranspose, n, Z, rateNumerator, rateDenominator):
"""
@param values: a list of strings, ('-' for empty cell). These
represent the values
@param shouldTranspose: if True, the values are given column by column
(first col1 from top to bottom, then col2 from top to bottom, etc).
If False, values are given in rows.
@param n: the number of coded bits (length of codeword)
@param Z: the size of each cyclic-shift submatrix (the lifting factor), in bits
@param rateNumerator: the numerator of the rate (i.e. 3 for R=3/4)
@param rateDenominator: the denominator of the rate (i.e. 4 for R=3/4)
@return a tuple of lists (rowPtrs, values, columnIndices)
"""
assert(n % Z == 0)
assert(n % rateDenominator == 0)
k = n * rateNumerator // rateDenominator
nWords = n // Z
kWords = k // Z
def mapOne(val):
if val=='-':
return -1
else:
return int(val)
# Map strings to values
m = numpy.array([mapOne(x) for x in values], dtype = numpy.int32)
print(m)
# Shape into matrix
if shouldTranspose:
m = m.reshape([nWords,nWords - kWords]).transpose()
else:
m = m.reshape([nWords - kWords, nWords])
print(m)
# Disregard cyclic part
m = m[:,0:kWords]
print(m)
rowNumNonempty = (m != -1).sum(1)
rowPtrs = numpy.cumsum([0] + list(rowNumNonempty[:-1]))
print(rowPtrs)
cols = [(i % kWords) for i,v in enumerate(list(m.flatten())) if v!=-1]
vals = [v for i,v in enumerate(list(m.flatten())) if v!=-1]
print(cols)
print(numpy.array(cols)[rowPtrs])
print(vals)
outputCpp(list(rowPtrs), cols, vals, n, rateNumerator, rateDenominator)
return list(rowPtrs), cols, vals
# 648, 1/2
s = '0\n22\n6\n2\n23\n24\n25\n13\n7\n11\n25\n3\n-\n0\n-\n-\n-\n-\n-\n24\n20\n-\n-\n-\n-\n-\n0\n-\n-\n23\n-\n-\n-\n-\n8\n-\n-\n-\n-\n0\n-\n1\n-\n-\n16\n-\n-\n-\n0\n17\n10\n20\n3\n17\n8\n0\n22\n19\n23\n16\n0\n-\n-\n-\n-\n-\n-\n-\n10\n-\n18\n-\n-\n0\n-\n-\n-\n3\n-\n8\n-\n-\n-\n-\n-\n0\n-\n-\n-\n-\n-\n-\n-\n-\n14\n2\n0\n12\n24\n25\n0\n10\n7\n6\n23\n13\n9\n25\n-\n-\n-\n0\n-\n-\n18\n-\n-\n-\n-\n5\n-\n-\n0\n-\n9\n-\n-\n-\n-\n3\n-\n-\n0\n-\n-\n-\n11\n-\n-\n-\n-\n17\n-\n-\n1\n-\n-\n-\n-\n-\n0\n-\n-\n-\n-\n1\n0\n0\n-\n-\n-\n-\n-\n-\n-\n-\n-\n-\n-\n0\n0\n-\n-\n-\n-\n-\n-\n-\n-\n-\n-\n-\n0\n0\n-\n-\n-\n-\n-\n-\n-\n-\n-\n-\n-\n0\n0\n-\n-\n-\n-\n-\n-\n-\n-\n-\n-\n-\n0\n0\n-\n-\n-\n-\n-\n-\n-\n-\n-\n-\n-\n0\n0\n-\n-\n-\n-\n-\n-\n-\n-\n-\n-\n-\n0\n0\n-\n-\n-\n-\n-\n-\n-\n-\n-\n-\n-\n0\n0\n-\n-\n-\n-\n-\n-\n-\n-\n-\n-\n-\n0\n0\n-\n-\n-\n-\n-\n-\n-\n-\n-\n-\n-\n0\n0\n-\n-\n-\n-\n-\n-\n-\n-\n-\n-\n-\n0\n0'
# 648, 2/3
s = '25 26 14 - 20 - 2 - 4 - - 8 - 16 - 18 1 0 - - - - - -\n10 9 15 11 - 0 - 1 - - 18 - 8 - 10 - - 0 0 - - - - -\n16 2 20 26 21 - 6 - 1 26 - 7 - - - - - - 0 0 - - - -\n10 13 5 0 - 3 - 7 - - 26 - - 13 - 16 - - - 0 0 - - -\n23 14 24 - 12 - 19 - 17 - - - 20 - 21 - 0 - - - 0 0 - -\n6 22 9 20 - 25 - 17 - 8 - 14 - 18 - - - - - - - 0 0 -\n14 23 21 11 20 - 24 - 18 - 19 - - - - 22 - - - - - - 0 0\n17 11 11 20 - 21 - 26 - 3 - - 18 - 26 - 1 - - - - - - 0'
# 648, 3/4
s = '16 17 22 24 9 3 14 - 4 2 7 - 26 - 2 - 21 - 1 0 - - - -\n25 12 12 3 3 26 6 21 - 15 22 - 15 - 4 - - 16 - 0 0 - - -\n25 18 26 16 22 23 9 - 0 - 4 - 4 - 8 23 11 - - - 0 0 - -\n9 7 0 1 17 - - 7 3 - 3 23 - 16 - - 21 - 0 - - 0 0 -\n24 5 26 7 1 - - 15 24 15 - 8 - 13 - 13 - 11 - - - - 0 0\n2 2 19 14 24 1 15 19 - 21 - 2 - 24 - 3 - 2 1 - - - - 0'
# 648, 5/6
s = '17 13 8 21 9 3 18 12 10 0 4 15 19 2 5 10 26 19 13 13 1 0 - -\n3 12 11 14 11 25 5 18 0 9 2 26 26 10 24 7 14 20 4 2 - 0 0 -\n22 16 4 3 10 21 12 5 21 14 19 5 - 8 5 18 11 5 5 15 0 - 0 0\n7 7 14 14 4 16 16 24 24 10 1 7 15 6 10 26 8 18 21 14 1 - - 0'
# 1296, 1/2
s = '40 - - - 22 - 49 23 43 - - - 1 0 - - - - - - - - - -\n50 1 - - 48 35 - - 13 - 30 - - 0 0 - - - - - - - - -\n39 50 - - 4 - 2 - - - - 49 - - 0 0 - - - - - - - -\n33 - - 38 37 - - 4 1 - - - - - - 0 0 - - - - - - -\n45 - - - 0 22 - - 20 42 - - - - - - 0 0 - - - - - -\n51 - - 48 35 - - - 44 - 18 - - - - - - 0 0 - - - - -\n47 11 - - - 17 - - 51 - - - 0 - - - - - 0 0 - - - -\n5 - 25 - 6 - 45 - 13 40 - - - - - - - - - 0 0 - - -\n33 - - 34 24 - - - 23 - - 46 - - - - - - - - 0 0 - -\n1 - 27 - 1 - - - 38 - 44 - - - - - - - - - - 0 0 -\n- 18 - - 23 - - 8 0 35 - - - - - - - - - - - - 0 0\n49 - 17 - 30 - - - 34 - - 19 1 - - - - - - - - - - 0'
# 1296, 2/3
s = '39 31 22 43 - 40 4 - 11 - - 50 - - - 6 1 0 - - - - - -\n25 52 41 2 6 - 14 - 34 - - - 24 - 37 - - 0 0 - - - - -\n43 31 29 0 21 - 28 - - 2 - - 7 - 17 - - - 0 0 - - - -\n20 33 48 - 4 13 - 26 - - 22 - - 46 42 - - - - 0 0 - - -\n45 7 18 51 12 25 - - - 50 - - 5 - - - 0 - - - 0 0 - -\n35 40 32 16 5 - - 18 - - 43 51 - 32 - - - - - - - 0 0 -\n9 24 13 22 28 - - 37 - - 25 - - 52 - 13 - - - - - - 0 0\n32 22 4 21 16 - - - 27 28 - 38 - - - 8 1 - - - - - - 0'
# 1296, 3/4
s = '39 40 51 41 3 29 8 36 - 14 - 6 - 33 - 11 - 4 1 0 - - - -\n48 21 47 9 48 35 51 - 38 - 28 - 34 - 50 - 50 - - 0 0 - - -\n30 39 28 42 50 39 5 17 - 6 - 18 - 20 - 15 - 40 - - 0 0 - -\n29 0 1 43 36 30 47 - 49 - 47 - 3 - 35 - 34 - 0 - - 0 0 -\n1 32 11 23 10 44 12 7 - 48 - 4 - 9 - 17 - 16 - - - - 0 0\n13 7 15 47 23 16 47 - 43 - 29 - 52 - 2 - 53 - 1 - - - - 0'
# 1296, 5/6
s = '48 29 37 52 2 16 6 14 53 31 34 5 18 42 53 31 45 - 46 52 1 0 - -\n17 4 30 7 43 11 24 6 14 21 6 39 17 40 47 7 15 41 19 - - 0 0 -\n7 2 51 31 46 23 16 11 53 40 10 7 46 53 33 35 - 25 35 38 0 - 0 0\n19 48 41 1 10 7 36 47 5 29 52 52 31 10 26 6 3 2 - 51 1 - - 0'
# 1944, 1/2
s = '57 - - - 50 - 11 - 50 - 79 - 1 0 - - - - - - - - - -\n3 - 28 - 0 - - - 55 7 - - - 0 0 - - - - - - - - -\n30 - - - 24 37 - - 56 14 - - - - 0 0 - - - - - - - -\n62 53 - - 53 - - 3 35 - - - - - - 0 0 - - - - - - -\n40 - - 20 66 - - 22 28 - - - - - - - 0 0 - - - - - -\n0 - - - 8 - 42 - 50 - - 8 - - - - - 0 0 - - - - -\n69 79 79 - - - 56 - 52 - - - 0 - - - - - 0 0 - - - -\n65 - - - 38 57 - - 72 - 27 - - - - - - - - 0 0 - - -\n64 - - - 14 52 - - 30 - - 32 - - - - - - - - 0 0 - -\n- 45 - 70 0 - - - 77 9 - - - - - - - - - - - 0 0 -\n2 56 - 57 35 - - - - - 12 - - - - - - - - - - - 0 0\n24 - 61 - 60 - - 27 51 - - 16 1 - - - - - - - - - - 0'
# 1944, 2/3
s = '61 75 4 63 56 - - - - - - 8 - 2 17 25 1 0 - - - - - -\n56 74 77 20 - - - 64 24 4 67 - 7 - - - - 0 0 - - - - -\n28 21 68 10 7 14 65 - - - 23 - - - 75 - - - 0 0 - - - -\n48 38 43 78 76 - - - - 5 36 - 15 72 - - - - - 0 0 - - -\n40 2 53 25 - 52 62 - 20 - - 44 - - - - 0 - - - 0 0 - -\n69 23 64 10 22 - 21 - - - - - 68 23 29 - - - - - - 0 0 -\n12 0 68 20 55 61 - 40 - - - 52 - - - 44 - - - - - - 0 0\n58 8 34 64 78 - - 11 78 24 - - - - - 58 1 - - - - - - 0'
# 1944, 3/4
s = '48 29 28 39 9 61 - - - 63 45 80 - - - 37 32 22 1 0 - - - -\n4 49 42 48 11 30 - - - 49 17 41 37 15 - 54 - - - 0 0 - - -\n35 76 78 51 37 35 21 - 17 64 - - - 59 7 - - 32 - - 0 0 - -\n9 65 44 9 54 56 73 34 42 - - - 35 - - - 46 39 0 - - 0 0 -\n3 62 7 80 68 26 - 80 55 - 36 - 26 - 9 - 72 - - - - - 0 0\n26 75 33 21 69 59 3 38 - - - 35 - 62 36 26 - - 1 - - - - 0'
# 1944, 5/6
s = '13 48 80 66 4 74 7 30 76 52 37 60 - 49 73 31 74 73 23 - 1 0 - -\n69 63 74 56 64 77 57 65 6 16 51 - 64 - 68 9 48 62 54 27 - 0 0 -\n51 15 0 80 24 25 42 54 44 71 71 9 67 35 - 58 - 29 - 53 0 - 0 0\n16 29 36 41 44 56 59 37 50 24 - 65 4 65 52 - 4 - 73 52 1 - - 0'
vals = s.replace(' ', '\n').splitlines()
for i,x in enumerate(vals):
if x == '':
print(i)
rowPtrs, cols, vals = getMatrixRepresentation(vals, False, 1944, 81, 5, 6)
print "rowPtrs: ", rowPtrs
print "cols: ", cols
print "vals: ", vals
print "numRows ", len(rowPtrs)
print "numVals ", len(vals)
|
import sys
import time
import threading
import re
import requests
import traceback
from typing import List
from datetime import (date, datetime)
from .model.finacial_history import (FinancialHistory, History, Row, Period)
from .parser import Parser
from bs4 import (BeautifulSoup, Tag)
from random import randint
# FIELDS
FIELD_SHARED_DIVIDENDS = "Dividendos Pagos"
FIELD_NET_INCOME = "Lucro Líquido"
FIELD_NET_WORTH = "Patrimônio Líquido Total"
FIELD_TOTAL_DEBITS = "Total de Passivos"
class Crawler():
url_balance_sheet: str = ""
url_cash_flow: str = ""
dt_processing: str = ""
default_sleep_time_sec: int = 0
financial_history: FinancialHistory = None
request_headers = {
'user-agent': "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"
}
def __init__(self, _url_cash_flow: str, _url_balance_sheet: str, _dt_processing: str, _default_sleep_time_sec: int = 0):
self.url_balance_sheet = _url_balance_sheet
self.url_cash_flow = _url_cash_flow
self.dt_processing = _dt_processing
self.default_sleep_time_sec = _default_sleep_time_sec
pass
def get_data(self, _stock_code: str) -> FinancialHistory:
self.financial_history = FinancialHistory(_stock_code, self.dt_processing)
cash_flow_rows: List[list] = [[FIELD_SHARED_DIVIDENDS, True], [FIELD_NET_INCOME, False]]
balance_sheet_rows: List[list] = [[FIELD_NET_WORTH, False], [FIELD_TOTAL_DEBITS, False]]
# Randomize the scraping order to make it harder for the target site's bot detection to profile us
if ((randint(0, 10) % 2) == 0):
cash_flow_hist_aux = self.__get_history("Fluxo de Caixa", cash_flow_rows, _stock_code, self.url_cash_flow, True)
time.sleep(randint(0, self.default_sleep_time_sec))
balance_hist_aux = self.__get_history("Balanço", balance_sheet_rows, _stock_code, self.url_balance_sheet, False)
else:
balance_hist_aux = self.__get_history("Balanço", balance_sheet_rows, _stock_code, self.url_balance_sheet, False)
time.sleep(randint(0, self.default_sleep_time_sec))
cash_flow_hist_aux = self.__get_history("Fluxo de Caixa", cash_flow_rows, _stock_code, self.url_cash_flow, True)
if cash_flow_hist_aux:
self.financial_history.history.append(cash_flow_hist_aux)
if balance_hist_aux:
self.financial_history.history.append(balance_hist_aux)
return self.financial_history
def __get_history(self, _hist_description: str, _rows: List[str], _stock_code: str, url: str, _turn_value_positive: bool) -> History:
res: requests.Response = None
page: BeautifulSoup = None
hist_aux: History = None
res = requests.get(url.format(_stock_code), headers=self.request_headers)
page = BeautifulSoup(res.content, "html.parser")
hist_aux: History = History(_hist_description, "{0} - {1}".format(res.status_code, res.reason))
hist_aux.periods = []
if not page:
return History(_hist_description, "Target server refused the connection")
period_index: int = -1
# getting the periods
header_section: Tag = page.find("div", attrs={"class": "D(tbr) C($primaryColor)", "data-reactid": "32"})
if header_section is None:
return History(_hist_description, "Section not found")
columns: List[Tag] = header_section.select("div > span")
for column in columns:
period_index = period_index + 1
if not (column is None):
text_aux: str = ""
if (str(column.get_text()).lower() == "ttm"):
text_aux = datetime.today().strftime("%d/%m/%Y")
else:
text_aux = column.get_text()
_date_aux = text_aux.split("/")
if not (_date_aux is None or _date_aux == "" or len(_date_aux) < 3):
period_date: date = date(int(_date_aux[2]), int(_date_aux[1]), int(_date_aux[0])).isoformat()
period_obj: Period = Period(period_date)
for row in _rows:
desc: str = row[0]
turn_positive: bool = bool(row[1]) if len(row) > 1 else False
# Row
period_obj.rows.append(self.__get_row(page, period_index, desc, turn_positive))
pass
# Period
hist_aux.periods.append(period_obj)
pass
pass
pass
return hist_aux
def __get_row(self, _page: BeautifulSoup, _period_index: int, _description: str, _turn_value_positive: bool) -> Row:
if not _page:
return None
# getting the values
value_row: Tag = _page.find("span", text=re.compile(
"^{}$".format(_description), re.IGNORECASE))
if value_row is None:
return None
value_cell: List[Tag] = list(value_row.parent.parent.parent.children)
if value_cell is None or len(value_cell) == 0:
return None
if value_cell[_period_index] is None:
return None
value_span: Tag = value_cell[_period_index].find("span")
if value_span is None:
return None
formatted_value_aux = Parser.ParseFloat(value_span.get_text())
if _turn_value_positive and formatted_value_aux < 0:
formatted_value_aux = formatted_value_aux * -1
return_row: Row = Row(_description, (formatted_value_aux * 1000))
return return_row
pass
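# Hedged usage sketch (not part of the original module). The URL templates are placeholders:
# the real ones must contain a "{0}" slot for the stock code and point at pages whose markup
# matches the selectors used in __get_history/__get_row above.
#   crawler = Crawler(
#       _url_cash_flow="https://example.com/quote/{0}/cash-flow",
#       _url_balance_sheet="https://example.com/quote/{0}/balance-sheet",
#       _dt_processing="2021-01-31",
#       _default_sleep_time_sec=5)
#   history = crawler.get_data("PETR4.SA")  # stock code is also a placeholder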
|
<reponame>helyx-rterry/solutions-geoprocessing-toolbox
# Patrol Report (from XML) to Table
#-------------------------------------------------------------------------------
# Copyright 2010-2013 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#-------------------------------------------------------------------------------
# This script will take an XML file representing
# a completed InfoPath Patrol Report and
# add the 'PatrolReport' elements as a row to
# an ArcGIS Table of identical schema, against
# the specified Track ID
# INPUTS:
# XML file (FILE)
# Track ID (STRING)
# OUTPUTS:
# Output Table (TABLE)
#-------------------------------------------------------------------------------
from xml.etree import ElementTree
from datetime import datetime
import arcpy, string
import re
def ptrlreptodict(xmlfile):
''' Parses the xml to extract the PatrolReport fields into a dictionary '''
PATROLREPORT_NS = '{http://helyx.co.uk/infopath/2003/myXSD/2010-06-09}'
tree = ElementTree.parse(xmlfile)
d = {}
''' get each of the grouped nodes '''
report = tree.find(PATROLREPORT_NS + 'Report')
patrol = tree.find(PATROLREPORT_NS + 'Patrol')
task = tree.find(PATROLREPORT_NS + 'Task')
observations = tree.find(PATROLREPORT_NS + 'Observations')
patrolcondition = tree.find(PATROLREPORT_NS + 'PatrolCondition')
''' get report nodes '''
reportnumber = report.find(PATROLREPORT_NS + 'ReportNumber').text
classification = report.find(PATROLREPORT_NS + 'Classification').text
_to = report.find(PATROLREPORT_NS + 'To').text
_from = report.find(PATROLREPORT_NS + 'From').text
reportdatetime = report.find(PATROLREPORT_NS + 'ReportDateTime').text
''' get patrol nodes '''
callsign = patrol.find(PATROLREPORT_NS + 'Callsign').text
subunit = patrol.find(PATROLREPORT_NS + 'Subunit').text
patrolbase = patrol.find(PATROLREPORT_NS + 'PatrolBase').text
patroltype = patrol.find(PATROLREPORT_NS + 'PatrolType').text
patrolcommand = patrol.find(PATROLREPORT_NS + 'PatrolCommand').text
interpreter = patrol.find(PATROLREPORT_NS + 'Interpreter').text
patrolsize = patrol.find(PATROLREPORT_NS + 'PatrolSize').text
composition = patrol.find(PATROLREPORT_NS + 'Composition').text
''' get task nodes '''
opname = task.find(PATROLREPORT_NS + 'OpName').text
taskname = task.find(PATROLREPORT_NS + 'TaskName').text
taskdescription = task.find(PATROLREPORT_NS + 'TaskDescription').text
''' get observation nodes '''
terraindescription = observations.find(PATROLREPORT_NS + 'TerrainDescription').text
miscinfo = observations.find(PATROLREPORT_NS + 'MiscInfo').text
conclusions = observations.find(PATROLREPORT_NS + 'Conclusions').text
''' ignore the EnemySightings nodes which go in another table '''
''' get patrolcondition nodes '''
numpatrolok = patrolcondition.find(PATROLREPORT_NS + 'NumPatrolOK').text
numpatrolwounded = patrolcondition.find(PATROLREPORT_NS + 'NumPatrolWounded').text
numpatrolkia = patrolcondition.find(PATROLREPORT_NS + 'NumPatrolKIA').text
numpatrolmissing = patrolcondition.find(PATROLREPORT_NS + 'NumPatrolMissing').text
numpatrolcaptured = patrolcondition.find(PATROLREPORT_NS + 'NumPatrolCaptured').text
yield reportnumber, classification, _to, _from, reportdatetime, \
callsign, subunit, patrolbase, patroltype, patrolcommand, \
interpreter, patrolsize, composition, opname, taskname, \
taskdescription, terraindescription, miscinfo, conclusions, \
numpatrolok, numpatrolwounded, numpatrolkia, numpatrolmissing, \
numpatrolcaptured
def parse_timestamp(s):
''' Returns (datetime, tz offset in minutes) or (None, None). '''
m = re.match(""" ^
(?P<year>-?[0-9]{4}) - (?P<month>[0-9]{2}) - (?P<day>[0-9]{2})
T (?P<hour>[0-9]{2}) : (?P<minute>[0-9]{2}) : (?P<second>[0-9]{2})
(?P<microsecond>\.[0-9]{1,6})?
(?P<tz>
Z | (?P<tz_hr>[-+][0-9]{2}) : (?P<tz_min>[0-9]{2})
)?
$ """, s, re.X)
if m is not None:
values = m.groupdict()
if values["tz"] in ("Z", None):
tz = 0
else:
tz = int(values["tz_hr"]) * 60 + int(values["tz_min"])
if values["microsecond"] is None:
values["microsecond"] = 0
else:
values["microsecond"] = values["microsecond"][1:]
values["microsecond"] += "0" * (6 - len(values["microsecond"]))
#values = dict((k, int(v)) for k, v in values.iteritems() #UPDATE
values = dict((k, int(v)) for k, v in list(values.items())
if not k.startswith("tz"))
try:
return datetime(**values), tz
except ValueError:
pass
return None, None
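# Illustrative behaviour of parse_timestamp (examples added for clarity, not from the
# original script); the offset is returned separately, in minutes, and the datetime is naive:
#   parse_timestamp("2013-05-01T12:30:45Z") -> (datetime(2013, 5, 1, 12, 30, 45), 0)
#   parse_timestamp("2013-05-01T12:30:45+01:00") -> (datetime(2013, 5, 1, 12, 30, 45), 60)
#   parse_timestamp("not a timestamp") -> (None, None)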
if __name__ == "__main__":
#Get the XML file, Track ID and Output Table parameters
xmlfile = arcpy.GetParameterAsText(0)
trackid = arcpy.GetParameterAsText(1)
outTable = arcpy.GetParameterAsText(2)
rows, row = None, None
try:
rows = arcpy.InsertCursor(outTable)
recComplete = 0
# walk through each patrol report, create and insert a record into the table for each
for reportnumber, classification, _to, _from, reportdatetime, \
callsign, subunit, patrolbase, patroltype, patrolcommand, \
interpreter, patrolsize, composition, opname, taskname, \
taskdescription, terraindescription, miscinfo, conclusions, \
numpatrolok, numpatrolwounded, numpatrolkia, numpatrolmissing, \
numpatrolcaptured \
in ptrlreptodict(xmlfile):
row = rows.newRow()
row.ReportNumber = reportnumber
row.Classification = classification
row.ReportTo = _to
row.ReportFrom = _from
row.ReportDateTime = parse_timestamp(reportdatetime)[0]
row.Callsign = callsign
row.Subunit = subunit
row.PatrolBase = patrolbase
row.PatrolType = patroltype
row.PatrolCommand = patrolcommand
row.Interpreter = interpreter
row.PatrolSize = patrolsize
row.Composition = composition
row.OpName = opname
row.TaskName = taskname
row.TaskDescription = taskdescription
row.TerrainDescription = terraindescription
row.MiscInfo = miscinfo
row.Conclusions = conclusions
row.NumPatrolOK = numpatrolok
row.NumPatrolWounded = numpatrolwounded
row.NumPatrolKIA = numpatrolkia
row.NumPatrolMissing = numpatrolmissing
row.NumPatrolCaptured = numpatrolcaptured
''' Add the Track ID, which was passed as a parameter, to link report to a track '''
row.FK_TrackGUID = trackid
rows.insertRow(row)
recComplete += 1
arcpy.AddMessage("Processed " + str(recComplete) + " records.")
arcpy.SetParameterAsText(3,'True')
#except Exception, ErrorDesc: #UPDATE
except Exception as ErrorDesc:
arcpy.AddError(str(ErrorDesc))
finally:
if rows:
del rows
if row:
del row
|
<reponame>sourabh-bhide/tissue2cells
#!/usr/bin/env python
# coding: utf-8
import matplotlib.pyplot as plt
import tissue2cells as tc
import os
import os.path
import ipywidgets as widgets
w = widgets.Select(
options=['Area','concentration_myo','DV_asymmetry', 'Ventral_fraction', 'offset',
'sum_myosin_cells', 'eccentricity'],
value='Area',
# rows=10,
description='Quantity:',
disabled=False)
def Fill_Error(df,col_name,color):
df_mean = df.set_index('filename').groupby('frame_nb').agg(['mean'])
df_std = df.set_index('filename').groupby('frame_nb').agg(['std'])
error_down = (df_mean[str(col_name)].values)-(df_std[str(col_name)].values)
error_up = (df_std[str(col_name)].values)+(df_mean[str(col_name)].values)
Y_down = error_down.flatten()
Y_up = error_up.flatten()
Xaxis = range(0,len(df_mean)*25,25)#this number depends upon the time resolution
ax_df = plt.plot(Xaxis,df_mean[col_name],color=str(color))
return plt.fill_between(Xaxis, Y_up, Y_down, alpha=0.1, color= str(color))
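# Illustrative call (assuming df is a DataFrame with 'filename', 'frame_nb' and 'Area'
# columns, as produced elsewhere in this pipeline):
#   Fill_Error(df, 'Area', 'blue')
#   plt.show()
# This plots the per-frame mean of 'Area' and shades a +/- one standard deviation band
# around it, with frames spaced 25 seconds apart on the x axis.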
def RowAnalysis(AcrossRows,Colors1,label_rows):
path_plots = 'Results/RowAnalysis_plots/'
os.makedirs(path_plots, exist_ok=True)
for i,j in zip(AcrossRows,Colors1):
Fill_Error(i,'Area',j)
plt.xlabel('Time(sec)', fontsize=10)
plt.ylabel(r'cell area ($\mu$m$^{2}$)', fontsize=12)
plt.ylim(0,110)
#plt.xlim(0,700)
plt.legend(label_rows, loc='upper left', fontsize=11)
plt.savefig(path_plots+'all_rows_cell_area.png', transparent=True)
plt.show()
for i,j in zip(AcrossRows,Colors1):
Fill_Error(i,'sum_myosin_cells',j)
plt.xlabel('Time(sec)', fontsize=10)
plt.ylabel('sum_myosin_cells', fontsize=12)
#plt.ylim(0,110)
#plt.xlim(0,700)
plt.legend(label_rows, loc='upper left', fontsize=11)
plt.savefig(path_plots+'all_rows_sum_myosin_cells.png', transparent=True)
plt.show()
for i,j in zip(AcrossRows,Colors1):
Fill_Error(i,'concentration_myo',j)
plt.xlabel('Time(sec)', fontsize=10)
plt.ylabel('concentration_myo', fontsize=12)
#plt.ylim(0,110)
#plt.xlim(0,700)
plt.legend(label_rows, loc='upper left', fontsize=11)
plt.savefig(path_plots+'all_rows_concentration_myo.png', transparent=True)
plt.show()
for i,j in zip(AcrossRows,Colors1):
Fill_Error(i,'eccentricity',j)
plt.xlabel('Time(sec)', fontsize=10)
plt.ylabel('eccentricity', fontsize=12)
#plt.ylim(0,110)
#plt.xlim(0,700)
plt.legend(label_rows, loc='upper left', fontsize=11)
plt.savefig(path_plots+'all_rows_eccentricity.png', transparent=True)
plt.show()
for i,j in zip(AcrossRows,Colors1):
Fill_Error(i,'Offset_myo',j)
plt.xlabel('Time(sec)', fontsize=10)
plt.ylabel('Offset_myo', fontsize=12)
#plt.ylim(0,110)
#plt.xlim(0,700)
plt.legend(label_rows, loc='upper left', fontsize=11)
plt.savefig(path_plots+'all_rows_Offset_myo.png', transparent=True)
plt.show()
for i,j in zip(AcrossRows,Colors1):
Fill_Error(i,'DV_asymmetry',j)
plt.xlabel('Time(sec)', fontsize=10)
plt.ylabel('DV_asymmetry', fontsize=12)
#plt.ylim(0,110)
#plt.xlim(0,700)
plt.legend(label_rows, loc='upper left', fontsize=11)
plt.savefig(path_plots+'all_rows_DV_assymetry_roi.png', transparent=True)
plt.show()
|
<reponame>Erwanp-python-game/Evolve
import pygame
from pygame.locals import *
import pickle
from OpenGL.GL import *
from OpenGL.GLU import *
verticies = (
( 1, -1, -1), # 0
( 1, 1, -1), # 1
(-1, 1, -1), # 2
(-1, -1, -1), # 3
( 1, -1, 1), # 4
( 1, 1, 1), # 5
(-1, -1, 1), # 6
(-1, 1, 1), # 7
)
surfaces = (
(0,1,2,3),
(3,2,7,6),
(6,7,5,4),
(4,5,1,0),
(1,5,7,2),
(4,0,3,6),
)
normals = [
( 0, 0, -1), # surface 0
(-1, 0, 0), # surface 1
( 0, 0, 1), # surface 2
( 1, 0, 0), # surface 3
( 0, 1, 0), # surface 4
( 0, -1, 0) # surface 5
]
colors = (
(1,1,1),
(0,1,0),
(0,0,1),
(0,1,0),
(0,0,1),
(1,0,1),
(0,1,0),
(1,0,1),
(0,1,0),
(0,0,1),
)
edges = (
(0,1),
(0,3),
(0,4),
(2,1),
(2,3),
(2,7),
(6,3),
(6,4),
(6,7),
(5,1),
(5,4),
(5,7),
)
def loadTexture():
textureSurface = pygame.image.load('wood.png')
textureData = pygame.image.tostring(textureSurface, "RGBA", 1)
width = textureSurface.get_width()
height = textureSurface.get_height()
glEnable(GL_TEXTURE_2D)
texid = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, texid)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height,
0, GL_RGBA, GL_UNSIGNED_BYTE, textureData)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
return texid
def Cube():
glBegin(GL_QUADS)
for i_surface, surface in enumerate(surfaces):
x = 0
glNormal3fv(normals[i_surface])
for vertex in surface:
x+=1
glColor3fv((1,0,0))
if x==1:
glTexCoord2f(0.0, 0.0)
if x==2:
glTexCoord2f(0, 1.0)
if x==3:
glTexCoord2f(1, 0)
if x==4:
glTexCoord2f(1, 1.0)
glVertex3fv(verticies[vertex])
glEnd()
glColor3fv(colors[0])
glBegin(GL_LINES)
for edge in edges:
for vertex in edge:
glVertex3fv(verticies[vertex])
glEnd()
def read():
global surfaces,verticies,normals
surfaces=[]
verticies=[]
normals=[]
f = open("cuby", "rb")
g=pickle.load(f)
while g!='fin':
surfaces.append((g[0][0],g[1][0],g[2][0]))
g=pickle.load(f)
g=pickle.load(f)
while g!='fin':
verticies.append((g[1],g[2],g[3]))
g=pickle.load(f)
g=pickle.load(f)
while g!='fin':
normals.append((g[1],g[2],g[3]))
g=pickle.load(f)
read()
print(len(normals),len(verticies))
def objdraw():
glBegin(GL_TRIANGLES)
for i_surface, surface in enumerate(surfaces):
x = 0
#
for vertex in surface:
x+=1
glColor3fv((1,0,0))
if x==1:
glTexCoord2f(0.0, 0.0)
if x==2:
glTexCoord2f(0, 1.0)
if x==3:
glTexCoord2f(1, 0)
if x==4:
glTexCoord2f(1, 1.0)
glNormal3fv(normals[vertex-1])
glVertex3fv(verticies[vertex-1])
glEnd()
#glColor3fv(colors[0])
#glBegin(GL_LINES)
#for edge in edges:
#for vertex in edge:
#glVertex3fv(verticies[vertex])
#glEnd()
def drawText(x, y, text):
position = (x, y, 0)
font = pygame.font.Font(None, 64)
textSurface = font.render(text, True, (255,255,255,255),
(0,0,0,255))
textData = pygame.image.tostring(textSurface, "RGBA", True)
glRasterPos3d(*position)
glDrawPixels(textSurface.get_width(), textSurface.get_height(),GL_RGBA, GL_UNSIGNED_BYTE, textData)
def main():
global surfaces
pygame.init()
display = (800, 600)
pygame.display.set_mode(display, DOUBLEBUF|OPENGL)
clock = pygame.time.Clock()
loadTexture()
glMatrixMode(GL_PROJECTION)
gluPerspective(45, (display[0]/display[1]), 0.1, 50.0)
glMatrixMode(GL_MODELVIEW)
glTranslatef(0, 0, -5)
#glLight(GL_LIGHT0, GL_POSITION, (0, 0, 1, 0)) # directional light from the front
glLight(GL_LIGHT0, GL_POSITION, (5, 5, 5, 1)) # point light from the left, top, front
glLightfv(GL_LIGHT0, GL_AMBIENT, (0, 0, 0, 1))
glLightfv(GL_LIGHT0, GL_DIFFUSE, (1, 1, 1, 1))
glEnable(GL_DEPTH_TEST)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
glEnable(GL_COLOR_MATERIAL)
glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE )
glRotatef(1, 3, 1, 1)
objdraw()
drawText(400,600,'gnap')
glDisable(GL_LIGHT0)
glDisable(GL_LIGHTING)
glDisable(GL_COLOR_MATERIAL)
pygame.display.flip()
clock.tick(60)
main()
|
#!/usr/bin/env python
'''make-rt-struct-assessor.py
Read in an RT-STRUCT DICOM file. Write out an icr:roiCollectionData assessor.
Usage:
make-rt-struct-assessor.py PROJECT SUBJECT_ID SESSION_LABEL NEW_LABELS
Options:
PROJECT Project of parent session
SUBJECT_ID ID of parent subject
SESSION_LABEL Label of parent session
NEW_LABELS New ROI labels
'''
import os
import sys
import uuid
import pydicom
import requests
import logging
import datetime as dt
import subprocess
from docopt import docopt
import configparser
#config = ConfigParser.RawConfigParser()
#config.read('/data/xnat/home/xnat.cfg')
server = 'http://localhost:8080' #config.get('xnat', 'xnat_host')
usr = os.environ['XNAT_ADMIN'] # config.get('xnat', 'xnat_user')
pwd = os.environ['XNAT_ADMIN_PWD'] # config.get('xnat', 'xnat_pwd')
date_now = '{0:%Y-%m-%d}'.format(dt.datetime.now())
logfile = '/data/xnat/scripts/logs/replace_rt_labels_{}.log'.format(date_now)
logging.basicConfig(filename=logfile, format='%(name)s - %(levelname)s - %(message)s', level=logging.INFO)
def get_dicom_header_value(line):
left_bracket_idx = line.find('[')
right_bracket_idx = line.find(']')
if left_bracket_idx == -1 or right_bracket_idx == -1:
return None
return line[left_bracket_idx + 1:right_bracket_idx]
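# Illustrative behaviour (added for clarity): given a dump-style header line such as
#   "(0008,0060) CS [RTSTRUCT]  #  8, 1 Modality"
# get_dicom_header_value returns "RTSTRUCT"; if no bracketed value is present it returns None.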
version = "1.0"
args = docopt(__doc__, version=version)
subject_id = args.get("SUBJECT_ID")
session_label = args.get('SESSION_LABEL')
project = args.get('PROJECT')
new_labels_csv = args.get('NEW_LABELS')
#logging.debug "Debug: args " + ", ".join("{}={}".format(name, value) for name, value in args.items()) + "\n"
logging.info("##### Replacing RT Labels for: {} {}".format(subject_id, session_label))
new_labels = new_labels_csv.split(',')
logging.debug(' new labels: {}'.format(new_labels))
dicom_path = ""
roi_labels_path = "/data/xnat/cache/temp/{}/roi_labels_{}.txt".format(session_label, session_label)
if os.path.exists(roi_labels_path ):
os.remove(roi_labels_path)
rt_num = 1
url = "http://localhost:8080"
session = requests.Session()
session.auth = (usr, pwd)
auth = session.post(url)
session_path = os.path.join('/data','xnat','archive',project,'arc001',session_label)
#### Find the RTSTRUCT file
tags = {
'SeriesNumber': (0x0020, 0x0011),
'RTROIObservationsSequence': (0x3006,0x0080),
'ROIName' : (0x3006,0x0020),
'Modality': (0x0008, 0x0060)
}
roiObservationLabels = []
for root, dirs, files in os.walk(session_path):
#logging.debug("searching through {}".format(root))
for file in files:
if file.endswith(".dcm"):
with open(os.path.join(root,file), 'rb') as f:
ds = pydicom.dcmread(f)
modality = ds.Modality
if 'RTSTRUCT' in modality.upper() and 'ASSESSOR' not in root.upper():
series_number = 0  # dataset[tags['SeriesNumber']].value
# Need to get the ROI observation labels to check that they conform; they are written to a text file and uploaded below
new_label_num = 0
if 'StructureSetROISequence' in ds:
try:
for roi in ds.StructureSetROISequence:
roi_label = roi.ROIName
roi.ROIName = new_labels[new_label_num]
roiObservationLabels.append(new_labels[new_label_num])
new_label_num += 1
except Exception as e:
logging.error("ERROR: StructureSetROISequence error - {}".format(e) )
sys.exit(1)
new_labels_list_complete = False
if len(roiObservationLabels) == len(new_labels):
new_labels_list_complete = True
if 'RTROIObservationsSequence' in ds :
try:
new_label_num = 0
for sequence in ds.RTROIObservationsSequence:
if 'ROIObservationLabel' in sequence:
roi_label = sequence.ROIObservationLabel
sequence.ROIObservationLabel = new_labels[new_label_num]
if not new_labels_list_complete:
roiObservationLabels.append(new_labels[new_label_num])
new_label_num += 1
except Exception as e:
logging.error("ERROR: RTROIObservationsSequence error - {}".format(e) )
sys.exit(1)
if len(roiObservationLabels) != len(new_labels):
logging.error("ERROR: Not all labels found - error - {}".format(roiObservationLabels))
sys.exit(1)
ds.save_as(os.path.join(root,file), write_like_original=True)
logging.info("Writing labels to roi_file: {} - {} ".format(file, roiObservationLabels))
with open(roi_labels_path, 'a+') as f:
for lab in roiObservationLabels:
label_string='{}'.format(lab)
f.write('{}\n'.format(label_string)) #.encode(encoding='utf-8',errors='strict'))
#f.write('{}\n'.format(lab.replace(" ","")).encode(encoding='utf-8',errors='strict')) #f.write('{}'.format(roiObservationLabels))
url = "http://localhost:8080/data/archive/projects/{}/subjects/{}/experiments/{}/resources/roi_labels/files/roi_labels.txt?overwrite=true".format(project, subject_id,session_label)
files = {'upload_file': open(roi_labels_path, 'rb')}
r = session.put(url, files=files)
if r.status_code != 201 and r.status_code != 200:
logging.error(
"ERROR: Cannot upload ROI lables file: {} - {} - {}".format(r.status_code, url,roiObservationLabels))
sys.exit(20)
#delete at end as seems to double write...
os.remove(roi_labels_path)
sys.exit(0)
|
'''
Base tasks for generic point-to-point reaching
'''
import numpy as np
from collections import OrderedDict
import time
import os
import math
import traceback
from riglib.stereo_opengl.primitives import Sphere, Cube
####### CONSTANTS
sec_per_min = 60.0
RED = (1,0,0,.5)
GREEN = (0,1,0,0.5)
GOLD = (1., 0.843, 0., 0.5)
mm_per_cm = 1./10
class CircularTarget(object):
def __init__(self, target_radius=2, target_color=(1, 0, 0, .5), starting_pos=np.zeros(3)):
self.target_color = target_color
self.default_target_color = tuple(self.target_color)
self.target_radius = target_radius
self.position = starting_pos
self.int_position = starting_pos
self._pickle_init()
def _pickle_init(self):
self.sphere = Sphere(radius=self.target_radius, color=self.target_color)
self.graphics_models = [self.sphere]
self.sphere.translate(*self.position)
def move_to_position(self, new_pos):
self.int_position = new_pos
print('CircularTarget.move_to_position:', self.int_position)
self.drive_to_new_pos()
def drive_to_new_pos(self):
raise NotImplementedError
class VirtualCircularTarget(CircularTarget):
def drive_to_new_pos(self):
self.position = self.int_position
self.sphere.translate(*self.position, reset=True)
def hide(self):
self.sphere.detach()
def show(self):
self.sphere.attach()
def cue_trial_start(self):
self.sphere.color = RED
self.show()
def cue_trial_end_success(self):
self.sphere.color = GREEN
def cue_trial_end_failure(self):
self.sphere.color = RED
self.hide()
# self.sphere.color = GREEN
def turn_yellow(self):
self.sphere.color = GOLD
def idle(self):
self.sphere.color = RED
self.hide()
def pt_inside(self, pt):
'''
Test if a point is inside the target
'''
pos = self.sphere.xfm.move
return (np.abs(pt[0] - pos[0]) < self.target_radius) and (np.abs(pt[2] - pos[2]) < self.target_radius)
def reset(self):
self.sphere.color = self.default_target_color
def get_position(self):
return self.sphere.xfm.move
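# Note on pt_inside above: despite the class name it is an axis-aligned box test in the
# x-z plane (the y coordinate is ignored). Illustrative check with hypothetical values,
# assuming the sphere sits at the origin with target_radius=2:
#   target = VirtualCircularTarget(target_radius=2)
#   target.pt_inside((1.9, 0.0, 1.9))  # -> True, even though the point lies outside the sphere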
class RectangularTarget(object):
def __init__(self, target_width=4, target_height=4, target_color=(1, 0, 0, .5), starting_pos=np.zeros(3)):
self.target_width = target_width
self.target_height = target_height
self.target_color = target_color
self.default_target_color = tuple(self.target_color)
self.position = starting_pos
self.int_position = starting_pos
self._pickle_init()
def _pickle_init(self):
self.cube = Cube(side_len=self.target_width, color=self.target_color)
self.graphics_models = [self.cube]
self.cube.translate(*self.position)
#self.center_offset = np.array([self.target_width, 0, self.target_width], dtype=np.float64) / 2
self.center_offset = np.array([0, 0, self.target_width], dtype=np.float64) / 2
def move_to_position(self, new_pos):
self.int_position = new_pos
self.drive_to_new_pos()
def drive_to_new_pos(self):
raise NotImplementedError
class VirtualRectangularTarget(RectangularTarget):
def drive_to_new_pos(self):
self.position = self.int_position
corner_pos = self.position - self.center_offset
self.cube.translate(*corner_pos, reset=True)
def hide(self):
self.cube.detach()
def show(self):
self.cube.attach()
def cue_trial_start(self):
self.cube.color = RED
self.show()
def cue_trial_end_success(self):
self.cube.color = GREEN
def cue_trial_end_failure(self):
self.cube.color = RED
self.hide()
def idle(self):
self.cube.color = RED
self.hide()
def pt_inside(self, pt):
'''
Test if a point is inside the target
'''
pos = self.cube.xfm.move + self.center_offset
# TODO this currently assumes that the cube doesn't rotate
# print (pt[0] - pos[0]), (pt[2] - pos[2])
return (np.abs(pt[0] - pos[0]) < self.target_width/2) and (np.abs(pt[2] - pos[2]) < self.target_height/2)
def reset(self):
self.cube.color = self.default_target_color
def get_position(self):
return self.cube.xfm.move
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function
import os
import warnings
import json
import copy
import re
from astropy.extern import six
from astropy.extern.six import BytesIO
import astropy.units as u
import astropy.coordinates as coord
import astropy.table as tbl
import astropy.utils.data as aud
from collections import OrderedDict
import astropy.io.votable as votable
from astropy.io import ascii, fits
from ..query import BaseQuery
from ..utils import commons
from ..utils import async_to_sync
from ..utils import schema
from . import conf
from ..exceptions import TableParseError
__all__ = ['Vizier', 'VizierClass']
__doctest_skip__ = ['VizierClass.*']
@async_to_sync
class VizierClass(BaseQuery):
_str_schema = schema.Or(*six.string_types)
_schema_columns = schema.Schema([_str_schema],
error="columns must be a list of strings")
_schema_ucd = schema.Schema(_str_schema, error="ucd must be string")
_schema_column_filters = schema.Schema(
{schema.Optional(_str_schema): _str_schema},
error=("column_filters must be a dictionary where both keys "
"and values are strings"))
_schema_catalog = schema.Schema(
schema.Or([_str_schema], _str_schema, None),
error="catalog must be a list of strings or a single string")
def __init__(self, columns=["*"], column_filters={}, catalog=None,
keywords=None, ucd="", timeout=conf.timeout,
vizier_server=conf.server, row_limit=conf.row_limit):
super(VizierClass, self).__init__()
self.columns = columns
self.column_filters = column_filters
self.catalog = catalog
self._keywords = None
self.ucd = ucd
if keywords:
self.keywords = keywords
self.TIMEOUT = timeout
self.VIZIER_SERVER = vizier_server
self.ROW_LIMIT = row_limit
@property
def columns(self):
""" Columns to include. The special keyword 'all' will return ALL
columns from ALL retrieved tables. """
# columns need to be immutable but still need to be a list
return list(tuple(self._columns))
@columns.setter
def columns(self, values):
self._columns = VizierClass._schema_columns.validate(values)
@property
def column_filters(self):
"""
Filters to run on the individual columns. See the Vizier website
for details.
"""
return self._column_filters
@column_filters.setter
def column_filters(self, values):
self._column_filters = (
VizierClass._schema_column_filters.validate(values))
@property
def catalog(self):
"""
The default catalog to search. If left empty, will search all
catalogs.
"""
return self._catalog
@catalog.setter
def catalog(self, values):
self._catalog = VizierClass._schema_catalog.validate(values)
@property
def ucd(self):
"""
UCD criteria: see http://vizier.u-strasbg.fr/vizier/vizHelp/1.htx#ucd
Examples
--------
>>> Vizier.ucd = '(spect.dopplerVeloc*|phys.veloc*)'
"""
return self._ucd
@ucd.setter
def ucd(self, values):
self._ucd = VizierClass._schema_ucd.validate(values)
def _server_to_url(self, return_type='votable'):
"""
Not generally meant to be modified, but there are different valid
return types supported by Vizier, listed here:
http://vizier.u-strasbg.fr/doc/asu-summary.htx
HTML: VizieR
votable: votable
tab-separated-values: asu-tsv
FITS ascii table: asu-fits
FITS binary table: asu-binfits
plain text: asu-txt
"""
# Only votable is supported now, but in case we try to support
# something in the future we should disallow invalid ones.
assert return_type in ('votable', 'asu-tsv', 'asu-fits',
'asu-binfits', 'asu-txt')
if return_type in ('asu-txt',):
# I had a look at the format of these "tables" and... they just
# aren't. They're quasi-fixed-width without schema. I think they
# follow the general philosophy of "consistency is overrated"
# The CDS reader chokes on it.
raise TypeError("asu-txt is not and cannot be supported: the "
"returned tables are not and cannot be made "
"parseable.")
return "http://" + self.VIZIER_SERVER + "/viz-bin/" + return_type
@property
def keywords(self):
"""The set of keywords to filter the Vizier search"""
return self._keywords
@keywords.setter
def keywords(self, value):
self._keywords = VizierKeyword(value)
@keywords.deleter
def keywords(self):
self._keywords = None
def find_catalogs(self, keywords, include_obsolete=False, verbose=False,
max_catalogs=None, return_type='votable'):
"""
Search Vizier for catalogs based on a set of keywords, e.g. author name
Parameters
----------
keywords : list or string
List of keywords, or space-separated set of keywords.
From `Vizier <http://vizier.u-strasbg.fr/doc/asu-summary.htx>`_:
"names or words of title of catalog. The words are and'ed, i.e.
only the catalogues characterized by all the words are selected."
include_obsolete : bool, optional
If set to True, catalogs marked obsolete will also be returned.
max_catalogs : int or None
The maximum number of catalogs to return. If ``None``, all
catalogs will be returned.
Returns
-------
resource_dict : dict
Dictionary of the "Resource" name and the VOTable resource object.
"Resources" are generally publications; one publication may contain
many tables.
Examples
--------
>>> from astroquery.vizier import Vizier
>>> catalog_list = Vizier.find_catalogs('Kang W51')
>>> print(catalog_list)
{u'J/ApJ/706/83': <astropy.io.votable.tree.Resource at 0x108d4d490>,
u'J/ApJS/191/232': <astropy.io.votable.tree.Resource at 0x108d50490>}
>>> print({k:v.description for k,v in catalog_list.items()})
{u'J/ApJ/706/83': u'Embedded YSO candidates in W51 (Kang+, 2009)',
u'J/ApJS/191/232': u'CO survey of W51 molecular cloud (Bieging+, 2010)'}
"""
if isinstance(keywords, list):
keywords = " ".join(keywords)
data_payload = {'-words': keywords, '-meta.all': 1}
if max_catalogs is not None:
data_payload['-meta.max'] = max_catalogs
response = self._request(
method='POST', url=self._server_to_url(return_type=return_type),
data=data_payload, timeout=self.TIMEOUT)
if 'STOP, Max. number of RESOURCE reached' in response.text:
raise ValueError("Maximum number of catalogs exceeded. Try "
"setting max_catalogs to a large number and"
" try again")
result = self._parse_result(response, verbose=verbose,
get_catalog_names=True)
# Filter out the obsolete catalogs, unless requested
if include_obsolete is False:
for key in list(result):
for info in result[key].infos:
if (info.name == 'status') and (info.value == 'obsolete'):
del result[key]
return result
def get_catalogs_async(self, catalog, verbose=False, return_type='votable',
get_query_payload=False):
"""
Query the Vizier service for a specific catalog
Parameters
----------
catalog : str or list, optional
The catalog(s) that will be retrieved
Returns
-------
response : `~requests.Response`
Returned if asynchronous method used
"""
if not isinstance(catalog, six.string_types):
catalog = list(catalog)
data_payload = self._args_to_payload(catalog=catalog)
if get_query_payload:
return data_payload
response = self._request(
method='POST', url=self._server_to_url(return_type=return_type),
data=data_payload, timeout=self.TIMEOUT)
return response
def query_object_async(self, object_name, catalog=None, radius=None,
coordinate_frame=None, get_query_payload=False,
return_type='votable', cache=True):
"""
Serves the same purpose as `query_object` but only
returns the HTTP response rather than the parsed result.
Parameters
----------
object_name : str
The name of the identifier.
catalog : str or list, optional
The catalog(s) which must be searched for this identifier.
If not specified, all matching catalogs will be searched.
radius : `~astropy.units.Quantity` or None
A degree-equivalent radius (optional).
        coordinate_frame : str or None
If the object name is given as a coordinate, you *should* use
`~astroquery.vizier.VizierClass.query_region`, but you can
specify a coordinate frame here instead (today, J2000, B1975,
B1950, B1900, B1875, B1855, Galactic, Supergal., Ecl.J2000, )
Returns
-------
response : `~requests.Response`
The response of the HTTP request.
"""
catalog = VizierClass._schema_catalog.validate(catalog)
if radius is None:
center = {'-c': object_name}
else:
radius_arcmin = radius.to(u.arcmin).value
cframe = (coordinate_frame if coordinate_frame in
["today", "J2000", "B1975", "B1950", "B1900", "B1875",
"B1855", "Galactic", "Supergal.", "Ecl.J2000"]
else 'J2000')
center = {'-c': object_name, '-c.u': 'arcmin', '-c.geom': 'r',
'-c.r': radius_arcmin, '-c.eq': cframe}
data_payload = self._args_to_payload(
center=center,
catalog=catalog)
if get_query_payload:
return data_payload
response = self._request(
method='POST', url=self._server_to_url(return_type=return_type),
data=data_payload, timeout=self.TIMEOUT, cache=cache)
return response
def query_region_async(self, coordinates, radius=None, inner_radius=None,
width=None, height=None, catalog=None,
get_query_payload=False, cache=True,
return_type='votable'):
"""
Serves the same purpose as `query_region` but only
returns the HTTP response rather than the parsed result.
Parameters
----------
coordinates : str, `astropy.coordinates` object, or `~astropy.table.Table`
The target around which to search. It may be specified as a
string in which case it is resolved using online services or as
the appropriate `astropy.coordinates` object. ICRS coordinates
may also be entered as a string. If a table is used, each of
its rows will be queried, as long as it contains two columns
named ``_RAJ2000`` and ``_DEJ2000`` with proper angular units.
radius : convertible to `~astropy.coordinates.Angle`
The radius of the circular region to query.
inner_radius : convertible to `~astropy.coordinates.Angle`
When set in addition to ``radius``, the queried region becomes
annular, with outer radius ``radius`` and inner radius
``inner_radius``.
width : convertible to `~astropy.coordinates.Angle`
The width of the square region to query.
height : convertible to `~astropy.coordinates.Angle`
When set in addition to ``width``, the queried region becomes
rectangular, with the specified ``width`` and ``height``.
catalog : str or list, optional
The catalog(s) which must be searched for this identifier.
If not specified, all matching catalogs will be searched.
Returns
-------
response : `requests.Response`
The response of the HTTP request.
"""
catalog = VizierClass._schema_catalog.validate(catalog)
center = {}
columns = []
if isinstance(coordinates, (commons.CoordClasses,) + six.string_types):
c = commons.parse_coordinates(coordinates).transform_to('fk5')
if not c.isscalar:
pos_list = []
for pos in c:
ra_deg = pos.ra.to_string(unit="deg", decimal=True,
precision=8)
dec_deg = pos.dec.to_string(unit="deg", decimal=True,
precision=8, alwayssign=True)
pos_list += ["{}{}".format(ra_deg, dec_deg)]
center["-c"] = "<<;" + ";".join(pos_list)
columns += ["_q"] # request a reference to the input table
else:
ra = c.ra.to_string(unit='deg', decimal=True, precision=8)
dec = c.dec.to_string(unit="deg", decimal=True, precision=8,
alwayssign=True)
center["-c"] = "{ra}{dec}".format(ra=ra, dec=dec)
elif isinstance(coordinates, tbl.Table):
if (("_RAJ2000" in coordinates.keys()) and ("_DEJ2000" in
coordinates.keys())):
pos_list = []
sky_coord = coord.SkyCoord(coordinates["_RAJ2000"],
coordinates["_DEJ2000"],
unit=(coordinates["_RAJ2000"].unit,
coordinates["_DEJ2000"].unit))
for (ra, dec) in zip(sky_coord.ra, sky_coord.dec):
ra_deg = ra.to_string(unit="deg", decimal=True,
precision=8)
dec_deg = dec.to_string(unit="deg", decimal=True,
precision=8, alwayssign=True)
pos_list += ["{}{}".format(ra_deg, dec_deg)]
center["-c"] = "<<;" + ";".join(pos_list)
columns += ["_q"] # request a reference to the input table
else:
raise ValueError("Table must contain '_RAJ2000' and "
"'_DEJ2000' columns!")
else:
raise TypeError("Coordinates must be one of: string, astropy "
"coordinates, or table containing coordinates!")
# decide whether box or radius
if radius is not None:
# is radius a disk or an annulus?
if inner_radius is None:
radius = coord.Angle(radius)
unit, value = _parse_angle(radius)
key = "-c.r" + unit
center[key] = value
else:
i_radius = coord.Angle(inner_radius)
o_radius = coord.Angle(radius)
if i_radius.unit != o_radius.unit:
o_radius = o_radius.to(i_radius.unit)
i_unit, i_value = _parse_angle(i_radius)
o_unit, o_value = _parse_angle(o_radius)
key = "-c.r" + i_unit
center[key] = ",".join([str(i_value), str(o_value)])
elif width is not None:
# is box a rectangle or square?
if height is None:
width = coord.Angle(width)
unit, value = _parse_angle(width)
key = "-c.b" + unit
center[key] = "x".join([str(value)] * 2)
else:
w_box = coord.Angle(width)
h_box = coord.Angle(height)
if w_box.unit != h_box.unit:
h_box = h_box.to(w_box.unit)
w_unit, w_value = _parse_angle(h_box)
h_unit, h_value = _parse_angle(w_box)
key = "-c.b" + w_unit
center[key] = "x".join([str(w_value), str(h_value)])
else:
raise Exception(
"At least one of radius, width/height must be specified")
data_payload = self._args_to_payload(center=center, columns=columns,
catalog=catalog)
if get_query_payload:
return data_payload
response = self._request(
method='POST', url=self._server_to_url(return_type=return_type),
data=data_payload, timeout=self.TIMEOUT, cache=cache)
return response
def query_constraints_async(self, catalog=None, return_type='votable',
cache=True,
**kwargs):
"""
Send a query to Vizier in which you specify constraints with
keyword/value pairs.
See `the vizier constraints page
<http://vizier.cfa.harvard.edu/vizier/vizHelp/cst.htx>`_ for details.
Parameters
----------
catalog : str or list, optional
The catalog(s) which must be searched for this identifier.
If not specified, all matching catalogs will be searched.
kwargs : dict
Any key/value pairs besides "catalog" will be parsed
as additional column filters.
Returns
-------
response : `requests.Response`
The response of the HTTP request.
Examples
--------
>>> from astroquery.vizier import Vizier
>>> # note that glon/glat constraints here *must* be floats
>>> result = Vizier.query_constraints(catalog='J/ApJ/723/492/table1',
... GLON='>49.0 & <51.0', GLAT='<0')
>>> result[result.keys()[0]].pprint()
GRSMC GLON GLAT VLSR ... RD09 _RA.icrs _DE.icrs
------------- ------ ------ ------ ... ---- -------- --------
G049.49-00.41 49.49 -0.41 56.90 ... RD09 290.95 14.50
G049.39-00.26 49.39 -0.26 50.94 ... RD09 290.77 14.48
G049.44-00.06 49.44 -0.06 62.00 ... RD09 290.61 14.62
G049.04-00.31 49.04 -0.31 66.25 ... RD09 290.64 14.15
G049.74-00.56 49.74 -0.56 67.95 ... RD09 291.21 14.65
G050.39-00.41 50.39 -0.41 41.17 ... RD09 291.39 15.29
G050.24-00.61 50.24 -0.61 41.17 ... RD09 291.50 15.06
G050.94-00.61 50.94 -0.61 40.32 ... RD09 291.85 15.68
G049.99-00.16 49.99 -0.16 46.27 ... RD09 290.97 15.06
G049.44-00.06 49.44 -0.06 46.27 ... RD09 290.61 14.62
G049.54-00.01 49.54 -0.01 56.05 ... RD09 290.61 14.73
G049.74-00.01 49.74 -0.01 48.39 ... RD09 290.71 14.91
G049.54-00.91 49.54 -0.91 43.29 ... RD09 291.43 14.31
G049.04-00.46 49.04 -0.46 58.60 ... RD09 290.78 14.08
G049.09-00.06 49.09 -0.06 46.69 ... RD09 290.44 14.31
G050.84-00.11 50.84 -0.11 50.52 ... RD09 291.34 15.83
G050.89-00.11 50.89 -0.11 59.45 ... RD09 291.37 15.87
G050.44-00.41 50.44 -0.41 64.12 ... RD09 291.42 15.34
G050.84-00.76 50.84 -0.76 61.15 ... RD09 291.94 15.52
G050.29-00.46 50.29 -0.46 14.81 ... RD09 291.39 15.18
"""
catalog = VizierClass._schema_catalog.validate(catalog)
data_payload = self._args_to_payload(
catalog=catalog,
column_filters=kwargs,
center={'-c.rd': 180})
response = self._request(
method='POST', url=self._server_to_url(return_type=return_type),
data=data_payload, timeout=self.TIMEOUT, cache=cache)
return response
def _args_to_payload(self, *args, **kwargs):
"""
accepts the arguments for different query functions and
builds a script suitable for the Vizier votable CGI.
"""
body = OrderedDict()
center = kwargs.get('center')
# process: catalog
catalog = kwargs.get('catalog', self.catalog)
if catalog is not None:
if isinstance(catalog, six.string_types):
body['-source'] = catalog
elif isinstance(catalog, list):
body['-source'] = ",".join(catalog)
else:
raise TypeError("Catalog must be specified as list or string")
# process: columns
columns = kwargs.get('columns', copy.copy(self.columns))
if columns is not None:
columns = self.columns + columns
# special keywords need to be treated separately
# keyword names that can mean 'all'
alls = ['all', '**']
if any(x in columns for x in alls):
for x in alls:
if x in columns:
columns.remove(x)
body['-out.all'] = 2
# keyword name that means default columns
if '*' in columns:
columns.remove('*')
columns_default = True
else:
columns_default = False
# process: columns - identify sorting requests
columns_out = []
sorts_out = []
for column in columns:
if column[0] == '+':
columns_out += [column[1:]]
sorts_out += [column[1:]]
elif column[0] == '-':
columns_out += [column[1:]]
sorts_out += [column]
else:
columns_out += [column]
if columns_default:
body['-out'] = '*'
else:
body['-out'] = columns_out
if columns_out:
body['-out.add'] = ','.join(columns_out)
if len(sorts_out) > 0:
body['-sort'] = ','.join(sorts_out)
# process: maximum rows returned
row_limit = kwargs.get('row_limit') or self.ROW_LIMIT
if row_limit < 0:
body["-out.max"] = 'unlimited'
else:
body["-out.max"] = row_limit
# process: column filters
column_filters = self.column_filters.copy()
column_filters.update(kwargs.get('column_filters', {}))
for (key, value) in column_filters.items():
body[key] = value
# process: center
if center is not None:
for (key, value) in center.items():
body[key] = value
# add column metadata: name, unit, UCD1+, and description
body["-out.meta"] = "huUD"
# merge tables when a list is queried against a single catalog
body["-out.form"] = "mini"
# computed position should always be in decimal degrees
body["-oc.form"] = "d"
ucd = kwargs.get('ucd', "") + self.ucd
if ucd:
body['-ucd'] = ucd
# create final script
script = "\n".join(["{key}={val}".format(key=key, val=val)
for key, val in body.items()])
# add keywords
if (not isinstance(self.keywords, property) and
self.keywords is not None):
script += "\n" + str(self.keywords)
return script
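    # Illustration (added comment): for a simple call such as
    # _args_to_payload(catalog='II/246') with the default columns, the generated
    # script is a newline-separated list of key=value pairs roughly like
    #   -source=II/246
    #   -out=*
    #   -out.max=<ROW_LIMIT>
    #   -out.meta=huUD
    #   -out.form=mini
    #   -oc.form=d
    # where <ROW_LIMIT> stands for the configured self.ROW_LIMIT.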
def _parse_result(self, response, get_catalog_names=False, verbose=False,
invalid='warn'):
"""
Parses the HTTP response to create a `~astropy.table.Table`.
Returns the raw result as a string in case of parse errors.
Parameters
----------
response : `requests.Response`
The response of the HTTP POST request
get_catalog_names : bool
(only for VOTABLE queries)
If specified, return only the table names (useful for table
discovery).
invalid : 'warn', 'mask' or 'exception'
(only for VOTABLE queries)
The behavior if a VOTABLE cannot be parsed. The default is
'warn', which will try to parse the table, but if an
exception is raised during parsing, the exception will be
issued as a warning instead and a masked table will be
returned. A value of 'exception' will not catch the
exception, while a value of 'mask' will simply always mask
invalid values.
Returns
-------
table_list : `astroquery.utils.TableList` or str
If there are errors in the parsing, then returns the raw results
as a string.
"""
if response.content[:5] == b'<?xml':
try:
return parse_vizier_votable(
response.content, verbose=verbose, invalid=invalid,
get_catalog_names=get_catalog_names)
except Exception as ex:
self.response = response
self.table_parse_error = ex
raise TableParseError("Failed to parse VIZIER result! The "
"raw response can be found in "
"self.response, and the error in "
"self.table_parse_error. The attempted "
"parsed result is in "
"self.parsed_result.\n Exception: " +
str(self.table_parse_error))
        elif response.content[:4] == b'#\n# ':
return parse_vizier_tsvfile(response.content, verbose=verbose)
elif response.content[:6] == b'SIMPLE':
return fits.open(BytesIO(response.content),
ignore_missing_end=True)
@property
def valid_keywords(self):
if not hasattr(self, '_valid_keyword_dict'):
file_name = aud.get_pkg_data_filename(
os.path.join("data", "inverse_dict.json"))
with open(file_name, 'r') as f:
kwd = json.load(f)
self._valid_keyword_types = sorted(kwd.values())
self._valid_keyword_dict = OrderedDict([(k, kwd[k])
for k in sorted(kwd)])
return self._valid_keyword_dict
def parse_vizier_tsvfile(data, verbose=False):
"""
Parse a Vizier-generated list of tsv data tables into a list of astropy
Tables.
Parameters
----------
data : ascii str
An ascii string containing the vizier-formatted list of tables
"""
# http://stackoverflow.com/questions/4664850/find-all-occurrences-of-a-substring-in-python
split_indices = [m.start() for m in re.finditer('\n\n#', data)]
# we want to slice out chunks of the file each time
split_limits = zip(split_indices[:-1], split_indices[1:])
tables = [ascii.read(BytesIO(data[a:b]), format='fast_tab', delimiter='\t',
header_start=0, comment="#") for
a, b in split_limits]
return tables
def parse_vizier_votable(data, verbose=False, invalid='warn',
get_catalog_names=False):
"""
Given a votable as string, parse it into dict or tables
"""
if not verbose:
commons.suppress_vo_warnings()
tf = BytesIO(data)
if invalid == 'mask':
vo_tree = votable.parse(tf, pedantic=False, invalid='mask')
elif invalid == 'warn':
try:
vo_tree = votable.parse(tf, pedantic=False, invalid='exception')
except Exception as ex:
warnings.warn("VOTABLE parsing raised exception: {0}".format(ex))
vo_tree = votable.parse(tf, pedantic=False, invalid='mask')
elif invalid == 'exception':
vo_tree = votable.parse(tf, pedantic=False, invalid='exception')
else:
raise ValueError("Invalid keyword for 'invalid'. "
"Must be exception, mask, or warn")
if get_catalog_names:
return OrderedDict([(R.name, R) for R in vo_tree.resources])
else:
table_dict = OrderedDict()
for t in vo_tree.iter_tables():
if len(t.array) > 0:
if t.ref is not None:
name = vo_tree.get_table_by_id(t.ref).name
else:
name = t.name
if name not in table_dict.keys():
table_dict[name] = []
table_dict[name] += [t.to_table()]
for name in table_dict.keys():
if len(table_dict[name]) > 1:
table_dict[name] = tbl.vstack(table_dict[name])
else:
table_dict[name] = table_dict[name][0]
return commons.TableList(table_dict)
def _parse_angle(angle):
"""
Returns the Vizier-formatted units and values for box/radius
dimensions in case of region queries.
Parameters
----------
angle : convertible to `astropy.coordinates.Angle`
Returns
-------
(unit, value) : tuple
formatted for Vizier.
"""
angle = coord.Angle(angle)
if angle.unit == u.arcsec:
unit, value = 's', angle.value
elif angle.unit == u.arcmin:
unit, value = 'm', angle.value
else:
unit, value = 'd', angle.to(u.deg).value
return unit, value
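# Illustration (added comment): _parse_angle maps an Angle onto the unit suffix
# Vizier expects, e.g.
#   _parse_angle(coord.Angle(5 * u.arcmin))   -> ('m', 5.0)
#   _parse_angle(coord.Angle(0.25 * u.deg))   -> ('d', 0.25)
# so query_region_async builds keys such as "-c.rm" or "-c.bd" from it.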
class VizierKeyword(list):
"""Helper class for setting keywords for Vizier queries"""
def __init__(self, keywords):
file_name = aud.get_pkg_data_filename(
os.path.join("data", "inverse_dict.json"))
with open(file_name, 'r') as f:
kwd = json.load(f)
self.keyword_types = sorted(kwd.values())
self.keyword_dict = OrderedDict([(k, kwd[k]) for k in sorted(kwd)])
self._keywords = None
self.keywords = keywords
@property
def keywords(self):
"""
List or string for keyword(s) that must be set for the Vizier
object.
"""
return self._keywords
@keywords.setter
def keywords(self, values):
if isinstance(values, six.string_types):
            values = values.split()
keys = [key.lower() for key in self.keyword_dict]
values = [val.lower() for val in values]
# warn about unknown keywords
for val in set(values) - set(keys):
warnings.warn("{val} : No such keyword".format(val=val))
valid_keys = [
key for key in self.keyword_dict.keys()
if key.lower() in list(map(str.lower, values))]
# create a dict for each type of keyword
set_keywords = OrderedDict()
for key in self.keyword_dict:
if key in valid_keys:
if self.keyword_dict[key] in set_keywords:
set_keywords[self.keyword_dict[key]].append(key)
else:
set_keywords[self.keyword_dict[key]] = [key]
self._keywords = OrderedDict(
[(k, sorted(set_keywords[k]))
for k in set_keywords])
@keywords.deleter
def keywords(self):
del self._keywords
def __repr__(self):
return "\n".join([x for key in self.keywords
for x in self.get_keyword_str(key)])
def get_keyword_str(self, key):
"""
Helper function that returns the keywords, grouped into appropriate
categories and suitable for the Vizier votable CGI.
Comma-separated is not valid!!!
"""
keyword_name = "-kw." + key
return [keyword_name + "=" + s for s in self.keywords[key]]
Vizier = VizierClass()
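# Usage sketch (added, illustrative only): @async_to_sync generates synchronous
# counterparts such as query_region() and query_object() from the *_async
# methods above, so a typical session looks roughly like:
#
#   import astropy.units as u
#   import astropy.coordinates as coord
#   v = Vizier(columns=['*'], row_limit=50)
#   result = v.query_region(coord.SkyCoord.from_name('M31'),
#                           radius=2 * u.arcmin, catalog='II/246')
#   print(result)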
'''
Source codes for Python Machine Learning By Example 2nd Edition (Packt Publishing)
Chapter 5: Classifying Newsgroup Topic with Support Vector Machine
Author: Yuxi (<NAME>)
'''
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.datasets import fetch_20newsgroups
from nltk.corpus import names
from nltk.stem import WordNetLemmatizer
all_names = set(names.words())
lemmatizer = WordNetLemmatizer()
def is_letter_only(word):
return word.isalpha()
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
def clean_text(docs):
docs_cleaned = []
for doc in docs:
doc = doc.lower()
doc_cleaned = ' '.join(lemmatizer.lemmatize(word) for word in doc.split()
if is_letter_only(word) and word not in all_names and word not in stop_words)
docs_cleaned.append(doc_cleaned)
return docs_cleaned
# Binary classification
categories = ['comp.graphics', 'sci.space']
data_train = fetch_20newsgroups(subset='train', categories=categories, random_state=42)
data_test = fetch_20newsgroups(subset='test', categories=categories, random_state=42)
cleaned_train = clean_text(data_train.data)
label_train = data_train.target
cleaned_test = clean_text(data_test.data)
label_test = data_test.target
from collections import Counter
Counter(label_train)
tfidf_vectorizer = TfidfVectorizer(stop_words='english', max_features=None)
term_docs_train = tfidf_vectorizer.fit_transform(cleaned_train)
term_docs_test = tfidf_vectorizer.transform(cleaned_test)
from sklearn.svm import SVC
svm = SVC(kernel='linear', C=1.0, random_state=42)
svm.fit(term_docs_train, label_train)
accuracy = svm.score(term_docs_test, label_test)
print('The accuracy of binary classification is: {0:.1f}%'.format(accuracy*100))
# Multiclass classification
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
'rec.sport.hockey'
]
data_train = fetch_20newsgroups(subset='train', categories=categories, random_state=42)
data_test = fetch_20newsgroups(subset='test', categories=categories, random_state=42)
cleaned_train = clean_text(data_train.data)
label_train = data_train.target
cleaned_test = clean_text(data_test.data)
label_test = data_test.target
term_docs_train = tfidf_vectorizer.fit_transform(cleaned_train)
term_docs_test = tfidf_vectorizer.transform(cleaned_test)
svm = SVC(kernel='linear', C=1.0, random_state=42)
svm.fit(term_docs_train, label_train)
accuracy = svm.score(term_docs_test, label_test)
print('The accuracy of 5-class classification is: {0:.1f}%'.format(accuracy*100))
from sklearn.metrics import classification_report
prediction = svm.predict(term_docs_test)
report = classification_report(label_test, prediction)
print(report)
# Grid search
categories = None
data_train = fetch_20newsgroups(subset='train', categories=categories, random_state=42)
data_test = fetch_20newsgroups(subset='test', categories=categories, random_state=42)
cleaned_train = clean_text(data_train.data)
label_train = data_train.target
cleaned_test = clean_text(data_test.data)
label_test = data_test.target
tfidf_vectorizer = TfidfVectorizer(stop_words='english', max_features=None)
term_docs_train = tfidf_vectorizer.fit_transform(cleaned_train)
term_docs_test = tfidf_vectorizer.transform(cleaned_test)
parameters = {'C': [0.1, 1, 10, 100]}
svc_libsvm = SVC(kernel='linear')
from sklearn.model_selection import GridSearchCV
grid_search = GridSearchCV(svc_libsvm, parameters, n_jobs=-1, cv=5)
import timeit
start_time = timeit.default_timer()
grid_search.fit(term_docs_train, label_train)
print("--- %0.3fs seconds ---" % (timeit.default_timer() - start_time))
print(grid_search.best_params_)
print(grid_search.best_score_)
svc_libsvm_best = grid_search.best_estimator_
accuracy = svc_libsvm_best.score(term_docs_test, label_test)
print('The accuracy of 20-class classification is: {0:.1f}%'.format(accuracy*100))
from sklearn.svm import LinearSVC
svc_linear = LinearSVC()
grid_search = GridSearchCV(svc_linear, parameters, n_jobs=-1, cv=5)
start_time = timeit.default_timer()
grid_search.fit(term_docs_train, label_train)
print("--- %0.3fs seconds ---" % (timeit.default_timer() - start_time))
print(grid_search.best_params_)
print(grid_search.best_score_)
svc_linear_best = grid_search.best_estimator_
accuracy = svc_linear_best.score(term_docs_test, label_test)
print('The accuracy of 20-class classification is: {0:.1f}%'.format(accuracy*100))
# Pipeline
from sklearn.pipeline import Pipeline
pipeline = Pipeline([
('tfidf', TfidfVectorizer(stop_words='english')),
('svc', LinearSVC()),
])
parameters_pipeline = {
'tfidf__max_df': (0.25, 0.5, 1.0),
'tfidf__max_features': (10000, None),
'tfidf__sublinear_tf': (True, False),
'tfidf__smooth_idf': (True, False),
'svc__C': (0.3, 1, 3),
}
grid_search = GridSearchCV(pipeline, parameters_pipeline, n_jobs=-1, cv=5)
start_time = timeit.default_timer()
grid_search.fit(cleaned_train, label_train)
print("--- %0.3fs seconds ---" % (timeit.default_timer() - start_time))
print(grid_search.best_params_)
print(grid_search.best_score_)
pipeline_best = grid_search.best_estimator_
accuracy = pipeline_best.score(cleaned_test, label_test)
print('The accuracy of 20-class classification is: {0:.1f}%'.format(accuracy*100))
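# Added illustration (not part of the book's original script): a confusion matrix
# for the tuned pipeline, computed with scikit-learn's standard API on the same
# held-out test set used above.
from sklearn.metrics import confusion_matrix
prediction = pipeline_best.predict(cleaned_test)
print(confusion_matrix(label_test, prediction))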
<reponame>danvergara/pyblock
import pickle
import os
import sys
import base64, codecs, json, requests
import subprocess
import time as t
from pblogo import *
from cfonts import render, say
def clear(): # clear the screen
os.system('cls' if os.name=='nt' else 'clear')
def rectangle(n):
    # Draw a horizontal bar of block characters whose width scales with n.
    # The bar is coloured blue via an ANSI 256-colour escape, and the
    # surrounding cursor-up escapes ("\033[A") keep it pinned to the same
    # screen region when it is redrawn.
    width = n - 4
    for row in range(3):
        if row == 1:
            print(f'\033[A\u001b[38;5;27m{"█" * width}\033[A')
        else:
            print('')
def pathexec():
global path
path = {"ip_port":"", "rpcuser":"", "rpcpass":"", "bitcoincli":""}
pathv = pickle.load(open("config/bclock.conf", "rb")) # Load the file 'bclock.conf'
path = pathv # Copy the variable pathv to 'path'
def counttxs():
try:
bitcoinclient = f'{path["bitcoincli"]} getblockcount'
block = os.popen(str(bitcoinclient)).read() # 'getblockcount' convert to string
b = block
a = b
pathexec()
clear()
getrawmempool = " getrawmempool"
gna = os.popen(path['bitcoincli'] + getrawmempool)
gnaa = gna.read()
gna1 = str(gnaa)
d = json.loads(gna1)
e = len(d)
n = e / 10
nn = n
getrawmempool = " getrawmempool"
while True:
x = a
bitcoinclient = f'{path["bitcoincli"]} getblockcount'
block = os.popen(str(bitcoinclient)).read() # 'getblockcount' convert to string
b = block
pathexec()
gna = os.popen(path['bitcoincli'] + getrawmempool)
gnaa = gna.read()
gna1 = str(gnaa)
d = json.loads(gna1)
e = len(d)
n = e / 10
if e > nn:
clear()
outputtxs = render(
f'{e} txs',
colors=[settingsClock['colorA'], settingsClock['colorB']],
align='center',
font='tiny',
)
print("\x1b[?25l" + outputtxs)
shq = int(n)
ss = str(rectangle(shq))
qq = ss.replace("None","")
print(f"\033[A{qq}\033[A")
nn = e
            if int(b) > int(a):
print("\n\n\n")
output = render(str(b), colors=[settingsClock['colorA'], settingsClock['colorB']], align='center', font='tiny')
print("\a\x1b[?25l" + output)
bitcoinclient = f'{path["bitcoincli"]} getbestblockhash'
bb = os.popen(str(bitcoinclient)).read()
ll = bb
bitcoinclientgetblock = f'{path["bitcoincli"]} getblock {ll}'
qq = os.popen(bitcoinclientgetblock).read()
yy = json.loads(qq)
mm = yy
outputtxs = render(str(mm['nTx']) + " txs", colors=[settingsClock['colorA'], settingsClock['colorB']], align='center', font='tiny')
print("\x1b[?25l" + outputtxs)
sh = int(mm['nTx']) / 10
shq = int(sh)
ss = str(rectangle(shq))
print(ss.replace("None",""))
t.sleep(5)
txs = str(mm['nTx'])
if txs == "1":
try:
p = subprocess.Popen(['curl', 'https://poptart.spinda.net'])
p.wait(5)
except subprocess.TimeoutExpired:
p.kill()
print("\033[0;37;40m\x1b[?25l")
a = b
nn = e
except:
pass
settings = {"gradient":"", "design":"block", "colorA":"green", "colorB":"yellow"}
settingsClock = {"gradient":"", "colorA":"green", "colorB":"yellow"}
while True: # Loop
try:
clear()
path = {"ip_port":"", "rpcuser":"", "rpcpass":"", "bitcoincli":""}
if os.path.isfile('config/bclock.conf') or os.path.isfile('config/blnclock.conf'): # Check if the file 'bclock.conf' is in the same folder
pathv = pickle.load(open("config/bclock.conf", "rb")) # Load the file 'bclock.conf'
path = pathv # Copy the variable pathv to 'path'
else:
blogo()
print("Welcome to \033[1;31;40mPyBLOCK\033[0;37;40m\n\n")
print("\n\tIf you are going to use your local node leave IP:PORT/USER/PASSWORD in blank.\n")
path[
'ip_port'
] = f'http://{input("Insert IP:PORT to access your remote Bitcoin-Cli node: ")}'
path['rpcuser'] = input("RPC User: ")
path['rpcpass'] = input("RPC Password: ")
print("\n\tLocal Bitcoin Core Node connection.\n")
path['bitcoincli']= input("Insert the Path to Bitcoin-Cli: ")
pickle.dump(path, open("config/bclock.conf", "wb"))
counttxs()
except:
print("\n")
sys.exit(101)
#!/usr/bin/env python3
#
# tcl_env.py
# TCL environment for RL algorithms
#
# Author: <NAME>
import random
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import gym
# Trying out if this works for others. from gym import spaces had some issues
import gym.spaces as spaces
import threading
import math
# Default parameters for
# default TCL environment.
# From Taha's code
# days range
DEFAULT_DAY0=0
DEFAULT_DAYN=1
# Power generated in the microgrid
DEFAULT_POWER_GENERATED = np.genfromtxt("wind_generation_fortum.csv", delimiter=',', skip_header=0, usecols=[-1]) / 100
DEFAULT_WIND_POWER_COST = 3.2
# Balancing market prices
DEFAULT_DOWN_REG = np.genfromtxt("down_regulation.csv", delimiter=',', skip_header=1, usecols=[-1]) / 10
DEFAULT_UP_REG = np.genfromtxt("up_regulation.csv", delimiter=',', skip_header=1, usecols=[-1]) / 10
DEFAULT_TRANSFER_PRICE_IMPORT = 0.97
DEFAULT_TRANSFER_PRICE_EXPORT = 0.09
# Length of one episode
DEFAULT_ITERATIONS = 24
# TCLs
DEFAULT_NUM_TCLS = 100
DEFAULT_AVGTCLPOWER = 1.5
DEFAULT_TEMPERATURS = np.genfromtxt("temperatures.csv",usecols=[5],skip_header=1,delimiter=',')
DEFAULT_TCL_SALE_PRICE = 3.2
DEFAULT_TCL_TMIN = 19
DEFAULT_TCL_TMAX = 25
# Price responsive loads
DEFAULT_NUM_LOADS = 150
DEFAULT_BASE_LOAD = np.array(
[.4, .3,.2,.2,.2,.2,.3,.5,.6,.6,.5,.5,.5,.4,.4,.6,.8,1.4,1.2,.9,.8,.6,.5,.4])
DEFAULT_MARKET_PRICE = 5.48
DEFAULT_PRICE_TIERS = np.array([-3.0, -1.5, 0.0, 1.5, 3.0])
# Battery characteristics (kwh)
DEFAULT_BAT_CAPACITY=500
DEFAULT_MAX_CHARGE=250
DEFAULT_MAX_DISCHARGE=250
MAX_R = 100
# Rendering lists
SOCS_RENDER = []
LOADS_RENDER = []
BATTERY_RENDER = []
PRICE_RENDER = []
ENERGY_SOLD_RENDER = []
ENERGY_BOUGHT_RENDER = []
GRID_PRICES_BUY_RENDER = []
GRID_PRICES_SELL_RENDER = []
ENERGY_GENERATED_RENDER = []
TCL_CONTROL_RENDER = []
TCL_CONSUMPTION_RENDER = []
TOTAL_CONSUMPTION_RENDER=[]
TEMP_RENDER=[]
ACTIONS = [[i, j, k, l] for i in range(4) for j in range(5) for k in range(2) for l in range(2)]
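# Added comment: ACTIONS enumerates the 4 * 5 * 2 * 2 = 80 discrete combinations
# [tcl_action, price_action, deficiency_action, excess_action], so a flat index
# from the Discrete(80) action space maps back via ACTIONS[idx], e.g.
# ACTIONS[0] == [0, 0, 0, 0] and ACTIONS[79] == [3, 4, 1, 1]; step() performs
# this lookup when it receives an integer instead of a list.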
class TCL:
"""
    Simulates an individual TCL
"""
def __init__(self, ca, cm, q, P, Tmin=DEFAULT_TCL_TMIN, Tmax=DEFAULT_TCL_TMAX):
self.ca = ca
self.cm = cm
self.q = q
self.P = P
self.Tmin = Tmin
self.Tmax = Tmax
# Added for clarity
self.u = 0
def set_T(self, T, Tm):
self.T = T
self.Tm = Tm
def control(self, ui=0):
# control TCL using u with respect to the backup controller
if self.T < self.Tmin:
self.u = 1
elif self.Tmin < self.T < self.Tmax:
self.u = ui
else:
self.u = 0
def update_state(self, T0):
# update the indoor and mass temperatures according to (22)
for _ in range(2):
self.T += self.ca * (T0 - self.T) + self.cm * (self.Tm - self.T) + self.P * self.u + self.q
self.Tm += self.cm * (self.T - self.Tm)
if self.T >= self.Tmax:
break
"""
@property allows us to write "tcl.SoC", and it will
run this function to get the value
"""
@property
def SoC(self):
return (self.T - self.Tmin) / (self.Tmax - self.Tmin)
class Battery:
# Simulates the battery system of the microGrid
def __init__(self, capacity, useD, dissipation, rateC, maxDD, chargeE):
self.capacity = capacity # full charge battery capacity
self.useD = useD # useful discharge coefficient
self.dissipation = dissipation # dissipation coefficient of the battery
self.rateC = rateC # charging rate
self.maxDD = maxDD # maximum power that the battery can deliver per timestep
self.chargeE = chargeE # max Energy given to the battery to charge
self.RC = 0 # remaining capacity
def charge(self, E):
empty = self.capacity - self.RC
if empty <= 0:
return E
else:
self.RC += self.rateC * min(E,self.chargeE)
leftover = self.RC - self.capacity + max(E-self.chargeE,0)
self.RC = min(self.capacity, self.RC)
return max(leftover, 0)
def supply(self, E):
remaining = self.RC
self.RC -= min(E, remaining,self.maxDD)
self.RC = max(self.RC, 0)
return min(E, remaining,self.maxDD) * self.useD
def dissipate(self):
self.RC = self.RC * math.exp(- self.dissipation)
@property
def SoC(self):
return self.RC / self.capacity
def reset(self):
self.RC=0
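# Added illustration: with the values MicroGridEnv uses below
# (capacity=500, useD=0.9, dissipation=0.001, rateC=0.9, maxDD=250, chargeE=250),
#   b = Battery(500, 0.9, 0.001, 0.9, 250, 250)
#   b.charge(100)   # stores 0.9 * 100 = 90 kWh, returns 0 (nothing spills over)
#   b.supply(50)    # drains 50 kWh, returns 50 * 0.9 = 45 kWh actually delivered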
class Grid:
def __init__(self, down_reg,up_reg, exp_fees, imp_fees):
self.sell_prices = down_reg
self.buy_prices = up_reg
self.exp_fees=exp_fees
self.imp_fees = imp_fees
self.time = 0
def sell(self, E):
return (self.sell_prices[self.time] + self.exp_fees) * E
def buy(self, E):
return -(self.buy_prices[self.time] + self.imp_fees) * E
#
# def get_price(self,time):
# return self.prices[time]
def set_time(self, time):
self.time = time
def total_cost(self,prices, energy):
return sum(prices * energy / 100 + self.imp_fees * energy)
class Generation:
def __init__(self, generation):
self.power = generation
def current_generation(self, time):
        # We consider that we have two sources of power: a constant source and a variable source
return self.power[time]
class Load:
def __init__(self, price_sens, base_load, max_v_load,patience):
self.price_sens = max(0,price_sens)
self.orig_price_sens = max(0,price_sens)
self.base_load = base_load
self.max_v_load = max_v_load
self.response = 0
self.shifted_loads={}
self.patience=max(patience,1)
self.dr_load=0
def react(self, price_tier , time_day):
self.dr_load=self.base_load[time_day]
response = self.price_sens * (price_tier - 2)
if response != 0 :
self.dr_load -= self.base_load[time_day] * response
self.shifted_loads[time_day] = self.base_load[time_day] * response
for k in list(self.shifted_loads):
probability_of_execution = -self.shifted_loads[k]*(price_tier - 2) + (time_day-k)/self.patience
if random.random()<=probability_of_execution:
self.dr_load+=self.shifted_loads[k]
del self.shifted_loads[k]
def load(self):
return max(self.dr_load, 0)
class MicroGridEnv(gym.Env):
def __init__(self,**kwargs):
# Get number of iterations and TCLs from the
# parameters (we have to define it through kwargs because
# of how Gym works...)
self.iterations = kwargs.get("iterations", DEFAULT_ITERATIONS)
self.num_tcls = kwargs.get("num_tcls", DEFAULT_NUM_TCLS)
print(self.num_tcls)
self.avg_tcl_power = kwargs.get("tcl_power", DEFAULT_AVGTCLPOWER)
self.tcl_sale_price = kwargs.get("tcl_price", DEFAULT_TCL_SALE_PRICE)
self.num_loads = kwargs.get("num_loads", DEFAULT_NUM_LOADS)
self.typical_load = kwargs.get("base_load", DEFAULT_BASE_LOAD)
self.market_price = kwargs.get("normal_price", DEFAULT_MARKET_PRICE)
self.temperatures = kwargs.get("temperatures", DEFAULT_TEMPERATURS)
self.price_tiers = kwargs.get("price_tiers", DEFAULT_PRICE_TIERS)
self.day0 = kwargs.get("day0", DEFAULT_DAY0)
self.dayn = kwargs.get("dayn", self.day0+1)
self.power_cost = kwargs.get("power_cost", DEFAULT_WIND_POWER_COST)
self.down_reg = kwargs.get("down_reg", DEFAULT_DOWN_REG)
self.up_reg = kwargs.get("up_reg", DEFAULT_UP_REG)
self.imp_fees = kwargs.get("imp_fees", DEFAULT_TRANSFER_PRICE_IMPORT)
self.exp_fees = kwargs.get("exp_fees", DEFAULT_TRANSFER_PRICE_EXPORT)
self.bat_capacity = kwargs.get("battery_capacity", DEFAULT_BAT_CAPACITY)
self.max_discharge = kwargs.get("max_discharge", DEFAULT_MAX_DISCHARGE)
self.max_charge = kwargs.get("max_charge", DEFAULT_MAX_CHARGE)
# The current day: pick randomly
# self.day = random.randint(self.day0, self.dayn-1)
self.day = self.day0
# The current timestep
self.time_step = 0
# The cluster of TCLs to be controlled.
# These will be created in reset()
self.tcls_parameters = []
# The cluster of loads.
# These will be created in reset()
self.loads_parameters = []
self.generation = Generation(kwargs.get("generation_data", DEFAULT_POWER_GENERATED))
self.grid = Grid(down_reg=self.down_reg,up_reg=self.up_reg, exp_fees=self.exp_fees, imp_fees=self.imp_fees)
self.battery = Battery(capacity=self.bat_capacity, useD=0.9, dissipation=0.001, rateC=0.9, maxDD=self.max_discharge, chargeE=self.max_charge)
self.tcls = [self._create_tcl(*self._create_tcl_parameters()) for _ in range(self.num_tcls)]
self.loads = [self._create_load(*self._create_load_parameters()) for _ in range(self.num_loads)]
self.action_space_sep = spaces.Box(low=0, high=1, dtype=np.float32,
shape=(13,))
self.action_space = spaces.Discrete(80)
# Observations: A vector of TCLs SoCs + loads +battery soc+ power generation + price + temperature + time of day
self.observation_space = spaces.Box(low=-100, high=100, dtype=np.float32,
shape=(self.num_tcls + 7,))
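        # Added comment: _build_state() concatenates the observation in the order
        #   [SoC_1 ... SoC_num_tcls, base_load, high_price, time_step,
        #    battery_SoC, generation, grid_buy_price, grid_sell_price]
        # which accounts for the num_tcls + 7 entries declared above.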
def _create_tcl_parameters(self):
"""
Initialize one TCL randomly with given T_0,
and return it. Copy/paste from Taha's code
"""
# Hardcoded initialization values to create
# bunch of different TCLs
ca = random.normalvariate(0.004, 0.0008)
cm = random.normalvariate(0.3, 0.004)
q = random.normalvariate(0, 0.01)
P = random.normalvariate(self.avg_tcl_power, 0.01)
init_temp = random.uniform(15,24)
return [ca, cm, q, P,init_temp]
def _create_tcl(self, ca, cm, q, P, init_temp):
tcl = TCL(ca, cm, q, P)
tcl.set_T(init_temp, init_temp)
return tcl
def _create_load_parameters(self):
"""
Initialize one load randomly,
and return it.
"""
# Hardcoded initialization values to create
# bunch of different loads
price_sensitivity = random.normalvariate(0.4, 0.3)
max_v_load = random.normalvariate(0.4, 0.01)
patience= int(random.normalvariate(10,6))
return [price_sensitivity, max_v_load,patience]
def _create_load(self, price_sensitivity, max_v_load,patience):
load = Load(price_sensitivity, base_load=self.typical_load, max_v_load=max_v_load, patience=patience)
return load
def _build_state(self):
"""
Return current state representation as one vector.
Returns:
state: 1D state vector, containing state-of-charges of all TCLs, Loads, current battery soc, current power generation,
current temperature, current price and current time (hour) of day
"""
# SoCs of all TCLs binned + current temperature + current price + time of day (hour)
socs = np.array([tcl.SoC for tcl in self.tcls])
# Scaling between 0 and 1
# We need to standardize the generation and the price
# Minimum soc is -1
socs = (socs+np.ones(shape=socs.shape))/2
loads = self.typical_load[(self.time_step) % self.iterations]
loads = (loads - min(self.typical_load)) / (max(self.typical_load) - min(self.typical_load))
current_generation = self.generation.current_generation(self.day*self.iterations+self.time_step)
current_generation = (current_generation-
np.average(self.generation.power[self.day*self.iterations:self.day*self.iterations+self.iterations]))\
/np.std(self.generation.power[self.day*self.iterations:self.day*self.iterations+self.iterations])
temperature = self.temperatures[self.day*self.iterations+self.time_step]
temperature = (temperature-
min(self.temperatures[self.day*self.iterations:self.day*self.iterations+self.iterations]))\
/(max(self.temperatures[self.day*self.iterations:self.day*self.iterations+self.iterations])
-min(self.temperatures[self.day*self.iterations:self.day*self.iterations+self.iterations]))
price = self.grid.buy_prices[self.day*self.iterations+self.time_step]
price = (price -
np.average(self.grid.buy_prices[self.day*self.iterations:self.day*self.iterations+self.iterations])) \
/ np.std(self.grid.buy_prices[self.day*self.iterations:self.day*self.iterations+self.iterations])
price_grid_sell = self.grid.sell_prices[self.day*self.iterations+self.time_step]
price_grid_sell = (price_grid_sell -
np.average(self.grid.sell_prices[self.day*self.iterations:self.day*self.iterations + self.iterations])) \
/ np.std(self.grid.sell_prices[self.day*self.iterations:self.day*self.iterations+self.iterations])
high_price = min(self.high_price/4,1)
time_step = (self.time_step)/(self.iterations-1)
state = np.concatenate((socs, [loads, high_price, time_step,self.battery.SoC, current_generation,
price,
price_grid_sell ]))
return state
def _build_info(self):
"""
        Return dictionary of misc. info to be given per state.
        Here this means providing a forecast of future
        temperatures for the next 24 hours and the corresponding timesteps.
"""
temp_forecast = np.array(self.temperatures[self.time_step + 1:self.time_step + self.iterations+1])
return {"temperature_forecast": temp_forecast,
"forecast_times": np.arange(0, self.iterations)}
def _compute_tcl_power(self):
"""
Return the total power consumption of all TCLs
"""
return sum([tcl.u * tcl.P for tcl in self.tcls])
def step(self, action):
"""
Arguments:
action: A list.
Returns:
state: Current state
reward: How much reward was obtained on last action
terminal: Boolean on if the game ended (maximum number of iterations)
            info: Dictionary with a temperature forecast (see _build_info)
"""
if type(action) is not list:
action = ACTIONS[action]
self.grid.set_time(self.day*self.iterations + self.time_step)
reward = 0
# Update state of TCLs according to action
tcl_action = action[0]
price_action = action[1]
self.high_price += price_action - 2
if self.high_price > 4:
price_action = 2
self.high_price = 4
energy_deficiency_action = action[2]
energy_excess_action = action[3]
# Get the energy generated by the DER
available_energy = self.generation.current_generation(self.day*self.iterations + self.time_step)
# Calculate the cost of energy produced from wind turbines
reward-= available_energy * self.power_cost / 100
# We implement the pricing action and we calculate the total load in response to the price
for load in self.loads:
load.react(price_tier=price_action, time_day=self.time_step)
total_loads = sum([l.load() for l in self.loads])
# print("Total loads",total_loads)
# We fulfilled the load with the available energy.
available_energy -= total_loads
# Constraint of charging too high prices
# We calculate the return based on the sale price.
self.sale_price = self.price_tiers[price_action] + self.market_price
# We increment the reward by the amount of return
# Division by 100 to transform from cents to euros
reward += total_loads * (self.sale_price) / 100
# Distributing the energy according to priority
sortedTCLs = sorted(self.tcls, key=lambda x: x.SoC)
# print(tcl_action)
control = max(min(tcl_action * self.num_tcls * self.avg_tcl_power / 3, available_energy), 0)
self.control = control
for tcl in sortedTCLs:
if control > 0:
tcl.control(1)
control -= tcl.P * tcl.u
else:
tcl.control(0)
tcl.update_state(self.temperatures[self.day*self.iterations + self.time_step])
available_energy -= self._compute_tcl_power()
reward += self._compute_tcl_power() * self.tcl_sale_price / 100
# print("Available energy:", available_energy)
if available_energy > 0:
if energy_excess_action:
available_energy = self.battery.charge(available_energy)
# print("available energy after charging the battery", available_energy)
reward += self.grid.sell(available_energy) / 100
else:
reward += self.grid.sell(available_energy) / 100
self.energy_sold = available_energy
self.energy_bought = 0
else:
if energy_deficiency_action:
available_energy += self.battery.supply(-available_energy)
# print("after energy was taken from battery", available_energy)
self.energy_bought = -available_energy
reward += self.grid.buy(self.energy_bought) / 100
self.energy_sold = 0
# Proceed to next timestep.
self.time_step += 1
# Build up the representation of the current state (in the next timestep)
state = self._build_state()
terminal = self.time_step == self.iterations
# if terminal:
# # # reward if battery is charged
# # reward += abs(reward * self.battery.SoC / 2)
info = self._build_info()
return state, reward/MAX_R , terminal, info
def reset(self,day=None):
"""
        Reset the day, timestep and price counter, and return the initial state.
        Note: unlike reset_all, this keeps the existing TCLs and loads.
        """
        if day is None:
self.day= self.day0
else:
self.day = day
print("Day:", self.day)
self.time_step = 0
self.high_price = 0
return self._build_state()
def reset_all(self,day=None):
"""
Create new TCLs, and return initial state.
Note: Overrides previous TCLs
"""
        if day is None:
# self.day = random.randint(self.day0, self.dayn-1)
self.day= self.day0
else:
self.day = day
print("Day:", self.day)
self.time_step = 0
self.battery.reset()
self.high_price = 0
self.tcls.clear()
self.loads.clear()
self.tcls = [self._create_tcl(*self._create_tcl_parameters()) for _ in range(self.num_tcls)]
self.loads = [self._create_load(*self._create_load_parameters()) for _ in range(self.num_loads)]
return self._build_state()
def render(self,name=''):
SOCS_RENDER.append([tcl.SoC*100 for tcl in self.tcls])
LOADS_RENDER.append([l.load() for l in self.loads])
PRICE_RENDER.append(self.sale_price)
BATTERY_RENDER.append(self.battery.SoC)
ENERGY_GENERATED_RENDER.append(self.generation.current_generation(self.day*self.iterations+self.time_step-1))
ENERGY_SOLD_RENDER.append(self.energy_sold)
ENERGY_BOUGHT_RENDER.append(self.energy_bought)
GRID_PRICES_BUY_RENDER.append(self.grid.buy_prices[self.day * self.iterations + self.time_step-1])
GRID_PRICES_SELL_RENDER.append(self.grid.sell_prices[self.day * self.iterations + self.time_step-1])
TCL_CONTROL_RENDER.append(self.control)
TCL_CONSUMPTION_RENDER.append(self._compute_tcl_power())
TOTAL_CONSUMPTION_RENDER.append(self._compute_tcl_power()+np.sum([l.load() for l in self.loads]))
TEMP_RENDER.append(self.temperatures[self.day*self.iterations+self.time_step-1])
if self.time_step==self.iterations:
fig=plt.figure()
# ax = pyplot.axes()
ax = plt.subplot(2, 1, 1)
plt.axhspan(0, 100, facecolor='g', alpha=0.5)
ax.set_facecolor("silver")
ax.yaxis.grid(True)
ax.set_ylabel("TCLs state of charge %")
ax.boxplot(SOCS_RENDER, positions=range(24))
ax1 = ax.twinx()
ax1.set_ylabel("Temperatures °C")
ax1.plot(np.array(TEMP_RENDER), '--')
plt.title("TCLs state of charge and outdoor Temperatures")
plt.xlabel("Time (h)")
plt.legend(["Outdoor Temperatures"], loc='lower right')
# plt.show()
ax = plt.subplot(2, 1, 2)
ax.set_facecolor("silver")
ax.set_ylabel("kW")
ax.set_xlabel("Time (h)")
ax.yaxis.grid(True)
ax.plot(ENERGY_GENERATED_RENDER, color='k')
ax.bar(x=np.array(np.arange(self.iterations)) - 0.2, height=TCL_CONTROL_RENDER, width=0.2)
ax.bar(x=np.array(np.arange(self.iterations)), height=TCL_CONSUMPTION_RENDER, width=0.2)
plt.xticks( np.array(np.arange(self.iterations)) )
plt.title("Energy allocated to and consumed by TCLs and energy generated")
plt.legend(['Energy generated','Energy allocated for TCLs', 'Energy consumed by TCLs'])
plt.xlabel("Time (h)")
plt.ylabel("kW")
plt.show()
# ax = plt.axes()
# ax.set_facecolor("silver")
# ax.yaxis.grid(True)
# plt.plot(PRICE_RENDER,color='k')
# plt.title("SALE PRICES")
# plt.xlabel("Time (h)")
# plt.ylabel("€ cents")
# plt.show()
#
# ax = plt.axes()
# ax.set_facecolor("silver")
# ax.set_xlabel("Time (h)")
# ax.yaxis.grid(True)
# plt.plot(np.array(BATTERY_RENDER),color='k')
# plt.title("ESS SOC")
# plt.xlabel("Time (h)")
# # ax4.set_ylabel("BATTERY SOC")
# plt.show()
#
#
# ax = plt.axes()
# ax.set_facecolor("silver")
# ax.set_xlabel("Time (h)")
# ax.set_ylabel("kWh")
# ax.yaxis.grid(True)
# plt.plot(np.array(TOTAL_CONSUMPTION_RENDER), color='k')
# plt.title("Demand")
# plt.xlabel("Time (h)")
# plt.show()
#
#
#
# ax = plt.axes()
# ax.set_facecolor("silver")
# ax.set_xlabel("Time (h)")
# ax.yaxis.grid(True)
# plt.plot(np.array(self.typical_load), color='k')
# plt.title("Expected Individual basic load (L_b)")
# plt.xlabel("Time (h)")
# plt.ylabel("kWh")
# plt.show()
#
# ax = plt.axes()
# ax.set_facecolor("silver")
# ax.set_ylabel("kW")
# ax.set_xlabel("Time (h)")
# ax.yaxis.grid(True)
# plt.boxplot(np.array(LOADS_RENDER).T)
# plt.title("Hourly residential loads")
# plt.xlabel("Time (h)")
# plt.show()
#
#
#
# ax = plt.axes()
# ax.set_facecolor("silver")
# ax.yaxis.grid(True)
# plt.plot(np.array(ENERGY_GENERATED_RENDER),color='k')
# plt.title("ENERGY GENERATED")
# plt.xlabel("Time (h)")
# plt.ylabel("kW")
# plt.show()
#
# ax = plt.axes()
# ax.set_facecolor("silver")
# ax.yaxis.grid(True)
# # ax.axis(ymin=0,ymax=610)
# ax.bar(x=np.array(np.arange(self.iterations)),height=np.array(ENERGY_SOLD_RENDER),color='navy', width=0.8)
# ax.bar(x=np.array(np.arange(self.iterations)),height=np.array(ENERGY_BOUGHT_RENDER),color='darkred', width=0.8)
# ax.set_xlabel("Time (h)")
# ax.set_ylabel("Energy Exchanged kWh")
# ax.legend(['Energy sold', 'Energy purchased'],loc='upper left')
# # pyplot.show()
#
# ax1=ax.twinx()
# ax1.plot(np.array(GRID_PRICES_BUY_RENDER),color='red')
# ax1.plot(np.array(GRID_PRICES_SELL_RENDER), color='green')
# ax1.set_ylabel("GRID PRICES € cents")
# ax1.legend(['Buying prices','Selling prices'],loc='upper right')
# plt.show()
# np.save(name + 'Cost' + str(self.day) + '.npy', self.grid.total_cost(np.array(GRID_PRICES_RENDER),np.array(ENERGY_BOUGHT_RENDER)))
# np.save(name + 'Energy_bought_sold' + str(self.day) + '.npy', np.array(ENERGY_BOUGHT_RENDER)-np.array(ENERGY_SOLD_RENDER))
# np.save(name+'TOTAL_Consumption'+str(self.day)+'.npy' , TOTAL_CONSUMPTION_RENDER)
SOCS_RENDER.clear()
LOADS_RENDER.clear()
PRICE_RENDER.clear()
BATTERY_RENDER.clear()
GRID_PRICES_BUY_RENDER.clear()
GRID_PRICES_SELL_RENDER.clear()
ENERGY_BOUGHT_RENDER.clear()
ENERGY_SOLD_RENDER.clear()
ENERGY_GENERATED_RENDER.clear()
TCL_CONTROL_RENDER.clear()
TCL_CONSUMPTION_RENDER.clear()
TOTAL_CONSUMPTION_RENDER.clear()
TEMP_RENDER.clear()
def close(self):
"""
Nothing to be done here, but has to be defined
"""
return
def seedy(self, s):
"""
Set the random seed for consistent experiments
"""
random.seed(s)
np.random.seed(s)
if __name__ == '__main__':
# Testing the environment
# Initialize the environment
env = MicroGridEnv()
env.seedy(1)
# Save the rewards in a list
rewards = []
# reset the environment to the initial state
state = env.reset()
# Call render to prepare the visualization
# Interact with the environment (here we choose random actions) until the terminal state is reached
while True:
# Pick an action from the action space (here we pick an index between 0 and 80)
# action = env.action_space.sample()
# action =[np.argmax(action[0:4]),np.argmax(action[4:9]),np.argmax(action[9:11]),np.argmax(action[11:])]
action=[1,2,0,0]
# Using the index we get the actual action that we will send to the environment
# print(ACTIONS[action])
print(action)
# Perform a step in the environment given the chosen action
# state, reward, terminal, _ = env.step(action)
state, reward, terminal, _ = env.step(list(action))
env.render()
print(reward)
rewards.append(reward)
if terminal:
break
print("Total Reward:", sum(rewards))
# Plot the TCL SoCs
states = np.array(rewards)
plt.plot(rewards)
plt.title("rewards")
plt.xlabel("Time")
plt.ylabel("rewards")
plt.show()
import requests
import time
import datetime
# Split seconds into days, hours, minutes and seconds.
def split_seconds(seconds):
# Dict for storing days, hours, minutes and seconds.
split_time = {
'days': 0,
'hours': 0,
'minutes': 0,
'seconds': 0
}
# Convert seconds to days, hours, minutes and seconds.
days = seconds // (24 * 3600)
split_time.update({'days': int(days)})
seconds = seconds % (24 * 3600)
hours = seconds // 3600
split_time.update({'hours': int(hours)})
seconds %= 3600
minutes = seconds // 60
split_time.update({'minutes': int(minutes)})
    seconds %= 60
    split_time.update({'seconds': int(seconds)})
# Return dict with values.
return split_time
# Convert split_time dict to a string of the form '<start_string>Xd Xh Xm Xs<end_with>'.
def compile_string(time_dict, start_string, end_with):
if time_dict['days'] != 0:
start_string += str(time_dict['days']) + 'd '
if time_dict['hours'] != 0:
start_string += str(time_dict['hours']) + 'h '
if time_dict['minutes'] != 0:
start_string += str(time_dict['minutes']) + 'm '
if time_dict['seconds'] != 0:
start_string += str(time_dict['seconds']) + 's'
# Add to end of string.
start_string += end_with
# Return compiled string.
return start_string
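# Added illustration: split_seconds(90061) returns
# {'days': 1, 'hours': 1, 'minutes': 1, 'seconds': 1}, so
#   compile_string(split_seconds(90061), 'Bot has been running for ', '.')
# evaluates to 'Bot has been running for 1d 1h 1m 1s.'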
# Get rpi wan ip
def rpi_wanip():
ip = requests.get('https://api.ipify.org').text
return ip
# start_string = '{0.user} has been running for '.format(client)
# function that returns bot uptime.
def bot_uptime(start_time, start_string):
# Calculate time bot has been running.
time_since_start_seconds = time.time() - start_time
# Convert seconds to readable format.
    uptime = compile_string(split_seconds(time_since_start_seconds), start_string, '.')
# Return string.
return uptime
# Return the number of seconds until the next scheduled reboot at reboot_time.
def next_reboot_date(reboot_time):
# get current date.
current_date = datetime.datetime.now()
# next days date.
nextday_date = current_date + datetime.timedelta(days=1)
# Save hour and minute in integer variables for easier use.
# Hour
hour = int(time.strftime('%H', current_date.timetuple()))
# Minute
minute = int(time.strftime('%M', current_date.timetuple()))
    # If the current time is still before reboot_time, the reboot happens today.
    if datetime.time(hour, minute, 0) < reboot_time:
        # Save reboot date in var.
        reboot_date = current_date.replace(hour=reboot_time.hour, minute=reboot_time.minute, second=0, microsecond=0)
    else:
        # Otherwise the next reboot happens tomorrow at reboot_time.
        reboot_date = nextday_date.replace(hour=reboot_time.hour, minute=reboot_time.minute, second=0, microsecond=0)
    # Get seconds until reboot.
    until_reboot = reboot_date - current_date
    # Return time until reboot in seconds.
    return int(until_reboot.total_seconds())
# start_string = '{0.user} has been running for '.format(client)
# Check how long it is until the next reboot (at reboot_time).
def bot_reboot(reboot_time, start_string):
    # Get seconds until reboot and convert into a more readable format (string).
    reboot_time = compile_string(split_seconds(next_reboot_date(reboot_time)), start_string, '.')
# Return string.
return reboot_time
from django.core.management.base import BaseCommand, CommandError
from directory.models import CipherSuite, Rfc
from os import linesep
from requests import get
import re
class FailedDownloadException(Exception):
pass
class Command(BaseCommand):
help = 'Scrapes TLS cipher suites from iana.org'
    # Definition of generic filters for TLS cipher suites:
    # rows whose 'name' matches any regex in positive_filters are skipped,
    # and rows whose 'name' does not match every regex in negative_filters
    # are skipped as well; everything else is added to the database.
    # format: (fieldname, regex)
def __init__(self):
self.positive_filters = [
('name', 'Unassigned'),
('name', 'Reserved'),
('name', 'EMPTY'),
('name', 'FALLBACK'),
]
self.negative_filters = [
('name', 'TLS'),
]
# inherit everything else from BaseCommand
super().__init__()
def get_csv(self, url='https://www.iana.org/assignments/tls-parameters/tls-parameters-4.csv'):
"""Tries to download the content at the specified URL,
returning the response in plain text format. If status code
equals anything else than 200, FailedDownloadException is thrown"""
response = get(url)
if response.status_code == 200:
return response.text
else:
raise FailedDownloadException()
def split_line(self, line):
result = dict()
info = line.split(',')
result['hex1'] = re.search(r'0x[0-9A-F]{2}', info[0]).group(0)
result['hex2'] = re.search(r'0x[0-9A-F]{2}', info[1]).group(0)
result['name'] = info[2]
# info[3] = DTLS-OK
# info[4] = Recommended
result['rfcs'] = re.search(r'\[(RFC\d+)\]', info[5]).groups()
return result
def handle(self, *args, **options):
"""Main function to be run when command is executed."""
verbosity = int(options['verbosity'])
# try downloading csv file
try:
csv_file = self.get_csv()
except Exception:
raise CommandError("Failed to download resource from the given URL.")
# counter for successfully inserted or found ciphers
cs_new = cs_old = rfc_new = 0
for line in csv_file.split(linesep):
# try splitting line its separate components or skip it
try:
d = self.split_line(line)
except Exception:
if verbosity > 1:
self.stdout.write(
self.style.NOTICE("Failed to split line. Skipping.")
)
continue
# skip the cipher suite unless every negative filter matches the line
if not all(re.search(f[1], d[f[0]], re.IGNORECASE) for f in self.negative_filters):
if verbosity > 1:
self.stdout.write(
self.style.NOTICE("Failed to parse line. Skipping.")
)
continue
# skip the cipher suite if any positive filter matches the line
if any(re.search(f[1], d[f[0]]) for f in self.positive_filters):
if verbosity > 1:
self.stdout.write(
self.style.NOTICE("Failed to parse line. Skipping.")
)
continue
# create model instances in DB
c, cstat = CipherSuite.objects.get_or_create(
name = d['name'],
hex_byte_1 = d['hex1'],
hex_byte_2 = d['hex2'],
)
for rfc in d['rfcs']:
regular_rfc = re.match(r'RFC(\d+)', rfc)
draft_rfc = re.match(r'RFC-ietf-tls-rfc(\d+).+', rfc)
if regular_rfc is not None:
rfc_nr = regular_rfc.group(1)
draft_status = False
elif draft_rfc is not None:
rfc_nr = draft_rfc.group(1)
draft_status = True
else:
# skip references that match neither pattern, so rfc_nr is never undefined
continue
r, rstat = Rfc.objects.get_or_create(
number = rfc_nr,
is_draft = draft_status
)
c.defining_rfcs.add(r)
if rstat:
rfc_new += 1
if verbosity > 2:
self.stdout.write(
self.style.SUCCESS(
f"Successfully created RFC '{r.number}'."
)
)
if cstat:
cs_new += 1
if verbosity > 2:
self.stdout.write(
self.style.SUCCESS(
f"Successfully created Ciphersuite '{c.name}'."
)
)
else:
cs_old += 1
self.stdout.write(
self.style.SUCCESS(
f"Successfully created {cs_new} ({cs_old}) cipher suites and {rfc_new} RFCs."
)
)
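# --- Hedged usage sketch (not part of the original command) ---
# Illustration of what split_line() extracts from one row of the IANA CSV.
# The quoted "0xXX,0xYY" value column splits into two fields on the comma,
# which is why hex1/hex2 come from info[0] and info[1]. The row below mimics
# the file format and is not data fetched from iana.org:
#
#   Command().split_line('"0x00,0x2F",TLS_RSA_WITH_AES_128_CBC_SHA,Y,N,[RFC5246]')
#   # -> {'hex1': '0x00', 'hex2': '0x2F', 'name': 'TLS_RSA_WITH_AES_128_CBC_SHA', 'rfcs': ('RFC5246',)}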
|
<filename>python/jupyter_notebooks/pwcal_hsa.py
import numpy as np
import pandas as pd
import pyarrow
from multiprocessing import Pool
from collections import Counter
from itertools import repeat
import scipy.stats as st
def __get_ptws(ids, pathways_db, go_terms_db):
'''Get lists of pathways/GO terms associated with the given gene IDs'''
pathways = list(pathways_db[pathways_db['gene_id'].isin(ids)]['pathway'])
go_terms = list(go_terms_db[go_terms_db['gene_id'].isin(ids)]['GO_term'])
return pathways, go_terms
def __pcal(n, tf_id, tf_name, cases_db, pathways_db, go_terms_db, dist, exclude_deg_tf):
''' Find the overlap between the KEGG pathways/GO terms related to the differentially
expressed genes in each case and those related to each TF in the library, then calculate
the corresponding scores based on this overlap and the relative pathway/term importance'''
full_data_df = pd.DataFrame(columns=['case_id', 'case', 'TF', 'TF_ids', 'p_share',
'all_p', 'p_score', 'go_share', 'all_go',
'go_score'])
indx = 0
print(f'Processing TF {n+1}: {tf_name} ID: {tf_id}')
# Get unique pathways and terms related to the TF and their counts
tf_ptws, tf_terms = __get_ptws([tf_id], pathways_db, go_terms_db)
tf_pw_count = Counter(tf_ptws)
tf_term_count = Counter(tf_terms)
all_p = len(tf_pw_count)
all_go = len(tf_term_count)
total_ptws_count = Counter(pathways_db['pathway'].values)
total_term_count = Counter(go_terms_db['GO_term'].values)
total_pw_df = pd.DataFrame.from_dict(total_ptws_count, orient='index',columns=['count'])
total_go_df = pd.DataFrame.from_dict(total_term_count, orient='index',columns=['count'])
max_pw_count = total_pw_df['count'].max()
max_go_count = total_go_df['count'].max()
for c in cases_db['case'].index:
# Get unique pathways and terms related to differentially expressed genes in each
# case and their counts. If TF from the library is among the genes, its pathways
# and terms can be excluded to avoid data leakage (for the testing purposes).
if exclude_deg_tf:
case_ids = [g for g in cases_db.at[c,'degs_entrez'] if g != tf_id]
else:
case_ids = cases_db.at[c,'degs_entrez']
case_ptws, case_terms = __get_ptws(case_ids, pathways_db, go_terms_db)
if (len(case_ptws)==0 or len(case_terms)==0): continue
case_ptw_count = Counter(case_ptws)
case_term_count = Counter(case_terms)
# Calculate relative pathway importance for this case
# The score is calculated based on beta distribution
pw_df = pd.DataFrame.from_dict(case_ptw_count, orient='index',columns=['count'])
pw_df['importance'] = pw_df['count'].apply(lambda x: x/max_pw_count)
pw_df['importance'] = dist.pdf(pw_df['importance'])
# Calculate relative GO term importance for this case
go_df = pd.DataFrame.from_dict(case_term_count, orient='index',columns=['count'])
go_df['importance'] = go_df['count'].apply(lambda x: x/max_go_count)
go_df['importance'] = dist.pdf(go_df['importance'])
# Calculate relative pathway/term overlap and scores based on 'importance'
common_p = 0
common_go = 0
p_score = 0.0
go_score = 0.0
coef = 100
if all_p > 0:
for i in tf_pw_count:
if i in pw_df.index:
common_p += 1
p_score += coef * pw_df.at[i, 'importance'] * pw_df.at[i, 'count'] / len(case_ids)
if all_go > 0:
for t in tf_term_count:
if t in go_df.index:
common_go +=1
go_score += coef * go_df.at[t, 'importance'] * go_df.at[t, 'count'] / len(case_ids)
p_share = common_p/all_p if all_p > 0 else 0.0
go_share = common_go/all_go if all_go > 0 else 0.0
# Add to DataFrame
full_data_df.loc[indx] = [c, cases_db.at[c,'case'], tf_name, tf_id, p_share, all_p,
p_score, go_share, all_go, go_score]
indx +=1
return full_data_df
def pwcal(cases_db, exclude_deg_tf=False, n_jobs=2):
'''
Calculate pathway and GO term similarity between TFs and DEGs
Parameters:
----------
cases_db : DataFrame object
Preprocessed table containing lists of differentially expressed genes
exclude_deg_tf : bool, default False
If True, KEGG pathways and GO terms related to TFs that appear in the list of
differentially expressed genes (DEGs) are not included.
n_jobs : int, default 2
Number of parallel processes
Returns : DataFrame
Returns DataFrame with corresponding pathway/term scores
'''
# Table of TF to TF ID associations
tf_names_db = pd.read_parquet('data/tf_names_hsa.parquet')
# Table of gene ID to pathway associations
pathways_db = pd.read_parquet('data/pathways_hsa_full_hg38.parquet')
pathways_db.drop_duplicates(['gene_id','pathway'],keep='first', inplace=True)
# Table of gene ID to GO term associations (from the 'processes' category)
go_terms_db = pd.read_parquet('data/go_terms_hsa_processes.parquet')
go_terms_db.drop_duplicates(['gene_id','GO_term'],keep='first', inplace=True)
# Arbitrary distribution for calculating 'pathway importance'
dist = st.beta(2,2)
# Create a process pool
pool = Pool(processes=n_jobs)
print(" {} processes started...".format(n_jobs))
results = pool.starmap(__pcal, zip(
range(len(tf_names_db.index)),
list(tf_names_db.index),
list(tf_names_db['gene'].values),
repeat(cases_db),
repeat(pathways_db),
repeat(go_terms_db),
repeat(dist),
repeat(exclude_deg_tf)
))
pool.close()
pool.join()
results = pd.concat(results)
results.reset_index(drop=True, inplace=True)
print('Finished')
return results
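# --- Hedged usage sketch (not part of the original notebook code) ---
# pwcal() expects a preprocessed cases table with a 'case' column and a
# 'degs_entrez' column holding lists of Entrez gene IDs; the pathway/GO parquet
# files are read from the data/ directory referenced above. The input file name
# below is a hypothetical placeholder:
#
#   cases_db = pd.read_parquet('data/cases_hsa.parquet')
#   scores = pwcal(cases_db, exclude_deg_tf=False, n_jobs=4)
#   scores.sort_values('p_score', ascending=False).head()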
|
#/*
#Python and Tkinter Programming
#<NAME>
#ISBN: 1884777813
#Publisher: Manning
#*/
from tkinter import *
SQUARE = 1
ROUND = 2
ARROW = 3
POINT_DOWN = 0
POINT_UP = 1
POINT_RIGHT = 2
POINT_LEFT = 3
STATUS_OFF = 1
STATUS_ON = 2
STATUS_WARN = 3
STATUS_ALARM = 4
STATUS_SET = 5
class DummyClass:
pass
Color = DummyClass()
Color.PANEL = '#545454'
Color.OFF = '#656565'
Color.ON = '#00FF33'
Color.WARN = '#ffcc00'
Color.ALARM = '#ff4422'
class LED:
def __init__(self, master=None, width=25, height=25,
appearance=FLAT,
status=STATUS_ON, bd=1,
bg=None,
shape=SQUARE, outline="",
blink=0, blinkrate=1,
orient=POINT_UP,
takefocus=0):
# preserve attributes
self.master = master
self.shape = shape
self.onColor = Color.ON
self.offColor = Color.OFF
self.alarmColor = Color.ALARM
self.warningColor = Color.WARN
self.specialColor = '#00ffdd'
self.status = status
self.blink = blink
self.blinkrate = int(blinkrate)
self.on = 0
self.onState = None
if not bg:
bg = Color.PANEL
## Base frame to contain light
self.frame=Frame(master, relief=appearance, bg=bg, bd=bd,
takefocus=takefocus)
basesize = width
d = center = int(basesize/2)
if self.shape == SQUARE:
self.canvas=Canvas(self.frame, height=height, width=width,
bg=bg, bd=0, highlightthickness=0)
self.light=self.canvas.create_rectangle(0, 0, width, height,
fill=Color.ON)
elif self.shape == ROUND:
r = int((basesize-2)/2)
self.canvas=Canvas(self.frame, width=width, height=width,
highlightthickness=0, bg=bg, bd=0)
if bd > 0:
self.border=self.canvas.create_oval(center-r, center-r,
center+r, center+r)
r = r - bd
self.light=self.canvas.create_oval(center-r-1, center-r-1,
center+r, center+r, fill=Color.ON,
outline=outline)
else: # Default is an ARROW
self.canvas=Canvas(self.frame, width=width, height=width,
highlightthickness=0, bg=bg, bd=0)
x = d
y = d
if orient == POINT_DOWN:
self.light=self.canvas.create_polygon(x-d,y-d, x,y+d,
x+d,y-d, x-d,y-d, outline=outline)
elif orient == POINT_UP:
self.light=self.canvas.create_polygon(x,y-d, x-d,y+d,
x+d,y+d, x,y-d, outline=outline)
elif orient == POINT_RIGHT:
self.light=self.canvas.create_polygon(x-d,y-d, x+d,y,
x-d,y+d, x-d,y-d, outline=outline)
elif orient == POINT_LEFT:
self.light=self.canvas.create_polygon(x-d,y, x+d,y+d,
x+d,y-d, x-d,y, outline=outline)
self.canvas.pack(side=TOP, fill=X, expand=NO)
self.update()
def turnon(self):
self.status = STATUS_ON
if not self.blink: self.update()
def turnoff(self):
self.status = STATUS_OFF
if not self.blink: self.update()
def alarm(self):
self.status = STATUS_ALARM
if not self.blink: self.update()
def warn(self):
self.status = STATUS_WARN
if not self.blink: self.update()
def set(self, color):
self.status = STATUS_SET
self.specialColor = color
self.update()
def blinkon(self):
if not self.blink:
self.blink = 1
self.onState = self.status
self.update()
def blinkoff(self):
if self.blink:
self.blink = 0
self.status = self.onState
self.onState = None
self.on = 0
self.update()
def blinkstate(self, blinkstate):
if blinkstate:
self.blinkon()
else:
self.blinkoff()
def update(self):
# First do the blink, if set to blink
if self.blink:
if self.on:
if not self.onState:
self.onState = self.status
self.status = STATUS_OFF
self.on = 0
else:
if self.onState:
self.status = self.onState # Current ON color
self.on = 1
if self.status == STATUS_ON:
self.canvas.itemconfig(self.light, fill=self.onColor)
elif self.status == STATUS_OFF:
self.canvas.itemconfig(self.light, fill=self.offColor)
elif self.status == STATUS_WARN:
self.canvas.itemconfig(self.light, fill=self.warningColor)
elif self.status == STATUS_SET:
self.canvas.itemconfig(self.light, fill=self.specialColor)
else:
self.canvas.itemconfig(self.light, fill=self.alarmColor)
self.canvas.update_idletasks()
if self.blink:
self.frame.after(self.blinkrate * 1000, self.update)
if __name__ == '__main__':
class TestLEDs(Frame):
def __init__(self, parent=None):
# List of Colors and Blink On/Off
states = [(STATUS_OFF, 0),
(STATUS_ON, 0),
(STATUS_WARN, 0),
(STATUS_ALARM, 0),
(STATUS_SET, 0),
(STATUS_ON, 1),
(STATUS_WARN, 1),
(STATUS_ALARM, 1),
(STATUS_SET, 1)]
# List of LED types to display,
# with sizes and other attributes
leds = [(ROUND, 25, 25, FLAT, 0, None, ""),
(ROUND, 15, 15, RAISED, 1, None, ""),
(SQUARE, 20, 20, SUNKEN, 1, None, ""),
(SQUARE, 8, 8, FLAT, 0, None, ""),
(SQUARE, 8, 8, RAISED, 1, None, ""),
(SQUARE, 16, 8, FLAT, 1, None, ""),
(ARROW, 14, 14, RIDGE, 1, POINT_UP, ""),
(ARROW, 14, 14, RIDGE, 0, POINT_RIGHT, ""),
(ARROW, 14, 14, FLAT, 0, POINT_DOWN, "white")]
Frame.__init__(self) # Do superclass init
self.pack()
self.master.title('LED Example - Stage 1')
# Iterate for each type of led
for shape, w, h, app, bd, orient, outline in leds:
frame = Frame(self, bg=Color.PANEL)
frame.pack(anchor=N, expand=YES, fill=X)
# Iterate for selected states
for state, blink in states:
LED(frame, shape=shape, status=state,
width=w, height=h, appearance=app,
orient=orient, blink=blink, bd=bd,
outline=outline).frame.pack(side=LEFT,
expand=YES, padx=1, pady=1)
TestLEDs().mainloop()
|
import json
import uuid
from django.urls import reverse
from django.test import TestCase, override_settings
from django.utils.crypto import get_random_string
from zentral.contrib.inventory.models import EnrollmentSecret, MachineSnapshot, MetaBusinessUnit, Tag, MachineTag
from zentral.contrib.munki.models import Configuration, EnrolledMachine, Enrollment
@override_settings(STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage')
class MunkiAPIViewsTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.meta_business_unit = MetaBusinessUnit.objects.create(name=get_random_string(64))
cls.business_unit = cls.meta_business_unit.create_enrollment_business_unit()
cls.configuration = Configuration.objects.create(name=get_random_string())
cls.enrollment_secret = EnrollmentSecret.objects.create(meta_business_unit=cls.meta_business_unit)
cls.enrollment = Enrollment.objects.create(configuration=cls.configuration, secret=cls.enrollment_secret)
# utility methods
def _make_enrolled_machine(self, tag_name=None):
em = EnrolledMachine.objects.create(enrollment=self.enrollment,
serial_number=get_random_string(32),
token=get_random_string(64))
if tag_name:
tag = Tag.objects.create(name=tag_name)
MachineTag.objects.create(serial_number=em.serial_number, tag=tag)
return em
def _post_as_json(self, url, data, **extra):
return self.client.post(url,
json.dumps(data),
content_type="application/json",
**extra)
# enroll
def test_enroll_bad_request_empty(self):
response = self._post_as_json(reverse("munki:enroll"), {})
self.assertEqual(response.status_code, 400)
def test_enroll_bad_request_bad_secret(self):
serial_number = get_random_string(32)
response = self._post_as_json(reverse("munki:enroll"),
{"secret": "yolo",
"uuid": str(uuid.uuid4()),
"serial_number": serial_number})
self.assertEqual(response.status_code, 400)
def test_enroll_ok(self):
serial_number = get_random_string(32)
response = self._post_as_json(reverse("munki:enroll"),
{"secret": self.enrollment.secret.secret,
"uuid": str(uuid.uuid4()),
"serial_number": serial_number})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], "application/json")
json_response = response.json()
self.assertCountEqual(["token"], json_response.keys())
token = json_response["token"]
enrolled_machine = EnrolledMachine.objects.get(enrollment=self.enrollment, serial_number=serial_number)
self.assertEqual(token, enrolled_machine.token)
# job details
def test_job_details_missing_auth_header_err(self):
response = self._post_as_json(reverse("munki:job_details"), {})
self.assertContains(response, "Missing or empty Authorization header", status_code=403)
def test_job_details_wrong_auth_token_err(self):
response = self._post_as_json(reverse("munki:job_details"), {},
HTTP_AUTHORIZATION=get_random_string(23))
self.assertContains(response, "Wrong authorization token", status_code=403)
def test_job_details_enrolled_machine_does_not_exist_err(self):
response = self._post_as_json(reverse("munki:job_details"), {},
HTTP_AUTHORIZATION="MunkiEnrolledMachine {}".format(get_random_string(34)))
self.assertContains(response, "Enrolled machine does not exist", status_code=403)
def test_job_details_missing_serial_number_err(self):
enrolled_machine = self._make_enrolled_machine()
response = self._post_as_json(reverse("munki:job_details"), {},
HTTP_AUTHORIZATION="MunkiEnrolledMachine {}".format(enrolled_machine.token))
self.assertContains(response,
f"No reported machine serial number. Request SN {enrolled_machine.serial_number}.",
status_code=403)
def test_job_details_machine_conflict_err(self):
enrolled_machine = self._make_enrolled_machine()
data_sn = get_random_string(9)
response = self._post_as_json(reverse("munki:job_details"),
{"machine_serial_number": data_sn},
HTTP_AUTHORIZATION="MunkiEnrolledMachine {}".format(enrolled_machine.token))
self.assertContains(response,
(f"Zentral postflight reported SN {data_sn} "
f"different from enrollment SN {enrolled_machine.serial_number}"),
status_code=403)
def test_job_details(self):
enrolled_machine = self._make_enrolled_machine()
response = self._post_as_json(reverse("munki:job_details"),
{"machine_serial_number": enrolled_machine.serial_number},
HTTP_AUTHORIZATION="MunkiEnrolledMachine {}".format(enrolled_machine.token))
self.assertEqual(response.status_code, 200)
expected_response = {
"apps_full_info_shard": self.configuration.inventory_apps_full_info_shard,
"tags": []
}
self.assertEqual(expected_response, response.json())
def test_job_details_conflict(self):
enrolled_machine = self._make_enrolled_machine()
response = self._post_as_json(reverse("munki:job_details"),
{"machine_serial_number": get_random_string(3)},
HTTP_AUTHORIZATION="MunkiEnrolledMachine {}".format(enrolled_machine.token))
self.assertContains(response, "different from enrollment SN", status_code=403)
# post job
def test_post_job(self):
tag_name = get_random_string()
enrolled_machine = self._make_enrolled_machine(tag_name=tag_name)
computer_name = get_random_string(45)
report_sha1sum = 40 * "0"
response = self._post_as_json(reverse("munki:post_job"),
{"machine_snapshot": {"serial_number": enrolled_machine.serial_number,
"system_info": {"computer_name": computer_name}},
"reports": [{"start_time": "2018-01-01 00:00:00 +0000",
"end_time": "2018-01-01 00:01:00 +0000",
"basename": "report2018",
"run_type": "auto",
"sha1sum": report_sha1sum,
"events": []}]},
HTTP_AUTHORIZATION="MunkiEnrolledMachine {}".format(enrolled_machine.token))
self.assertEqual(response.status_code, 200)
response = self._post_as_json(reverse("munki:job_details"),
{"machine_serial_number": enrolled_machine.serial_number},
HTTP_AUTHORIZATION="MunkiEnrolledMachine {}".format(enrolled_machine.token))
self.assertEqual(response.status_code, 200)
response_json = response.json()
expected_response = {
"apps_full_info_shard": self.configuration.inventory_apps_full_info_shard,
"tags": [tag_name]
}
expected_response["last_seen_sha1sum"] = report_sha1sum
self.assertEqual(expected_response, response_json)
ms = MachineSnapshot.objects.current().get(serial_number=enrolled_machine.serial_number)
ms2 = MachineSnapshot.objects.current().get(reference=enrolled_machine.serial_number)
self.assertEqual(ms, ms2)
self.assertEqual(ms.system_info.computer_name, computer_name)
|
<filename>webca/webca/crypto/test_extensions.py
"""
Test the extensions functions.
"""
from cryptography import x509
from django.test import TestCase
from OpenSSL import crypto
from . import certs, extensions
class KeyUsage(TestCase):
"""Test the extensions functions."""
def test_empty(self):
"""KeyUsage"""
ext = extensions.KeyUsage()
self.assertEqual(ext.value(), '')
def test_one(self):
"""KeyUsage"""
ext = extensions.KeyUsage().digitalSignature()
self.assertEqual(ext.value(), 'digitalSignature')
self.assertEqual(ext.values(), ['digitalSignature'])
def test_two(self):
"""KeyUsage"""
ext = extensions.KeyUsage().digitalSignature().nonRepudiation()
self.assertEqual(ext.value(), 'digitalSignature,nonRepudiation')
self.assertEqual(ext.values(), ['digitalSignature', 'nonRepudiation'])
def test_list(self):
"""KeyUsage"""
ext = extensions.KeyUsage().from_list('digitalSignature')
self.assertEqual(ext.value(), 'digitalSignature')
self.assertEqual(ext.values(), ['digitalSignature'])
def test_list_two(self):
"""KeyUsage"""
ext = extensions.KeyUsage().from_list('digitalSignature,nonRepudiation')
self.assertEqual(ext.value(), 'digitalSignature,nonRepudiation')
self.assertEqual(ext.values(), ['digitalSignature', 'nonRepudiation'])
def test_from_x509(self):
"""KeyUsage.from_extension"""
x509ext = x509.KeyUsage(
digital_signature=True,
content_commitment=True,
key_encipherment=True,
data_encipherment=True,
key_agreement=True,
encipher_only=True,
decipher_only=True,
key_cert_sign=True,
crl_sign=True,
)
ext = extensions.KeyUsage.from_extension(x509ext)
self.assertTrue('digitalSignature' in ext.value())
self.assertEqual(len(ext.values()), 9)
def test_from_x509_invalid(self):
"""KeyUsage.from_extension"""
x509ext = x509.CRLNumber(1)
self.assertRaises(ValueError, extensions.KeyUsage.from_extension, x509ext)
class ExtendedKeyUsage(TestCase):
"""Extended key usage"""
def test_from_x509(self):
"""ExtendedKeyUsage.from_extension"""
x509ext = x509.ExtendedKeyUsage([
x509.oid.ExtendedKeyUsageOID.OCSP_SIGNING,
])
ext = extensions.ExtendedKeyUsage.from_extension(x509ext)
self.assertTrue('OCSPSigning' in ext.value())
def test_from_x509_invalid(self):
"""ExtendedKeyUsage.from_extension"""
x509ext = x509.CRLNumber(1)
self.assertRaises(ValueError, extensions.ExtendedKeyUsage.from_extension, x509ext)
class Utils(TestCase):
"""General utils"""
def test_get_certificate_extension(self):
"""get_certificate_extension"""
_, cert = certs.create_ca_certificate([('CN', 'test')])
ext = extensions.get_certificate_extension(cert, b'basicConstraints')
self.assertIsNotNone(ext)
def test_get_certificate_extension_empty(self):
"""get_certificate_extension"""
_, cert = certs.create_ca_certificate([('CN', 'test')])
ext = extensions.get_certificate_extension(cert, b'extendedKeyUsage')
self.assertIsNone(ext)
def test_get_extension(self):
"""get_certificate_extension"""
_, cert = certs.create_ca_certificate([('CN', 'test')])
ext = extensions.get_extension(cert, 'basicConstraints')
self.assertIsNotNone(ext)
def test_get_extension_empty(self):
"""get_certificate_extension"""
_, cert = certs.create_ca_certificate([('CN', 'test')])
ext = extensions.get_extension(cert, b'extendedKeyUsage')
self.assertIsNone(ext)
def test_json(self):
"""json_to_extension"""
json = '{"name":"basicConstraints", "critical":true,"value":"CA:TRUE"}'
self.assertIsInstance(extensions.json_to_extension(json), crypto.X509Extension)
def test_san(self):
"""build_san"""
names = 'DNS:test'
san = extensions.build_san(names, False)
self.assertIsInstance(san, crypto.X509Extension)
self.assertEqual(san.get_short_name(), b'subjectAltName')
def test_san_critical(self):
"""build_san"""
names = 'DNS:test'
san = extensions.build_san(names, False)
self.assertFalse(san.get_critical())
san = extensions.build_san(names, True)
self.assertTrue(san.get_critical())
def test_cdp(self):
"""build_cdp"""
names = 'URI:test'
cdp = extensions.build_cdp(names, False)
self.assertIsInstance(cdp, crypto.X509Extension)
self.assertEqual(cdp.get_short_name(), b'crlDistributionPoints')
def test_cdp_critical(self):
"""build_cdp"""
names = 'URI:test'
cdp = extensions.build_cdp(names, False)
self.assertFalse(cdp.get_critical())
cdp = extensions.build_cdp(names, True)
self.assertTrue(cdp.get_critical())
|
#!/usr/bin/env python
import sys
import numpy as np
import time
from optparse import OptionParser
import logging
def normalize(A):
column_sums = A.sum(axis=0)
new_matrix = A / column_sums[np.newaxis, :]
return new_matrix
def inflate(A, inflate_factor):
return normalize(np.power(A, inflate_factor))
def expand(A, expand_factor):
return np.linalg.matrix_power(A, expand_factor)
def add_diag(A, mult_factor):
return A + mult_factor * np.identity(A.shape[0])
def get_clusters(A):
clusters = []
for i, r in enumerate((A>0).tolist()):
if r[i]:
clusters.append(A[i,:]>0)
clust_map ={}
for cn , c in enumerate(clusters):
for x in [ i for i, x in enumerate(c) if x ]:
clust_map[cn] = clust_map.get(cn, []) + [x]
return clust_map
def draw(G, A, cluster_map):
import networkx as nx
import matplotlib.pyplot as plt
clust_map = {}
for k, vals in cluster_map.items():
for v in vals:
clust_map[v] = k
colors = []
for i in range(len(G.nodes())):
colors.append(clust_map.get(i, 100))
pos = nx.spring_layout(G)
from matplotlib.pylab import matshow, show, cm
plt.figure(2)
nx.draw_networkx_nodes(G, pos,node_size = 200, node_color =colors , cmap=plt.cm.Blues )
nx.draw_networkx_edges(G,pos, alpha=0.5)
matshow(A, fignum=1, cmap=cm.gray)
plt.show()
def stop(M, i):
if i%5==4:
m = np.max( M**2 - M) - np.min( M**2 - M)
if m==0:
logging.info("Stop at iteration %s" % i)
return True
return False
def mcl(M, expand_factor = 2, inflate_factor = 2, max_loop = 10 , mult_factor = 1):
M = add_diag(M, mult_factor)
M = normalize(M)
for i in range(max_loop):
logging.info("loop %s" % i)
M = inflate(M, inflate_factor)
M = expand(M, expand_factor)
if stop(M, i): break
clusters = get_clusters(M)
return M, clusters
def networkx_mcl(G, expand_factor = 2, inflate_factor = 2, max_loop = 10 , mult_factor = 1):
import networkx as nx
A = nx.adjacency_matrix(G)
return mcl(np.array(A.todense()), expand_factor, inflate_factor, max_loop, mult_factor)
def print_info(options):
print("-" * 60)
print("MARKOV CLUSTERING:")
print("-" * 60)
print(" expand_factor: %s" % options.expand_factor)
print(" inflate_factor: %s" % options.inflate_factor)
print(" mult factor: %s" % options.mult_factor)
print(" max loops: %s\n" % options.max_loop)
def get_options():
usage = "usage: %prog [options] <input_matrix>"
parser = OptionParser(usage)
parser.add_option("-e", "--expand_factor",
dest="expand_factor",
default=2,
type=int,
help="expand factor (default: %default)")
parser.add_option("-i", "--inflate_factor",
dest="inflate_factor",
default=2,
type=float,
help="inflate factor (default: %default)")
parser.add_option("-m", "--mult_factor",
dest="mult_factor",
default=2,
type=float,
help="multiply factor (default: %default)")
parser.add_option("-l", "--max_loops",
dest="max_loop",
default=60,
type=int,
help="max loops (default: %default)")
parser.add_option("-o", "--output", metavar="FILE",
help="output (default: stdout)")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose", default=True,
help="verbose (default: %default)")
parser.add_option("-d", "--draw-graph",
action="store_true", dest="draw", default=False,
help="show graph with networkx (default: %default)")
(options, args) = parser.parse_args()
try:
filename = args[0]
except IndexError:
raise Exception('missing input filename')
return options, filename
def get_graph(csv_filename):
import networkx as nx
M = []
for r in open(csv_filename):
r = r.strip().split(",")
M.append(list(map(lambda x: float(x.strip()), r)))
G = nx.from_numpy_matrix(np.matrix(M))
return np.array(M), G
def clusters_to_output(clusters, options):
if options.output and len(options.output)>0:
f = open(options.output, 'w')
for k, v in clusters.items():
f.write("%s|%s\n" % (k, ", ".join(map(str, v)) ))
f.close()
else:
print("Clusters:")
for k, v in clusters.items():
print('{}, {}'.format(k, v))
if __name__ == '__main__':
options, filename = get_options()
print_info(options)
M, G = get_graph(filename)
print(" number of nodes: %s\n" % M.shape[0])
print("{}: {}".format(time.time(), "evaluating clusters..."))
M, clusters = networkx_mcl(G, expand_factor = options.expand_factor,
inflate_factor = options.inflate_factor,
max_loop = options.max_loop,
mult_factor = options.mult_factor)
print("{}: {}".format(time.time(), "done\n"))
clusters_to_output(clusters, options)
if options.draw:
print("{}: {}".format(time.time(), "drawing..."))
draw(G, M, clusters)
print("{}: {}".format(time.time(), "done"))
|
"""
code adapted from:
https://github.com/upura/featureTweakPy
"""
import copy
from typing import Dict, List, Union
import numpy as np
import pandas as pd
import sklearn
import xgboost
import xgboost.core
from carla.recourse_methods.api import RecourseMethod
from carla.recourse_methods.catalog.focus.parse_xgboost import parse_booster
from carla.recourse_methods.catalog.focus.tree_model import ForestModel, XGBoostModel
from carla.recourse_methods.processing import check_counterfactuals
def _L1_cost_func(a, b):
""" The 1-norm ||a-b||_1 """
return np.linalg.norm(a - b, ord=1)
def _L2_cost_func(a, b):
""" The 2-norm ||a-b||_2 """
return np.linalg.norm(a - b, ord=2)
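# --- Hedged worked example (not part of the original module) ---
# For a = np.array([1, 2]) and b = np.array([3, 5]):
#   _L1_cost_func(a, b) = |1 - 3| + |2 - 5| = 5.0
#   _L2_cost_func(a, b) = sqrt(2**2 + 3**2) ~= 3.61
# FeatureTweak below uses one of these to measure how far a tweaked instance
# has moved away from the original factual example.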
def search_path(tree, class_labels):
"""
return path index list containing [{leaf node id, inequality symbol, threshold, feature index}].
Parameters
----------
tree: sklearn.tree.DecisionTreeClassifier or xgboost.core.Booster
The classification tree.
class_labels:
All the possible class labels.
Returns
-------
path_info
"""
def parse_tree(tree):
"""
Parameters
----------
tree: sklearn.tree.DecisionTreeClassifier or xgboost.core.Booster
The classification tree we want to parse.
Returns
-------
children_left: array of int
children_left[i] holds the node id of the left child of node i.
For leaves, children_left[i] == TREE_LEAF.
children_right: array of int
children_right[i] holds the node id of the right child of node i.
For leaves, children_right[i] == TREE_LEAF.
threshold: array of double
threshold[i] holds the threshold for the internal node i.
feature: array of int
feature[i] holds the feature to split on, for the internal node i.
leaf_nodes: array of int
leaf nodes with outcome aim label
"""
if isinstance(tree, sklearn.tree.DecisionTreeClassifier):
children_left = tree.tree_.children_left
children_right = tree.tree_.children_right
feature = tree.tree_.feature
threshold = tree.tree_.threshold
values = tree.tree_.value
# leaf nodes ID
leaf_nodes = np.where(children_left == -1)[0]
# outcomes of leaf nodes
leaf_values = values[leaf_nodes].reshape(len(leaf_nodes), len(class_labels))
leaf_classes = np.argmax(leaf_values, axis=-1)
"""
We want to find the leaf_nodes where the class is equal to the counterfactual label 1.
In the original code the line was as follows:
leaf_nodes = np.where(leaf_values[:, cf_label] != 0)[0]
However this seems wrong as we want to index the leaf_nodes with the above expression.
This also caused that sometimes 0 would be in the leaf_nodes, but as 0 is the root node this
should not happen.
"""
# select the leaf nodes whose outcome is aim_label
leaf_nodes = leaf_nodes[np.where(leaf_classes != 0)[0]]
return children_left, children_right, feature, threshold, leaf_nodes
elif isinstance(tree, xgboost.core.Booster):
children_left, children_right, threshold, feature, scores = parse_booster(
tree
)
# leaf nodes ID
leaf_nodes = np.where(children_left == -1)[0]
# TODO threshold of 0.5 because of logistic function, doesn't work for other xgboost objective functions
# outcome of leaf nodes
leaf_classes = scores[leaf_nodes] > 0.5
leaf_nodes = leaf_nodes[np.where(leaf_classes != 0)[0]]
return children_left, children_right, feature, threshold, leaf_nodes
else:
raise ValueError("tree is not of a supported Class")
""" select leaf nodes whose outcome is aim_label """
children_left, children_right, feature, threshold, leaf_nodes = parse_tree(tree)
""" search the path to the selected leaf node """
paths = {}
for leaf_node in leaf_nodes:
""" correspond leaf node to left and right parents """
child_node = leaf_node
parent_node = -100 # initialize
parents_left = []
parents_right = []
while parent_node != 0:
if np.where(children_left == child_node)[0].shape == (0,):
parent_left = -1
parent_right = np.where(children_right == child_node)[0][0]
parent_node = parent_right
elif np.where(children_right == child_node)[0].shape == (0,):
parent_right = -1
parent_left = np.where(children_left == child_node)[0][0]
parent_node = parent_left
parents_left.append(parent_left)
parents_right.append(parent_right)
""" for next step """
child_node = parent_node
# nodes dictionary containing left parents and right parents
paths[leaf_node] = (parents_left, parents_right)
path_info = get_path_info(paths, threshold, feature)
return path_info
def get_path_info(paths, threshold, feature):
"""
Extract the path info from the parameters
Parameters
----------
paths:
Paths trough the tree from root to leaves.
threshold: array of double
threshold[i] holds the threshold for the internal node i.
feature: array of int
feature[i] holds the feature to split on, for the internal node i.
Returns
-------
dictionary where dict[i] contains node_id, inequality_symbol, threshold, and feature
"""
path_info = {}
for i in paths:
node_ids = [] # node ids used in the current node
inequality_symbols = [] # inequality symbols used in the current node
thresholds = [] # thresholds used in the current node
features = [] # features used in the current node
parents_left, parents_right = paths[i]
for idx in range(len(parents_left)):
def do_appends(node_id):
""" helper function to reduce duplicate code"""
node_ids.append(node_id)
thresholds.append(threshold[node_id])
features.append(feature[node_id])
if parents_left[idx] != -1:
""" the child node is the left child of the parent """
node_id = parents_left[idx] # node id
inequality_symbols.append(0)
do_appends(node_id)
elif parents_right[idx] != -1:
""" the child node is the right child of the parent """
node_id = parents_right[idx]
inequality_symbols.append(1)
do_appends(node_id)
path_info[i] = {
"node_id": node_ids,
"inequality_symbol": inequality_symbols,
"threshold": thresholds,
"feature": features,
}
return path_info
class FeatureTweak(RecourseMethod):
"""
Implementation of FeatureTweak [1]_.
Parameters
----------
mlmodel: ForestModel or XGBoostModel
Black-Box-Model
hyperparams : dict
Dictionary containing hyperparameters. See notes below for its contents.
Methods
-------
get_counterfactuals:
Generate counterfactual examples for given factuals.
esatisfactory_instance:
Return the epsilon satisfactory instance of x.
feature_tweaking:
Generate a single counterfactual by FeatureTweaking.
Notes
-----
- Hyperparams
Hyperparameter contains important information for the recourse method to initialize.
Please make sure to pass all values as dict with the following keys.
* "eps": float
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2017, August). Interpretable predictions of tree-based
ensembles via actionable feature tweaking. In Proceedings of the 23rd ACM SIGKDD international conference on
knowledge discovery and data mining (pp. 465-474).
"""
def __init__(
self,
mlmodel: Union[ForestModel, XGBoostModel],
hyperparams: Dict,
cost_func=_L2_cost_func,
):
super().__init__(mlmodel)
self.model = mlmodel
self.data = mlmodel.data
self.eps = hyperparams["eps"]
self.target_col = self.data.target
self.cost_func = cost_func
def esatisfactory_instance(self, x: np.ndarray, path_info):
"""
return the epsilon satisfactory instance of x.
Parameters
----------
x: np.ndarray
A single factual example.
path_info:
One path from the result of search_path(tree, class_labels, cf_label)
Returns
-------
epsilon satisfactory instance
"""
esatisfactory = copy.deepcopy(x)
for i in range(len(path_info["feature"])):
feature_idx = path_info["feature"][i] # feature index
if isinstance(feature_idx, str):
feature_idx = np.where(
np.array(self.model.feature_input_order) == feature_idx
)
threshold_value = path_info["threshold"][i] # threshold in current node
inequality_symbol = path_info["inequality_symbol"][i] # inequality symbol
if inequality_symbol == 0:
esatisfactory[feature_idx] = threshold_value - self.eps
elif inequality_symbol == 1:
esatisfactory[feature_idx] = threshold_value + self.eps
else:
print("something wrong")
return esatisfactory
def feature_tweaking(self, x: np.ndarray, class_labels: List[int], cf_label: int):
"""
Perform feature tweaking on a single factual example.
Parameters
----------
x: np.ndarray
A single factual example.
class_labels: List[int]
List of possible class labels.
cf_label: int
What label the counterfactual should have.
Returns
-------
counterfactual example
"""
def predict(classifier, x):
if isinstance(
classifier,
(sklearn.tree.DecisionTreeClassifier, ForestModel, XGBoostModel),
):
# need to reshape x as it's not a batch
return classifier.predict(x.reshape(1, -1))
elif isinstance(classifier, xgboost.core.Booster):
# TODO is this threshold always correct? E.g. does it depend on num_classes?
threshold = 0.5
# need to convert Numpy array to DMatrix
return (
classifier.predict(
xgboost.DMatrix(
x.reshape(1, -1),
feature_names=self.model.feature_input_order,
)
)
> threshold
)
raise ValueError("tree is not of a supported Class")
x_out = copy.deepcopy(x) # initialize output
delta_mini = 10 ** 3 # initialize cost
for tree in self.model.tree_iterator: # loop over individual trees
estimator_prediction = predict(tree, x)
if (
predict(self.model, x) == estimator_prediction
and estimator_prediction != cf_label
):
paths_info = search_path(tree, class_labels)
for key in paths_info:
""" generate epsilon-satisfactory instance """
path_info = paths_info[key]
es_instance = self.esatisfactory_instance(x, path_info)
if (
predict(tree, es_instance) == cf_label
and self.cost_func(x, es_instance) < delta_mini
):
x_out = es_instance
delta_mini = self.cost_func(x, es_instance)
else:
continue
return x_out
def get_counterfactuals(self, factuals: pd.DataFrame):
# drop targets
instances = factuals.copy()
instances = instances.reset_index(drop=True)
# normalize and one-hot-encoding
instances = self.encode_normalize_order_factuals(instances, with_target=False)
instances = instances[self.data.continous]
# y = factuals[self.target_col]
# y = self.model.predict(instances)
class_labels = [0, 1]
counterfactuals = []
for i, row in instances.iterrows():
cf_label = 1 # flipped target label
counterfactual = self.feature_tweaking(
row.to_numpy(), class_labels, cf_label
)
counterfactuals.append(counterfactual)
counterfactuals_df = check_counterfactuals(self._mlmodel, counterfactuals)
return counterfactuals_df
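# --- Hedged usage sketch (not part of the original module) ---
# Instantiation follows the class docstring: a wrapped ForestModel or
# XGBoostModel plus a hyperparameter dict with an "eps" key; factuals is a
# pandas DataFrame of instances whose prediction should be flipped to label 1.
# The variable names below are placeholders:
#
#   recourse = FeatureTweak(mlmodel, {"eps": 0.1})
#   counterfactuals = recourse.get_counterfactuals(factuals)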
|
from typing import Tuple
import numpy as np
from scipy.fftpack import dct, idct
from PIL import Image
class SpreadSpectrumWatermarking:
"""
<NAME>., <NAME>., <NAME>., & <NAME>. (1997).
Secure spread spectrum watermarking for multimedia. IEEE Transactions on Image
Processing, 6(12), 1673–1687. doi:10.1109/83.650120
"""
def __init__(self, n: int, alpha: float = 0.1):
self.n = n
self.alpha = alpha
def mark_array(self, pixels: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
# 1. Extract a sequence of values V = v1 ... vn from the image. We use the n highest
# components of the luminance DCT
# 2. Generate a watermark vector W of length n from a Gaussian with mean 0 and std 1
# 3. Replace V with V + alpha * W
img = Image.fromarray(pixels, "RGB")
width, height = img.size  # PIL's Image.size is (width, height)
# We use YCbCr as we want to smear the watermark in the parts
# of the image which human perception is very sensitive to,
# in this case we choose the luminance. Also, the algorithm
# works on a 2d single-channel image, therefore we need to choose
# a channel we apply the DCT to when working with color images.
ycbcr = np.asarray(img.convert("YCbCr"))
luminance = ycbcr[:, :, 0]
# We apply the DCT to the luminance channel and find the n largest
# components. These we alter with our watermark
dct = _dct2(luminance)
# Extract n largest components in sorted order
flattened_dct = dct.flatten(order="C")
ind = self._get_topk(flattened_dct)
# The watermark are n values from a Gaussian distribution with
# mean 0 and standard deviation 1
mu, sigma = 0, 1
watermark = np.random.normal(mu, sigma, self.n)
# Apply the watermark
flattened_dct_marked = flattened_dct.copy()
flattened_dct_marked[ind] = flattened_dct[ind] + self.alpha * watermark
# Convert it back
dct_marked = flattened_dct_marked.reshape((height, width))
luminance_marked = _idct2(dct_marked)
ycbcr_marked = ycbcr.copy()
ycbcr_marked[:, :, 0] = luminance_marked
if np.allclose(ycbcr, ycbcr_marked):
raise RuntimeError("Result image identical to original one. Change either alpha value or watermark size.")
img_rgb_marked = Image.fromarray(ycbcr_marked, "YCbCr").convert("RGB")
pixels_rgb_marked = np.asarray(img_rgb_marked)
return pixels_rgb_marked, watermark
def mark_image(self, path: str) -> Tuple[np.ndarray, np.ndarray]:
pixels = np.asarray(Image.open(path))
return self.mark_array(pixels)
def extract_from_array(self, data_original: np.ndarray, data_suspect: np.ndarray) -> np.ndarray:
""" Extract watermark from a 2D array according to spread spectrum watermarking algorithm"""
luminance_original = np.asarray(Image.fromarray(data_original, "RGB").convert("L"))
luminance_suspect = np.asarray(Image.fromarray(data_suspect, "RGB").convert("L"))
dct_original = _dct2(luminance_original)
dct_suspect = _dct2(luminance_suspect)
flattened_dct_original = dct_original.flatten(order="C")
flattened_dct_suspect = dct_suspect.flatten(order="C")
ind = self._get_topk(flattened_dct_original)
watermark_suspect = (flattened_dct_suspect[ind] - flattened_dct_original[ind]) / self.alpha
return watermark_suspect
def extract_from_image(self, path_original: str, path_suspect: str) -> np.ndarray:
""" Extract watermark from an image according to spread spectrum watermarking algorithm"""
img_original = np.asarray(Image.open(path_original))
img_suspect = np.asarray(Image.open(path_suspect))
return self.extract_from_array(img_original, img_suspect)
def similarity(self, original_watermark: np.ndarray, suspicious_watermark: np.ndarray) -> float:
""" Computes the similarity between the original watermark sequence and the watermark
extracted from the suspicious image.
"""
x = original_watermark.dot(suspicious_watermark)
y = np.sqrt(suspicious_watermark.dot(suspicious_watermark))
return x / y
def _get_topk(self, data: np.ndarray) -> np.ndarray:
""" Returns indices to the top n entries in `data` """
ind = np.argpartition(data, -self.n)[-self.n :]
ind = ind[np.argsort(data[ind])][::-1]
return ind
# implement 2D DCT
def _dct2(a):
return dct(dct(a.T, norm="ortho").T, norm="ortho")
# implement 2D IDCT
def _idct2(a):
return idct(idct(a.T, norm="ortho").T, norm="ortho")
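# --- Hedged usage sketch (not part of the original module) ---
# Typical round trip, assuming 'original.png' and a derived 'suspect.png' are
# RGB images of the same size (the file names are placeholders):
#
#   ssw = SpreadSpectrumWatermarking(n=1000, alpha=0.1)
#   marked_pixels, watermark = ssw.mark_image('original.png')
#   Image.fromarray(marked_pixels).save('suspect.png')
#   extracted = ssw.extract_from_image('original.png', 'suspect.png')
#   ssw.similarity(watermark, extracted)   # a large value suggests the watermark is present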
|
import argparse
import sys
import glob
import math
import numpy as np
import os
import shutil
import subprocess
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import re
import time
import timeit
from datetime import datetime
import logging
#Set up command line parser
parser = argparse.ArgumentParser(prog='lstools_test', description='Test script for LSOracle')
parser.add_argument('--log_to_file', action='store_true', help='print log information to specified filename in LSOracle directory')
parser.add_argument('--verbose', '-v', action='count',
help='output detail. Default (unset) = warning; -v = info, -vv = debug')
parser.add_argument('--test_directory', '-t',
default='./tests/end_to_end',
help='Specify path to end-to-end test files. input files should be .aig format')
parser.add_argument('--unit_test_directory', '-u',
default='./tests/unit',
help='Specify path to unit test files. input files should be .aig format')
parser.add_argument('--training_model', '-m',
default='./core/algorithms/classification/deep_learn_model.json',
help='if you have a custom tensorflow model for the classifier, specify it here.')
parser.add_argument('--cicd', action='store_true',
help='sets paths, envs, etc, to appropriate values for ci')
parser.add_argument('--kahypar', default='./core/test.ini',
help='kahypar config file.')
parser.add_argument('--lsoracle_exe', default='./build/core/lsoracle',
help='LSOracle executable.')
parser.add_argument('--abc_exe', default='abc',
help='abc executable.')
args = parser.parse_args()
training_file = os.path.abspath(args.training_model)
unit_path = os.path.abspath(args.unit_test_directory)
test_path = os.path.abspath(args.test_directory)
#configure logging
timestamp = datetime.now()
timestamp_format = timestamp.strftime('%Y%m%d%H%M%S')
log_level = 'WARNING'
if args.verbose is None:
log_level = 'WARNING'
elif args.verbose == 1:
log_level = 'INFO'
elif args.verbose > 1:
log_level = 'DEBUG'
if args.log_to_file:
log_filename = timestamp_format + '_lsoracle_test.log'
logging.basicConfig(filename=log_filename,format='%(asctime)s:%(levelname)s:%(message)s', level=log_level)
else:
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', level=log_level)
#Define our function to call the lstools executable
def optimize(filename, mode, part_num, suffix):
opt_file = filename + suffix + '.v'
cmd = [args.lsoracle_exe,'-c', 'read_aig ' + filename + '; partitioning ' + str(part_num) + ' -c ' + args.kahypar + '; ' + mode + ' -o ' + opt_file + ';']
process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
stdout, stderr = process.communicate()
string_stdout = str(stdout, encoding="utf-8").splitlines()
logging.debug(str(stdout, encoding="utf-8"))
if stderr is not None:
logging.error(str(stderr, encoding="utf-8"))
logging.debug(string_stdout[-6])
return [int(s) for s in string_stdout[-6].split() if s.isdigit()]
def compare(filename, suffix):
new_file = filename + '.v'
opt_file = filename + suffix + '.v'
#need to create verilog file to compare to
cmd = [args.lsoracle_exe, '-c', 'read_aig ' + curr_file + '; write_verilog ' + new_file + ';']
subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
#use cec to compare the pre and post optimization files
cmd = [args.abc_exe, '-c', 'cec -n ' + new_file +' '+ opt_file + ';']
abc_process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
abc_stdout, abc_stderr = abc_process.communicate()
intermediate_string = str(abc_stdout, encoding="utf-8")
logging.debug(intermediate_string)
if abc_stderr is not None:
logging.error(str(abc_stderr, encoding="utf-8"))
string_abc = intermediate_string.splitlines()
return string_abc[-1]
#Begin tests
logging.info('LSOracle test suite')
#End to end tests
#Grab my test files
test_path_glob = test_path + '/*.aig'
logging.info('End to end tests:')
logging.info('Test path: %s', test_path)
files = glob.glob(test_path_glob)
logging.debug("List of test files: " + ", ".join(files))
#Actual testing
#we'll have to do some more thinking about what a good end to end test looks like. For now I'm going to optimize a couple benchmarks
#using aig, mig, mixed, and brute force, and report those. I'll have a failure message if our method is slower than
#both mig and aig. It ought to, at least, be between them.
for curr_file in files:
logging.info(curr_file)
#report statistics
cmd = [args.lsoracle_exe,'-c', 'read_aig ' + curr_file + '; ps -a;']
process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
stdout, stderr = process.communicate()
string_stdout = str(stdout, encoding='utf-8')
logging.debug(string_stdout)
if stderr is not None:
logging.error(str(stderr, encoding='utf-8'))
#calculate number of nodes
unoptimized_size = float(string_stdout.splitlines()[1][7:string_stdout.find('\n')])
num_part = math.ceil(unoptimized_size / 300)
logging.info('Size (# nodes before optimization): ' + str(unoptimized_size) +' partitions = size/300: ' + str(num_part))
#mixed synthesis with classifier
cmdstr = 'optimization -n ' + training_file
mixed_size = optimize(curr_file, cmdstr, num_part, '_mixed_out')
logging.info('ntk size after mixed synthesis: ' + str(mixed_size[0]) + ' depth: ' + str(mixed_size[1]))
abcout = compare(curr_file, '_mixed_out')
assert('Networks are equivalent' in abcout)
#Brute Force
cmdstr = 'optimization'
brute_size = optimize(curr_file, cmdstr, num_part, '_brute_out')
logging.info('ntk size after brute force: ' + str(brute_size[0]) + ' depth: ' + str(brute_size[1]))
abcout = compare(curr_file, '_brute_out')
assert('Networks are equivalent' in abcout)
#AIG only
cmdstr = 'optimization -a'
aig_size = optimize(curr_file, cmdstr, num_part, '_aig_out')
logging.info('ntk size after aig optimization: ' + str(aig_size[0]) + ' depth: ' + str(aig_size[1]))
abcout = compare(curr_file, '_aig_out')
assert('Networks are equivalent' in abcout)
#MIG only
cmdstr = 'optimization -m'
mig_size = optimize(curr_file, cmdstr, num_part, '_mig_out')
logging.info('ntk size after mig optimization: ' + str(mig_size[0]) + ' depth: ' + str(mig_size[1]))
abcout = compare(curr_file, '_mig_out')
assert('Networks are equivalent' in abcout)
assert (mixed_size[0] <= aig_size[0] or mixed_size[0] <= mig_size[0]) or (brute_size[0] <= aig_size[0] or brute_size[0] <= mig_size[0])
#unit tests. This is a stub.
#Grab my test files
logging.info('\nUnit tests:')
unit_path_glob = unit_path + '/*.aig'
logging.info('Test path: %s', unit_path)
files = glob.glob(unit_path_glob)
logging.debug("List of test files: ")
logging.debug(", ".join(files))
|
"""Ensemble methods that create consensus calls from multiple approaches.
This handles merging calls produced by multiple calling methods or
technologies into a single consolidated callset. Uses the bcbio.variation
toolkit: https://github.com/chapmanb/bcbio.variation and bcbio.variation.recall:
https://github.com/chapmanb/bcbio.variation.recall
"""
import collections
import copy
import glob
import os
import yaml
from bcbio import utils
from bcbio.log import logger
from bcbio.pipeline import config_utils
from bcbio.provenance import do
from bcbio.variation import effects, population, validate, vcfutils
def combine_calls(batch_id, samples, data):
"""Combine multiple callsets into a final set of merged calls.
"""
logger.info("Ensemble consensus calls for {0}: {1}".format(
batch_id, ",".join(x["variantcaller"] for x in samples[0]["variants"])))
edata = copy.deepcopy(data)
base_dir = utils.safe_makedir(os.path.join(edata["dirs"]["work"], "ensemble", batch_id))
caller_names, vrn_files, bam_files = _organize_variants(samples, batch_id)
exist_variants = False
for tmp_vrn_file in vrn_files:
if vcfutils.vcf_has_variants(tmp_vrn_file):
exist_variants = True
break
if exist_variants:
if "caller" in edata["config"]["algorithm"]["ensemble"]:
callinfo = _run_ensemble_w_caller(batch_id, vrn_files, bam_files, base_dir, edata)
else:
config_file = _write_config_file(batch_id, caller_names, base_dir, edata)
callinfo = _run_ensemble(batch_id, vrn_files, config_file, base_dir,
edata["sam_ref"], edata["config"])
edata["config"]["algorithm"]["variantcaller"] = "ensemble"
edata["vrn_file"] = callinfo["vrn_file"]
edata["ensemble_bed"] = callinfo["bed_file"]
callinfo["validate"] = validate.compare_to_rm(edata)[0][0].get("validate")
else:
out_vcf_file = os.path.join(base_dir, "{0}-ensemble.vcf".format(batch_id))
vcfutils.write_empty_vcf(out_vcf_file)
callinfo = {"variantcaller": "ensemble",
"vrn_file": out_vcf_file,
"bed_file": None}
return [[batch_id, callinfo]]
def combine_calls_parallel(samples, run_parallel):
"""Combine calls using batched Ensemble approach.
"""
batch_groups, extras = _group_by_batches(samples, _has_ensemble)
out = []
if batch_groups:
processed = run_parallel("combine_calls", ((b, xs, xs[0]) for b, xs in batch_groups.iteritems()))
for batch_id, callinfo in processed:
for data in batch_groups[batch_id]:
data["variants"].insert(0, callinfo)
out.append([data])
return out + extras
def _has_ensemble(data):
# for tumour-normal calling, a sample may have "ensemble" for the normal
# sample configured but there won't be any variant files per se
variants_to_process = len(data["variants"]) > 1 and any(x.get('vrn_file') is not None or x.get('vrn_file_batch') is not None for x in data["variants"])
return variants_to_process and "ensemble" in data["config"]["algorithm"]
def _group_by_batches(samples, check_fn):
"""Group calls by batches, processing families together during ensemble calling.
"""
batch_groups = collections.defaultdict(list)
extras = []
for data in [x[0] for x in samples]:
if check_fn(data):
batch = data.get("metadata", {}).get("batch")
if batch:
batch_groups[batch].append(data)
else:
assert data["name"][-1] not in batch_groups
batch_groups[data["name"][-1]] = [data]
else:
extras.append([data])
return batch_groups, extras
def _organize_variants(samples, batch_id):
"""Retrieve variant calls for all samples, merging batched samples into single VCF.
"""
bam_files = set([])
caller_names = [x["variantcaller"] for x in samples[0]["variants"]]
calls = collections.defaultdict(list)
for data in samples:
if "work_bam" in data:
bam_files.add(data["work_bam"])
for vrn in data["variants"]:
# for somatic ensemble, discard normal samples and filtered
# variants from vcfs
vrn_file = vrn["vrn_file"]
if data.get("metadata", False) and data["metadata"].get("phenotype", "normal").lower().startswith("tumor"):
vrn_file_temp = vrn_file.replace(".vcf", "_tumorOnly_noFilteredCalls.vcf") if ".vcf" in vrn_file else vrn_file + "_tumorOnly_noFilteredCalls.vcf.gz"
# Select tumor sample and keep only PASS and . calls
vrn_file = vcfutils.select_sample(in_file=vrn_file, sample=data["name"][1],
out_file=vrn_file_temp,
config=data["config"], filters="PASS,.")
calls[vrn["variantcaller"]].append(vrn_file)
data = samples[0]
vrn_files = []
for caller in caller_names:
fnames = calls[caller]
if len(fnames) == 1:
vrn_files.append(fnames[0])
else:
vrn_files.append(population.get_multisample_vcf(fnames, batch_id, caller, data))
return caller_names, vrn_files, list(bam_files)
def _bcbio_variation_ensemble(vrn_files, out_file, ref_file, config_file, base_dir, config):
"""Run a variant comparison using the bcbio.variation toolkit, given an input configuration.
"""
tmp_dir = utils.safe_makedir(os.path.join(base_dir, "tmp"))
bv_jar = config_utils.get_jar("bcbio.variation",
config_utils.get_program("bcbio_variation", config, "dir"))
resources = config_utils.get_resources("bcbio_variation", config)
jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx2g"])
java_args = ["-Djava.io.tmpdir=%s" % tmp_dir]
cmd = ["java"] + jvm_opts + java_args + ["-jar", bv_jar, "variant-ensemble", config_file,
ref_file, out_file] + vrn_files
with utils.chdir(base_dir):
do.run(cmd, "Ensemble calling: %s" % os.path.basename(base_dir))
def _run_ensemble(batch_id, vrn_files, config_file, base_dir, ref_file, config):
"""Run an ensemble call using merging and SVM-based approach in bcbio.variation
"""
out_vcf_file = os.path.join(base_dir, "{0}-ensemble.vcf".format(batch_id))
out_bed_file = os.path.join(base_dir, "{0}-callregions.bed".format(batch_id))
work_dir = "%s-work" % os.path.splitext(out_vcf_file)[0]
if not utils.file_exists(out_vcf_file):
_bcbio_variation_ensemble(vrn_files, out_vcf_file, ref_file, config_file,
base_dir, config)
if not utils.file_exists(out_vcf_file):
base_vcf = glob.glob(os.path.join(work_dir, "prep", "*-cfilter.vcf"))[0]
utils.symlink_plus(base_vcf, out_vcf_file)
if not utils.file_exists(out_bed_file):
multi_beds = glob.glob(os.path.join(work_dir, "prep", "*-multicombine.bed"))
if len(multi_beds) > 0:
utils.symlink_plus(multi_beds[0], out_bed_file)
return {"variantcaller": "ensemble",
"vrn_file": out_vcf_file,
"bed_file": out_bed_file if os.path.exists(out_bed_file) else None}
def _write_config_file(batch_id, caller_names, base_dir, data):
"""Write YAML configuration to generate an ensemble set of combined calls.
"""
config_dir = utils.safe_makedir(os.path.join(base_dir, "config"))
config_file = os.path.join(config_dir, "{0}-ensemble.yaml".format(batch_id))
algorithm = data["config"]["algorithm"]
econfig = {"ensemble": algorithm["ensemble"],
"names": caller_names,
"prep-inputs": False}
intervals = validate.get_analysis_intervals(data)
if intervals:
econfig["intervals"] = os.path.abspath(intervals)
with open(config_file, "w") as out_handle:
yaml.safe_dump(econfig, out_handle, allow_unicode=False, default_flow_style=False)
return config_file
def _run_ensemble_w_caller(batch_id, vrn_files, bam_files, base_dir, edata):
"""Run ensemble method using a variant caller to handle re-calling the inputs.
Uses bcbio.variation.recall method plus an external variantcaller.
"""
out_vcf_file = os.path.join(base_dir, "{0}-ensemble.vcf".format(batch_id))
if not utils.file_exists(out_vcf_file):
caller = edata["config"]["algorithm"]["ensemble"]["caller"]
cmd = [config_utils.get_program("bcbio-variation-recall", edata["config"]),
"ensemble", "--cores=%s" % edata["config"]["algorithm"].get("num_cores", 1),
"--caller=%s" % caller,
out_vcf_file, edata["sam_ref"]] + vrn_files + bam_files
do.run(cmd, "Ensemble calling with %s: %s" % (caller, batch_id))
in_data = copy.deepcopy(edata)
in_data["vrn_file"] = out_vcf_file
effects_vcf = effects.snpeff_effects(in_data)
return {"variantcaller": "ensemble",
"vrn_file": effects_vcf,
"bed_file": None}
|
<filename>Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/cms/djangoapps/course_creators/tests/test_views.py
"""
Tests course_creators.views.py.
"""
from unittest import mock
from django.core.exceptions import PermissionDenied
from django.test import TestCase
from django.urls import reverse
from cms.djangoapps.course_creators.views import (
add_user_with_status_granted,
add_user_with_status_unrequested,
get_course_creator_status,
update_course_creator_group,
user_requested_access
)
from common.djangoapps.student import auth
from common.djangoapps.student.roles import CourseCreatorRole
from common.djangoapps.student.tests.factories import UserFactory
class CourseCreatorView(TestCase):
"""
Tests for modifying the course creator table.
"""
def setUp(self):
""" Test case setup """
super().setUp()
self.user = UserFactory.create(
username='test_user',
email='<EMAIL>',
password='<PASSWORD>',
)
self.admin = UserFactory.create(
username='Mark',
email='<EMAIL>',
password='<PASSWORD>',
)
self.admin.is_staff = True
def test_staff_permission_required(self):
"""
Tests that any method changing the course creator authz group must be called with staff permissions.
"""
with self.assertRaises(PermissionDenied):
add_user_with_status_granted(self.user, self.user)
with self.assertRaises(PermissionDenied):
update_course_creator_group(self.user, self.user, True)
def test_table_initially_empty(self):
self.assertIsNone(get_course_creator_status(self.user))
def test_add_unrequested(self):
add_user_with_status_unrequested(self.user)
self.assertEqual('unrequested', get_course_creator_status(self.user))
# Calling add again will be a no-op (even if state is different).
add_user_with_status_granted(self.admin, self.user)
self.assertEqual('unrequested', get_course_creator_status(self.user))
def test_add_granted(self):
with mock.patch.dict('django.conf.settings.FEATURES', {"ENABLE_CREATOR_GROUP": True}):
# Calling add_user_with_status_granted impacts is_user_in_course_group_role.
self.assertFalse(auth.user_has_role(self.user, CourseCreatorRole()))
add_user_with_status_granted(self.admin, self.user)
self.assertEqual('granted', get_course_creator_status(self.user))
# Calling add again will be a no-op (even if state is different).
add_user_with_status_unrequested(self.user)
self.assertEqual('granted', get_course_creator_status(self.user))
self.assertTrue(auth.user_has_role(self.user, CourseCreatorRole()))
def test_update_creator_group(self):
with mock.patch.dict('django.conf.settings.FEATURES', {"ENABLE_CREATOR_GROUP": True}):
self.assertFalse(auth.user_has_role(self.user, CourseCreatorRole()))
update_course_creator_group(self.admin, self.user, True)
self.assertTrue(auth.user_has_role(self.user, CourseCreatorRole()))
update_course_creator_group(self.admin, self.user, False)
self.assertFalse(auth.user_has_role(self.user, CourseCreatorRole()))
def test_user_requested_access(self):
add_user_with_status_unrequested(self.user)
self.assertEqual('unrequested', get_course_creator_status(self.user))
self.client.login(username=self.user.username, password='<PASSWORD>')
# The user_requested_access function renders a template that requires
# request-specific information. Use the django TestClient to supply
# the appropriate request context.
self.client.post(reverse('request_course_creator'))
self.assertEqual('pending', get_course_creator_status(self.user))
def test_user_requested_already_granted(self):
add_user_with_status_granted(self.admin, self.user)
self.assertEqual('granted', get_course_creator_status(self.user))
# Will not "downgrade" to pending because that would require removing the
# user from the authz course creator group (and that can only be done by an admin).
user_requested_access(self.user)
self.assertEqual('granted', get_course_creator_status(self.user))
def test_add_user_unrequested_staff(self):
# Users marked as is_staff will not be added to the course creator table.
add_user_with_status_unrequested(self.admin)
self.assertIsNone(get_course_creator_status(self.admin))
def test_add_user_granted_staff(self):
# Users marked as is_staff will not be added to the course creator table.
add_user_with_status_granted(self.admin, self.admin)
self.assertIsNone(get_course_creator_status(self.admin))
|
"""
Kypher queries over KGTK graphs.
"""
import sys
import os.path
import io
import re
import time
import pprint
import sh
from odictliteral import odict
import kgtk.kypher.parser as parser
import kgtk.kypher.sqlstore as ss
from kgtk.value.kgtkvalue import KgtkValue
pp = pprint.PrettyPrinter(indent=4)
### TO DO:
# - support node property access without having to introduce the property variable in the
# match clause first (e.g., y.salary in the multi-graph join example)
# + support parameters in lists
# - support concat function (|| operator in sqlite)
# - maybe support positional parameters $0, $1,...
# - intelligent interpretation of ^ and $ when regex-matching to string literals?
# - one can use kgtk_unstringify first to get to the text content
# - allow |-alternatives in relationship and node patterns (the latter being an
# extension to Cypher)
# - more intelligent index creation
# - investigate redundant join clauses
# - header column dealiasing/normalization, checking for required columns
# - bump graph timestamps when they get queried
# + allow order-by on column aliases (currently they are undefined variables)
# - (not) exists pattern handling
# + null-value handling and testing
# - handle properties that are ambiguous across graphs
# + graphs fed in from stdin
# + graph naming independent from files, so we don't have to have source data files
# available after import for querying, e.g.: ... -i $FILE1 --as g1 -i $FILE2 --as g2 ...
# - with named graphs, we probably also need some kind of --info command to list content
# + investigate Cyphers multiple distinct match clauses more thoroughly; apparently, a
# difference is that in a single pattern, each relationship must match a different edge
# which is kind of like UVBR in SNePS, but in multiple match patterns that restriction
# is only enforced within each match clauses's pattern. This means if we don't enforce
# the uniqueness principle, multiple strict match clauses do not add anything
# + optional match clauses need to allow multiple ones so they can fail individually
# - --create and --remove to instantiate and add/remove edge patterns from result bindings
# - --with clause to compute derived values to use by --create and --remove
### Utilities
def listify(x):
return (hasattr(x, '__iter__') and not isinstance(x, str) and list(x)) or (x and [x]) or []
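# A few illustrative cases (not from the original source):
#   listify(('a', 'b')) => ['a', 'b']   # non-string iterables become lists
#   listify('a')        => ['a']        # strings are wrapped, not iterated
#   listify(None)       => []           # falsy values give the empty list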
def dwim_to_string_para(x):
"""Try to coerce 'x' to a KGTK string value that can be passed as a query parameter.
"""
x = str(x)
m = KgtkValue.strict_string_re.match(x)
if m is not None:
return x
# if we have an enclosing pair of quotes, remove them:
if x.startswith('"') and x.endswith('"'):
x = x[1:-1]
x = re.sub(r'(?P<char>["\|])', r'\\\g<char>', x)
return '"%s"' % x
def dwim_to_lqstring_para(x):
"""Try to coerce 'x' to a KGTK LQ-string value that can be passed as a query parameter.
"""
x = str(x)
m = KgtkValue.strict_language_qualified_string_re.match(x)
if m is not None:
return x
atpos = x.rfind('@')
if atpos > 0:
text = x[0:atpos]
# this allows an empty or invalid language:
lang = x[atpos+1:]
# if we have an enclosing pair of quotes, remove them:
if text.startswith("'") and text.endswith("'"):
text = text[1:-1]
text = re.sub(r"(?P<char>['\|])", r'\\\g<char>', text)
return "'%s'@%s" % (text, lang)
raise Exception("cannot coerce '%s' into a language-qualified string" % x)
### Query translation:
# An expression in Kypher can be ('+' means handled fully, 'o' partially):
# (from https://neo4j.com/docs/cypher-manual/current/syntax/expressions/)
#
# + A decimal (integer or float) literal: 13, -40000, 3.14, 6.022E23.
# + A hexadecimal integer literal (starting with 0x): 0x13af, 0xFC3A9, -0x66eff
# - HC: get converted into decimal
# + An octal integer literal (starting with 0): 01372, 02127, -05671.
# - HC: get converted into decimal
# + A string literal: 'Hello', "World".
# + A boolean literal: true, false, TRUE, FALSE.
# - HC: get converted into 0/1
# + A variable: n, x, rel, myFancyVariable, `A name with weird stuff in it[]!`.
# + A property: n.prop, x.prop, rel.thisProperty, myFancyVariable.`(weird property name)`
# - A dynamic property: n["prop"], rel[n.city + n.zip], map[coll[0]].
# - HC: not doable in SQL, amounts to a function or column variable
# + A parameter: $param, $0
# + A list of expressions: ['a', 'b'], [1, 2, 3], ['a', 2, n.property, $param], [ ].
# - HC: only lists of literals
# + A function call: length(p), nodes(p).
# + An aggregate function: avg(x.prop), count(*).
# - A path-pattern: (a)-->()<--(b).
# + An operator application: 1 + 2 and 3 < 4.
# + A predicate expression is an expression that returns true or false: a.prop = 'Hello', length(p) > 10, exists(a.name).
# - An existential subquery is an expression that returns true or false: EXISTS { MATCH (n)-[r]→(p) WHERE p.name = 'Sven' }.
# + A regular expression: a.name =~ 'Tim.*'
# - HC: SQLite supports LIKE and GLOB (which both have different regexp syntax),
# and REGEXP and MATCH through user-defined functions (we support =~ via kgtk_regex)
# - A case-sensitive string matching expression: a.surname STARTS WITH 'Sven', a.surname ENDS WITH 'son' or a.surname CONTAINS 'son'
# - HC: would need to be implemented via a user-defined function
# - A CASE expression.
# Using properties to restrict on "wide" columns:
#
# Example - unrestricted:
#
# (a)-[:loves]->(b)
#
# Example - qualified:
#
# (a {nationality: "Austria"})-[:loves]->(b)
#
# This could mean:
# {'node1': <Variable a>, 'node1;nationality': "Austria", 'label': "loves", 'node2': <Variable b>}
#
# (a)-[:loves {graph: "g1"}]->(b)
#
# This could mean:
# {'node1': <Variable a>, 'label': "loves", 'graph': "g1", 'node2': <Variable b>}
#
# Assumption: if we access something via a property, it will always be accessed via a column,
# not via a normalized edge; if data has mixed representation for some edges, it has to be
# normalized one way or the other first for the query to get all results. If not, it will
# only pick up the representation used in the query, other edges will be ignored.
#
# For structured literals, we assume their fields are implied/virtual wide columns that aren't
# materialized. For example:
#
# (id)-[:P580]->(time {`kgtk:year`: year})
# where year <= 2010
#
# which would be the same as (if we named the accessors like our column names):
#
# (id)-[:P580]->(time)
# where kgtk_year(time) <= 2010
class KgtkQuery(object):
def __init__(self, files, store, options=None, query=None,
match='()', where=None, optionals=None, with_=None,
ret='*', order=None, skip=None, limit=None,
parameters={}, index='auto', force=False, loglevel=0):
self.options = options or {}
self.store = store
self.loglevel = loglevel
self.force = force
self.parameters = parameters
self.defer_params = False
self.index_mode = index
if query is None:
# supplying a query through individual clause arguments is a little bit easier,
# since they can be in any order, can have defaults, are easier to shell-quote, etc.:
query = io.StringIO()
# for now we allow/require exactly one strict match pattern, even though in Cypher
# there could be any number and conceivably optionals could come before strict:
match and query.write(' MATCH ' + match)
where and query.write(' WHERE ' + where)
# optionals is a list of match pattern/where pairs, where single-element lists can be atoms:
for omatch in listify(optionals):
omatch = listify(omatch)
query.write(' OPTIONAL MATCH ' + omatch[0])
if len(omatch) > 1 and omatch[1] is not None:
query.write(' WHERE ' + omatch[1])
# with_ is a single (vars, where) tuple, where a single-element atom/list
# is interpreted as the variables clause to a 'with <vars>...':
if with_ is not None:
with_ = listify(with_) + [None]
query.write(' WITH ' + with_[0])
with_[1] and query.write(' WHERE ' + with_[1])
ret and query.write(' RETURN ' + ret)
order and query.write(' ORDER BY ' + order)
skip and query.write(' SKIP ' + str(skip))
limit and query.write(' LIMIT ' + str(limit))
query = query.getvalue()
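# For example (an illustrative sketch, not from the original source), the arguments
# match='(a)-[:loves]->(b)', where='a != b', ret='a, b', limit=10 assemble the string:
# " MATCH (a)-[:loves]->(b) WHERE a != b RETURN a, b LIMIT 10"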
self.log(2, 'Kypher:' + query)
self.query = parser.intern(query)
self.match_clause = self.query.get_match_clause()
self.optional_clauses = self.query.get_optional_match_clauses()
self.with_clause = self.query.get_with_clause()
self.return_clause = self.query.get_return_clause()
self.order_clause = self.query.get_order_clause()
self.skip_clause = self.query.get_skip_clause()
self.limit_clause = self.query.get_limit_clause()
# process/import files after we parsed the query, so we get syntax errors right away:
self.files = []
for file in listify(files):
file = str(file) # in case we get a path object
alias = self.get_input_option(file, 'alias')
comment = self.get_input_option(file, 'comment')
store.add_graph(file, alias=alias)
# use aliases for handle matching, otherwise use the unnormalized file name (except for stdin):
norm_file = store.get_normalized_file(file, alias=alias)
if store.is_standard_input(file):
file = norm_file
self.files.append(alias or file)
comment is not None and store.set_file_comment(norm_file, comment)
self.default_graph = self.files[0]
self.graph_handle_map = {}
self.result_header = None
def get_input_option(self, file, option, dflt=None):
for input, opts in self.options.items():
if input == file or opts.get('alias') == file:
return opts.get(option, dflt)
return dflt
def log(self, level, message):
if self.loglevel >= level:
header = '[%s query]:' % time.strftime('%Y-%m-%d %H:%M:%S')
sys.stderr.write('%s %s\n' % (header, message))
sys.stderr.flush()
def map_graph_handle_to_file(self, handle):
"""Performes a greedy mapping of 'handle' to either a full file name
or the first file basename that contains 'handle' as a substring.
If handle contains a numeric suffix, we also check its prefix portion.
For example, handle 'g12' is also matched as 'g' in the file basename.
"""
files = self.files
hmap = self.graph_handle_map
if handle in hmap:
return hmap[handle]
base_handle = handle
m = re.search(r'[0-9]+$', handle)
if m is not None and m.start() > 0:
base_handle = handle[0:m.start()]
mapped_files = hmap.values()
for file in files:
if file not in mapped_files:
key = file
if handle == key:
hmap[handle] = file
return file
key = os.path.basename(file)
if key.find(handle) >= 0 or key.find(base_handle) >= 0:
hmap[handle] = file
return file
raise Exception("failed to uniquely map handle '%s' onto one of %s" % (handle, files))
def get_parameter_value(self, name):
value = self.parameters.get(name)
if value is None:
if self.defer_params:
# value will be provided later, just use the parameter name as its value for now;
# we use a single-element tuple to mark it as a place holder:
value = (name,)
self.parameters[name] = value
else:
raise Exception("undefined query parameter: '%s'" % name)
return value
def get_pattern_clause_graph(self, clause):
"""Return the graph table for this 'clause', initialize it if necessary.
"""
node1 = clause[0]
if hasattr(node1, '_graph_table'):
return node1._graph_table
graph = node1.graph
if graph is not None:
graph = graph.name
else:
graph = self.default_graph
node1._graph_table = self.store.get_file_graph(self.map_graph_handle_to_file(graph))
return node1._graph_table
def get_pattern_clause_graph_alias(self, clause):
"""Return the graph table alias for this 'clause', initialize it if necessary.
"""
# assumes self.init_match_clauses() has been called:
node1 = clause[0]
return node1._graph_alias
# in case we have aliases which could be different in every graph, stubs for now:
def get_node1_column(self, graph):
return 'node1'
def get_node2_column(self, graph):
return 'node2'
def get_label_column(self, graph):
return 'label'
def get_id_column(self, graph):
return 'id'
def get_edge_columns(self, graph):
return ('id', 'node1', 'label', 'node2')
def pattern_clause_to_sql(self, clause, graph, state):
node1 = clause[0]
rel = clause[1]
node2 = clause[2]
node1col = self.get_node1_column(graph)
if node1.labels is not None:
para = state.get_literal_parameter(node1.labels[0])
state.add_match_clause_restriction((graph, node1col), para)
# we do not exclude anonymous vars here, since they can connect edges: <-[]-()-[]->
if node1.variable is not None:
state.register_clause_variable(node1.variable.name, (graph, node1col))
node2col = self.get_node2_column(graph)
if node2.labels is not None:
para = state.get_literal_parameter(node2.labels[0])
state.add_match_clause_restriction((graph, node2col), para)
# we do not exclude anonymous vars here (see above):
if node2.variable is not None:
state.register_clause_variable(node2.variable.name, (graph, node2col))
labelcol = self.get_label_column(graph)
idcol = self.get_id_column(graph)
if rel.labels is not None:
para = state.get_literal_parameter(rel.labels[0])
state.add_match_clause_restriction((graph, labelcol), para)
# but an anonymous relation variable cannot connect to anything else:
if rel.variable is not None and not isinstance(rel.variable, parser.AnonymousVariable):
state.register_clause_variable(rel.variable.name, (graph, idcol))
def pattern_props_to_sql(self, pattern, graph, column, state):
# 'pattern' is a node or relationship pattern for 'graph.column'. 'column' should be 'node1', 'node2' or 'id'.
props = getattr(pattern, 'properties', None)
if props is None or len(props) == 0:
return
# if we need to access a property, we need to register anonymous variables as well:
state.register_clause_variable(pattern.variable.name, (graph, column))
for prop, expr in props.items():
# TO DO: figure out how to better abstract property to column mapping (also see below):
propcol = isinstance(pattern, parser.RelationshipPattern) and prop or column + ';' + prop
# TRICKY/TO DO: if the property value is a standalone variable, we register it as a free
# variable before evaluating it, since properties can be ambiguous across different graphs
# and only within a clause do we know which graph is actually meant. Think about this
# some more, this issue comes up in the time-machine use case:
if isinstance(expr, parser.Variable):
state.register_clause_variable(expr.name, (graph, propcol))
expr = self.expression_to_sql(expr, state)
state.add_match_clause_restriction((graph, propcol), expr)
def pattern_clause_props_to_sql(self, clause, graph, state):
node1 = clause[0]
node1col = self.get_node1_column(graph)
self.pattern_props_to_sql(node1, graph, node1col, state)
node2 = clause[2]
node2col = self.get_node2_column(graph)
self.pattern_props_to_sql(node2, graph, node2col, state)
rel = clause[1]
idcol = self.get_id_column(graph)
self.pattern_props_to_sql(rel, graph, idcol, state)
OPERATOR_TABLE = {
parser.Add: '+', parser.Sub: '-', parser.Multi: '*', parser.Div: '/', parser.Mod: '%',
parser.Eq: '=', parser.Neq: '!=', parser.Lt: '<', parser.Gt: '>',
parser.Lte: '<=', parser.Gte: '>=',
parser.Not: 'NOT', parser.And: 'AND', parser.Or: 'OR',
}
def is_kgtk_operator(self, op):
"""Return True if 'op' is a special KGTK function or virtual property.
"""
return str(op).upper().startswith('KGTK_')
def variable_to_sql(self, expr, state):
"""Translate the variable 'expr' into the corresponding graph (alias), column and SQL.
The graph component might be None for special variables such as '*' or aliases.
"""
query_var = expr.name
if query_var == '*':
return None, query_var, query_var
sql_vars = state.lookup_variable(query_var, error=True)
# we allow regular and alias variables of the same name, but once an alias
# of name 'x' is defined, it will shadow access to any regular variable 'x':
for graph, col in sql_vars:
if graph == self.ALIAS_GRAPH:
# variable names a return column alias, rename it apart to avoid name conflicts:
column = self.alias_column_name(col)
return None, column, ss.sql_quote_ident(column)
# otherwise, pick the representative from the set of equiv-joined column vars,
# which corresponds to the graph alias and column name used by the first reference:
graph, column = sql_vars[0]
return graph, column, f'{graph}.{ss.sql_quote_ident(column)}'
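# Illustrative example (not from the original source): if query variable 'a' was
# registered for column 'node1' of graph alias 'graph_1_c1', this returns
# ('graph_1_c1', 'node1', 'graph_1_c1."node1"'); for '*' it returns (None, '*', '*').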
def property_to_sql(self, expr, state):
"""Translate the property lookup expression 'expr' into the corresponding
graph (alias), column name and SQL translation.
"""
assert isinstance(expr, parser.Expression2), f'Not a property lookup expression: {expr}'
arg1 = expr.arg1
arg2 = expr.arg2
if isinstance(arg1, parser.Variable):
graph, column, sql = self.variable_to_sql(arg1, state)
for proplook in arg2:
if not isinstance(proplook, parser.PropertyLookup):
break # error
prop = proplook.property
if self.is_kgtk_operator(prop) and self.store.is_user_function(prop):
self.store.load_user_function(prop)
sql = f'{prop}({sql})'
elif column == self.get_id_column(graph):
# we are referring to the relation ID, substitute it with the prop column:
sql = sql[:-(len(column)+1)] + prop + '"'
column = prop
else:
# we must be referring to a node-path column such as node1;name or node2;creator:
# TO DO: check existence of column here instead of waiting for SQLite to error
column += (';' + prop)
sql = sql[:-1] + ';' + prop + '"'
else:
return graph, column, sql
raise Exception("Unhandled property lookup expression: " + str(expr))
def expression_to_sql(self, expr, state):
"""Translate a Kypher expression 'expr' into its SQL equivalent.
"""
expr_type = type(expr)
if expr_type == parser.Literal:
return state.get_literal_parameter(expr.value)
elif expr_type == parser.Parameter:
value = self.get_parameter_value(expr.name)
return state.get_literal_parameter(value)
elif expr_type == parser.Variable:
graph, column, sql = self.variable_to_sql(expr, state)
return sql
elif expr_type == parser.List:
# we only allow literals in lists, Cypher also supports variables:
state.disable_variables()
elements = [self.expression_to_sql(elt, state) for elt in expr.elements]
state.enable_variables()
return '(' + ', '.join(elements) + ')'
elif expr_type == parser.Minus:
arg = self.expression_to_sql(expr.arg, state)
return '(- %s)' % arg
elif expr_type in (parser.Add, parser.Sub, parser.Multi, parser.Div, parser.Mod):
arg1 = self.expression_to_sql(expr.arg1, state)
arg2 = self.expression_to_sql(expr.arg2, state)
op = self.OPERATOR_TABLE[expr_type]
return '(%s %s %s)' % (arg1, op, arg2)
elif expr_type == parser.Hat:
raise Exception("Unsupported operator: '^'")
elif expr_type in (parser.Eq, parser.Neq, parser.Lt, parser.Gt, parser.Lte, parser.Gte):
arg1 = self.expression_to_sql(expr.arg1, state)
arg2 = self.expression_to_sql(expr.arg2, state)
op = self.OPERATOR_TABLE[expr_type]
return '(%s %s %s)' % (arg1, op, arg2)
elif expr_type == parser.Not:
arg = self.expression_to_sql(expr.arg, state)
return '(NOT %s)' % arg
elif expr_type in (parser.And, parser.Or):
arg1 = self.expression_to_sql(expr.arg1, state)
arg2 = self.expression_to_sql(expr.arg2, state)
op = self.OPERATOR_TABLE[expr_type]
return '(%s %s %s)' % (arg1, op, arg2)
elif expr_type == parser.Xor:
raise Exception("Unsupported operator: 'XOR'")
elif expr_type == parser.Case:
# TO DO: implement, has the same syntax as SQL:
raise Exception("Unsupported operator: 'CASE'")
elif expr_type == parser.Call:
function = expr.function
if function.upper() == 'CAST':
# special-case SQLite CAST which isn't directly supported by Cypher:
if len(expr.args) == 2 and isinstance(expr.args[1], parser.Variable):
arg = self.expression_to_sql(expr.args[0], state)
typ = expr.args[1].name
return 'CAST(%s AS %s)' % (arg, typ)
else:
raise Exception("Illegal CAST expression")
elif is_text_match_operator(function):
return translate_text_match_op_to_sql(self, expr, state)
args = [self.expression_to_sql(arg, state) for arg in expr.args]
distinct = expr.distinct and 'DISTINCT ' or ''
self.store.load_user_function(function, error=False)
return function + '(' + distinct + ', '.join(args) + ')'
elif expr_type == parser.Expression2:
graph, column, sql = self.property_to_sql(expr, state)
return sql
elif expr_type == parser.Expression3:
arg1 = self.expression_to_sql(expr.arg1, state)
op = expr.operator.upper()
if op in ('IS_NULL', 'IS_NOT_NULL'):
return '(%s %s)' % (arg1, op.replace('_', ' '))
if expr.arg2 is None:
raise Exception('Unhandled operator: %s' % str(op))
arg2 = self.expression_to_sql(expr.arg2, state)
if op == 'IN':
return '(%s %s %s)' % (arg1, op, arg2)
elif op == 'REGEX':
self.store.load_user_function('KGTK_REGEX')
return 'KGTK_REGEX(%s, %s)' % (arg1, arg2)
else:
raise Exception('Unhandled operator: %s' % str(op))
else:
raise Exception('Unhandled expression type: %s' % str(parser.object_to_tree(expr)))
def where_clause_to_sql(self, where_clause, state):
if where_clause is None:
return ''
else:
return self.expression_to_sql(where_clause.expression, state)
ALIAS_GRAPH = '_'
ALIAS_COLUMN_PREFIX = '_aLias.'
def alias_column_name(self, column):
"""Rename an alias 'column' apart so it doesn't conflict with any data table column names.
"""
# for now we simply prepend this prefix, a more thorough solution would look at actual
# table columns to make sure none of the column names starts with the prefix:
return self.ALIAS_COLUMN_PREFIX + column
def unalias_column_name(self, column):
"""If 'column' is a renamed alias, unrename it; otherwise leave it unmodified.
"""
return column.startswith(self.ALIAS_COLUMN_PREFIX) and column[len(self.ALIAS_COLUMN_PREFIX):] or column
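# Illustrative round trip (not from the original source):
#   alias_column_name('node1')          => '_aLias.node1'
#   unalias_column_name('_aLias.node1') => 'node1'
#   unalias_column_name('node1')        => 'node1'   # non-aliases pass through unchanged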
def return_clause_to_sql_selection(self, clause, state):
select = clause.distinct and 'DISTINCT ' or ''
first = True
# Cypher does not have a 'GROUP BY' clause but instead uses non-aggregate return columns
# that precede an aggregate function as grouping keys, so we have to keep track of those:
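# For example (an illustrative sketch, not from the original source): a return clause
# like 'x, count(y)' groups on the column translated for x, while 'count(y)' alone
# or 'count(y), x' produce no GROUP BY at all.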
agg_info = []
for item in clause.items:
expr = self.expression_to_sql(item.expression, state)
select += first and expr or (', ' + expr)
first = False
# check if this item calls an aggregation function or not: if it does then preceding columns
# that aren't aggregates are used for grouping, if it doesn't this column might be used for grouping:
is_agg = parser.has_element(
item.expression, lambda x: isinstance(x, parser.Call) and self.store.is_aggregate_function(x.function))
if item.name is not None:
# we create an alias variable object here, so we can evaluate it for proper renaming:
alias_var = parser.Variable(item._query, item.name)
# we have to register the alias as a variable, otherwise it can't be referenced in --order-by,
# but it is not tied to a specific graph table, thus that part is ALIAS_GRAPH below:
state.register_clause_variable(item.name, (self.ALIAS_GRAPH, item.name), nojoins=True)
sql_alias = self.expression_to_sql(alias_var, state)
select += ' ' + sql_alias
agg_info.append(not is_agg and sql_alias or None)
else:
agg_info.append(not is_agg and expr or None)
# we only need to group if there is at least one aggregate column and
# at least one regular column before one of the aggregate columns:
first_reg = len(agg_info)
last_agg = -1
for col, aggi in enumerate(agg_info):
if aggi is not None:
first_reg = min(col, first_reg)
else:
last_agg = max(col, last_agg)
if last_agg > first_reg:
group_by = [col for col in agg_info[0:last_agg] if col is not None]
group_by = 'GROUP BY ' + ', '.join(group_by)
else:
group_by = None
return select, group_by
def order_clause_to_sql(self, order_clause, state):
if order_clause is None:
return None
items = []
for sort_item in order_clause.items:
expr = self.expression_to_sql(sort_item.expression, state)
direction = sort_item.direction.upper()
items.append(expr + (direction.startswith('ASC') and '' or (' ' + direction)))
return 'ORDER BY ' + ', '.join(items)
def limit_clauses_to_sql(self, skip_clause, limit_clause, state):
if skip_clause is None and limit_clause is None:
return None
state.disable_variables()
limit = 'LIMIT'
if limit_clause is not None:
limit += ' ' + self.expression_to_sql(limit_clause.expression, state)
else:
limit += ' -1'
if skip_clause is not None:
limit += ' OFFSET ' + self.expression_to_sql(skip_clause.expression, state)
state.enable_variables()
return limit
def get_match_clauses(self):
"""Return all strict and optional match clauses of this query in order.
Returns the (single) strict match clause first which is important for
later optional joins to strict clause variables to work correctly.
"""
return (self.match_clause, *self.optional_clauses)
def init_match_clauses(self, state):
"""Initialize graph and table alias info for all match and pattern clauses.
"""
i = 1
for match_clause in self.get_match_clauses():
for clause in match_clause.get_pattern_clauses():
graph = self.get_pattern_clause_graph(clause)
graph_alias = '%s_c%d' % (graph, i) # per-clause graph table alias for self-joins
clause[0]._graph_alias = graph_alias
state.register_table_alias(graph, graph_alias)
i += 1
def graph_alias_to_graph(self, graph_alias):
"""Map a graph table 'graph_alias' back onto the graph table from which it was derived.
This simply keys in on the naming scheme we use above, but we could also store this somewhere.
"""
return graph_alias[0:graph_alias.rfind('_')]
def get_match_clause_graphs(self, match_clause):
"""Return the set of graph table names with aliases referenced by this 'match_clause'.
"""
graphs = set()
for clause in match_clause.get_pattern_clauses():
graph_table = self.get_pattern_clause_graph(clause)
graph_alias = self.get_pattern_clause_graph_alias(clause)
graphs.add((graph_table, graph_alias))
return graphs
def get_all_match_clause_graphs(self):
"""Return the set of graph table names with aliases referenced by this query.
"""
graphs = set()
for match_clause in self.get_match_clauses():
for clause in match_clause.get_pattern_clauses():
graph_table = self.get_pattern_clause_graph(clause)
graph_alias = self.get_pattern_clause_graph_alias(clause)
graphs.add((graph_table, graph_alias))
return graphs
def graph_names_to_sql(self, graphs):
"""Translate a list of (graph, alias) pairs into an SQL table list with aliases.
"""
return ', '.join([g + ' AS ' + a for g, a in sorted(listify(graphs))])
def match_clause_to_sql(self, match_clause, state):
"""Translate a strict or optional 'match_clause' into a set of source tables,
joined tables, internal and external join and where conditions which can then
be assembled into appropriate FROM/WHERE/INNER JOIN/LEFT JOIN and any necessary
nested joins depending on the particular structure of 'match_clause'. This is
a bit wild and wooly and will likely need further refinement down the road.
"""
state.set_match_clause(match_clause)
clause_sources = sorted(list(self.get_match_clause_graphs(match_clause)))
primary_source = clause_sources[0]
sources = clause_sources.copy()
joined = set()
internal_condition = []
external_condition = []
for (g1, c1), (g2, c2) in sorted(list(state.get_match_clause_joins(match_clause))):
condition = '%s.%s = %s.%s' % (g1, ss.sql_quote_ident(c1), g2, ss.sql_quote_ident(c2))
graph1 = (self.graph_alias_to_graph(g1), g1)
graph2 = (self.graph_alias_to_graph(g2), g2)
internal = graph1 in clause_sources and graph2 in clause_sources
if graph1 != primary_source:
if graph1 in clause_sources:
joined.add(graph1)
graph1 in sources and sources.remove(graph1)
if graph2 != primary_source:
if graph2 in clause_sources:
joined.add(graph2)
graph2 in sources and sources.remove(graph2)
if internal:
internal_condition.append(condition)
else:
external_condition.append(condition)
for (g, c), val in sorted(list(state.get_match_clause_restrictions(match_clause))):
internal_condition.append('%s.%s = %s' % (g, ss.sql_quote_ident(c), val))
where = self.where_clause_to_sql(match_clause.get_where_clause(), state)
if where:
internal_condition.append(where)
internal_condition = '\n AND '.join(internal_condition)
external_condition = '\n AND '.join(external_condition)
return sources, joined, internal_condition, external_condition
def with_clause_to_sql(self, with_clause, state):
"""Translate a 'WITH ... WHERE ...' clause which currently is primarily a vehicle to
communicate a global WHERE clause that applies across all match clauses, so for now
we only support 'WITH * ...'. But we do want to generalize this at some point, since
it gives us a way to chain queries and condition on aggregates, for example. Once we
do that, this needs to be generalized to take the translated query it wraps as an arg.
"""
if with_clause is None:
return ""
select = self.return_clause_to_sql_selection(with_clause, state)
if select != ('*', None):
raise Exception("unsupported WITH clause, only 'WITH * ...' is currently supported")
where = self.where_clause_to_sql(with_clause.where, state)
return where
def translate_to_sql(self):
"""Translate this query into an equivalent SQL expression.
Return the SQL as part of the accumulated final translation state.
"""
state = TranslationState(self)
# process strict and optional match clauses in order which is important to get
# the proper clause variable registration order; that way optional clauses that
# reference variables from earlier optional or strict clauses will join correctly:
self.init_match_clauses(state)
for match_clause in self.get_match_clauses():
state.set_match_clause(match_clause)
# translate clause top-level info such as variables and restrictions:
for clause in match_clause.get_pattern_clauses():
graph_alias = self.get_pattern_clause_graph_alias(clause)
self.pattern_clause_to_sql(clause, graph_alias, state)
# translate properties:
for clause in match_clause.get_pattern_clauses():
graph_alias = self.get_pattern_clause_graph_alias(clause)
self.pattern_clause_props_to_sql(clause, graph_alias, state)
# assemble SQL query:
query = io.StringIO()
select, group_by = self.return_clause_to_sql_selection(self.return_clause, state)
query.write('SELECT %s' % select)
# start with the mandatory strict match clause:
sources, joined, int_condition, ext_condition = self.match_clause_to_sql(self.match_clause, state)
aux_tables = list(state.get_match_clause_aux_tables(self.match_clause))
if len(sources) > 1 and not self.force:
raise Exception('match clause generates a cross-product which can be very expensive, use --force to override')
assert not ext_condition, 'INTERNAL ERROR: unexpected match clause'
where = []
query.write('\nFROM %s' % self.graph_names_to_sql(sources + aux_tables))
if joined:
query.write('\nINNER JOIN %s' % self.graph_names_to_sql(joined))
if int_condition:
if joined:
query.write('\nON %s' % int_condition)
else:
# we need to defer WHERE in case there are left joins:
where.append(int_condition)
# now add any left joins from optional match clauses:
for opt_clause in self.optional_clauses:
sources, joined, int_condition, ext_condition = self.match_clause_to_sql(opt_clause, state)
aux_tables = list(state.get_match_clause_aux_tables(opt_clause))
if len(sources) > 1 and not self.force:
raise Exception('optional clause generates a cross-product which can be very expensive, use --force to override')
nested = len(joined) > 0
query.write('\nLEFT JOIN %s%s' % (nested and '(' or '', self.graph_names_to_sql(sources + aux_tables)))
if nested:
query.write('\n INNER JOIN %s' % self.graph_names_to_sql(joined))
query.write('\n ON %s)' % int_condition.replace('\n', '\n '))
query.write('\nON %s' % ext_condition)
else:
query.write('\nON %s' % '\n AND '.join(listify(ext_condition) + listify(int_condition)))
# process any 'WITH * WHERE ...' clause to add to the global WHERE condition if necessary:
with_where = self.with_clause_to_sql(self.with_clause, state)
with_where and where.append(with_where)
# finally add WHERE clause from strict match and/or WITH clause in case there were any:
where and query.write('\nWHERE %s' % ('\n AND '.join(where)))
# add various other clauses:
group_by and query.write('\n' + group_by)
order = self.order_clause_to_sql(self.order_clause, state)
order and query.write('\n' + order)
limit = self.limit_clauses_to_sql(self.skip_clause, self.limit_clause, state)
limit and query.write('\n' + limit)
query = query.getvalue().replace(' TRUE\nAND', '')
query, parameters = state.replace_literal_parameters(query)
# logging:
rule = '-' * 45
self.log(1, 'SQL Translation:\n%s\n %s\n PARAS: %s\n%s'
% (rule, query.replace('\n', '\n '), parameters, rule))
state.set_sql(query)
state.set_parameters(parameters)
return state
def compute_auto_indexes(self, state):
"""Compute column indexes that are likely needed to run this query efficiently.
This is just an estimate based on columns involved in joins and restrictions.
"""
indexes = set()
for match_clause in self.get_match_clauses():
joins = state.get_match_clause_joins(match_clause)
restrictions = state.get_match_clause_restrictions(match_clause)
if len(joins) > 0:
for (g1, c1), (g2, c2) in joins:
indexes.add((self.graph_alias_to_graph(g1), c1))
indexes.add((self.graph_alias_to_graph(g2), c2))
if len(restrictions) > 0:
# even if we have joins, we might need additional indexes on restricted columns:
for (g, c), val in restrictions:
indexes.add((self.graph_alias_to_graph(g), c))
return indexes
def get_explicit_graph_index_specs(self):
"""Collect all explicit per-input index specs and return them
as an ordered dict of {<graph>: [index-spec+ ...] ...} items.
"""
explicit_index_specs = odict()
for file in self.files:
index_specs = self.get_input_option(file, 'index_specs')
if index_specs is not None:
graph = self.store.get_file_graph(file)
explicit_index_specs[graph] = index_specs
return explicit_index_specs
def get_default_graph_index_specs(self):
"""Collect all default index specs and return them as an ordered dict of
{<graph>: [index-spec+ ...] ...} items for all query graphs that do not
have any explicit index spec specified for them.
"""
explicit_index_specs = self.get_explicit_graph_index_specs()
default_index_specs = odict()
for graph, alias in self.get_all_match_clause_graphs():
if graph not in explicit_index_specs:
default_index_specs[graph] = listify(self.index_mode)
return default_index_specs
def ensure_indexes(self, graphs, index_specs, state, explain=False):
"""Ensure that for each graph in 'graphs' all indexes according to 'index_specs' are avaible.
'state' is the final translation state of the query.
"""
graphs = listify(graphs)
for index_spec in listify(index_specs):
index_spec = ss.get_normalized_index_mode(index_spec)
if isinstance(index_spec, list):
for spec in index_spec:
for graph in graphs:
self.store.ensure_graph_index(graph, ss.TableIndex(graph, spec), explain=explain)
elif index_spec == ss.INDEX_MODE_AUTO:
# build indexes as suggested by query joins and restrictions (restricted to 'graphs'):
for graph, column in self.compute_auto_indexes(state):
if graph in graphs:
# for now unconditionally restrict to core columns:
if column in self.get_edge_columns(graph):
#unique = column == self.get_id_column(graph)
# for now don't enforce uniqueness on edge IDs to support noisy data:
unique = False
self.store.ensure_graph_index_for_columns(graph, column, unique=unique, explain=explain)
elif index_spec == ss.INDEX_MODE_EXPERT:
# build indexes as suggested by the database (restricted to 'graphs'):
for name, graph, columns in self.store.suggest_indexes(state.get_sql()):
if graph in graphs:
# only consider first column, multi-column indexes can be specified manually:
column = columns[0]
#unique = column == self.get_id_column(graph)
# for now don't enforce uniqueness on edge IDs to support noisy data:
unique = False
self.store.ensure_graph_index_for_columns(graph, column, unique=unique, explain=explain)
elif index_spec == ss.INDEX_MODE_NONE:
pass
elif index_spec == ss.INDEX_MODE_CLEAR:
for graph in graphs:
self.store.drop_graph_indexes(graph)
elif index_spec == ss.INDEX_MODE_CLEAR_TEXT:
for graph in graphs:
self.store.drop_graph_indexes(graph, index_type=ss.TextIndex)
elif index_spec == ss.INDEX_MODE_AUTO_TEXT:
print(f'WARN: {index_spec} not yet implemented')
def ensure_relevant_indexes(self, state, explain=False):
"""Ensure that relevant indexes for this query are available on the database.
First creates any indexes explicitly specified on individual inputs. Then, for any
graphs referenced in the query that do not have an explicit index specification, create
indexes based on the default index_mode strategy. The default for that is 'auto' which
will use 'compute_auto_indexes' to decide what should be indexed. 'state' is assumed
to be the final translation state of the query. If 'explain' is True, do not actually
build any indexes, only show commands that would be executed.
"""
# NOTES
# - what we want is the minimal number of indexes that allow this query to run efficiently,
# since index creation itself is expensive in time and disk space
# - however, to do this right we need some approximate analysis of the query, e.g., for a join
# we'll generally only need an index on one of the involved columns, however, knowing for
# which one requires knowledge of table size, statistics and other selectivity of the query
# - skewed distribution of fanout in columns complicates this further, since an average
# fanout might be very different from maximum fanouts (e.g., for wikidata node2)
# - large fanouts might force us to use two-column indexes such as 'label/node2' and 'label/node1'
# - to handle this better, we will exploit the SQLite expert command to create (variants) of
# the indexes it suggests, since that often wants multi-column indexes which are expensive
# - we also need some manual control as well to force certain indexing patterns
# - we only index core columns for now, but we might have use cases where that is too restrictive
# TO DO: parse all index specs before we perform any actions to detect errors eagerly
for graph, index_specs in self.get_explicit_graph_index_specs().items():
self.ensure_indexes(graph, index_specs, state, explain=explain)
for graph, index_specs in self.get_default_graph_index_specs().items():
self.ensure_indexes(graph, index_specs, state, explain=explain)
def execute(self):
state = self.translate_to_sql()
self.ensure_relevant_indexes(state)
result = self.store.execute(state.get_sql(), state.get_parameters())
self.result_header = [self.unalias_column_name(c[0]) for c in result.description]
return result
def explain(self, mode='plan'):
state = self.translate_to_sql()
self.ensure_relevant_indexes(state, explain=True)
result = self.store.explain(state.get_sql(), parameters=state.get_parameters(), mode=mode)
return result
class TranslationState(object):
"""Accumulates and manages various state information during translation.
"""
def __init__(self, query):
self.query = query
self.literal_map = {} # maps Kypher literals onto parameter placeholders
self.variable_map = {} # maps Kypher variables onto representative (graph, col) SQL columns
self.alias_map = {} # maps tables to their aliases and vice versa
self.match_clause = None # match clause we are currently processing
self.match_clause_info = {} # maps match clauses onto joins, restrictions, etc. encountered
self.sql = None # final SQL translation of 'query'
self.parameters = None # final parameter values for translated query
def get_literal_map(self):
return self.literal_map
def get_variable_map(self):
return self.variable_map
def disable_variables(self):
"""Disable any variable processing (e.g., for contexts where they are illegal).
"""
self._saved_variable_map = self.variable_map
self.variable_map = None
def enable_variables(self):
"""Enable variable processing again.
"""
self.variable_map = self._saved_variable_map
def lookup_variable(self, variable, error=True):
"""Lookup 'variable' in the current variable map. Raise an error if variables
are currently disabled or if 'variable' is undefined (unless 'error' is False).
"""
if self.variable_map is None:
if error:
# for cases where external variables are not allowed (e.g. LIMIT):
raise Exception('Illegal context for variable: %s' % variable)
else:
return None
sql_vars = self.variable_map.get(variable)
if sql_vars is None and error:
raise Exception('Undefined variable: %s' % variable)
return sql_vars
# TO DO: handle all graph alias management from here (e.g., graph_alias_to_graph)
def register_table_alias(self, table, alias):
"""Record 'alias' as an alias for 'table'. Raise an error if the alias
is already used as a table name or as an alias for a different table.
"""
current = self.alias_map.get(alias)
if current is None:
self.alias_map[alias] = table
self.alias_map.setdefault(table, []).append(alias)
elif isinstance(current, list):
raise Exception(f'Alias {alias} already used as table name')
elif current != table:
raise Exception(f'Alias {alias} already in use for {current}')
def get_table_aliases(self, table, create_prefix=None):
"""Get the currently defined aliases for 'table'. If none are defined
and 'create_prefix' is provided, create a new alias based on that.
Otherwise, raise an error.
"""
aliases = self.alias_map.get(table)
if aliases is not None:
return aliases
elif create_prefix is not None:
for i in range(1, 1000000):
alias = f'{create_prefix}_{i}'
if self.alias_map.get(alias) is None:
self.register_table_alias(table, alias)
return self.alias_map[table]
raise Exception('Internal error: alias map exhausted')
else:
raise Exception(f'No aliases defined for {table}')
def get_match_clause(self):
"""Return the current match clause."""
return self.match_clause
def set_match_clause(self, clause):
"""Set the current match clause to 'clause'"""
self.match_clause = clause
def get_match_clause_joins(self, clause=None):
"""Returns all joins encounterd in this match 'clause' which maps equivalent
SQL column pairs (avoiding dupes and redundant flips). 'clause' defaults to
the current match clause.
"""
clause = clause or self.match_clause
info = self.match_clause_info.setdefault(clause, {})
joins = info.setdefault('joins', set())
return joins
def add_match_clause_join(self, var1, var2, clause=None):
"""Add a join between 'var1' and 'var2' to 'clause's joins.
'clause' defaults to the current match clause.
"""
self.get_match_clause_joins(clause=clause).add((var1, var2))
def get_match_clause_restrictions(self, clause=None):
"""Return all restrictions encountered in this match 'clause' which maps
(graph, col) SQL columns onto literal restrictions. 'clause' defaults to
the current match clause.
"""
clause = clause or self.match_clause
info = self.match_clause_info.setdefault(clause, {})
restrictions = info.setdefault('restrictions', set())
return restrictions
def add_match_clause_restriction(self, sqlvar, expression, clause=None):
"""Add a restriction of 'sqlvar' to 'expression' to 'clause's restrictions.
'clause' defaults to the current match clause.
"""
self.get_match_clause_restrictions(clause=clause).add((sqlvar, expression))
def get_match_clause_aux_tables(self, clause=None):
"""Return all auxiliary source tables encountered in this match 'clause'.
"""
clause = clause or self.match_clause
info = self.match_clause_info.setdefault(clause, {})
aux_tables = info.setdefault('aux_tables', set())
return aux_tables
def add_match_clause_aux_table(self, table, alias, clause=None):
"""Add auxiliary table 'table' with its 'alias' to 'clause's auxiliary tables.
'clause' defaults to the current match clause.
"""
self.get_match_clause_aux_tables(clause=clause).add((table, alias))
def get_sql(self):
return self.sql
def set_sql(self, sql):
self.sql = sql
def get_parameters(self):
return self.parameters
def set_parameters(self, params):
self.parameters = params
def get_literal_parameter(self, literal):
"""Return a parameter placeholder such as '?12?' that will be mapped to 'literal'
and will later be replaced with a query parameter at the appropriate position.
"""
litmap = self.get_literal_map()
if literal in litmap:
return litmap[literal]
else:
placeholder = '???%d??' % len(litmap)
litmap[literal] = placeholder
return placeholder
def replace_literal_parameters(self, raw_query):
"""Replace the named literal placeholders in 'raw_query' with positional
parameters and build a list of actual parameters to substitute for them.
"""
litmap = self.get_literal_map()
query = io.StringIO()
parameters = []
# reverse 'litmap' to map placeholders onto literal values:
litmap = {p: l for l, p in litmap.items()}
for token in re.split(r'\?\?', raw_query):
if token.startswith('?'):
parameters.append(litmap['??' + token + '??'])
token = '?'
query.write(token)
return query.getvalue(), parameters
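# Illustrative example (not from the original source): with a literal map of
# {'loves': '???0??'}, a raw query '... WHERE g1."label" = ???0??' is rewritten to
# '... WHERE g1."label" = ?' with parameters ['loves'].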
def register_clause_variable(self, query_var, sql_var, nojoins=False):
"""Register a reference to the Kypher variable 'query_var' which corresponds to the
SQL clause variable 'sql_var' represented as '(graph, column)' where 'graph' is a
table alias for the relevant graph specific to the current clause. If this is the
first reference to 'query_var', simply add it to the variable map. Otherwise, find the
best existing reference to equiv-join it with and record the necessary join in 'joins'
(unless 'nojoins' is True).
"""
varmap = self.get_variable_map()
sql_vars = varmap.get(query_var)
if sql_vars is None:
# we use a list here now to preserve the order which matters for optionals:
varmap[query_var] = [sql_var]
else:
# POLICY: we either find the earliest equivalent variable from the same clause
# (as in '(x)-[]->{x)'), or the earliest registered variable from a different
# clause, which is what we need to handle cross-clause references from optionals
# (assuming strict and optional match clauses are processed appropriately in order):
# NOTE: further optimizations might be possible here, e.g., we might want to prefer
# a self-join on the same column, since it might reduce the number of auto-indexes:
this_graph, this_col = sql_var
best_var = None
for equiv_var in sql_vars:
equiv_graph, equiv_col = equiv_var
if best_var is None:
best_var = equiv_var
elif this_graph == equiv_graph:
# we match on graph and clause, since clause is encoded in graph:
best_var = equiv_var
break
else:
# keep current earliest 'best_var':
pass
# not sure if they could ever be equal, but just in case:
if sql_var != best_var:
sql_var not in sql_vars and sql_vars.append(sql_var)
# we never join an alias with anything:
if this_graph != self.query.ALIAS_GRAPH:
equiv = [best_var, sql_var]
# normalize join order by order in 'sql_vars' so earlier vars come first:
equiv.sort(key=lambda v: sql_vars.index(v))
if not nojoins:
self.add_match_clause_join(equiv[0], equiv[1])
### Text match support
# This is a bit messy and idiosyncratic, so we are keeping it outside the regular translator.
TEXTMATCH_OPERATORS = {'TEXTMATCH': 'match', 'TEXTLIKE': 'like', 'TEXTGLOB': 'glob',
'MATCHSCORE': 'score', 'BM25': 'score'}
def is_text_match_operator(op):
"""Return True if 'op' is a special KGTK text search function.
"""
return str(op).upper() in TEXTMATCH_OPERATORS
def normalize_text_match_operator(op):
"""Normalize the textmatch operator 'op' to a logical string name.
"""
return TEXTMATCH_OPERATORS.get(str(op).upper())
def translate_text_match_op_to_sql(query, expr, state):
"""Translate a text match expression 'expr' into appropriate full-text index operations.
"""
# Unnamed invocations require the referenced variable to uniquely select a graph
# and associated text index; if there is ambiguity, an error will be raised:
# (1) --match '...(x)-[]->(l)...' --where '...textmatch(l, "foo bar")...' --return '...bm25(l)...'
# column-based match on 'l' only, requires 'l' to be associated with a unique graph and
# the graph be associated with a unique text index that indexes the node2 column
# (2) --match '...(x)-[r]->(l)...' --where '...textmatch(r, "foo bar")...' --return '...bm25(r)...'
# all-indexed-column-based match, requires 'r' to be associated with a unique graph and
# the graph to be associated with a unique text index, for the associated score function
# either r or l could be used as long as l is as discriminative as r
# (3) --match '...(x)-[r]->(l)...' --where '...textmatch(r.node2, "foo bar")...' --return '...bm25(r.node2)...'
# column-based match on node2 only, requires 'r' to be associated with a unique graph and
# the graph be associated with a unique text index that indexes the node2 column; can be used
# in case the associated l variable does not uniquely specify a text index
# (4) --match '...(x)-[r]->(l)...' --where '...textmatch(r.id, "foo bar")...' --return '...bm25(r.id)...'
# column-based match on id only, requires 'r' to be associated with a unique graph and
# the graph be associated with a unique text index that indexes the id column
#
# Named invocations specify the name of a specific index to use in addition to a graph variable.
# This can be used for explicit disambiguation, for example, if a graph has multiple text indexes
# associated with it. In this case the named index needs to be usable for the specified variable.
# Index names can be provided with a variable whose name should not match any match variables.
# We can then use property syntax if we want to specify specific match variables. In the examples
# below we assume we have a text index named 'myidx2'.
# (1) --match '...(x)-[]->(l)...' --where '...textmatch(myidx2.l, "foo bar")...' --return '...bm25(myidx2.l)...'
# column-based match on 'l' only, requires 'l' to be associated with the graph of myidx2
# and myidx2 to index the node2 column
# (2) --match '...(x)-[r]->(l)...' --where '...textmatch(myidx2.r, "foo bar")...' --return '...bm25(myidx2.r)...'
# all-indexed-column-based match, requires 'r' to be associated with the graph of myidx2
# (3) --match '...(x)-[r]->(l)...' --where '...textmatch(myidx2.r.node2, "foo bar")...' --return '...bm25(myidx2.r.node2)...'
# column-based match on node2 only, requires 'r' to be associated with the graph of myidx2
# and myidx2 to index the node2 column
# (4) --match '...(x)-[r]->(l)...' --where '...textmatch(myidx2.r.id, "foo bar")...' --return '...bm25(myidx2.r.id)...'
# column-based match on id only, requires 'r' to be associated with the graph of myidx2
# and myidx2 to index the node2 column
normfun = normalize_text_match_operator(expr.function)
if not normfun:
raise Exception(f"Unsupported text match function: {expr.function}")
arguments = expr.args
arity = len(arguments)
if arity < 1:
raise Exception(f"Missing arguments in {expr.function} expression")
# handle first argument which can be a variable or property expression (possibly starting with an index name):
# ('Variable', {'name': 'l'})
# ('Expression2', {'arg1': ('Variable', {'name': 'r'}), 'arg2': [('PropertyLookup', {'property': 'node2'})]})
# ('Expression2', {'arg1': ('Variable', {'name': 'myidx'}), 'arg2': [('PropertyLookup', {'property': 'r'})]})
# ('Expression2', {'arg1': ('Variable', {'name': 'myidx'}), 'arg2': [('PropertyLookup', {'property': 'r'}), ('PropertyLookup', {'property': 'node2'})]})
arg1 = arguments[0]
index_name = None
if isinstance(arg1, parser.Expression2) and isinstance(arg1.arg1, parser.Variable):
# we have a property reference, check if base variable is an index name:
sql_vars = state.lookup_variable(arg1.arg1.name, error=False)
if sql_vars is None:
index_name = arg1.arg1.name
props = arg1.arg2
arg1.arg1.name = props[0].property
arg1.arg2 = props[1:]
if len(props) == 1:
arg1 = arg1.arg1
# figure out the graph and column name:
# TO DO: for now we ignore the possibility of graph or column-ambiguous variables
# and simply go with the SQL translation, but this needs to be generalized:
if isinstance(arg1, parser.Variable):
# key in on Variableness to figure out whether this might refer to whole index or just a column:
graph_alias, column_name, sql = query.variable_to_sql(arg1, state)
elif isinstance(arg1, parser.Expression2) and isinstance(arg1.arg1, parser.Variable):
graph_alias, column_name, sql = query.property_to_sql(arg1, state)
else:
raise Exception(f"First argument to {expr.function} needs to be variable or property")
# find applicable indexes:
graph = query.graph_alias_to_graph(graph_alias)
if column_name == query.get_id_column(graph) and isinstance(arg1, parser.Variable):
# we have an all-indexed-columns match:
column = None
else:
# we have a column-specific match:
column = column_name
indexes = find_matching_text_indexes(query, graph, index_name, column)
if len(indexes) == 0:
# for now, until we support mode:autotext:
raise Exception(f"No usable text index found for {expr.function}")
elif len(indexes) > 1:
raise Exception(f"Multiple applicable text indexes found for {expr.function}")
# generate the SQL translation:
index = indexes[0]
index_table = index.get_name()
index_alias = state.get_table_aliases(index_table, create_prefix='txtidx')[0]
index_column = index_table
if not (column_name == query.get_id_column(graph) and isinstance(arg1, parser.Variable)):
# we have a column-specific search:
index_column = column_name
index_column = ss.sql_quote_ident(index_column)
state.add_match_clause_aux_table(index_table, index_alias)
if normfun in ('match', 'like', 'glob'):
if arity != 2:
raise Exception(f"Extraneous {expr.function} arguments")
operator = normfun.upper()
arg2 = query.expression_to_sql(arguments[1], state)
return f'{index_alias}.{index_column} {operator} {arg2} and {index_alias}.rowid = {graph_alias}.rowid'
elif normfun == 'score':
# scoring function always needs the special table column, even for column-based match:
return f'BM25({index_alias}.{index_table})'
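# Illustrative results (a sketch; the text index table 'graph_1_txt_1' and the
# aliases are hypothetical):
#   textmatch(l, "foo bar") translates roughly to
#     txtidx_1."node2" MATCH <param> and txtidx_1.rowid = graph_1_c1.rowid
#   bm25(l) translates to BM25(txtidx_1.graph_1_txt_1)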
def get_implied_text_indexes(query, graph):
"""Return text indexes that will be available at query time implied by
current indexes plus state changes and index creations from query options.
Does not actually create any indexes or change associated info tables.
"""
index_specs = query.get_explicit_graph_index_specs().get(graph)
index_specs = query.get_default_graph_index_specs().get(graph) if index_specs is None else index_specs
current_indexes = [idx for idx in query.store.get_graph_indexes(graph) if isinstance(idx, ss.TextIndex)]
explicit_indexes = []
for index_spec in index_specs:
index_spec = ss.get_normalized_index_mode(index_spec)
if isinstance(index_spec, list):
for spec in index_spec:
index = ss.TableIndex(graph, spec)
if isinstance(index, ss.TextIndex):
explicit_indexes.append(index)
elif index_spec in (ss.INDEX_MODE_CLEAR, ss.INDEX_MODE_CLEAR_TEXT):
explicit_indexes = []
current_indexes = []
for expidx in explicit_indexes[:]:
for curidx in current_indexes[:]:
if expidx.redefines(curidx):
current_indexes.remove(curidx)
elif curidx.subsumes(expidx):
explicit_indexes.remove(expidx)
return explicit_indexes + current_indexes
def find_matching_text_indexes(query, graph, index_name, column):
"""Find text indexes that can be used to match againt 'graph', 'index_name' and 'column'.
Both 'index_name' and 'column' may be None. Considers any current indexes as well as
state changes and index creations implied by 'query' index options, but does not actually
create any indexes or change associated info tables.
"""
indexes = []
for idx in get_implied_text_indexes(query, graph):
if ((index_name is None or idx.index.options.get('name') == index_name)
and (column is None or column in idx.index.columns)):
indexes.append(idx)
return indexes
"""
>>> store = cq.SqliteStore('/tmp/graphstore.sqlite3.db', create=True)
>>> graph = '/home/hans/Documents/kgtk/code/kgtk/kgtk/kypher/.work/data/graph.tsv'
>>> query = cq.KgtkQuery(graph, store, match='(a)-[:loves]->(b)')
>>> list(query.execute())
[('Hans', 'loves', 'Molly', 'e11'), ('Otto', 'loves', 'Susi', 'e12'), ('Joe', 'loves', 'Joe', 'e14')]
>>> query = cq.KgtkQuery(graph, store, match='(a)-[:loves]->(b)-[:loves]->(a)')
>>> list(query.execute())
[('Joe', 'loves', 'Joe', 'e14', 'Joe', 'loves', 'Joe', 'e14')]
>>> query = cq.KgtkQuery(graph, store, match='(a)-[:loves]->(a)-[:loves]->(a)')
>>> list(query.execute())
[('Joe', 'loves', 'Joe', 'e14', 'Joe', 'loves', 'Joe', 'e14')]
>>> query = cq.KgtkQuery(graph, store, loglevel=1,
match='g: (a)-[:loves]->(a), (a)-[r2:name]->(n)')
>>> list(query.execute())
SQL: SELECT *
FROM graph_1 graph_1_c1, graph_1 graph_1_c2
WHERE graph_1_c1."label"=?
AND graph_1_c2."label"=?
AND graph_1_c1."node1"=graph_1_c1."node2"
AND graph_1_c1."node1"=graph_1_c2."node1"
PARAS: ['loves', 'name']
[('Joe', 'loves', 'Joe', 'e14', 'Joe', 'name', '"Joe"', 'e23')]
>>>
# return clause translation:
>>> query = cq.KgtkQuery(graph, store, loglevel=1,
match='g: (a)-[:loves]->(a), (a)-[r2:name]->(n)',
ret="distinct a as node1, 'loves' as label, n as node2, r2 as id")
>>> cp.pp.pprint(query.return_clause.to_tree())
( 'Return',
{ 'distinct': False,
'items': [ ( 'ReturnItem',
{ 'expression': ('Variable', {'name': 'a'}),
'name': 'node1'}),
( 'ReturnItem',
{ 'expression': ( 'Expression2',
{ 'arg1': ( 'Variable',
{'name': 'r2'}),
'arg2': [ ( 'PropertyLookup',
{ 'property': 'label'})]}),
'name': 'label'}),
( 'ReturnItem',
{ 'expression': ('Variable', {'name': 'n'}),
'name': 'node2'}),
( 'ReturnItem',
{ 'expression': ('Variable', {'name': 'r2'}),
'name': 'id'})]})
>>> list(query.execute())
SQL: SELECT DISTINCT graph_1_c2."node1" "node1", ? "label", graph_1_c2."node2" "node2", graph_1_c2."id" "id"
FROM graph_1 graph_1_c1, graph_1 graph_1_c2
WHERE graph_1_c1."label"=?
AND graph_1_c2."label"=?
AND graph_1_c1."node1"=graph_1_c1."node2"
AND graph_1_c1."node1"=graph_1_c2."node1"
PARAS: ['loves', 'loves', 'name']
[('Joe', 'loves', '"Joe"', 'e23')]
>>> query.result_header
['node1', 'label', 'node2', 'id']
"""
|
<filename>lib/postgap/MeSH.py
#! /usr/bin/env python
"""
Copyright [1999-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
"""
import sys
import requests
import xmltodict
import postgap.Globals
NCBI_Taxon_ID = {
'Human': 9606
}
def gene_to_MeSH(gene):
"""
Look up MeSH annotations for a gene
Args:
* [ string ] (gene names)
Return type: [ string ] (annotations)
"""
server = "http://gene2mesh.ncibi.org"
ext = "/fetch?genesymbol=%s&taxid=%s" % (gene.name, NCBI_Taxon_ID[postgap.Globals.SPECIES])
print '>>>>>>>>>>>>>>>>>>'
print str(server)+str(ext)
print '>>>>>>>>>>>>>>>>>>'
response = requests.get(str(server)+str(ext))
if not response.ok:
sys.stderr.write("Failed to get proper response to query %s%s\n" % (server, ext) )
sys.stderr.write(response.content + "\n")
response.raise_for_status()
print repr(response)
'''
Example MeSH output:
{
'Gene2MeSH': {
'Request': {
'ParameterSet': {
'Tool': 'none',
'GeneSymbol': 'csf1r',
'TaxonomyID': '9606',
'Limit': '1000',
'Email': 'anonymous'
},
'type': 'fetch'
},
'Response': {
'ResultSet': {
'Result': [
{
'FisherExact': {
'content': '1.8531319238671E-230',
'type': 'p-value'
},
'ChiSquare': '112213.6506462',
'Fover': '1498.1813411401',
'MeSH': {
'Qualifier': {
'Name': 'metabolism'
},
'Descriptor': {
'TreeNumber': [
'D08.811.913.696.620.682.725.400.500',
'D12.776.543.750.060.492',
'D12.776.543.750.705.852.150.150',
'D12.776.543.750.750.400.200.200',
'D12.776.624.664.700.800',
'D23.050.301.264.035.597',
'D23.101.100.110.597'
],
'Identifier': 'D016186',
'Name': 'Receptor, Macrophage Colony-Stimulating Factor',
'UMLSID': {}
}
},
'DocumentSet': {
'type': 'pubmed',
'PMID': [
]
},
'Gene': {
'Taxonomy': {
'Identifier': '9606'
},
'Identifier': '1436',
'type': 'Entrez',
'Description': 'colony stimulating factor 1 receptor',
'Symbol': 'CSF1R'
}
},
],
'sort': 'FisherExact',
'count': '94',
'order': 'ascending'
},
'Copyright': {
'Details': 'http://nlp.ncibi.org/Copyright.txt',
'Year': '2009',
'Statement': 'Copyright 2009 by the Regents of the University of Michigan'
},
'Support': {
'Details': 'http://www.ncibi.org',
'GrantNumber': 'U54 DA021519',
'Statement': 'Supported by the National Institutes of Health as part of the NIH\\\'s National Center for Integrative Biomedical Informatics (NCIBI)'
}
}
}
}
'''
print response.content
if len(response.content):
try:
parsed = xmltodict.parse(response.content)
print repr(parsed)
hits = parsed['Gene2MeSH']['Response']['ResultSet']['Result']
# XML subtlety: if a tag is repeated, a list of objects is produced,
# else a single object. Careful when unpacking!
if isinstance(hits, list):
return [hit['MeSH']['Descriptor']['Name'] for hit in hits]
else:
return [hits['MeSH']['Descriptor']['Name']]
except Exception:
return []
else:
return []
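# Illustrative usage sketch (not part of the original module; the gene object and the
# species setting below are assumptions):
#
#   postgap.Globals.SPECIES = 'Human'
#   class ExampleGene(object):
#       name = 'CSF1R'
#   annotations = gene_to_MeSH(ExampleGene())
#   # expected to be a list of MeSH descriptor names, e.g.
#   # ['Receptor, Macrophage Colony-Stimulating Factor', ...]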
|
<filename>taw_rlg_rest/TawRlgRest.py
from __future__ import print_function
import requests
import time
import httplib2
import os
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
from xlsxwriter.utility import xl_rowcol_to_cell
from xml.etree import ElementTree
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
SCOPES = 'https://www.googleapis.com/auth/spreadsheets.readonly'
CLIENT_SECRET_FILE = 'D:\\1TAW\\client_secret.json'
APPLICATION_NAME = 'Google Sheets API Python Quickstart'
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'sheets.googleapis.com-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def remove_non_ascii(string):
return ''.join([i if ord(i) < 128 else ' ' for i in string])
class TawRlgRest(object):
def __init__(self, query_url, result_url, api_key, taw_group_url):
self.taw_group_url = taw_group_url
self.query_url = query_url
self.result_url = result_url
self.player_id_list = []
self.api_key = str(api_key)
self.player_stats = {}
self.tier_names = {
0: 'Unranked',
1: 'BronzeI',
2: 'BronzeII',
3: 'BronzeIII',
4: 'SilverI',
5: 'SilverII',
6: 'SilverIII',
7: 'GoldI',
8: 'GoldII',
9: 'GoldIII',
10: 'PlatinumI',
11: 'PlatinumII',
12: 'PlatinumIII',
13: 'DiamondI',
14: 'DiamondII',
15: 'DiamondIII',
16: 'ChampionI',
17: 'ChampionII',
18: 'ChampionIII',
19: 'Grand Champion'
}
self.division_names = {
0: 'DivisionI',
1: 'DivisionII',
2: 'DivisionIII',
3: 'DivisionIV'
}
def get_taw_player_ids(self):
response = requests.get(self.taw_group_url)
tree = ElementTree.ElementTree(ElementTree.fromstring(response.content))
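# child index 6 of the memberList XML root is presumably the <members> element
# (an assumption; the preceding children are group metadata such as groupID64 and memberCount)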
root = tree.getroot()[6]
for memberID in root.iter('steamID64'):
self.player_id_list.append(memberID.text)
def retrieve_player_stats(self):
for player_id in self.player_id_list:
print(player_id)
params = {'unique_id': player_id, 'platform_id': '1'}
headers = {'Authorization': self.api_key}
response = requests.get(self.query_url, params=params, headers=headers)
if response.status_code == requests.codes.ok:
data = response.json()
self.player_stats.setdefault(
data['uniqueId'], {'overall_stats': [data['displayName'],
data['profileUrl'],
data['stats']['wins'],
data['stats']['goals'],
data['stats']['mvps'],
data['stats']['saves'],
data['stats']['shots'],
data['stats']['assists']],
'1v1': {'MMR': 0, 'Matches': 0, 'Tier': 0, 'Division': 0},
'2v2': {'MMR': 0, 'Matches': 0, 'Tier': 0, 'Division': 0},
'Solo3v3': {'MMR': 0, 'Matches': 0, 'Tier': 0, 'Division': 0},
'Standard3v3': {'MMR': 0, 'Matches': 0, 'Tier': 0, 'Division': 0}}
)
if len(data['rankedSeasons']) > 0:
max_season = max([int(x) for x in data['rankedSeasons'].keys()])
if max_season > 3:
if '10' in data['rankedSeasons'][str(max_season)]:
self.player_stats[data['uniqueId']]['1v1']['MMR'] = data['rankedSeasons'][str(max_season)]['10']['rankPoints']
self.player_stats[data['uniqueId']]['1v1']['Matches'] = data['rankedSeasons'][str(max_season)]['10']['matchesPlayed']
self.player_stats[data['uniqueId']]['1v1']['Tier'] = data['rankedSeasons'][str(max_season)]['10']['tier']
self.player_stats[data['uniqueId']]['1v1']['Division'] = data['rankedSeasons'][str(max_season)]['10']['division']
if '11' in data['rankedSeasons'][str(max_season)]:
self.player_stats[data['uniqueId']]['2v2']['MMR'] = data['rankedSeasons'][str(max_season)]['11']['rankPoints']
self.player_stats[data['uniqueId']]['2v2']['Matches'] = data['rankedSeasons'][str(max_season)]['11']['matchesPlayed']
self.player_stats[data['uniqueId']]['2v2']['Tier'] = data['rankedSeasons'][str(max_season)]['11']['tier']
self.player_stats[data['uniqueId']]['2v2']['Division'] = data['rankedSeasons'][str(max_season)]['11']['division']
if '12' in data['rankedSeasons'][str(max_season)]:
self.player_stats[data['uniqueId']]['Solo3v3']['MMR'] = data['rankedSeasons'][str(max_season)]['12']['rankPoints']
self.player_stats[data['uniqueId']]['Solo3v3']['Matches'] = data['rankedSeasons'][str(max_season)]['12']['matchesPlayed']
self.player_stats[data['uniqueId']]['Solo3v3']['Tier'] = data['rankedSeasons'][str(max_season)]['12']['tier']
self.player_stats[data['uniqueId']]['Solo3v3']['Division'] = data['rankedSeasons'][str(max_season)]['12']['division']
if '13' in data['rankedSeasons'][str(max_season)]:
self.player_stats[data['uniqueId']]['Standard3v3']['MMR'] = data['rankedSeasons'][str(max_season)]['13']['rankPoints']
self.player_stats[data['uniqueId']]['Standard3v3']['Matches'] = data['rankedSeasons'][str(max_season)]['13']['matchesPlayed']
self.player_stats[data['uniqueId']]['Standard3v3']['Tier'] = data['rankedSeasons'][str(max_season)]['13']['tier']
self.player_stats[data['uniqueId']]['Standard3v3']['Division'] = data['rankedSeasons'][str(max_season)]['13']['division']
else:
if '10' in data['rankedSeasons'][str(max_season)]:
self.player_stats[data['uniqueId']]['1v1']['MMR'] = data['rankedSeasons'][str(max_season)]['10']['rankPoints']
if '11' in data['rankedSeasons'][str(max_season)]:
self.player_stats[data['uniqueId']]['2v2']['MMR'] = data['rankedSeasons'][str(max_season)]['11']['rankPoints']
if '12' in data['rankedSeasons'][str(max_season)]:
self.player_stats[data['uniqueId']]['Solo3v3']['MMR'] = data['rankedSeasons'][str(max_season)]['12']['rankPoints']
if '13' in data['rankedSeasons'][str(max_season)]:
self.player_stats[data['uniqueId']]['Standard3v3']['MMR'] = data['rankedSeasons'][str(max_season)]['13']['rankPoints']
else:
print('Error: status code of {} for player {}'.format(response.status_code, player_id))
time.sleep(0.75)
def update_local_player_stats(self):
sheet_headers = ['\"SteamName', 'SteamProfileLink', 'TrackerLink',
'1v1 Matches', '1v1 MMR', '1v1 Tier', '1v1 Division',
'2v2 Matches', '2v2 MMR', '2v2 Tier', '2v2 Division',
'Solo 3v3 Matches', 'Solo 3v3 MMR', 'Solo 3v3 Tier', 'Solo 3v3 Division',
'Standard 3v3 Matches', 'Standard 3v3 MMR', 'Standard 3v3 Tier', 'Standard 3v3 Division',
'Wins', 'Goals', 'MVPs', 'Saves', 'Shots', 'Assists\"']
data = [sheet_headers]
for k, d in self.player_stats.items():
row_data = ['\"' + str(d['overall_stats'][0]),
'=HYPERLINK(\"\"http://steamcommunity.com/profiles/{}\"\", \"\"Steam Profile\"\")'.format(k),
'=HYPERLINK(\"\"{}\"\", \"\"Tracker Profile\"\")'.format(d['overall_stats'][1]),
d['1v1']['Matches'], d['1v1']['MMR'], self.tier_names[d['1v1']['Tier']],
self.division_names[d['1v1']['Division']],
d['2v2']['Matches'], d['2v2']['MMR'], self.tier_names[d['2v2']['Tier']],
self.division_names[d['2v2']['Division']],
d['Solo3v3']['Matches'], d['Solo3v3']['MMR'], self.tier_names[d['Solo3v3']['Tier']],
self.division_names[d['Solo3v3']['Division']],
d['Standard3v3']['Matches'], d['Standard3v3']['MMR'], self.tier_names[d['Standard3v3']['Tier']],
self.division_names[d['Standard3v3']['Division']],
d['overall_stats'][2], d['overall_stats'][3], d['overall_stats'][4], d['overall_stats'][5],
d['overall_stats'][6], str(d['overall_stats'][7]) + '\"']
if row_data[5] == 'Unranked':
row_data[6] = 'Unranked'
if row_data[9] == 'Unranked':
row_data[10] = 'Unranked'
if row_data[13] == 'Unranked':
row_data[14] = 'Unranked'
if row_data[17] == 'Unranked':
row_data[18] = 'Unranked'
data.append(row_data)
with open('D:\\1TAW\\current_player_stats.csv', 'w') as out:
for row in data:
try:
out.write('\",\"'.join([str(x) for x in row]) + '\n')
except UnicodeEncodeError as e:
print(row, e)
for i, x in enumerate(row):
row[i] = remove_non_ascii(str(x))
out.write('\",\"'.join(row) + '\n')
def update_remote_player_stats(self):
# Not yet working
sheet_headers = ['SteamName', 'SteamProfileLink', 'TrackerLink',
'1v1 Matches', '1v1 MMR', '1v1 Tier', '1v1 Division',
'2v2 Matches', '2v2 MMR', '2v2 Tier', '2v2 Division',
'Solo 3v3 Matches', 'Solo 3v3 MMR', 'Solo 3v3 Tier', 'Solo 3v3 Division',
'Standard 3v3 Matches', 'Standard 3v3 MMR', 'Standard 3v3 Tier', 'Standard 3v3 Division',
'Wins', 'Goals', 'MVPs', 'Saves', 'Shots', 'Assists']
data = [sheet_headers]
for k, d in self.player_stats.items():
row_data = [d['overall_stats'][0], k, '=HYPERLINK({})'.format(d['overall_stats'][1]),
d['1v1']['Matches'], d['1v1']['MMR'], self.tier_names[d['1v1']['Tier']], self.division_names[d['1v1']['Division']],
d['2v2']['Matches'], d['2v2']['MMR'], self.tier_names[d['2v2']['Tier']], self.division_names[d['2v2']['Division']],
d['Solo3v3']['Matches'], d['Solo3v3']['MMR'], self.tier_names[d['Solo3v3']['Tier']], self.division_names[d['Solo3v3']['Division']],
d['Standard3v3']['Matches'], d['Standard3v3']['MMR'], self.tier_names[d['Standard3v3']['Tier']], self.division_names[d['Standard3v3']['Division']],
d['overall_stats'][2], d['overall_stats'][3], d['overall_stats'][4], d['overall_stats'][5],
d['overall_stats'][6], d['overall_stats'][7]]
data.append(row_data)
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
'version=v4')
service = discovery.build('sheets', 'v4', http=http,
discoveryServiceUrl=discoveryUrl)
cell_range_end = xl_rowcol_to_cell(len(self.player_stats)+1, 25)
rangeName = 'Sheet1!A1:' + cell_range_end
result = service.spreadsheets().values().update(
spreadsheetId=self.result_url, range=rangeName, body=data).execute()
#values = result.update('values', data)
# if not values:
# print('No data found.')
# else:
# print('Name, Major:')
# for row in values:
# # Print columns A and E, which correspond to indices 0 and 4.
# print('%s, %s' % (row[0], row[4]))
if __name__ == '__main__':
with open('D:\\1TAW\\api.txt', 'r') as f:
k = f.read().strip()
with open('D:\\1TAW\\spreadsheet_id.txt', 'r') as f:
sid = f.read().strip()
tr = TawRlgRest(r'https://api.rocketleaguestats.com/v1/player', sid, k,
r'http://steamcommunity.com/groups/TAWRLG/memberslistxml?xml=1') |
#!/usr/bin/env python
import os
import time
import traceback
from argparse import ArgumentParser
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from utils import (configure_logging, load_checkpoint, load_image_names,
load_images, load_model)
def parse_arguments():
parser = ArgumentParser()
parser.add_argument("--eval-dir", type=str, required=True,
help="Directory of the evaluation to test (output)")
parser.add_argument("--model-name", type=str, required=True,
help="Name of the model to instantiate")
parser.add_argument("--epoch", type=int, required=True,
help="The epoch of the model to load")
parser.add_argument("--test-data", type=str, required=True,
help="Directory of the data to test on")
parser.add_argument("--match-pattern", type=str, default=None,
help="Pattern for files to match")
parser.add_argument("--batch-size", type=int, default=16,
help="The number of samples in one batch")
parser.add_argument("--score-name", type=str, default=None,
help="Optional, name for the file to store scores")
parser.add_argument("--cycle", action="store_true",
help="Specify if the second generator of a CycleGAN should be used")
return parser.parse_args()
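# Illustrative invocation (directory, model and file names are hypothetical):
#   python <this_script>.py --eval-dir 2020-01-01_run --model-name pix2pix \
#       --epoch 50 --test-data data/test --batch-size 16 --score-name scores.csv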
def main(start_time):
tf.enable_eager_execution()
# handle arguments and config
args = parse_arguments()
args.start_time = start_time
args.noise_dimensions = None
args.has_colored_target = False
args.output_dir = os.path.join("output", args.eval_dir)
if not os.path.exists(args.output_dir):
args.output_dir = os.path.join("old-output", args.eval_dir)
args.checkpoint_dir = os.path.join(args.output_dir, "checkpoints")
tf.logging.info("Args: {}".format(args))
model = load_model(args)
generator = model.get_generator()
if args.cycle:
tf.logging.warning("Loading second generator of CycleGAN")
load_checkpoint(args, checkpoint_number=(args.epoch+24)//25, second_generator=generator)
else:
load_checkpoint(args, checkpoint_number=(args.epoch+24)//25, generator=generator)
image_names = load_image_names(args.test_data, args.match_pattern)
input_images = load_images(image_names, args.test_data, "image")
target_images = load_images(image_names, args.test_data, "patho")
target_data_set = tf.data.Dataset.from_tensor_slices((input_images, target_images)).batch(args.batch_size)
tf.logging.info("Computing segmentation score over {} images in batches of {}".format(len(target_images), args.batch_size))
scores = list()
all_tp = 0
all_fp = 0
all_fn = 0
with tqdm(total=len(target_images)) as pbar:
for batch in target_data_set:
inputs, targets = batch
pbar.update(inputs.shape[0].value)
generated_images = generator(inputs, training=False)
predicted = tf.cast(generated_images >= 0, tf.uint8)
actual = tf.cast(targets >= 0, tf.uint8)
tp = tf.count_nonzero(predicted * actual)
fp = tf.count_nonzero(predicted * (actual - 1))
fn = tf.count_nonzero((predicted - 1) * actual)
all_tp += tp
all_fp += fp
all_fn += fn
precision = tp / (tp + fp)
recall = tp / (tp + fn)
f1 = (2 * precision * recall / (precision + recall)).numpy()
if not np.isnan(f1):
scores.append(f1)
precision = all_tp / (all_tp + all_fp)
recall = all_tp / (all_tp + all_fn)
f1 = (2 * precision * recall / (precision + recall)).numpy()
tf.logging.info("Segmentation score: {:.3f} ({:.3}+-{:.3})".format(f1, np.mean(scores), np.std(scores)))
tf.logging.info("TP {}, FP {}, FN {}".format(all_tp, all_fp, all_fn))
if args.score_name:
with open(os.path.join(args.output_dir, args.score_name), "w") as fh:
fh.write("{}\n".format(",".join([str(s) for s in scores])))
if __name__ == "__main__":
START_TIME = time.time()
np.random.seed(42)
configure_logging()
try:
main(START_TIME)
except Exception as ex:
tf.logging.fatal("Exception occurred: {}".format(traceback.format_exc()))
finally:
tf.logging.info("Finished eval after {:.1f}m".format((time.time() - START_TIME) / 60))
|
#!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import collections
import itertools
import os
import math
import torch
import time
import ctypes
from copy import deepcopy
from functools import reduce
from fairseq import data, distributed_utils, options, progress_bar, tasks, utils, bleu, tokenizer
from fairseq.fp16_trainer import FP16Trainer
from fairseq.trainer import Trainer
from fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter
from fairseq.sequence_generator import SequenceGenerator
from fairseq.data import dictionary
from mlperf_compliance import mlperf_log
def main(args):
if not torch.cuda.is_available():
raise NotImplementedError('Training on CPU is not supported')
torch.cuda.set_device(args.device_id)
from mlperf_compliance.mlperf_log import transformer_print
transformer_print(key=mlperf_log.RUN_CLEAR_CACHES) #before this tag we should run clearing caches on the host
# mlperf compliance synchronization
if args.distributed_world_size > 1:
assert(torch.distributed.is_initialized())
torch.distributed.broadcast(torch.tensor([1], device="cuda"), 0)
torch.cuda.synchronize()
transformer_print(key=mlperf_log.RUN_START)
if args.max_tokens is None:
args.max_tokens = 6000
print(args)
transformer_print(key=mlperf_log.OPT_NAME, value=args.optimizer)
transformer_print(key=mlperf_log.OPT_LR, value=args.lr)
transformer_print(key=mlperf_log.OPT_HP_ADAM_BETA1, value=eval(args.adam_betas)[0])
transformer_print(key=mlperf_log.OPT_HP_ADAM_BETA2, value=eval(args.adam_betas)[1])
transformer_print(key=mlperf_log.OPT_HP_ADAM_EPSILON, value=args.adam_eps)
pValue = ctypes.cast((ctypes.c_int * 1)(), ctypes.POINTER(ctypes.c_int))
result = torch.cuda.cudart().cudaDeviceSetLimit(ctypes.c_int(0x05), ctypes.c_int(128))
result = torch.cuda.cudart().cudaDeviceGetLimit(pValue, ctypes.c_int(0x05))
torch.manual_seed(args.seed)
transformer_print(key=mlperf_log.RUN_SET_RANDOM_SEED, value=args.seed)
# Setup task, e.g., translation, language modeling, etc.
task = tasks.setup_task(args)
transformer_print(key=mlperf_log.MODEL_HP_SEQ_BEAM_SEARCH,
value={'alpha':args.lenpen,
'beam_size':args.beam,
'extra_decode_length':args.max_len_b,
'vocab_size':task.target_dictionary.__len__()
}
)
# Load dataset splits
load_dataset_splits(task, ['train', 'valid'])
# Build model and criterion
model = task.build_model(args)
criterion = task.build_criterion(args)
print('| model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))
print('| num. model params: {}'.format(sum(p.numel() for p in model.parameters())))
# Build trainer
if args.fp16:
trainer = FP16Trainer(args, task, model, criterion)
else:
if torch.cuda.get_device_capability(0)[0] >= 7:
print('| NOTICE: your device may support faster training with --fp16')
trainer = Trainer(args, task, model, criterion)
if (args.online_eval or args.target_bleu) and not args.remove_bpe:
args.remove_bpe='@@ '
print('| training on {} GPUs'.format(args.distributed_world_size))
print('| max tokens per GPU = {} and max sentences per GPU = {}'.format(
args.max_tokens,
args.max_sentences,
))
transformer_print(key=mlperf_log.INPUT_BATCH_SIZE, value=args.max_tokens)
transformer_print(key=mlperf_log.INPUT_ORDER)
# Initialize dataloader
max_positions = trainer.get_model().max_positions()
# Send a dummy batch to warm the caching allocator
dummy_batch = task.dataset('train').get_dummy_batch(args.max_tokens, max_positions)
trainer.dummy_train_step(dummy_batch)
# Train until the learning rate gets too small or model reaches target score
max_epoch = args.max_epoch or math.inf
max_update = args.max_update or math.inf
tgt_bleu = args.target_bleu or math.inf
current_bleu = 0.0
lr = trainer.get_lr()
train_meter = StopwatchMeter()
train_meter.start()
valid_losses = [None]
valid_subsets = args.valid_subset.split(',')
ctr = 0
class DummyEpochBatchIterator:
def __init__(self, epoch=0):
self.epoch = epoch
epoch_itr = DummyEpochBatchIterator(0)
transformer_print(key=mlperf_log.TRAIN_LOOP)
while lr >= args.min_lr and epoch_itr.epoch < max_epoch and trainer.get_num_updates() < max_update and current_bleu < tgt_bleu:
transformer_print(key=mlperf_log.TRAIN_EPOCH, value=epoch_itr.epoch)
import time
start = time.time()
epoch_itr = data.EpochBatchIterator(
dataset=task.dataset(args.train_subset),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences_valid,
max_positions=max_positions,
ignore_invalid_inputs=True,
required_batch_size_multiple=8,
seed=args.seed,
num_shards=args.distributed_world_size,
shard_id=args.distributed_rank,
epoch=epoch_itr.epoch if ctr != 0 else 0
)
print("got epoch iterator", time.time() - start)
# Load the latest checkpoint if one is available
if ctr == 0:
load_checkpoint(args, trainer, epoch_itr)
# train for one epoch
start = time.time()
train(args, trainer, task, epoch_itr)
print("epoch time ", time.time() - start)
start = time.time()
if epoch_itr.epoch % args.validate_interval == 0:
valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
# Eval BLEU score
transformer_print(key=mlperf_log.EVAL_START, value=epoch_itr.epoch)
if args.online_eval or (tgt_bleu is not math.inf):
current_bleu = score(args, trainer, task, epoch_itr, args.gen_subset)
transformer_print(key=mlperf_log.EVAL_ACCURACY, value={'epoch':epoch_itr.epoch, 'value':current_bleu})
transformer_print(key=mlperf_log.EVAL_TARGET, value=tgt_bleu)
transformer_print(key=mlperf_log.EVAL_STOP, value=epoch_itr.epoch)
# Only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
# Save checkpoint
if epoch_itr.epoch % args.save_interval == 0:
save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
ctr = ctr + 1
print("validation and scoring ", time.time() - start)
train_meter.stop()
transformer_print(key=mlperf_log.RUN_STOP)
transformer_print(key=mlperf_log.RUN_FINAL)
print('| done training in {:.1f} seconds'.format(train_meter.sum))
def train(args, trainer, task, epoch_itr):
"""Train the model for one epoch."""
# Initialize data iterator
itr = epoch_itr.next_epoch_itr()
progress = progress_bar.build_progress_bar(args, itr, epoch_itr.epoch, no_progress_bar='simple')
# update parameters every N batches
if epoch_itr.epoch <= len(args.update_freq):
update_freq = args.update_freq[epoch_itr.epoch - 1]
else:
update_freq = args.update_freq[-1]
if args.enable_parallel_backward_allred_opt and update_freq > 1:
raise RuntimeError('--enable-parallel-backward-allred-opt is incompatible with --update-freq > 1')
extra_meters = collections.defaultdict(lambda: AverageMeter())
first_valid = args.valid_subset.split(',')[0]
max_update = args.max_update or math.inf
num_batches = len(epoch_itr)
#begin = time.time()
#inside = 0
for i, sample in enumerate(progress, start=epoch_itr.iterations_in_epoch):
#newbegin = time.time()
#print("iter time", newbegin - begin, inside, (newbegin - begin - inside)*1000)
#begin = newbegin
if i < num_batches - 1 and (i + 1) % update_freq > 0:
# buffer updates according to --update-freq
trainer.train_step(sample, update_params=False, last_step=(i == len(itr)-1))
continue
else:
log_output = trainer.train_step(sample, update_params=True, last_step=(i == len(itr)-1))
# log mid-epoch stats
stats = get_training_stats(trainer)
for k, v in log_output.items():
if k in ['loss', 'nll_loss', 'sample_size']:
continue # these are already logged above
if 'loss' in k:
extra_meters[k].update(v, log_output['sample_size'])
else:
extra_meters[k].update(v)
stats[k] = extra_meters[k].avg
progress.log(stats)
# ignore the first mini-batch in words-per-second calculation
if i == 0:
trainer.get_meter('wps').reset()
if args.profile is not None and i == args.profile:
import sys
sys.exit()
num_updates = trainer.get_num_updates()
if args.save_interval_updates > 0 and num_updates % args.save_interval_updates == 0:
valid_losses = validate(args, trainer, task, epoch_itr, [first_valid])
save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
if num_updates >= max_update:
break
#end = time.time()
#inside = end - begin
# log end-of-epoch stats
stats = get_training_stats(trainer)
for k, meter in extra_meters.items():
stats[k] = meter.avg
progress.print(stats)
# reset training meters
for k in ['train_loss', 'train_nll_loss', 'wps', 'ups', 'wpb', 'bsz', 'clip']:
meter = trainer.get_meter(k)
if meter is not None:
meter.reset()
def get_training_stats(trainer):
stats = collections.OrderedDict()
stats['loss'] = '{:.3f}'.format(trainer.get_meter('train_loss').avg)
if trainer.get_meter('train_nll_loss').count > 0:
nll_loss = trainer.get_meter('train_nll_loss').avg
stats['nll_loss'] = '{:.3f}'.format(nll_loss)
else:
nll_loss = trainer.get_meter('train_loss').avg
stats['ppl'] = get_perplexity(nll_loss)
stats['wps'] = round(trainer.get_meter('wps').avg)
stats['ups'] = '{:.1f}'.format(trainer.get_meter('ups').avg)
stats['wpb'] = round(trainer.get_meter('wpb').avg)
stats['bsz'] = round(trainer.get_meter('bsz').avg)
stats['num_updates'] = trainer.get_num_updates()
stats['lr'] = trainer.get_lr()
stats['gnorm'] = '{:.3f}'.format(trainer.get_meter('gnorm').avg)
stats['clip'] = '{:.0%}'.format(trainer.get_meter('clip').avg)
stats['oom'] = trainer.get_meter('oom').avg
if trainer.get_meter('loss_scale') is not None:
stats['loss_scale'] = '{:.3f}'.format(trainer.get_meter('loss_scale').avg)
stats['wall'] = round(trainer.get_meter('wall').elapsed_time)
return stats
def validate(args, trainer, task, epoch_itr, subsets):
"""Evaluate the model on the validation set(s) and return the losses."""
valid_losses = []
for subset in subsets:
# Initialize data iterator
itr = data.EpochBatchIterator(
dataset=task.dataset(subset),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences_valid,
max_positions=trainer.get_model().max_positions(),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=8,
seed=args.seed,
num_shards=args.distributed_world_size,
shard_id=args.distributed_rank,
).next_epoch_itr(shuffle=False)
progress = progress_bar.build_progress_bar(
args, itr, epoch_itr.epoch,
prefix='valid on \'{}\' subset'.format(subset),
no_progress_bar='simple'
)
# reset validation loss meters
for k in ['valid_loss', 'valid_nll_loss']:
meter = trainer.get_meter(k)
if meter is not None:
meter.reset()
extra_meters = collections.defaultdict(lambda: AverageMeter())
for sample in progress:
log_output = trainer.valid_step(sample)
for k, v in log_output.items():
if k in ['loss', 'nll_loss', 'sample_size']:
continue
extra_meters[k].update(v)
# log validation stats
stats = get_valid_stats(trainer)
for k, meter in extra_meters.items():
stats[k] = meter.avg
progress.print(stats)
valid_losses.append(stats['valid_loss'])
return valid_losses
def score(args, trainer, task, epoch_itr, subset):
begin = time.time()
if subset not in task.datasets.keys():
task.load_dataset(subset)
src_dict = deepcopy(task.source_dictionary) # This is necessary, generation of translations
tgt_dict = deepcopy(task.target_dictionary) # alters target dictionary messing up with the rest of training
model = trainer.get_model()
#mlperf_log.transformer_print(key=mlperf_log.EVAL_SIZE, value=task.dataset(subset).__len__())
# Initialize data iterator
itr = data.EpochBatchIterator(
dataset=task.dataset(subset),
max_tokens=None,
max_sentences=max(8,min(math.ceil(1024/args.distributed_world_size),128)),
max_positions=model.max_positions(),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=8,
num_shards=args.distributed_world_size,
shard_id=args.distributed_rank,
).next_epoch_itr(shuffle=False)
# Initialize generator
gen_timer = StopwatchMeter()
translator = SequenceGenerator(
[model], tgt_dict, beam_size=args.beam,
stop_early=(not args.no_early_stop), normalize_scores=(not args.unnormalized),
len_penalty=args.lenpen, unk_penalty=args.unkpen,
sampling=args.sampling, sampling_topk=args.sampling_topk, minlen=args.min_len,
)
# Generate and compute BLEU
dict = dictionary.Dictionary()
scorer = bleu.Scorer(dict.pad(), dict.eos(), dict.unk())
num_sentences = 0
has_target = True
if args.log_translations:
log = open(os.path.join(args.save_dir, 'translations_epoch{}_{}'.format(epoch_itr.epoch, args.distributed_rank)), 'w+')
with progress_bar.build_progress_bar(args, itr) as progress:
translations = translator.generate_batched_itr(
progress, maxlen_a=args.max_len_a, maxlen_b=args.max_len_b,
cuda=True, timer=gen_timer, prefix_size=args.prefix_size,
)
wps_meter = TimeMeter()
for sample_id, src_tokens, target_tokens, hypos in translations:
# Process input and ground truth
has_target = target_tokens is not None
target_tokens = target_tokens.int().cpu() if has_target else None
src_str = src_dict.string(src_tokens, args.remove_bpe)
if has_target:
target_str = tgt_dict.string(target_tokens, args.remove_bpe, escape_unk=True)
if args.log_translations:
log.write('S-{}\t{}\n'.format(sample_id, src_str))
if has_target:
log.write('T-{}\t{}\n'.format(sample_id, target_str))
# Process top predictions
for i, hypo in enumerate(hypos[:min(len(hypos), args.nbest)]):
hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
hypo_tokens=hypo['tokens'].int().cpu(),
src_str=src_str,
alignment=hypo['alignment'].int().cpu() if hypo['alignment'] is not None else None,
align_dict = None,
tgt_dict=tgt_dict,
remove_bpe=args.remove_bpe
)
if args.log_translations:
log.write('H-{}\t{}\t{}\n'.format(sample_id, hypo['score'], hypo_str))
# log.write(str(hypo_tokens))
log.write('P-{}\t{}\n'.format(
sample_id,
' '.join(map(
lambda x: '{:.4f}'.format(x),
hypo['positional_scores'].tolist(),
))
))
# Score only the top hypothesis
if has_target and i==0:
sys_tok = tokenizer.Tokenizer.tokenize((hypo_str.lower() if args.ignore_case else hypo_str), dict)
ref_tok = tokenizer.Tokenizer.tokenize((target_str.lower() if args.ignore_case else target_str), dict)
scorer.add(ref_tok, sys_tok)
wps_meter.update(src_tokens.size(0))
progress.log({'wps':round(wps_meter.avg)})
num_sentences += 1
if args.distributed_world_size > 1:
_all_gather_bleu_scorer(scorer)
if args.log_translations:
log.close()
if gen_timer.sum != 0:
print('| Translated {} sentences ({} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)'.format(
num_sentences, gen_timer.n, gen_timer.sum, num_sentences / gen_timer.sum, 1./gen_timer.avg))
if has_target:
print('| Generate {} with beam={}: {}'.format(subset, args.beam, scorer.result_string()))
print('| Eval completed in: {:.2f}s'.format(time.time()-begin))
return scorer.score(order=4)
def _all_gather_bleu_scorer(scorer):
stats = distributed_utils.all_gather_list(scorer.stat)
bleu_stat = bleu.BleuStat()
bleu_stat.reflen = reduce(lambda x,y: x+y, [s.reflen for s in stats])
bleu_stat.predlen = reduce(lambda x,y: x+y, [s.predlen for s in stats])
bleu_stat.match1 = reduce(lambda x,y: x+y, [s.match1 for s in stats])
bleu_stat.count1 = reduce(lambda x,y: x+y, [s.count1 for s in stats])
bleu_stat.match2 = reduce(lambda x,y: x+y, [s.match2 for s in stats])
bleu_stat.count2 = reduce(lambda x,y: x+y, [s.count2 for s in stats])
bleu_stat.match3 = reduce(lambda x,y: x+y, [s.match3 for s in stats])
bleu_stat.count3 = reduce(lambda x,y: x+y, [s.count3 for s in stats])
bleu_stat.match4 = reduce(lambda x,y: x+y, [s.match4 for s in stats])
bleu_stat.count4 = reduce(lambda x,y: x+y, [s.count4 for s in stats])
scorer.stat = bleu_stat
def get_valid_stats(trainer):
stats = collections.OrderedDict()
stats['valid_loss'] = trainer.get_meter('valid_loss').avg
if trainer.get_meter('valid_nll_loss').count > 0:
nll_loss = trainer.get_meter('valid_nll_loss').avg
stats['valid_nll_loss'] = nll_loss
else:
nll_loss = trainer.get_meter('valid_loss').avg
stats['valid_ppl'] = get_perplexity(nll_loss)
stats['num_updates'] = trainer.get_num_updates()
if hasattr(save_checkpoint, 'best'):
stats['best'] = min(save_checkpoint.best, stats['valid_loss'])
return stats
def get_perplexity(loss):
try:
return '{:.2f}'.format(math.pow(2, loss))
except OverflowError:
return float('inf')
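# e.g. a base-2 cross-entropy loss of 3.0 corresponds to a perplexity of 2**3.0 = 8.00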
def save_checkpoint(args, trainer, epoch_itr, val_loss):
if args.no_save or not distributed_utils.is_master(args):
return
epoch = epoch_itr.epoch
end_of_epoch = epoch_itr.end_of_epoch()
updates = trainer.get_num_updates()
checkpoint_conds = collections.OrderedDict()
checkpoint_conds['checkpoint{}.pt'.format(epoch)] = (
end_of_epoch and not args.no_epoch_checkpoints and
epoch % args.save_interval == 0
)
checkpoint_conds['checkpoint_{}_{}.pt'.format(epoch, updates)] = (
not end_of_epoch and args.save_interval_updates > 0 and
updates % args.save_interval_updates == 0
)
checkpoint_conds['checkpoint_best.pt'] = (
val_loss is not None and
(not hasattr(save_checkpoint, 'best') or val_loss < save_checkpoint.best)
)
checkpoint_conds['checkpoint_last.pt'] = True # keep this last so that it's a symlink
prev_best = getattr(save_checkpoint, 'best', val_loss)
if val_loss is not None:
save_checkpoint.best = min(val_loss, prev_best)
extra_state = {
'best': save_checkpoint.best,
'train_iterator': epoch_itr.state_dict(),
'val_loss': val_loss,
}
checkpoints = [os.path.join(args.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond]
if len(checkpoints) > 0:
for cp in checkpoints:
trainer.save_checkpoint(cp, extra_state)
if not end_of_epoch and args.keep_interval_updates > 0:
# remove old checkpoints; checkpoints are sorted in descending order
checkpoints = utils.checkpoint_paths(args.save_dir, pattern=r'checkpoint_\d+_(\d+)\.pt')
for old_chk in checkpoints[args.keep_interval_updates:]:
os.remove(old_chk)
def load_checkpoint(args, trainer, epoch_itr):
"""Load a checkpoint and replay dataloader to match."""
os.makedirs(args.save_dir, exist_ok=True)
checkpoint_path = os.path.join(args.save_dir, args.restore_file)
if os.path.isfile(checkpoint_path):
extra_state = trainer.load_checkpoint(checkpoint_path)
if extra_state is not None:
# replay train iterator to match checkpoint
epoch_itr.load_state_dict(extra_state['train_iterator'])
print('| loaded checkpoint {} (epoch {} @ {} updates)'.format(
checkpoint_path, epoch_itr.epoch, trainer.get_num_updates()))
trainer.lr_step(epoch_itr.epoch)
trainer.lr_step_update(trainer.get_num_updates())
if 'best' in extra_state:
save_checkpoint.best = extra_state['best']
def load_dataset_splits(task, splits):
for split in splits:
if split == 'train':
task.load_dataset(split, combine=True)
else:
for k in itertools.count():
split_k = split + (str(k) if k > 0 else '')
try:
task.load_dataset(split_k, combine=False)
except FileNotFoundError as e:
if k > 0:
break
raise e
if __name__ == '__main__':
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser)
if args.distributed_port > 0 or args.distributed_init_method is not None:
from distributed_train import main as distributed_main
distributed_main(args)
elif args.distributed_world_size > 1:
from multiprocessing_train import main as multiprocessing_main
multiprocessing_main(args)
else:
main(args)
|
import gym
from gp_gym import gen_init_pop, select, run_ep_while_not_done, IFLTE
class CartPole:
"""
This class implements a GP agent for the CartPole-v0 gym environment.
"""
def __init__(self, info):
self.env_name = info["env_name"]
# Program structure
self.p_type = info["program_type"]
self.T = info["T"]
self.F = info["F"]
self.max_depth = info["max_depth"]
self.t_rate = info["term_growth_rate"]
self.method = info["method"]
# GP parameters
self.pop_size = info["pop_size"]
self.num_eps = info["num_eps"]
self.max_gens = info["max_gens"]
self.term_fit = info["term_fit"]
def train(self):
best_program = None
# Generate initial population
current_pop = gen_init_pop(self.pop_size, self.T, self.F, self.max_depth, self.method, self.t_rate, self.p_type)
# Evolution loop
gen_idx = 0
while (not best_program) and (gen_idx < self.max_gens):
# Evaluate population fitness
fit_scores = self.batch_fit(current_pop, self.num_eps)
max_fitness = max(fit_scores)
# Check termination criteria
if (max_fitness >= self.term_fit) or (gen_idx >= self.max_gens - 1):
best_program = current_pop[fit_scores.index(max_fitness)]
# Evolve next generation
else:
current_pop = [select(current_pop, fit_scores) for _ in range(self.pop_size)]
gen_idx += 1
return best_program
def batch_fit(self, pop, num_eps, render=False):
"""
Computes the average fitness score (over a specified number of episodes)
of every program in a population.
pop: population of programs
num_eps: number of episodes to evaluate each program on
"""
env = gym.make(self.env_name)
scores = [self.fit(p, num_eps, env=env, render=render) for p in pop]
env.close()
return scores
def fit(self, p, num_eps, env=None, render=False):
"""
Computes the average fitness score of a program over a
specified number of episodes.
env: gym environment object
p: program to evaluate
num_eps: number of episodes to run the program for
return: fitness score (float)
"""
score = 0.0
if not env:
env = gym.make(self.env_name)
for _ in range(num_eps):
score += run_ep_while_not_done(env, p, self.eval, render=render)
return score/num_eps
def eval(self, p, obs):
"""
Interprets a program and evaluates it to an action
given an observation from the environment.
p: program to interpret
obs: observation : [float]
return: action {0, 1}
"""
result = 0
# Terminals
if type(p) is not list:
terminal = self.T[p]
# Actions
if terminal["type"] == "Action":
result = int(p)
# Observation variables
elif terminal["token"] == "ObsVar":
result = obs[terminal["obs_index"]]
# Constants
elif terminal["token"] == "Constant": # constants
if terminal["type"] == "Float": # floats
result = float(p)
# Functions
else:
fname = p[0]
args = [self.eval(p[i+1], obs) for i in range(self.F[fname]["arity"])]
# IFLTE
if fname == "IFLTE":
result = IFLTE(args)
return result
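# Illustrative sketch (the terminal and function encodings below are assumptions about the
# gp_gym representation, not verified): a program tree such as
#   ["IFLTE", "obs_2", "0.0", "0", "1"]
# would be read by eval() as "if the value of the third observation variable is <= 0.0,
# return action 0, otherwise return action 1"; eval() recursively evaluates each argument
# and then dispatches on the function name (here IFLTE).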
|
import warnings
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings("ignore", category=ConvergenceWarning)
import numpy as np
from scipy.sparse import csr_matrix
import array as ar
from multiprocessing import Process
import time
from datetime import datetime
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_classification, make_blobs
import svml
from neoml.KMeans import KMeans as KMeansNeo
from sklearn.cluster import KMeans as KMeansSk
from neoml.Hierarchical import Hierarchical as HierarchicalNeo
from sklearn.cluster import AgglomerativeClustering as HierarchicalSk
from neoml.Linear import LinearClassifier as LinearBinaryNeo
from sklearn.linear_model import LogisticRegression as LinearBinarySk
from neoml.SVM import SvmClassifier as SvmNeo
from sklearn.svm import SVC as SvmSk
from neoml.OneVersusAll import OneVersusAllClassifier as OneVsAllNeo
from sklearn.multiclass import OneVsRestClassifier as OneVsAllSk
from neoml.DecisionTree import DecisionTreeClassifier as DecisionTreeClNeo
from sklearn.tree import DecisionTreeClassifier as DecisionTreeClSk
from neoml.GradientBoost import GradientBoostClassifier as GradientBoostClNeo
from sklearn.ensemble import GradientBoostingClassifier as GradientBoostClSk
def neoTrainKMeans( X, y, w ):
return KMeansNeo( max_iteration_count=50, init_cluster_count=3 ).clusterize( X, w )
def skTrainKMeans( X, y, w, centers_centers ):
return KMeansSk( max_iter=50, n_clusters=3, algorithm="full", n_init=1, random_state=0, init=centers_centers ).fit( X, sample_weight=w )
def neoTrainHierarchical( X, y, w ):
return HierarchicalNeo( min_cluster_count=3, max_cluster_distance=100 ).clusterize( X, w )
def skTrainHierarchical( X, y, w ):
return HierarchicalSk( linkage='average', n_clusters=None, distance_threshold=100 ).fit( X )
def neoTrainLinearBinaryClassifier( X, y, w ):
return LinearBinaryNeo( loss='binomial', l1_reg=0, max_iteration_count=100 ).train( X, y, w )
def skTrainLinearBinaryClassifier( X, y, w ):
return LinearBinarySk( random_state=0, penalty='l2', solver='liblinear', max_iter=100 ).fit( X, y, w )
def neoTrainLinearSVM( X, y, w ):
return SvmNeo( kernel='linear', max_iteration_count=1000 ).train( X, y, w )
def skTrainLinearSVM( X, y, w ):
return SvmSk( kernel='linear', max_iter=1000 ).fit( X, y, w )
def neoTrainRBFSVM( X, y, w ):
return SvmNeo( kernel='rbf', max_iteration_count=1000 ).train( X, y, w )
def skTrainRBFSVM( X, y, w ):
return SvmSk( kernel='rbf', max_iter=1000 ).fit( X, y, w )
def neoTrainOneVsAllLinearSVM( X, y, w ):
return OneVsAllNeo( SvmNeo( kernel='linear', max_iteration_count=1000 ) ).train( X, y, w )
def skTrainOneVsAllLinearSVM( X, y, w ):
return OneVsAllSk( SvmSk( kernel='linear', max_iter=1000 ) ).fit( X, y )
def neoTrainGradientBoostingClassifier( X, y, w ):
return GradientBoostClNeo( loss ='binomial', iteration_count=10 ).train( X, y, w )
def skTrainGradientBoostingClassifier( X, y, w ):
return GradientBoostClSk( loss='deviance', n_estimators=10, random_state=0 ).fit( X, y, w )
def neoTrainDecisionTreeClassifier( X, y, w ):
return DecisionTreeClNeo( criterion='gini', min_split_size=2, min_subset_part=0.10, min_subset_size=128, max_tree_depth=10 ).train( X, y, w )
def skTrainDecisionTreeClassifier( X, y, w ):
return DecisionTreeClSk( criterion='gini', min_samples_split=2, min_weight_fraction_leaf=0.10, min_samples_leaf=128, max_depth=10 ).fit( X, y, w )
###################################################################################################################
def neoScoreClassifier( classifierModel, X_test, y_test ):
correct = sum( 1 for y, probs in zip( y_test, classifierModel.classify( X_test ) ) if y == np.argmax( probs ) )
print( f"score: {float( correct ) / len( y_test )}" )
def neoScoreClusterizer( clusterizationRes, X_test, y_test ):
correct = sum( 1 for y, label in zip( y_test, clusterizationRes[0] ) if y == label )
print( f"score: {float( correct ) / len( y_test )}" )
def skScoreClassifier( classifier, X_test, y_test ):
score = classifier.score( X_test, y_test )
print( f"score: {score}" )
def skScoreClusterizer( clusterizer, X_test, y_test ):
correct = sum( 1 for y, label in zip( y_test, clusterizer.labels_ ) if y == label )
print( f"score: {float( correct ) / len( y_test )}" )
ClassificationAlgorithms = {
"Linear binary NeoML": [neoTrainLinearBinaryClassifier, neoScoreClassifier],
"Linear binary sklearn": [skTrainLinearBinaryClassifier, skScoreClassifier],
"Linear SVM NeoML": [neoTrainLinearSVM, neoScoreClassifier],
"Linear SVM sklearn": [skTrainLinearSVM, skScoreClassifier],
"RBF SVM NeoML": [neoTrainRBFSVM, neoScoreClassifier],
"RBF SVM sklearn": [skTrainRBFSVM, skScoreClassifier],
"OneVsAll with Linear SVM NeoML": [neoTrainOneVsAllLinearSVM, neoScoreClassifier],
"OneVsAll with Linear SVM sklearn": [skTrainOneVsAllLinearSVM, skScoreClassifier],
"Gradient boosting classifier NeoML": [neoTrainGradientBoostingClassifier, neoScoreClassifier],
"Gradient boosting classifier sklearn": [skTrainGradientBoostingClassifier, skScoreClassifier],
"Decision tree classifier NeoML": [neoTrainDecisionTreeClassifier, neoScoreClassifier],
"Decision tree classifier sklearn": [skTrainDecisionTreeClassifier, skScoreClassifier]
}
ClusteringAlgorithms = {
"KMeans NeoML": [neoTrainKMeans, neoScoreClusterizer],
"KMeans sklearn": [skTrainKMeans, skScoreClusterizer],
"Hierarchical NeoML": [neoTrainHierarchical, neoScoreClusterizer],
"Hierarchical sklearn": [skTrainHierarchical, skScoreClusterizer]
}
def timeoutFunc( timeout, func ):
# p = Process( target=func )
# p.start()
# p.join( timeout )
# if p.is_alive():
# print( "end of time" )
# p.terminate()
# p.join()
# return False
func()
return True
def measure( func, n_runs, timeout=None ):
tic = time.perf_counter()
for i in range(0, n_runs):
if timeoutFunc( timeout, func ) == False:
return
print("time: {0}".format( (time.perf_counter() - tic) / n_runs ))
TestFunctions = {
"measure_train_time": lambda trainFunc, scoreFunc, n_runs=1, timeout=None: measure( trainFunc, n_runs, timeout ),
"calculate_score": lambda trainFunc, scoreFunc, n_runs=1, timeout=None: timeoutFunc( timeout, scoreFunc )
}
def runTests( X_train, X_test, y_train, y_test, w_train=None, w_test=None, n_runs=1, timeout=None, algorithms=ClassificationAlgorithms, testFuncs=TestFunctions ):
if w_train is None:
w_train = np.ones( len(y_train), dtype=float )
if w_test is None:
w_test = np.ones( len(y_test), dtype=float )
for desc, algo in algorithms.items():
print( "\n{0} {1}".format(datetime.now().time(), desc), flush=True )
if desc == "KMeans sklearn":
_, centers, _ = neoTrainKMeans( X_train, y_train, w_train )
trainFunc = lambda: algo[0]( X_train, y_train, w_train, centers )
else:
trainFunc = lambda: algo[0]( X_train, y_train, w_train )
scoreFunc = lambda: algo[1]( trainFunc(), X_test, y_test )
for testFuncDesc, testFunc in testFuncs.items():
print( testFuncDesc, flush=True )
testFunc( trainFunc, scoreFunc, n_runs, timeout )
def splitDataAndRunTests( X, y, w=None, n_runs=1, timeout=None, algorithms=ClassificationAlgorithms, testFuncs=TestFunctions ):
if w is None:
w = np.ones( len(y), dtype=float )
X_train, X_test, y_train, y_test, w_train, w_test = train_test_split(X, y, w, test_size=.2)
X_train_sparse = csr_matrix( X_train, dtype=np.float32 )
X_test_sparse = csr_matrix( X_test, dtype=np.float32 )
runTests( X_train, X_test, y_train, y_test, w_train, w_test, n_runs=n_runs, timeout=timeout, algorithms=algorithms, testFuncs=testFuncs )
def createClassificationDatasetAndTest( n_samples=5000, n_features=20, n_classes=2, n_informative=2, n_runs=1000, timeout=2, algorithms=ClassificationAlgorithms, testFuncs=TestFunctions ):
print(f"\nTesting on the dataset = make_classification( n_samples={n_samples}, n_features={n_features}, n_classes={n_classes} ), {n_runs} runs with timeout {timeout}s", flush=True)
X, y = make_classification( n_samples=n_samples, n_features=n_features, n_classes=n_classes, n_informative=n_informative )
splitDataAndRunTests( X, y, n_runs=n_runs, timeout=timeout, algorithms=algorithms, testFuncs=testFuncs )
def createClusteringDatasetAndTest( n_samples=5000, n_features=20, centers=3, n_runs=1000, timeout=2, algorithms=ClusteringAlgorithms, testFuncs=TestFunctions ):
print(f"\nTesting on the dataset = make_blobs( n_samples={n_samples}, n_features={n_features}, centers={centers} ), {n_runs} runs with timeout {timeout}s", flush=True)
X, y = make_blobs( n_samples=n_samples, n_features=n_features, centers=centers )
splitDataAndRunTests( X, y, n_runs=n_runs, timeout=timeout, algorithms=algorithms, testFuncs=testFuncs )
def testOnInternalClassificationDataset( dataset, n_runs=10, timeout=500, algorithms=ClassificationAlgorithms, testFuncs=TestFunctions ):
print( f"Testing on internal dataset {dataset} with timeout={timeout}, n_runs={n_runs}", flush=True )
X, y = svml.read( dataset + ".train.svml" )
X_test, y_test = svml.read( dataset + ".test.svml", min_feature_count=X.shape[1] )
runTests( X, X_test, y, y_test, n_runs=n_runs, timeout=timeout, algorithms=algorithms, testFuncs=testFuncs )
myAlgos = ["Linear SVM NeoML", "Linear SVM sklearn"]
algosToTestBinary = ["Linear binary NeoML", "Linear binary sklearn", "Linear SVM NeoML", "Linear SVM sklearn", "RBF SVM NeoML", "RBF SVM sklearn", "Decision tree classifier NeoML", "Decision tree classifier sklearn"]
algosToTestMulti = ["OneVsAll with Linear SVM NeoML", "OneVsAll with Linear SVM sklearn", "Gradient boosting classifier NeoML", "Gradient boosting classifier sklearn"]
algorithmsMy={ key: ClassificationAlgorithms[key] for key in myAlgos }
algorithmsBinary={ key: ClassificationAlgorithms[key] for key in algosToTestBinary }
algorithmsMulti={ key: ClassificationAlgorithms[key] for key in algosToTestMulti }
testFuncNames = [ "measure_train_time", "calculate_score" ]
testFuncs={ key: TestFunctions[key] for key in testFuncNames }
#testOnInternalClassificationDataset( "data/news20bin", n_runs=100, timeout=2, algorithms=algorithmsBinary )
#testOnInternalClassificationDataset( "data/news20", algorithms=algorithmsMulti )
#print("Binary classification (small, medium, big):")
createClassificationDatasetAndTest( algorithms=algorithmsMy, n_runs=1 )
#createClassificationDatasetAndTest( n_samples=20000, n_runs=100, timeout=50 )
#createClassificationDatasetAndTest( n_samples=100000, n_runs=5, timeout=500 )
#
#print("\n\nMulti-class classification (small, medium, big):")
#createClassificationDatasetAndTest( n_classes=10, n_informative=5 )
#createClassificationDatasetAndTest( n_samples=20000, n_classes=10, n_informative=5, n_runs=100, timeout=50 )
#createClassificationDatasetAndTest( n_samples=100000, n_classes=10, n_informative=5, n_runs=5, timeout=500 )
#
#print("Clustering (small, medium, big):")
#createClusteringDatasetAndTest()
#createClusteringDatasetAndTest( n_samples=20000, n_runs=100, timeout=50 )
#createClusteringDatasetAndTest( n_samples=100000, n_runs=5, timeout=500 )
#createClassificationDatasetAndTest( n_runs=5, timeout=20, algorithms=algorithms, testFuncs=testFuncs )
#createClassificationDatasetAndTest( n_runs=1, timeout=100, n_samples=20000, algorithms=algorithms, testFuncs=testFuncs )
#createClassificationDatasetAndTest( n_runs=5, timeout=20, n_classes=10, n_informative=5, algorithms=algorithms, testFuncs=testFuncs )
#algosToTest = ["Hierarchical NeoML", "Hierarchical sklearn"]
#testFuncs = { "measure_train_time", "calculate_score" }
#createClusteringDatasetAndTest( n_samples=2000, n_runs=10, testFuncs={ key: TestFunctions[key] for key in testFuncs } )
print("Done.")
|
import shutil
import unittest
from mock import MagicMock
import bilby
class TestUltranest(unittest.TestCase):
def setUp(self):
self.maxDiff = None
self.likelihood = MagicMock()
self.priors = bilby.core.prior.PriorDict(
dict(a=bilby.core.prior.Uniform(0, 1),
b=bilby.core.prior.Uniform(0, 1)))
self.priors["a"] = bilby.core.prior.Prior(boundary="periodic")
self.priors["b"] = bilby.core.prior.Prior(boundary="reflective")
self.sampler = bilby.core.sampler.Ultranest(self.likelihood, self.priors,
outdir="outdir", label="label",
use_ratio=False, plot=False,
skip_import_verification=True)
def tearDown(self):
del self.likelihood
del self.priors
del self.sampler
shutil.rmtree("outdir")
def test_default_kwargs(self):
expected = dict(
resume=True,
show_status=True,
num_live_points=None,
wrapped_params=None,
derived_param_names=None,
run_num=None,
vectorized=False,
num_test_samples=2,
draw_multiple=True,
num_bootstraps=30,
update_interval_iter=None,
update_interval_ncall=None,
log_interval=None,
dlogz=None,
max_iters=None,
update_interval_iter_fraction=0.2,
viz_callback=None,
dKL=0.5,
frac_remain=0.01,
Lepsilon=0.001,
min_ess=400,
max_ncalls=None,
max_num_improvement_loops=-1,
min_num_live_points=400,
cluster_num_live_points=40,
step_sampler=None,
)
self.assertListEqual([1, 0], self.sampler.kwargs["wrapped_params"]) # Check this separately
self.sampler.kwargs["wrapped_params"] = None # The dict comparison can't handle lists
self.sampler.kwargs["derived_param_names"] = None
self.sampler.kwargs["viz_callback"] = None
self.assertDictEqual(expected, self.sampler.kwargs)
def test_translate_kwargs(self):
expected = dict(
resume=True,
show_status=True,
num_live_points=123,
wrapped_params=None,
derived_param_names=None,
run_num=None,
vectorized=False,
num_test_samples=2,
draw_multiple=True,
num_bootstraps=30,
update_interval_iter=None,
update_interval_ncall=None,
log_interval=None,
dlogz=None,
max_iters=None,
update_interval_iter_fraction=0.2,
viz_callback=None,
dKL=0.5,
frac_remain=0.01,
Lepsilon=0.001,
min_ess=400,
max_ncalls=None,
max_num_improvement_loops=-1,
min_num_live_points=400,
cluster_num_live_points=40,
step_sampler=None,
)
for equiv in bilby.core.sampler.base_sampler.NestedSampler.npoints_equiv_kwargs:
new_kwargs = self.sampler.kwargs.copy()
del new_kwargs['num_live_points']
new_kwargs[equiv] = 123
self.sampler.kwargs = new_kwargs
self.sampler.kwargs["wrapped_params"] = None
self.sampler.kwargs["derived_param_names"] = None
self.sampler.kwargs["viz_callback"] = None
self.assertDictEqual(expected, self.sampler.kwargs)
if __name__ == "__main__":
unittest.main()
|
import numpy as np
import math
# Convert a dense matrix (numpy array or list) to a sparse list of (row, col, value) triplets
def sparse(data):
if type(data) == list:
data = np.array(data,ndmin=2)
elif data.ndim == 1:
data = np.array(data,ndmin=2)
data = data.transpose()
sparse_matrix = []
for i in range(data.shape[0]):
for j in range(data.shape[1]):
if data[i][j] != 0:
sparse_matrix.append((i,j,data[i][j]))
return sparse_matrix
'''
Convert a sparse matrix (list of (row, col, value) triplets) back to a dense matrix.
Note: this function only supports converting a single row into a one-row dense matrix,
and only accepts triplets ordered with the smaller row index first,
e.g. [(0,0,0), (1, 0, 0)] is supported while [(1,0,0), (0,0,0)] is not;
in other words the input sparse matrix must be sorted by row, col.
'''
def dense(data):
dense_matrix = np.array([])
tmp = []
for i in range(len(data)):
tmp.append(data[i][2])
dense_matrix = np.insert(tmp, 0, values=dense_matrix, axis=0)
return dense_matrix
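# A minimal usage sketch (illustration only, not part of the original module):
#   sparse([[0, 3, 0], [5, 0, 0]])  ->  [(0, 1, 3), (1, 0, 5)]
#   dense([(0, 0, 1.0), (0, 1, 2.0), (0, 2, 0.0)])  ->  array([1., 2., 0.])
# sparse() keeps only the non-zero entries as (row, col, value) triplets;
# dense() flattens a single row of such triplets back into a 1-D array.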
'''
Convert an RDD whose elements are all MatrixEntry objects into a list of tuples.
'''
def MatrixEntrytoArray(data):
data = data.collect()
data_new = []
for i in range(len(data)):
        # list(map(int, str(data[i]).replace('MatrixEntry','')[1:-1].split(','))) fails for float values
xyz = str(data[i]).replace('MatrixEntry','')[1:-1].split(',')
        xyz = [intorfloat(i) for i in xyz]  # convert each field to int where possible, otherwise float
data_new.append( tuple(xyz) )
return data_new
'''
Convert a string to an int/float value, preferring int.
Input:
    data (string)
Output:
    data (int/float)
'''
def intorfloat(data):
    # Parse as float first, then drop to int when the value is integral
    # (the original type checks never fired for string input and always returned float).
    value = float(data)
    if math.isfinite(value) and value == int(value):
        return int(value)
    return value
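# Quick sanity checks for intorfloat (illustrative, assuming the parser above):
#   intorfloat("3")    ->  3
#   intorfloat("3.0")  ->  3
#   intorfloat("2.5")  ->  2.5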
'''
Extract the value field (the third element) from each (row, col, value) tuple in a list.
Input:
    data (list of tuples)
Output:
    data (list of values)
'''
def tuplefirstvalue(data):
data_new = []
for i in range(len(data)):
data_new.append( intorfloat(data[i][2]) )
return data_new
'''
Round values that are approximately integers to exact integers.
Input:
    data (list)
Output:
    data (list)
'''
def approximateintegertointeger(data):
for i in range(len(data)):
if abs( data[i] - round(data[i]) ) < 10**-8:
data[i] = int( round(data[i]) )
return data
'''
Replace nan values with None.
Input:
    data (list)
Output:
    data (list)
'''
def nanreplce(data):
for i in range(len(data)):
        # math.isnan() cannot handle None, so guard the check with try/except
try:
if math.isnan(data[i]):
data[i] = None
except:
continue
else:
continue
return data |
def incorrect_login_data():
print("Combination of login and password is incorrect")
def print_error(message):
print("Error: " + message)
def print_no_permission(permission_name):
print_error("you have no " + permission_name + " permission for this file")
def print_usage(command, synoptics, description, example):
print("Usage: " + command + synoptics + "\n" + description + "\nExample:\n\t" + command + example)
def check_permissions_and_execute(command_name, function):
try:
file_id = files.index(args[0])
if rights.index(command_name) in rights_matrix[user_id][file_id]:
function(file_id)
else:
print_no_permission(command_name)
except ValueError:
print_error("file not found")
def read(file_id):
print(files_content[file_id])
def write(file_id):
files_content[file_id] = " ".join(args[1:])
print("Success!")
def check(login, password):
try:
user_id = users.index(login)
except ValueError:
incorrect_login_data()
return -1
if password != passwords[user_id]:
incorrect_login_data()
return -1
return user_id
def ls(user_id):
if not rights_matrix[user_id]:
print("You don't have any rights in this system :(\nPlease contact with the system administrator")
else:
print("Available files:")
for file_id in rights_matrix[user_id]:
if not rights_matrix[user_id][file_id]:
rights_string = "no rights"
else:
rights_string = ", ".join(rights[i] for i in rights_matrix[user_id][file_id])
print("\t" + files[file_id] + ": " + rights_string)
class Command:
def __init__(self, name):
self.name = name
users = ["Alex", "Evgeny", "Vasiliy"]
passwords = ["<PASSWORD>", "123", "123"]
files = ["Skynet.doc", "AI_research.7z", "Overwatch.exe"]
files_content = ["Skynet is a global computer system",
"AI will conquer the world!!!",
"Welcome to Overwatch, Soldier 78!"]
rights = ["read", "write", "grant"]
commands = ["read", "write", "grant", "exit", "quit"]
rights_matrix = [
{0: [0, 1, 2], 1: [0, 1, 2], 2: []},
{0: [0], 1: [1], 2: [2]},
{}]
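# How to read rights_matrix (illustrative note, inferred from the structures above):
# rights_matrix[user_id] maps file_id -> list of indices into `rights`.
# e.g. rights_matrix[0] gives Alex read/write/grant on Skynet.doc and AI_research.7z plus an
# explicit empty entry for Overwatch.exe, while rights_matrix[2] == {} means Vasiliy has no rights at all.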
print("Welcome to Cyberdine Systems Inc.\nPlease, login into the system")
while True:
login = input("Login: ")
password = input("Password: ")
user_id = check(login, password)
if user_id == -1:
continue
print("Welcome, " + login)
ls(user_id)
while True:
user_input = input(login.lower() + "@skynet:/$ ")
command = user_input.split()[0]
args = list(user_input.split()[1:])
if command == "quit" or command == "exit":
print("Goodbye, " + users[user_id])
break
elif command == "read":
if len(args) != 1:
print_usage("read", " FILE", "Reads content of the specific file", " file.txt")
else:
check_permissions_and_execute("read", read)
elif command == "write":
check_permissions_and_execute("write", write)
elif command == "grant":
if len(args) < 4:
print_usage("grant", " USER RIGHTS... FILE",
"Grants selected user selected rights for the specific file", " user read write file.txt")
else:
eval("read(0)")
username = args[0]
filename = args[-1]
rights = args[1:-1]
print(username)
print(filename)
print(rights)
else:
print(command + ": command not found")
break
|
import heapq
import random
import time
import multiprocessing
import pygame
import math
import Queue
class PriorityQueue:
def __init__(self):
self.elements = []
def empty(self):
return len(self.elements) == 0
def put(self, item, priority):
heapq.heappush(self.elements, (priority, item))
def get(self):
return heapq.heappop(self.elements)[1]
class ai_agent():
mapinfo = []
# castle rect
castle_rect = pygame.Rect(12 * 16, 24 * 16, 32, 32)
def __init__(self):
self.mapinfo = []
# rect: [left, top, width, height]
# rect_type: 0:empty 1:brick 2:steel 3:water 4:grass 5:froze
# castle_rect: [12*16, 24*16, 32, 32]
    # mapinfo[0]: bullets [[rect, direction, speed], ...]
    # mapinfo[1]: enemies [[rect, direction, speed, type], ...]
    # enemy_type: 0:TYPE_BASIC 1:TYPE_FAST 2:TYPE_POWER 3:TYPE_ARMOR
    # mapinfo[2]: tiles [[rect, type], ...] (empty tiles are not stored in mapinfo[2])
    # mapinfo[3]: player [[rect, direction, speed, Is_shielded], ...]
    # shoot: 0:none 1:shoot
    # move_dir: 0:Up 1:Right 2:Down 3:Left 4:None
    # def Get_mapInfo: fetch the map information
    # def Update_Strategy: update your strategy
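    # Illustrative sketch (not part of the original template) of how the structures documented
    # above are typically unpacked in this class; the arities follow the comments above:
    #   player_rect = mapinfo[3][0][0]                                    # first player entry, its rect
    #   for bullet_rect, bullet_dir, bullet_speed in mapinfo[0]: ...
    #   for enemy_rect, enemy_dir, enemy_speed, enemy_type in mapinfo[1]: ...
    #   for tile_rect, tile_type in mapinfo[2]: ...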
def operations(self, p_mapinfo, c_control):
while True:
            # ----- your AI operation. (The original template here was a random strategy; design your own AI.) -----
self.Get_mapInfo(p_mapinfo)
player_rect = self.mapinfo[3][0][0]
# sort enemy with manhattan distance to castle
sorted_enemy_with_distance_to_castle = sorted(self.mapinfo[1],
key=lambda x: self.manhattan_distance(x[0].center,
self.castle_rect.center))
# sort enemy with manhattan distance to player current position
sorted_enemy_with_distance_to_player = sorted(self.mapinfo[1],
key=lambda x: self.manhattan_distance(x[0].center,
player_rect.center))
# default position
default_pos_rect = pygame.Rect(195, 3, 26, 26)
# exists enemy
if sorted_enemy_with_distance_to_castle:
# if enemy distance with castle < 150, chase it
if self.manhattan_distance(sorted_enemy_with_distance_to_castle[0][0].topleft, self.castle_rect.topleft) < 150:
enemy_rect = sorted_enemy_with_distance_to_castle[0][0]
enemy_direction = sorted_enemy_with_distance_to_castle[0][1]
# else chase the nearest enemy to player
else:
enemy_rect = sorted_enemy_with_distance_to_player[0][0]
enemy_direction = sorted_enemy_with_distance_to_player[0][1]
# check if inline with enemy
inline_direction = self.inline_with_enemy(player_rect, enemy_rect)
# perform a star
astar_direction = self.a_star(player_rect, enemy_rect, 6)
# perform bullet avoidance
shoot, direction = self.bullet_avoidance(self.mapinfo[3][0], 6, self.mapinfo[0], astar_direction, inline_direction)
# update strategy
self.Update_Strategy(c_control, shoot, direction)
time.sleep(0.005)
# go to default position
else:
# perform a star
astar_direction = self.a_star(player_rect, default_pos_rect, 6)
# update strategy
if astar_direction is not None:
self.Update_Strategy(c_control, 0, astar_direction)
# time.sleep(0.001)
else:
self.Update_Strategy(c_control, 0, 0)
# time.sleep(0.001)
# ------------------------------------------------------------------------------------------------------
def Get_mapInfo(self, p_mapinfo):
if p_mapinfo.empty() != True:
try:
self.mapinfo = p_mapinfo.get(False)
except Queue.Empty:
skip_this = True
def Update_Strategy(self, c_control, shoot, move_dir):
if c_control.empty() == True:
c_control.put([shoot, move_dir])
def should_fire(self, player_rect, enemy_rect_info_list):
for enemy_rect_info in enemy_rect_info_list:
if self.inline_with_enemy(player_rect, enemy_rect_info[0]) is not False:
return True
# A* algorithm, return a series of command to reach enemy
def a_star(self, start_rect, goal_rect, speed):
# print 'trigger a*'
start = (start_rect.left, start_rect.top)
goal = (goal_rect.left, goal_rect.top)
# initialise frontier
frontier = PriorityQueue()
came_from = {}
cost_so_far = {}
# put start into frontier
frontier.put(start, 0)
came_from[start] = None
cost_so_far[start] = 0
while not frontier.empty():
current_left, current_top = frontier.get()
current = (current_left, current_top)
# goal test
temp_rect = pygame.Rect(current_left, current_top, 26, 26)
if self.is_goal(temp_rect, goal_rect):
break
# try every neighbour
for next in self.find_neighbour(current_top, current_left, speed, goal_rect):
# calculate new cost
new_cost = cost_so_far[current] + speed
# update if next haven't visited or cost more
if next not in cost_so_far or new_cost < cost_so_far[next]:
cost_so_far[next] = new_cost
priority = new_cost + self.heuristic(goal, next)
frontier.put(next, priority)
came_from[next] = current
# build path
# dir_cmd = []
# while current != start:
# parent = came_from[current]
# parent_left, parent_top = parent
# current_left, current_top = current
# # up
# if current_top < parent_top:
# dir_cmd.append(0)
# # down
# elif current_top > parent_top:
# dir_cmd.append(2)
# # left
# elif current_left < parent_left:
# dir_cmd.append(3)
# # right
# elif current_left > parent_left:
# dir_cmd.append(1)
# current = came_from[current]
# dir_cmd.reverse()
# return the first move is enough
next = None
dir_cmd = None
while current != start:
next = current
current = came_from[current]
if next:
next_left, next_top = next
current_left, current_top = current
# up
if current_top > next_top:
dir_cmd = 0
# down
elif current_top < next_top:
dir_cmd = 2
# left
elif current_left > next_left:
dir_cmd = 3
# right
elif current_left < next_left:
dir_cmd = 1
return dir_cmd
def manhattan_distance(self, a, b):
x1, y1 = a
x2, y2 = b
return abs(x1 - x2) + abs(y1 - y2)
def euclidean_distance(self, a, b):
x1, y1 = a
x2, y2 = b
return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
    # heuristic func, use manhattan dist
def heuristic(self, a, b):
return self.manhattan_distance(a, b)
# return True when two rects collide
def is_goal(self, rect1, rect2):
center_x1, center_y1 = rect1.center
center_x2, center_y2 = rect2.center
if abs(center_x1 - center_x2) <= 7 and abs(center_y1 - center_y2) <= 7:
return True
else:
return False
# return [(top,left)]
# each time move 2px (speed)
def find_neighbour(self, top, left, speed, goal_rect):
# Rect(left, top, width, height)
allowable_move = []
# move up
new_top = top - speed
new_left = left
if not (new_top < 0):
move_up = True
temp_rect = pygame.Rect(new_left, new_top, 26, 26)
# check collision with enemy except goal
for enemy in self.mapinfo[1]:
if enemy[0] is not goal_rect:
if temp_rect.colliderect(enemy[0]):
move_up = False
break
# check collision with bullet
# for bullet in self.mapinfo[0]:
# if temp_rect.colliderect(bullet[0]):
# move_up = False
# break
# check collision with tile
if move_up:
for tile in self.mapinfo[2]:
# not a grass tile
if tile[1] != 4:
if temp_rect.colliderect(tile[0]):
move_up = False
break
if move_up:
allowable_move.append((new_left, new_top))
# move right
new_top = top
new_left = left + speed
if not (new_left > (416 - 26)):
move_right = True
temp_rect = pygame.Rect(new_left, new_top, 26, 26)
# check collision with enemy except goal
for enemy in self.mapinfo[1]:
if enemy[0] is not goal_rect:
if temp_rect.colliderect(enemy[0]):
move_right = False
break
# check collision with bullet
# for bullet in self.mapinfo[0]:
# if temp_rect.colliderect(bullet[0]):
# move_right = False
# break
# check collision with tile
if move_right:
for tile in self.mapinfo[2]:
# not a grass tile
if tile[1] != 4:
if temp_rect.colliderect(tile[0]):
move_right = False
break
if move_right:
allowable_move.append((new_left, new_top))
# move down
new_top = top + speed
new_left = left
if not (new_top > (416 - 26)):
move_down = True
temp_rect = pygame.Rect(new_left, new_top, 26, 26)
# check collision with enemy except goal
for enemy in self.mapinfo[1]:
if enemy[0] is not goal_rect:
if temp_rect.colliderect(enemy[0]):
move_down = False
break
# check collision with bullet
# for bullet in self.mapinfo[0]:
# if temp_rect.colliderect(bullet[0]):
# move_down = False
# break
            # check collision with tile
if move_down:
for tile in self.mapinfo[2]:
# not a grass tile
if tile[1] != 4:
if temp_rect.colliderect(tile[0]):
move_down = False
break
if move_down:
allowable_move.append((new_left, new_top))
# move left
new_top = top
new_left = left - speed
if not (new_left < 0):
move_left = True
temp_rect = pygame.Rect(new_left, new_top, 26, 26)
# check collision with enemy except goal
for enemy in self.mapinfo[1]:
if enemy[0] is not goal_rect:
if temp_rect.colliderect(enemy[0]):
move_left = False
break
# check collision with bullet
# for bullet in self.mapinfo[0]:
# if temp_rect.colliderect(bullet[0]):
# move_left = False
# break
# check collision with tile
if move_left:
for tile in self.mapinfo[2]:
# not a grass tile
if tile[1] != 4:
if temp_rect.colliderect(tile[0]):
move_left = False
break
if move_left:
allowable_move.append((new_left, new_top))
return allowable_move
def inline_with_enemy(self, player_rect, enemy_rect):
# vertical inline
if enemy_rect.left <= player_rect.centerx <= enemy_rect.right and abs(player_rect.top - enemy_rect.bottom) <= 151:
# enemy on top
if enemy_rect.bottom <= player_rect.top:
print('enemy on top')
return 0
# enemy on bottom
elif player_rect.bottom <= enemy_rect.top:
print('enemy on bottom')
return 2
# horizontal inline
if enemy_rect.top <= player_rect.centery <= enemy_rect.bottom and abs(player_rect.left - enemy_rect.right) <= 151:
# enemy on left
if enemy_rect.right <= player_rect.left:
print('enemy on left')
return 3
# enemy on right
elif player_rect.right <= enemy_rect.left:
print('enemy on right')
return 1
return False
def bullet_avoidance(self, player_info, speed, bullet_info_list, direction_from_astar, inlined_with_enemy):
# possible direction list
directions = []
# player rect
player_rect = player_info[0]
# sort bullet by euclidean distance with player
sorted_bullet_info_list = sorted(bullet_info_list, key=lambda x: self.euclidean_distance((x[0].left, x[0].top), (player_rect.centerx, player_rect.centery)))
# default shoot
shoot = 0
# default minimal distance with bullet, infinity
if sorted_bullet_info_list:
min_dist_with_bullet = self.euclidean_distance((sorted_bullet_info_list[0][0].left, sorted_bullet_info_list[0][0].top), (player_rect.centerx, player_rect.centery))
else:
            min_dist_with_bullet = float('inf')
        # trigger when bullet distance with player <= 120
if min_dist_with_bullet <= 120:
# pick the nearest bullet
bullet_rect = sorted_bullet_info_list[0][0]
bullet_direction = sorted_bullet_info_list[0][1]
            # distance with center x <= 25
            if abs(bullet_rect.centerx - player_rect.centerx) <= 25:
                # distance with center x <= 5
if abs(bullet_rect.centerx - player_rect.centerx) <= 5:
# bullet direction to up, on player's bottom
if bullet_direction == 0 and bullet_rect.top > player_rect.top:
# add direction to down
directions.append(2)
# shoot
shoot = 1
print 'block bullet from down'
# direction to down, on player's top
if bullet_direction == 2 and bullet_rect.top < player_rect.top:
# add direction to up
directions.append(0)
# shoot
shoot = 1
print 'block bullet from up'
# not too near
else:
# if bullet on player's right
if bullet_rect.left > player_rect.centerx:
# go left
directions.append(3)
# go right
# directions.append(1)
print 'go left, skip bullet'
else:
# go right
directions.append(1)
# go left
# directions.append(3)
print 'go right, skip bullet'
            # distance with center y <= 25
            elif abs(bullet_rect.centery - player_rect.centery) <= 25:
                # distance with center y <= 5
if abs(bullet_rect.centery - player_rect.centery) <= 5:
# bullet direction to right, on player's left
if bullet_direction == 1 and bullet_rect.left < player_rect.left:
# go left
directions.append(3)
# shoot
shoot = 1
print 'block bullet from left'
# bullet direction to left, on player's right
if bullet_direction == 3 and bullet_rect.left > player_rect.left:
# go right
directions.append(1)
# shoot
shoot = 1
print 'block bullet from right'
# not too near
else:
# on player bottom
if bullet_rect.top > player_rect.centery:
directions.append(0)
directions.append(2)
print 'go up, skip bullet'
else:
directions.append(2)
directions.append(0)
print 'go down, skip bullet'
            # neither distance with center x nor center y <= 25
else:
# inline with enemy direction is same as a star direction
if inlined_with_enemy == direction_from_astar:
shoot = 1
directions.append(direction_from_astar)
# bullet direction down or up
if bullet_direction == 0 or bullet_direction == 2:
# bullet on right hand side
if bullet_rect.left > player_rect.left:
if 1 in directions:
directions.remove(1)
print 'bullet on rhs, don\'t go right'
else:
if 3 in directions:
directions.remove(3)
print 'bullet on lhs, don\'t go left'
# bullet direction to left or right
if bullet_direction == 1 or bullet_direction == 3:
# bullet on bottom
if bullet_rect.top > player_rect.top:
if 2 in directions:
directions.remove(2)
print 'bullet on bottom, don\'t go down'
else:
if 0 in directions:
directions.remove(0)
                        print 'bullet on top, don\'t go up'
        # distance with nearest bullet > 120 (threshold)
else:
# if inlined
if inlined_with_enemy == direction_from_astar:
shoot = 1
directions.append(direction_from_astar)
if directions:
for direction in directions:
# go up
if direction == 0:
new_left = player_rect.left
new_top = player_rect.top - speed
# go right
elif direction == 1:
new_left = player_rect.left + speed
new_top = player_rect.top
# go down
elif direction == 2:
new_left = player_rect.left
new_top = player_rect.top + speed
# go left
elif direction == 3:
new_left = player_rect.left - speed
new_top = player_rect.top
# no change
else:
new_top = player_rect.top
new_left = player_rect.left
temp_rect = pygame.Rect(new_left, new_top, 26, 26)
# check collision with tile
if 0 <= new_top <= 416 - 26 and 0 <= new_left <= 416 - 26:
collision = False
for tile_info in self.mapinfo[2]:
tile_rect = tile_info[0]
tile_type = tile_info[1]
if tile_type != 4:
if temp_rect.colliderect(tile_rect):
collision = True
break
if collision:
if inlined_with_enemy == direction_from_astar:
shoot = 1
break
else:
return shoot, direction
# collision = temp_rect.collidelist(obstacles)
# if collision:
# if inlined_with_enemy == direction_from_astar:
# shoot = 1
# break
# else:
# return shoot, direction
# no direction appended
else:
return shoot, 4
return shoot, direction_from_astar
|
import urllib2, re, getpass
##DETERMINE WHICH DB
db_name = "2014_comp10120_x2"
user_name = "mbax4hw2"
passW = "<PASSWORD>"
print "Scraping Manchester"
domain = "https://secure.manchester.gov.uk"
cgiAddr ="/site/custom_scripts/events_search.php"
searchURL = "?searchresults=yes&dateType=anydate"
searchURL += "&date=&startDate=&endDate=&location=Anywhere&offset="
eventURL = "?hideform=yes&displayevent=yes&eventid="
HTMLSource = ""
events = []
url = domain + cgiAddr + searchURL
while url:
page = urllib2.urlopen(url)
try:
HTMLSource = page.read()
except:
pass
finally:
page.close()
eventURLs = re.findall(cgiAddr + '\\' + eventURL + '\d*', HTMLSource)
for event in eventURLs:
evPage = urllib2.urlopen(domain + event)
try:
evSource = evPage.read()
except:
pass
finally:
evPage.close()
evDetails = []
evSource = evSource[re.search('<div id="content"', evSource).start():].split('</section')[0]
title = re.search('<h\d>(.*?)</h\d', evSource).group(1)
date = re.search('<span class="icon-calendar"></span>(.*?)</li', evSource, re.S).group(1)
location = re.search('<span class="icon-location"></span>(.*?)</li', evSource).group(1)
post = location.split(',')[-1]
try:
time = re.search('<span class="icon-clock"></span>(.*?)</li', evSource).group(1)
except:
time = ''
desc = re.search('<article.*?>(.*?)</article', evSource, re.S).group(1)
events.append([title, desc, location + ", Manchester", post, date])
url = re.search('(' + cgiAddr + '\\' + searchURL + '\d*)">Next Page >>', HTMLSource)
if url != None:
url = domain + url.group(1)
else:
url = False
def notin(table, obj):
for i in table:
if i[1] == obj[0] and i[2] == obj[2]:
if i[3] == obj[4] and i[4] == obj[1]:
if i[5] == obj[3]:
return False
return True
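# Illustrative note (inferred from the comparisons above, not part of the original script):
# notin() expects `obj` to be a scraped event [title, desc, location, post, date] and each
# `table` row to look like (id, name, location, date, description, postcode, ...); it returns
# False ("already present") only when title, location, date, description and postcode all match.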
import MySQLdb
db = MySQLdb.connect(host="dbhost.cs.man.ac.uk", # your host, usually localhost
user=user_name, # your username
passwd=<PASSWORD>, # your password
db=db_name) # name of the data base
# you must create a Cursor object. It will let
# you execute all the queries you need
cur = db.cursor()
# Use all the SQL you like
#Get all events
cur.execute("SELECT * FROM Events");
existing = cur.fetchall()
commited = []
import datetime
for i in events:
#Get rid of the h2s
i[1] = i[1].replace("<h2>","").replace("</h2>","")
for j in range(len(i)):
i[j] = i[j].strip().replace("Back to search results","")
if notin(existing, i) and notin(commited, i):
i[1] = i[1].strip().replace("<h2>","")
for j in range(len(i)):
i[j] = i[j].strip()
commited.append([None, i[0], i[2], i[4], i[1], i[3]])
date = i[4].split("-")
unixTimes = []
for j in date:
dt = datetime.datetime.strptime(j.replace("\r\n","").strip(), "%A %d %B %Y")
unixTimes.append(int((dt - datetime.datetime(1970,1,1)).total_seconds()))
if len(unixTimes) == 1:
unixTimes.append(unixTimes[0])
cur.execute("""INSERT INTO Events
(`name`, `location`, `startDate`, `endDate`, `description`,`postcode`,`createdBy`)
VALUES (%s,%s,%s,%s,%s,%s,%s)""",
[i[0], i[2], str(unixTimes[0]), str(unixTimes[1]), i[1], i[3], "71409503111"])
db.commit()
db.close()
print "Scraped Manchester succesfully"
|
from __future__ import division
from past.utils import old_div
import math
import astropy.units as astropy_units
import numpy as np
from scipy.special import erfcinv, erf
from astromodels.functions.function import Function1D, FunctionMeta, ModelAssertionViolation
deg2rad = old_div(np.pi,180.)
rad2deg = old_div(180.,np.pi)
# noinspection PyPep8Naming
class Gaussian(Function1D, metaclass=FunctionMeta):
r"""
description :
A Gaussian function
    latex : $ K \frac{1}{\sigma \sqrt{2 \pi}}\exp{-\frac{(x-\mu)^2}{2~(\sigma)^2}} $
parameters :
F :
desc : Integral between -inf and +inf. Fix this to 1 to obtain a Normal distribution
initial value : 1
mu :
desc : Central value
initial value : 0.0
sigma :
desc : standard deviation
initial value : 1.0
min : 1e-12
tests :
- { x : 0.0, function value: 0.3989422804014327, tolerance: 1e-10}
- { x : -1.0, function value: 0.24197072451914337, tolerance: 1e-9}
"""
# Place this here to avoid recomputing it all the time
__norm_const = old_div(1.0, (math.sqrt(2 * np.pi)))
def _setup(self):
self._is_prior = True
def _set_units(self, x_unit, y_unit):
# The normalization is the integral from -inf to +inf, i.e., has dimensions of
# y_unit * x_unit
self.F.unit = y_unit * x_unit
# The mu has the same dimensions as the x
self.mu.unit = x_unit
# sigma has the same dimensions as x
self.sigma.unit = x_unit
# noinspection PyPep8Naming
def evaluate(self, x, F, mu, sigma):
norm = old_div(self.__norm_const, sigma)
return F * norm * np.exp(old_div(-np.power(x - mu, 2.), (2 * np.power(sigma, 2.))))
def from_unit_cube(self, x):
"""
Used by multinest
:param x: 0 < x < 1
:param lower_bound:
:param upper_bound:
:return:
"""
mu = self.mu.value
sigma = self.sigma.value
sqrt_two = 1.414213562
if x < 1e-16 or (1 - x) < 1e-16:
res = -1e32
else:
res = mu + sigma * sqrt_two * erfcinv(2 * (1 - x))
return res
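    # Quick sanity check of the inverse-CDF mapping above (illustrative, assuming the default
    # mu=0, sigma=1): x = 0.5 gives erfcinv(1) = 0, i.e. the median mu, and x = 0.841 maps to
    # roughly mu + sigma, matching the normal CDF at one standard deviation.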
class Truncated_gaussian(Function1D, metaclass=FunctionMeta):
r"""
description :
A truncated Gaussian function defined on the interval between the lower_bound (a) and upper_bound (b)
latex : $\begin{split}f(x;\mu,\sigma,a,b)=\frac{\frac{1}{\sigma} \phi\left( \frac{x-\mu}{\sigma} \right)}{\Phi\left( \frac{b-\mu}{\sigma} \right) - \Phi\left( \frac{a-\mu}{\sigma} \right)}\\\phi\left(z\right)=\frac{1}{\sqrt{2 \pi}}\exp\left(-\frac{1}{2}z^2\right)\\\Phi\left(z\right)=\frac{1}{2}\left(1+erf\left(\frac{z}{\sqrt(2)}\right)\right)\end{split}$
parameters :
F :
desc : Integral between -inf and +inf. Fix this to 1 to obtain a Normal distribution
initial value : 1
mu :
desc : Central value
initial value : 0.0
sigma :
desc : standard deviation
initial value : 1.0
min : 1e-12
lower_bound :
desc: lower bound of gaussian, setting to -np.inf results in half normal distribution
initial value : -1.
upper_bound :
desc: upper bound of gaussian setting to np.inf results in half normal distribution
initial value : 1.
tests :
- { x : 0.0, function value: 0.3989422804014327, tolerance: 1e-10}
- { x : -1.0, function value: 0.24197072451914337, tolerance: 1e-9}
"""
# Place this here to avoid recomputing it all the time
__norm_const = old_div(1.0, (math.sqrt(2 * np.pi)))
def _setup(self):
self._is_prior = True
def _set_units(self, x_unit, y_unit):
# The normalization is the integral from -inf to +inf, i.e., has dimensions of
# y_unit * x_unit
self.F.unit = y_unit * x_unit
# The mu has the same dimensions as the x
self.mu.unit = x_unit
# The lower_bound has the same dimensions as the x
self.lower_bound.unit = x_unit
# The upper_bound has the same dimensions as the x
self.upper_bound.unit = x_unit
# sigma has the same dimensions as x
self.sigma.unit = x_unit
# noinspection PyPep8Naming
def evaluate(self, x, F, mu, sigma, lower_bound, upper_bound):
# phi is in unitless, so we need to do this trick
# to keep the units right
norm = old_div(self.__norm_const, sigma)
phi = np.zeros(x.shape) * F * norm * 0.
idx = (x >= lower_bound) & (x <= upper_bound)
sqrt_two = 1.414213562
# precalculate the arguments to the CDF
lower_arg = old_div((lower_bound - mu), sigma)
upper_arg = old_div((upper_bound - mu), sigma)
# the typical gaussian functions
phi[idx] = np.exp(old_div(-np.power(x[idx] - mu, 2.), (2 * np.power(sigma, 2.)))) * F * norm
# the denominator is a function of the CDF
if isinstance(F, astropy_units.Quantity):
# erf cannot accept units
upper_arg = upper_arg.value
lower_arg = lower_arg.value
theta_lower = 0.5 + 0.5 * erf(old_div(lower_arg, sqrt_two))
theta_upper = 0.5 + 0.5 * erf(old_div(upper_arg, sqrt_two))
return old_div(phi, (theta_upper - theta_lower))
def from_unit_cube(self, x):
mu = self.mu.value
sigma = self.sigma.value
lower_bound = self.lower_bound.value
upper_bound = self.upper_bound.value
sqrt_two = 1.414213562
if x < 1e-16 or (1 - x) < 1e-16:
res = -1e32
# precalculate the arguments to the CDF
lower_arg = old_div((lower_bound - mu), sigma)
upper_arg = old_div((upper_bound - mu), sigma)
theta_lower = 0.5 + 0.5 * erf(old_div(lower_arg, sqrt_two))
theta_upper = 0.5 + 0.5 * erf(old_div(upper_arg, sqrt_two))
# now precalculate the argument to the Inv. CDF
arg = theta_lower + x * (theta_upper - theta_lower)
out = mu + sigma * sqrt_two * erfcinv(2 * (1 - arg))
return np.clip(out, lower_bound, upper_bound)
class Cauchy(Function1D, metaclass=FunctionMeta):
r"""
description :
The Cauchy distribution
latex : $ K \frac{1}{ \gamma \pi} \left[ \frac{\gamma^2}{(x-x_0)^2 + \gamma^2} \right] $
parameters :
K :
desc : Integral between -inf and +inf. Fix this to 1 to obtain a Cauchy distribution
initial value : 1
x0 :
desc : Central value
initial value : 0.0
gamma :
            desc : scale parameter (half width at half maximum)
initial value : 1.0
min : 1e-12
tests :
- { x : 0.0, function value: 0.3989422804014327, tolerance: 1e-10}
- { x : -1.0, function value: 0.24197072451914337, tolerance: 1e-9}
"""
# Place this here to avoid recomputing it all the time
__norm_const = old_div(1.0, (math.sqrt(2 * np.pi)))
def _setup(self):
self._is_prior = True
def _set_units(self, x_unit, y_unit):
# The normalization is the integral from -inf to +inf, i.e., has dimensions of
# y_unit * x_unit
self.K.unit = y_unit * x_unit
# The mu has the same dimensions as the x
self.x0.unit = x_unit
# sigma has the same dimensions as x
self.gamma.unit = x_unit
# noinspection PyPep8Naming
def evaluate(self, x, K, x0, gamma):
norm = old_div(1, (gamma * np.pi))
gamma2 = gamma * gamma
return K * norm * gamma2 / ((x - x0) * (x - x0) + gamma2)
def from_unit_cube(self, x):
"""
Used by multinest
:param x: 0 < x < 1
:param lower_bound:
:param upper_bound:
:return:
"""
x0 = self.x0.value
gamma = self.gamma.value
half_pi = 1.57079632679
res = np.tan(np.pi * x - half_pi) * gamma + x0
return res
class Cosine_Prior(Function1D, metaclass=FunctionMeta):
r"""
description :
        A prior proportional to the cosine of the angle on the angular interval lower_bound - upper_bound (in degrees)
latex : $\cos(x)$
parameters :
lower_bound :
desc : Lower bound for the interval
initial value : -90
min : -np.inf
max : np.inf
upper_bound :
desc : Upper bound for the interval
initial value : 90
min : -np.inf
max : np.inf
value :
desc : Value in the interval
initial value : 1.0
"""
def _setup(self):
self._fixed_units = (astropy_units.dimensionless_unscaled,astropy_units.dimensionless_unscaled)
self._is_prior = True
def _set_units(self, x_unit, y_unit):
# this prior needs to use the fixed units and
# they do not need to be converted as they have no
# dimension
x_unit = self._fixed_units[0]
y_unit = self._fixed_units[1]
# Lower and upper bound has the same unit as x
self.lower_bound.unit = x_unit
self.upper_bound.unit = x_unit
# value has the same unit as y
self.value.unit = y_unit
def has_fixed_units(self):
return True
def evaluate(self, x, lower_bound, upper_bound,value):
# The value * 0 is to keep the units right
result = np.zeros(x.shape) * value * 0
idx = (x >= lower_bound) & (x <= upper_bound)
norm = (np.sin(deg2rad*(upper_bound)) - np.sin(deg2rad*(lower_bound))) * 57.29577795
result[idx] = value * np.cos(deg2rad*( x[idx] )) / norm
return result
def from_unit_cube(self, x):
"""
Used by multinest
:param x: 0 < x < 1
:param lower_bound:
:param upper_bound:
:return:
"""
cosdec_min = np.cos(deg2rad*(90.0 + self.lower_bound.value))
cosdec_max = np.cos(deg2rad*(90.0 + self.upper_bound.value))
v = x * (cosdec_max - cosdec_min)
v += cosdec_min
v = np.clip(v, -1.0, 1.0)
# Now this generates on [0,pi)
dec = np.arccos(v)
# convert to degrees
dec = rad2deg * dec
# now in range [-90,90.0)
dec -= 90.0
return dec
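    # Illustrative check of the mapping above (assuming lower_bound=-90, upper_bound=90):
    # x=0 -> arccos(1)=0 deg -> dec=-90; x=0.5 -> arccos(0)=90 deg -> dec=0;
    # x=1 -> arccos(-1)=180 deg -> dec=+90, i.e. declinations drawn uniformly on the sphere.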
class Log_normal(Function1D, metaclass=FunctionMeta):
r"""
description :
A log normal function
    latex : $ K \frac{1}{ x \sigma \sqrt{2 \pi}}\exp{-\frac{(\log x/piv - \mu/piv)^2}{2~(\sigma)^2}} $
parameters :
F :
            desc : Integral between 0 and +inf. Fix this to 1 to obtain a log Normal distribution
initial value : 1
mu :
desc : Central value
initial value : 0.0
sigma :
desc : standard deviation
initial value : 1.0
min : 1e-12
piv :
desc : pivot. Leave this to 1 for a proper log normal distribution
initial value : 1.0
fix : yes
tests :
- { x : 0.0, function value: 0.3989422804014327, tolerance: 1e-10}
- { x : -1.0, function value: 0.24197072451914337, tolerance: 1e-9}
"""
# Place this here to avoid recomputing it all the time
__norm_const = old_div(1.0, (math.sqrt(2 * np.pi)))
def _setup(self):
self._is_prior = True
def _set_units(self, x_unit, y_unit):
# The normalization is the integral from -inf to +inf, i.e., has dimensions of
# y_unit * x_unit
self.F.unit = y_unit
# The mu has the same dimensions as the x
self.mu.unit = x_unit
# The pivot has the same units as x
self.piv.unit = x_unit
# sigma has the same dimensions as x
self.sigma.unit = x_unit
# noinspection PyPep8Naming
def evaluate(self, x, F, mu, sigma, piv):
# The value * 0 is to keep the units right
result = np.zeros(x.shape) * F * 0
# The log normal is not defined if x < 0. The "0 * x" part is to conserve the units if
# x has them, because 0 * x will be a Quantity with the same units as x
idx = (x > 0 * x)
result[idx] = F * self.__norm_const / (sigma / piv * x / piv) * np.exp(
old_div(-np.power(np.log(old_div(x, piv)) - old_div(mu, piv), 2.), (2 * np.power(old_div(sigma, piv), 2.))))
return result
def from_unit_cube(self, x):
"""
Used by multinest
:param x: 0 < x < 1
:param lower_bound:
:param upper_bound:
:return:
"""
mu = self.mu.value
sigma = self.sigma.value
sqrt_two = 1.414213562
if x < 1e-16 or (1 - x) < 1e-16:
res = -1e32
else:
res = mu + sigma * sqrt_two * erfcinv(2 * (1 - x))
return np.exp(res)
class Uniform_prior(Function1D, metaclass=FunctionMeta):
r"""
description :
A function which is constant on the interval lower_bound - upper_bound and 0 outside the interval. The
extremes of the interval are counted as part of the interval.
latex : $ f(x)=\begin{cases}0 & x < \text{lower_bound} \\\text{value} & \text{lower_bound} \le x \le \text{upper_bound} \\ 0 & x > \text{upper_bound} \end{cases}$
parameters :
lower_bound :
desc : Lower bound for the interval
initial value : 0
min : -np.inf
max : np.inf
upper_bound :
desc : Upper bound for the interval
initial value : 1
min : -np.inf
max : np.inf
value :
desc : Value in the interval
initial value : 1.0
tests :
- { x : 0.5, function value: 1.0, tolerance: 1e-20}
- { x : -0.5, function value: 0, tolerance: 1e-20}
"""
def _setup(self):
self._is_prior = True
def _set_units(self, x_unit, y_unit):
# Lower and upper bound has the same unit as x
self.lower_bound.unit = x_unit
self.upper_bound.unit = x_unit
# value has the same unit as y
self.value.unit = y_unit
def evaluate(self, x, lower_bound, upper_bound, value):
# The value * 0 is to keep the units right
result = np.zeros(x.shape) * value * 0
idx = (x >= lower_bound) & (x <= upper_bound)
result[idx] = value
return result
def from_unit_cube(self, x):
"""
Used by multinest
:param x: 0 < x < 1
:param lower_bound:
:param upper_bound:
:return:
"""
lower_bound = self.lower_bound.value
upper_bound = self.upper_bound.value
low = lower_bound
spread = float(upper_bound - lower_bound)
par = x * spread + low
return par
class Log_uniform_prior(Function1D, metaclass=FunctionMeta):
r"""
description :
A function which is K/x on the interval lower_bound - upper_bound and 0 outside the interval. The
extremes of the interval are NOT counted as part of the interval. Lower_bound must be >= 0.
latex : $ f(x)=K~\begin{cases}0 & x \le \text{lower_bound} \\\frac{1}{x} & \text{lower_bound} < x < \text{upper_bound} \\ 0 & x \ge \text{upper_bound} \end{cases}$
parameters :
lower_bound :
desc : Lower bound for the interval
initial value : 1e-20
min : 1e-30
max : np.inf
upper_bound :
desc : Upper bound for the interval
initial value : 100
min : 1e-30
max : np.inf
K :
desc : Normalization
initial value : 1
fix : yes
"""
def _setup(self):
self._is_prior = True
self._handle_units = False
def _set_units(self, x_unit, y_unit):
# Lower and upper bound has the same unit as x
self.lower_bound.unit = x_unit
self.upper_bound.unit = x_unit
self.K.unit = y_unit * x_unit
def evaluate(self, x, lower_bound, upper_bound, K):
# This makes the prior proper because it is the integral between lower_bound and upper_bound
res = np.where((x > lower_bound) & (x < upper_bound), old_div(K, x), 0)
if isinstance(x, astropy_units.Quantity):
return res * self.y_unit
else:
return res
def from_unit_cube(self, x):
"""
Used by multinest
:param x: 0 < x < 1
:param lower_bound:
:param upper_bound:
:return:
"""
low = math.log10(self.lower_bound.value)
up = math.log10(self.upper_bound.value)
spread = up - low
par = 10 ** (x * spread + low)
return par
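    # Illustrative check of the unit-cube mapping above (assuming lower_bound is set to, say,
    # 1 and upper_bound to 100): x=0 -> 1, x=1 -> 100, and x=0.5 -> 10, the geometric mean,
    # as expected for a prior that is uniform in log10(x).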
|
__author__ = '<NAME>'
__email__ = '<EMAIL>'
# Import modules
import checkvalue as cv
class Clock(object):
def __init__(self, start_time=0.0, end_time=3.0, dt_outer=1.e-1, dt_inner=1.e-2):
self._positions = ['START', 'PREVIOUS_OUT', 'PREVIOUS_IN', 'CURRENT', 'FORWARD_IN_OLD', 'FORWARD_OUT',
'FORWARD_OUT_OLD', 'END']
# Initialize class attributes
self._times = dict()
self._times['START'] = start_time
self._times['PREVIOUS_OUT'] = 0.0
self._times['PREVIOUS_IN'] = 0.0
self._times['CURRENT'] = 0.0
self._times['FORWARD_IN_OLD'] = 0.0
self._times['FORWARD_OUT'] = 0.0
self._times['FORWARD_OUT_OLD'] = 0.0
self._times['END'] = end_time
self._dt_outer = None
self._dt_inner = None
self.set_dt_outer(dt_outer)
self.set_dt_inner(dt_inner)
def get_time(self, position):
return self._times[position]
def set_time(self, position_from, position_to):
self._times[position_to] = self._times[position_from]
def get_positions(self):
return self._positions
def get_dt_inner(self):
return self._dt_inner
def get_dt_outer(self):
return self._dt_outer
def set_dt_outer(self, dt_outer):
if not cv.is_float(dt_outer):
            msg = 'Unable to set DT outer for non-float value {0}' \
                .format(dt_outer)
raise ValueError(msg)
elif dt_outer <= 0.0:
            msg = 'Unable to set DT outer for non-positive value {0}' \
                .format(dt_outer)
raise ValueError(msg)
else:
self._dt_outer = dt_outer
def set_dt_inner(self, dt_inner):
if not cv.is_float(dt_inner):
            msg = 'Unable to set DT inner for non-float value {0}' \
                .format(dt_inner)
raise ValueError(msg)
elif dt_inner <= 0.0:
            msg = 'Unable to set DT inner for non-positive value {0}' \
                .format(dt_inner)
raise ValueError(msg)
else:
self._dt_inner = dt_inner
def take_inner_step(self):
# Check to make sure inner step is multiple of outer time step size
if abs(self._dt_outer - round(self._dt_outer / self._dt_inner) * self._dt_inner) > 1e-8:
msg = 'Unable to take inner step since DT outer is not an integer ' \
'multiple of DT inner. DT inner: {0}, DT outer: {1}' \
.format(self._dt_inner, self._dt_outer)
raise ValueError(msg)
# Check to make sure that CURRENT is less than FORWARD_OUT
elif self._times['CURRENT'] >= self._times['FORWARD_OUT']:
msg = 'Unable to take inner step since CURRENT time is not ' \
'less than the FORWARD_OUT time. CURRENT: {0}, FORWARD_OUT: {1}' \
.format(self._times['CURRENT'], self._times['FORWARD_OUT'])
raise ValueError(msg)
else:
self._times['PREVIOUS_IN'] = self._times['CURRENT']
self._times['CURRENT'] = self._times['CURRENT'] + self._dt_inner
def take_outer_step(self):
# Check to make sure inner step is multiple of outer time step size
if abs(self._dt_outer - round(self._dt_outer / self._dt_inner) * self._dt_inner) > 1e-8:
msg = 'Unable to take outer step since DT outer is not an integer ' \
'multiple of DT inner. DT inner: {0}, DT outer: {1}' \
.format(self._dt_inner, self._dt_outer)
raise ValueError(msg)
# Check to make sure that CURRENT time equals FORWARD_OUT
elif abs(self._times['CURRENT'] - self._times['FORWARD_OUT']) > 1.e-6:
msg = 'Unable to take outer step since CURRENT time is not equal to ' \
'FORWARD_OUT time. CURRENT: {0}, FORWARD_OUT: {1}' \
.format(self._times['CURRENT'], self._times['FORWARD_OUT'])
raise ValueError(msg)
else:
self._times['PREVIOUS_OUT'] = self._times['FORWARD_OUT']
self._times['FORWARD_OUT'] = self._times['FORWARD_OUT'] + self._dt_outer
if self._times['FORWARD_OUT'] > self._times['END']:
self._times['FORWARD_OUT'] = self._times['END']
# set CURRENT and PREVIOUS_IN to PREVIOUS_OUT
self._times['CURRENT'] = self._times['PREVIOUS_OUT']
self._times['PREVIOUS_IN'] = self._times['PREVIOUS_OUT']
def reset_to_previous_outer_step(self):
# Check to make sure that CURRENT time equals FORWARD_OUT
if abs(self._times['CURRENT'] - self._times['FORWARD_OUT']) > 1.e-6:
msg = 'Unable to reset to previous out since CURRENT time is not equal to ' \
'FORWARD_OUT time. CURRENT: {0}, FORWARD_OUT: {1}' \
.format(self._times['CURRENT'], self._times['FORWARD_OUT'])
raise ValueError(msg)
else:
self._times['CURRENT'] = self._times['PREVIOUS_OUT']
self._times['PREVIOUS_IN'] = self._times['PREVIOUS_OUT']
def __repr__(self):
string = 'OpenRK Clock\n'
string += ' START time \t\t\t = {:6.5f} \n'.format(self._times['START'])
string += ' PREVIOUS_OUT time \t\t = {:6.5f} \n'.format(self._times['PREVIOUS_OUT'])
string += ' PREVIOUS_IN time \t\t = {:6.5f} \n'.format(self._times['PREVIOUS_IN'])
string += ' CURRENT time \t\t\t = {:6.5f} \n'.format(self._times['CURRENT'])
string += ' FORWARD_IN_OLD time \t\t = {:6.5f} \n'.format(self._times['FORWARD_IN_OLD'])
string += ' FORWARD_OUT time \t\t = {:6.5f} \n'.format(self._times['FORWARD_OUT'])
        string += ' FORWARD_OUT_OLD time \t\t = {:6.5f} \n'.format(self._times['FORWARD_OUT_OLD'])
string += ' END time \t\t\t = {:6.5f} \n'.format(self._times['END'])
string += ' DT outer \t\t\t = {:6.5f} \n'.format(self._dt_outer)
string += ' DT inner \t\t\t = {:6.5f} \n'.format(self._dt_inner)
return string
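# A minimal usage sketch (not part of the original module). It assumes checkvalue.is_float
# accepts plain Python floats; with dt_outer=0.1 and dt_inner=0.01 each outer step spans ten
# inner steps, and FORWARD_OUT is clipped to END on the final outer step.
if __name__ == '__main__':
    clock = Clock(start_time=0.0, end_time=0.2, dt_outer=1.e-1, dt_inner=1.e-2)
    clock.set_time('START', 'CURRENT')
    clock.set_time('START', 'FORWARD_OUT')
    clock.take_outer_step()              # FORWARD_OUT: 0.0 -> 0.1
    for _ in range(10):
        clock.take_inner_step()          # CURRENT advances by 0.01 up to FORWARD_OUT
    print(clock)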
|
# Copyright (c) 2021 OpenCyphal
# This software is distributed under the terms of the MIT License.
# Author: <NAME> <<EMAIL>>
from __future__ import annotations
from typing import TYPE_CHECKING, Optional, Callable, AbstractSet, Any
import dataclasses
import math
import numpy as np
from numpy.typing import NDArray
import pycyphal
from pycyphal.transport import MessageDataSpecifier, ServiceDataSpecifier, Timestamp, AlienTransfer
import yakut
from ._iface import Iface
if TYPE_CHECKING:
import uavcan.node
import uavcan.node.port
@dataclasses.dataclass()
class PortSet:
pub: AbstractSet[int] = dataclasses.field(default_factory=frozenset)
sub: AbstractSet[int] = dataclasses.field(default_factory=frozenset)
cln: AbstractSet[int] = dataclasses.field(default_factory=frozenset)
srv: AbstractSet[int] = dataclasses.field(default_factory=frozenset)
@dataclasses.dataclass(frozen=True)
class NodeState:
online: bool
"""
Online means that the node is emitting any transfers whatsoever.
"""
heartbeat: Optional[uavcan.node.Heartbeat_1_0]
"""
An online node without a heartbeat is a zombie, which is an error condition because heartbeats are required
for all nodes unconditionally.
"""
info: Optional[uavcan.node.GetInfo_1_0.Response]
ports: Optional[PortSet]
"""
Defined only if the node keeps its uavcan.node.List publications up-to-date.
"""
class Avatar: # pylint: disable=too-many-instance-attributes
def __init__(
self,
iface: Iface,
node_id: Optional[int],
info: Optional[uavcan.node.GetInfo_1_0.Response] = None,
) -> None:
import uavcan.node
import uavcan.node.port
self._node_id = node_id
self._heartbeat: Optional[uavcan.node.Heartbeat_1_0] = None
self._iface = iface
self._info = info
self._num_info_requests = 0
self._ts_activity = -math.inf
self._ts_heartbeat = -math.inf
self._ts_port_list = -math.inf
self._ts_info_request = -math.inf
self._ports = PortSet()
self._dispatch: dict[Any | tuple[Any, ServiceDataSpecifier.Role], Callable[[float, Any], None],] = {
(uavcan.node.GetInfo_1_0, ServiceDataSpecifier.Role.RESPONSE): self._on_info_response,
uavcan.node.port.List_0_1: self._on_port_list,
uavcan.node.Heartbeat_1_0: self._on_heartbeat,
}
self._iface.add_standard_subscription(uavcan.node.Heartbeat_1_0)
self._iface.add_standard_subscription(uavcan.node.port.List_0_1)
self._iface.add_trace_handler(self._on_trace)
def _restart(self) -> None:
self._info = None
self._num_info_requests = 0
self._ts_port_list = -math.inf
def _on_info_response(self, ts: float, obj: Any) -> None:
import uavcan.node
_logger.info("%r: Received node info", self)
assert isinstance(obj, uavcan.node.GetInfo_1_0.Response)
_ = ts
self._info = obj
def _on_port_list(self, ts: float, obj: Any) -> None:
import uavcan.node.port
assert isinstance(obj, uavcan.node.port.List_0_1)
self._ports.pub = expand_subjects(obj.publishers)
self._ports.sub = expand_subjects(obj.subscribers)
self._ports.cln = expand_mask(obj.clients.mask)
self._ports.srv = expand_mask(obj.servers.mask)
self._ts_port_list = ts
def _on_heartbeat(self, ts: float, obj: Any) -> None:
from uavcan.node import Heartbeat_1_0 as Heartbeat, GetInfo_1_0 as GetInfo
assert isinstance(obj, Heartbeat)
# We used to have a node-ID collision heuristic here that checked if the timestamp is oscillating back and
# forth, as it would indicate that there are multiple nodes running on the same node-ID. While this
# heuristic is correct, it is ineffective in practice because heartbeats of nodes with a lower uptime
# would have lower transfer-ID values, which (unless the transport is cyclic-TID) would make the transfer
# reassembler discard such new heartbeats from conflicting nodes as duplicates (already seen transfers).
# It is therefore impossible to detect collisions at this layer (it is possible only below the transport
# layer). Although it might *occasionally* work if the heartbeats are delayed or published irregularly.
# Invalidate the node info if the uptime goes backwards or if we received a heartbeat after a long pause.
restart = self._heartbeat and (
(self._heartbeat.uptime > obj.uptime) or (ts - self._ts_heartbeat > Heartbeat.OFFLINE_TIMEOUT)
)
if restart:
_logger.info("%r: Restart detected: %r", self, obj)
self._restart()
if not self._info and self._node_id is not None:
timeout = 2 ** (self._num_info_requests + 2)
if ts - self._ts_info_request >= timeout:
_logger.debug("%r: Would request info; timeout=%.1f", self, timeout)
self._num_info_requests += 1
self._ts_info_request = ts
self._iface.try_request(GetInfo, self._node_id, GetInfo.Request())
self._heartbeat = obj
self._ts_heartbeat = ts
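    # Illustrative note (not in the original): the GetInfo back-off above doubles each attempt,
    # so info requests are sent no sooner than 4 s, then 8 s, 16 s, ... after the previous one.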
def _on_trace(self, ts: Timestamp, tr: AlienTransfer) -> None:
from pycyphal.dsdl import get_fixed_port_id
own = tr.metadata.session_specifier.source_node_id == self._node_id
if not own:
return
ds = tr.metadata.session_specifier.data_specifier
self._ts_activity = float(ts.monotonic)
# Snoop on transfers sent by our node. Even if we can't ask we can still learn things by listening.
for ty, handler in self._dispatch.items():
if isinstance(ty, tuple):
ty, role = ty
assert isinstance(ty, type) and isinstance(role, ServiceDataSpecifier.Role)
if isinstance(ds, ServiceDataSpecifier) and ds.role == role and ds.service_id == get_fixed_port_id(ty):
rr = getattr(ty, role.name.capitalize())
obj = pycyphal.dsdl.deserialize(rr, tr.fragmented_payload)
_logger.debug("%r: Service snoop: %r from %r", self, obj, tr)
if obj is not None:
handler(float(ts.monotonic), obj)
elif isinstance(ty, type) and (fpid := get_fixed_port_id(ty)) is not None:
if isinstance(ds, MessageDataSpecifier) and ds.subject_id == fpid:
obj = pycyphal.dsdl.deserialize(ty, tr.fragmented_payload)
_logger.debug("%r: Message snoop: %r from %r", self, obj, tr)
if obj is not None:
handler(float(ts.monotonic), obj)
else:
assert False
def update(self, ts: float) -> NodeState:
from uavcan.node import Heartbeat_1_0 as Heartbeat
from uavcan.node.port import List_0_1 as PortList
if self._heartbeat and self._ts_activity - self._ts_heartbeat > Heartbeat.OFFLINE_TIMEOUT:
_logger.info("%r: Much more recent activity than the last heartbeat, we've gone zombie", self)
self._heartbeat = None
online = (ts - max(self._ts_heartbeat, self._ts_activity)) <= Heartbeat.OFFLINE_TIMEOUT
port_introspection_valid = (ts - self._ts_port_list) <= PortList.MAX_PUBLICATION_PERIOD * 2
return NodeState(
online=online,
heartbeat=self._heartbeat,
info=self._info,
ports=self._ports if port_introspection_valid else None,
)
def __repr__(self) -> str:
return str(pycyphal.util.repr_attributes(self, node_id=self._node_id))
def expand_subjects(m: uavcan.node.port.SubjectIDList_0_1) -> AbstractSet[int]:
if m.sparse_list is not None:
return frozenset(int(x.value) for x in m.sparse_list)
if m.mask:
return expand_mask(m.mask)
if m.total:
return _COMPLETE_SUBJECT_SET
assert False
def expand_mask(mask: NDArray[np.bool_]) -> AbstractSet[int]:
return frozenset(x for x in range(len(mask)) if mask[x])
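# Illustrative example (not part of the original module): a port-list mask is a boolean array
# indexed by port-ID, so expand_mask(np.array([True, False, True])) yields frozenset({0, 2});
# expand_subjects() falls back to this when no sparse list is given, and to the complete
# subject set when `total` is set.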
N_NODES = 65535 # The theoretical limit for all kinds of transports.
N_SUBJECTS = MessageDataSpecifier.SUBJECT_ID_MASK + 1
N_SERVICES = ServiceDataSpecifier.SERVICE_ID_MASK + 1
_COMPLETE_SUBJECT_SET = frozenset(range(N_SUBJECTS))
"""Made static for performance reasons."""
_logger = yakut.get_logger(__name__)
|
# Repository: bit-bcilab/SiamDCA
import numpy as np
import tensorflow as tf
import keras.backend as K
from tracker.BaseTracker import BaseSiamTracker, change, sz
from training.Augmentation import random_crop
from tracker.BoxDecoder import ltrb_decoder
from utils.grid import generate_grid
from utils.image import rgb_normalize, get_subwindow
from utils.bbox import corner2center, clip_bbox_center, center2corner, clip_bbox_corner
import random
import cv2
class SiamDCATracker(BaseSiamTracker):
def __init__(self, model, model_cfg, session):
super(BaseSiamTracker, self).__init__()
self.model = model
self.mode = model_cfg['MODE']
self.crop_settings_temp = model_cfg['CROP_SETTINGS_TEMP']
self.crop_size_rate_z = self.crop_settings_temp['crop_size_rate']
self.crop_settings_search = model_cfg['CROP_SETTINGS_SEARCH']
self.search_size = model_cfg['SEARCH_SIZE'][1]
self.template_size = model_cfg['TEMPLATE_SIZE'][1]
self.grid = generate_grid(model_cfg['SEARCH_SIZE'], model_cfg['SCORE_SIZE'])
hanning = np.hanning(model_cfg['SCORE_SIZE'][0])
window_ = np.outer(hanning, hanning)
self.window = window_.astype(np.float32)
num_filters, nz = model.layers[-2].outputs[0].shape.as_list()[1:3]
self.x = tf.placeholder(tf.float32, shape=[None] + model_cfg['SEARCH_SIZE'])
self.z = tf.placeholder(tf.float32, shape=[None] + model_cfg['TEMPLATE_SIZE'])
self.zf_t = [tf.placeholder(tf.float32, shape=[None, num_filters, nz]) for i in range(3)] + \
[tf.placeholder(tf.float32, shape=[None, nz, num_filters]) for i in range(3)]
self.score, self.bbox = self.build_graph()
self.model.load_weights(model_cfg['WEIGHT_PATH'], by_name=True)
self.sess = session
def build_graph(self):
zf = self.model.layers[2](self.z)
zf = self.model.layers[3](zf)
self.zf = self.model.layers[4](zf)
xf = self.model.layers[2](self.x)
xf = self.model.layers[3](xf)
cls, loc = self.model.layers[5](xf + self.zf_t)
return ltrb_decoder(cls, loc, self.grid)
def init(self, img, bbox, video_name=None):
self.video_name = video_name
self.channel_average = np.mean(img, axis=(0, 1))
self.image_shape = img.shape[:2]
self.center_pos = np.array([bbox[0] + (bbox[2] - 1) / 2, bbox[1] + (bbox[3] - 1) / 2])
self.size = np.array([bbox[2], bbox[3]])
self.target = img[int(bbox[1]): int(bbox[1] + bbox[3]), int(bbox[0]): int(bbox[0] + bbox[2]), :]
if self.mode == 'NFS':
self.crop_settings_temp['crop_size_rate'] = fix_temp_area(bbox) * self.crop_size_rate_z
box = np.array([bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]], dtype=np.float32)
z_crop, z_box = random_crop(img, box, self.template_size, self.crop_settings_temp)
self.z_crop = rgb_normalize(z_crop, mobilenet=False)
self.zf_v = self.sess.run(self.zf, feed_dict={self.z: self.z_crop, K.learning_phase(): 0})
self.s_x0 = round(np.sqrt(np.prod(self.size + self.crop_settings_search['context_amount'] * np.sum(self.size)))
* self.crop_settings_search['crop_size_rate'])
self.lost_num = 0
self.success = 1
if self.mode == 'NFS':
self.ratios = [0.76, 0.85, 0.94, 1., 1.08, 1.16]
zf_v = []
for i in range(len(self.zf_v)):
zf_v_ = self.zf_v[i].copy()
zf_v_ = np.tile(zf_v_, (len(self.ratios), 1, 1))
zf_v.append(zf_v_)
self.zf_v_ = zf_v
def track(self, img, track_cfg):
        # UAV sequences are mostly long-term, with frequent out-of-view distractions.
        # Strategy: confidence-based loss detection + enlarged detection area + strong re-acquisition once the score exceeds a threshold.
        # Because the target can change drastically in long sequences, the search-region size is not constrained.
if 'UAV' in self.mode:
return self.track_uav(img, track_cfg)
        # OTB sequences are mostly short and simple; a few fail only because of brief occlusion.
        # Use a loose loss-detection/re-acquisition strategy, but strongly suppress changes in the search-region size.
elif 'OTB' in self.mode:
return self.track_otb(img, track_cfg)
elif 'LT' in self.mode:
return self.track_lt(img, track_cfg)
        # When testing on VOT or other short-term sequences, apply only weak suppression to changes in the search-region size,
        # to avoid tracking a lot of background when the region grows too large, or locking onto a small patch of the target
        # (and never recovering a normal search range) when it shrinks too much.
elif 'VOT' in self.mode:
return self.track_vot(img, track_cfg)
        # Use SiamFC-style multi-scale search with several search regions from large to small.
elif 'NFS' in self.mode:
return self.track_nfs(img, track_cfg)
        # The post-processing pipeline carried over all the way from KCF to SiamRPN++.
else:
return self.track_normal(img, track_cfg)
def track_normal(self, img, track_cfg):
context_amount = track_cfg.context_amount
penalty_k = track_cfg.penalty_k
window_influence = track_cfg.window_influence
lr = track_cfg.size_lr
        # Determine the search-region size and how to map boxes from search-patch coordinates back to absolute image coordinates.
s_x = round(np.sqrt(np.prod(self.size + context_amount * np.sum(self.size))) *
self.crop_settings_search['crop_size_rate'])
scales = s_x / self.search_size
offset = np.floor(self.center_pos - s_x / 2.)
offset = np.concatenate([offset, offset])
        # Crop the search-region image and apply the same preprocessing as during training (RGB channel order, normalization, etc.).
window = get_subwindow(img, self.center_pos, self.search_size, s_x, self.channel_average)
x_crop = rgb_normalize(window, mobilenet=False)
# Inference
score, pred_lrtb = self.sess.run([self.score, self.bbox], feed_dict={self.x: x_crop,
self.zf_t[0]: self.zf_v[0],
self.zf_t[1]: self.zf_v[1],
self.zf_t[2]: self.zf_v[2],
self.zf_t[3]: self.zf_v[3],
self.zf_t[4]: self.zf_v[4],
self.zf_t[5]: self.zf_v[5],
K.learning_phase(): 0})
        # Map boxes from search-patch coordinates to absolute image coordinates and clip them to the image bounds.
score = score[0]
pred_lrtb = pred_lrtb[0]
        pred_corner = pred_lrtb * scales  # position within the search region
        pred_corner = pred_corner + offset  # position in the original image
pred_xywh = corner2center(pred_corner)
pred_xywh = clip_bbox_center(pred_xywh, self.image_shape)
        # Apply the size/aspect-ratio penalty first, then the cosine-window penalty.
s_c = change(sz(pred_xywh[..., 2], pred_xywh[..., 3], context_amount=context_amount) /
(sz(self.size[0], self.size[1], context_amount=context_amount)))
r_c = change((self.size[0] / self.size[1]) / (pred_xywh[..., 2] / pred_xywh[..., 3]))
penalty = np.exp(-(r_c * s_c - 1) * penalty_k)
pscore = penalty * score
wscore = (1. - window_influence) * pscore + window_influence * self.window
        # The location with the highest score is taken as the target center.
wscore_ = np.reshape(wscore, (-1,))
index = np.argmax(wscore_)
pred_xywh_ = np.reshape(pred_xywh, (-1, 4))
bbox_xywh = pred_xywh_[index, :]
        # Smooth the box size with an exponential moving average.
bbox = np.concatenate([bbox_xywh[:2] - bbox_xywh[2:] / 2., bbox_xywh[:2] + bbox_xywh[2:] / 2.])
bbox_xywh[2:] = self.size * (1 - lr) + bbox_xywh[2:] * lr
self.center_pos = bbox_xywh[:2]
self.size = bbox_xywh[2:]
        # In demo mode, display the result first, then return the box.
if self.video_name is not None:
box = list(map(int, bbox))
cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 2)
cv2.imshow(self.video_name, img)
k = cv2.waitKey(30) & 0xff
return bbox, bbox_xywh, k
        # In other (non-visual) modes, return the best score and the box in rect format.
else:
score_ = score.reshape((-1,))
outputs = {'bbox': [bbox[0], bbox[1], bbox_xywh[2], bbox_xywh[3]],
'best_score': score_[index]}
return outputs
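    # Worked sketch of the size/ratio penalty above (illustrative only, assuming the usual
    # SiamRPN-style helpers: change(r) = max(r, 1/r) and sz(w, h) = sqrt((w+p)*(h+p)) with
    # p = context_amount*(w+h)). With context_amount=0.5 and a previous size of 100x50,
    # sz ~ 147.9; a 120x40 candidate gives sz ~ 154.9, so s_c ~ 1.05 and r_c = change(2/3) = 1.5,
    # hence penalty = exp(-(1.5*1.05 - 1)*penalty_k): large scale/ratio jumps are damped before
    # the cosine window is blended in.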
def track_vot(self, img, track_cfg):
context_amount = track_cfg.context_amount
penalty_k = track_cfg.penalty_k
window_influence = track_cfg.window_influence
lr = track_cfg.size_lr
s_x = round(np.sqrt(np.prod(self.size + context_amount * np.sum(self.size))) *
self.crop_settings_search['crop_size_rate'])
if s_x < self.s_x0 * 0.40:
s_x = s_x * 1.15
if s_x > self.s_x0 * 3.5:
s_x = s_x * 0.90
scales = s_x / self.search_size
offset = np.floor(self.center_pos - s_x / 2.)
offset = np.concatenate([offset, offset])
window = get_subwindow(img, self.center_pos, self.search_size, s_x, self.channel_average)
x_crop = rgb_normalize(window, mobilenet=False)
score, pred_lrtb = self.sess.run([self.score, self.bbox], feed_dict={self.x: x_crop,
self.zf_t[0]: self.zf_v[0],
self.zf_t[1]: self.zf_v[1],
self.zf_t[2]: self.zf_v[2],
self.zf_t[3]: self.zf_v[3],
self.zf_t[4]: self.zf_v[4],
self.zf_t[5]: self.zf_v[5],
K.learning_phase(): 0})
score = score[0]
pred_lrtb = pred_lrtb[0]
pred_corner = pred_lrtb * scales
pred_corner = pred_corner + offset
pred_xywh = corner2center(pred_corner)
pred_xywh = clip_bbox_center(pred_xywh, self.image_shape)
s_c = change(sz(pred_xywh[..., 2], pred_xywh[..., 3], context_amount=context_amount) /
(sz(self.size[0], self.size[1], context_amount=context_amount)))
r_c = change((self.size[0] / self.size[1]) / (pred_xywh[..., 2] / pred_xywh[..., 3]))
penalty = np.exp(-(r_c * s_c - 1) * penalty_k)
pscore = penalty * score
wscore = (1. - window_influence) * pscore + window_influence * self.window
wscore_ = np.reshape(wscore, (-1,))
index = np.argmax(wscore_)
pred_xywh_ = np.reshape(pred_xywh, (-1, 4))
bbox_xywh = pred_xywh_[index, :]
bbox = np.concatenate([bbox_xywh[:2] - bbox_xywh[2:] / 2., bbox_xywh[:2] + bbox_xywh[2:] / 2.])
bbox_xywh[2:] = self.size * (1 - lr) + bbox_xywh[2:] * lr
self.center_pos = bbox_xywh[:2]
self.size = bbox_xywh[2:]
if self.video_name is not None:
box = list(map(int, bbox))
cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 2)
cv2.imshow(self.video_name, img)
k = cv2.waitKey(30) & 0xff
return bbox, bbox_xywh, k
else:
score_ = score.reshape((-1,))
outputs = {'bbox': [bbox[0], bbox[1], bbox_xywh[2], bbox_xywh[3]],
'best_score': score_[index]}
return outputs
def track_otb(self, img, track_cfg):
context_amount = track_cfg.context_amount
penalty_k = track_cfg.penalty_k
window_influence = track_cfg.window_influence
lr = track_cfg.size_lr
s_x = round(np.sqrt(np.prod(self.size + context_amount * np.sum(self.size))) *
self.crop_settings_search['crop_size_rate'])
if s_x < self.s_x0 * 0.40:
if s_x < self.s_x0 * 0.25:
s_x = self.s_x0 * 0.35
else:
s_x = s_x * 1.15
if s_x > self.s_x0 * 3.0:
if s_x > self.s_x0 * 5.0:
s_x = s_x * 0.75
else:
s_x = s_x * 0.90
scales = s_x / self.search_size
offset = np.floor(self.center_pos - s_x / 2.)
offset = np.concatenate([offset, offset])
# crop out the search-region image and apply the same preprocessing as in training (RGB channel-order conversion, normalization, etc.)
window = get_subwindow(img, self.center_pos, self.search_size, s_x, self.channel_average)
# window, _ = random_crop_local(img, self.box, self.search_size, self.crop_settings_search)
x_crop = rgb_normalize(window, mobilenet=False)
score, pred_lrtb = self.sess.run([self.score, self.bbox], feed_dict={self.x: x_crop,
self.zf_t[0]: self.zf_v[0],
self.zf_t[1]: self.zf_v[1],
self.zf_t[2]: self.zf_v[2],
self.zf_t[3]: self.zf_v[3],
self.zf_t[4]: self.zf_v[4],
self.zf_t[5]: self.zf_v[5],
K.learning_phase(): 0})
score = score[0]
pred_lrtb = pred_lrtb[0]
pred_corner = pred_lrtb * scales  # position within the search region
pred_corner = pred_corner + offset  # position in the original image
pred_xywh = corner2center(pred_corner)
pred_xywh = clip_bbox_center(pred_xywh, self.image_shape)
s_c = change(sz(pred_xywh[..., 2], pred_xywh[..., 3], context_amount=context_amount) /
(sz(self.size[0], self.size[1], context_amount=context_amount)))
r_c = change((self.size[0] / self.size[1]) / (pred_xywh[..., 2] / pred_xywh[..., 3]))
penalty = np.exp(-(r_c * s_c - 1) * penalty_k)
pscore = penalty * score
wscore = (1. - window_influence) * pscore + window_influence * self.window
wscore_ = np.reshape(wscore, (-1,))
index = np.argmax(wscore_)
pred_xywh_ = np.reshape(pred_xywh, (-1, 4))
bbox_xywh = pred_xywh_[index, :]
score_ = np.reshape(score, (-1,))
max_score = np.max(score_)
bbox = np.concatenate([bbox_xywh[:2] - bbox_xywh[2:] / 2., bbox_xywh[:2] + bbox_xywh[2:] / 2.])
bbox_xywh[2:] = self.size * (1 - lr) + bbox_xywh[2:] * lr
self.center_pos = bbox_xywh[:2]
self.size = bbox_xywh[2:]
if self.success >= 0:
if max_score < 0.6:
self.success -= 1
self.center_pos_lost = self.center_pos
self.size_lost = self.size
self.s_x_lost = s_x
self.lost_frame = 0
else:
self.success = 1
else:
if self.lost_frame > 3:
search_patches, offset_, scale_, num = build_detect_area(img, self.lost_frame, self.center_pos_lost,
self.s_x_lost, self.search_size, max_num=64)
zf_v = []
for i in range(len(self.zf_v)):
zf_v_ = self.zf_v[i].copy()
zf_v_ = np.tile(zf_v_, (num, 1, 1))
zf_v.append(zf_v_)
score_, pred_lrtb_ = self.sess.run([self.score, self.bbox], feed_dict={self.x: search_patches,
self.zf_t[0]: zf_v[0],
self.zf_t[1]: zf_v[1],
self.zf_t[2]: zf_v[2],
self.zf_t[3]: zf_v[3],
self.zf_t[4]: zf_v[4],
self.zf_t[5]: zf_v[5],
K.learning_phase(): 0})
pred_corner_ = pred_lrtb_ * scale_  # position within the search region
pred_corner_ = pred_corner_ + offset_  # position in the original image
pred_xywh_ = corner2center(pred_corner_)
max_ = score_.max(axis=(1, 2))
if max_.max() > 0.95:
index = max_.argmax()
max_search_score = score_[index]
max_search_score = np.reshape(max_search_score, (-1,))
max_pred_xywh_ = np.reshape(pred_xywh_[index], (-1, 4))
index = np.argmax(max_search_score)
bbox_xywh = max_pred_xywh_[index, :]
bbox = np.concatenate([bbox_xywh[:2] - bbox_xywh[2:] / 2., bbox_xywh[:2] + bbox_xywh[2:] / 2.])
self.center_pos = bbox_xywh[:2]
self.size = bbox_xywh[2:]
self.success = 1
self.lost_frame = 0
else:
self.lost_frame += 1
else:
self.lost_frame += 1
if self.video_name is not None:
box = list(map(int, bbox))
cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 2)
cv2.imshow(self.video_name, img)
k = cv2.waitKey(15) & 0xff
return bbox, bbox_xywh, k
else:
score_ = score.reshape((-1,))
outputs = {'bbox': [bbox[0], bbox[1], bbox_xywh[2], bbox_xywh[3]],
'best_score': score_[index]}
return outputs
def track_uav(self, img, track_cfg):
context_amount = track_cfg.context_amount
penalty_k = track_cfg.penalty_k
window_influence = track_cfg.window_influence
lr = track_cfg.size_lr
s_x = round(np.sqrt(np.prod(self.size + context_amount * np.sum(self.size))) *
self.crop_settings_search['crop_size_rate'])
scales = s_x / self.search_size
offset = np.floor(self.center_pos - s_x / 2.)
offset = np.concatenate([offset, offset])
window = get_subwindow(img, self.center_pos, self.search_size, s_x, self.channel_average)
x_crop = rgb_normalize(window, mobilenet=False)
score, pred_lrtb = self.sess.run([self.score, self.bbox], feed_dict={self.x: x_crop,
self.zf_t[0]: self.zf_v[0],
self.zf_t[1]: self.zf_v[1],
self.zf_t[2]: self.zf_v[2],
self.zf_t[3]: self.zf_v[3],
self.zf_t[4]: self.zf_v[4],
self.zf_t[5]: self.zf_v[5],
K.learning_phase(): 0})
score = score[0]
pred_lrtb = pred_lrtb[0]
pred_corner = pred_lrtb * scales
pred_corner = pred_corner + offset
pred_xywh = corner2center(pred_corner)
pred_xywh = clip_bbox_center(pred_xywh, self.image_shape)
s_c = change(sz(pred_xywh[..., 2], pred_xywh[..., 3], context_amount=context_amount) /
(sz(self.size[0], self.size[1], context_amount=context_amount)))
r_c = change((self.size[0] / self.size[1]) / (pred_xywh[..., 2] / pred_xywh[..., 3]))
penalty = np.exp(-(r_c * s_c - 1) * penalty_k)
pscore = penalty * score
wscore = (1. - window_influence) * pscore + window_influence * self.window
wscore_ = np.reshape(wscore, (-1,))
index = np.argmax(wscore_)
pred_xywh_ = np.reshape(pred_xywh, (-1, 4))
bbox_xywh = pred_xywh_[index, :]
score_ = np.reshape(score, (-1,))
max_score = np.max(score_)
bbox = np.concatenate([bbox_xywh[:2] - bbox_xywh[2:] / 2., bbox_xywh[:2] + bbox_xywh[2:] / 2.])
bbox_xywh[2:] = self.size * (1 - lr) + bbox_xywh[2:] * lr
self.center_pos = bbox_xywh[:2]
self.size = bbox_xywh[2:]
if self.success >= 0:
if max_score < 0.6:
self.success -= 1
self.center_pos_lost = self.center_pos
self.size_lost = self.size
self.s_x_lost = s_x
self.lost_frame = 0
else:
self.success = 1
else:
search_patches, offset_, scale_, num = build_detect_area(img, self.lost_frame, self.center_pos_lost,
self.s_x_lost, self.search_size, max_num=64)
zf_v = []
for i in range(len(self.zf_v)):
zf_v_ = self.zf_v[i].copy()
zf_v_ = np.tile(zf_v_, (num, 1, 1))
zf_v.append(zf_v_)
score_, pred_lrtb_ = self.sess.run([self.score, self.bbox], feed_dict={self.x: search_patches,
self.zf_t[0]: zf_v[0],
self.zf_t[1]: zf_v[1],
self.zf_t[2]: zf_v[2],
self.zf_t[3]: zf_v[3],
self.zf_t[4]: zf_v[4],
self.zf_t[5]: zf_v[5],
K.learning_phase(): 0})
pred_corner_ = pred_lrtb_ * scale_
pred_corner_ = pred_corner_ + offset_
pred_xywh_ = corner2center(pred_corner_)
max_ = score_.max(axis=(1, 2))
if max_.max() > 0.95:
index = max_.argmax()
max_search_score = score_[index]
max_search_score = np.reshape(max_search_score, (-1,))
max_pred_xywh_ = np.reshape(pred_xywh_[index], (-1, 4))
index = np.argmax(max_search_score)
bbox_xywh = max_pred_xywh_[index, :]
bbox = np.concatenate([bbox_xywh[:2] - bbox_xywh[2:] / 2., bbox_xywh[:2] + bbox_xywh[2:] / 2.])
self.center_pos = bbox_xywh[:2]
self.size = bbox_xywh[2:]
self.success = 1
self.lost_frame = 0
else:
self.lost_frame += 1
if self.video_name is not None:
box = list(map(int, bbox))
cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 2)
cv2.imshow(self.video_name, img)
k = cv2.waitKey(15) & 0xff
return bbox, bbox_xywh, k
else:
score_ = score.reshape((-1,))
outputs = {'bbox': [bbox[0], bbox[1], bbox_xywh[2], bbox_xywh[3]],
'best_score': score_[index]}
return outputs
def track_lt(self, img, track_cfg):
context_amount = track_cfg.context_amount
penalty_k = track_cfg.penalty_k
window_influence = track_cfg.window_influence
lr = track_cfg.size_lr
s_x = round(np.sqrt(np.prod(self.size + context_amount * np.sum(self.size))) *
self.crop_settings_search['crop_size_rate'])
if s_x < self.s_x0 * 0.35:
if s_x < self.s_x0 * 0.25:
s_x = self.s_x0 * 0.30
else:
s_x = s_x * 1.15
if s_x > self.s_x0 * 3.5:
s_x = s_x * 0.90
scales = s_x / self.search_size
offset = np.floor(self.center_pos - s_x / 2.)
offset = np.concatenate([offset, offset])
window = get_subwindow(img, self.center_pos, self.search_size, s_x, self.channel_average)
x_crop = rgb_normalize(window, mobilenet=False)
score, pred_lrtb = self.sess.run([self.score, self.bbox], feed_dict={self.x: x_crop,
self.zf_t[0]: self.zf_v[0],
self.zf_t[1]: self.zf_v[1],
self.zf_t[2]: self.zf_v[2],
self.zf_t[3]: self.zf_v[3],
self.zf_t[4]: self.zf_v[4],
self.zf_t[5]: self.zf_v[5],
K.learning_phase(): 0})
score = score[0]
pred_lrtb = pred_lrtb[0]
pred_corner = pred_lrtb * scales
pred_corner = pred_corner + offset
pred_xywh = corner2center(pred_corner)
pred_xywh = clip_bbox_center(pred_xywh, self.image_shape)
s_c = change(sz(pred_xywh[..., 2], pred_xywh[..., 3], context_amount=context_amount) /
(sz(self.size[0], self.size[1], context_amount=context_amount)))
r_c = change((self.size[0] / self.size[1]) / (pred_xywh[..., 2] / pred_xywh[..., 3]))
penalty = np.exp(-(r_c * s_c - 1) * penalty_k)
pscore = penalty * score
wscore = (1. - window_influence) * pscore + window_influence * self.window
wscore_ = np.reshape(wscore, (-1,))
index = np.argmax(wscore_)
pred_xywh_ = np.reshape(pred_xywh, (-1, 4))
bbox_xywh = pred_xywh_[index, :]
score_ = np.reshape(score, (-1,))
max_score = np.max(score_)
bbox = np.concatenate([bbox_xywh[:2] - bbox_xywh[2:] / 2., bbox_xywh[:2] + bbox_xywh[2:] / 2.])
bbox_xywh[2:] = self.size * (1 - lr) + bbox_xywh[2:] * lr
self.center_pos = bbox_xywh[:2]
self.size = bbox_xywh[2:]
if self.success >= 0:
out_score = score.max()
if max_score < 0.7:
self.success -= 1
self.center_pos_lost = self.center_pos
self.size_lost = self.size
self.s_x_lost = s_x
self.lost_frame = 0
else:
self.success = 1
else:
search_patches, offset_, scale_, num = build_detect_area(img, self.lost_frame, self.center_pos_lost,
self.s_x_lost, self.search_size, max_num=64)
zf_v = []
for i in range(len(self.zf_v)):
zf_v_ = self.zf_v[i].copy()
zf_v_ = np.tile(zf_v_, (num, 1, 1))
zf_v.append(zf_v_)
score_, pred_lrtb_ = self.sess.run([self.score, self.bbox], feed_dict={self.x: search_patches,
self.zf_t[0]: zf_v[0],
self.zf_t[1]: zf_v[1],
self.zf_t[2]: zf_v[2],
self.zf_t[3]: zf_v[3],
self.zf_t[4]: zf_v[4],
self.zf_t[5]: zf_v[5],
K.learning_phase(): 0})
pred_corner_ = pred_lrtb_ * scale_
pred_corner_ = pred_corner_ + offset_
pred_xywh_ = corner2center(pred_corner_)
max_ = score_.max(axis=(1, 2))
out_score = max_.max()
if out_score > 0.95:
index = max_.argmax()
max_search_score = score_[index]
max_search_score = np.reshape(max_search_score, (-1,))
max_pred_xywh_ = np.reshape(pred_xywh_[index], (-1, 4))
index = np.argmax(max_search_score)
bbox_xywh = max_pred_xywh_[index, :]
bbox = np.concatenate([bbox_xywh[:2] - bbox_xywh[2:] / 2., bbox_xywh[:2] + bbox_xywh[2:] / 2.])
self.center_pos = bbox_xywh[:2]
self.size = bbox_xywh[2:]
self.success = 1
self.lost_frame = 0
else:
self.lost_frame += 1
if self.video_name is not None:
box = list(map(int, bbox))
cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 2)
cv2.imshow(self.video_name, img)
k = cv2.waitKey(15) & 0xff
return bbox, bbox_xywh, k
else:
outputs = {'bbox': [bbox[0], bbox[1], bbox_xywh[2], bbox_xywh[3]],
'best_score': max(out_score, max_score)}
return outputs
def track_nfs(self, img, track_cfg):
context_amount = track_cfg.context_amount
penalty_k = track_cfg.penalty_k
window_influence = track_cfg.window_influence
lr = track_cfg.size_lr
s_xs = []
windows = []
for ratio in self.ratios:
s_x = round(np.sqrt(np.prod(self.size + context_amount * np.sum(self.size))) *
self.crop_settings_search['crop_size_rate'] * ratio)
if s_x < self.s_x0 * 0.25:
if s_x < self.s_x0 * 0.10:
s_x = self.s_x0 * 0.15
else:
s_x = s_x * 1.12
window_ = get_subwindow(img, self.center_pos, self.search_size, s_x, self.channel_average)
windows.append(window_)
s_xs.append(s_x)
windows = np.concatenate(windows, axis=0)
x_crop = rgb_normalize(windows, mobilenet=False)
s_xs = np.array(s_xs)
scales = s_xs / self.search_size # (n, )
scales = scales[:, None, None, None]
s_xs = s_xs.reshape((-1, 1))
offset = np.floor(self.center_pos.reshape((1, -1)) - s_xs / 2.)
offset = np.concatenate([offset, offset], axis=-1)[:, None, None, :] # (n, 32, 32, 4)
score, pred_lrtb = self.sess.run([self.score, self.bbox], feed_dict={self.x: x_crop,
self.zf_t[0]: self.zf_v_[0],
self.zf_t[1]: self.zf_v_[1],
self.zf_t[2]: self.zf_v_[2],
self.zf_t[3]: self.zf_v_[3],
self.zf_t[4]: self.zf_v_[4],
self.zf_t[5]: self.zf_v_[5],
K.learning_phase(): 0})
pred_corner = pred_lrtb * scales
pred_corner = pred_corner + offset
pred_xywh = corner2center(pred_corner)
pred_xywh = clip_bbox_center(pred_xywh, self.image_shape)
s_c = change(sz(pred_xywh[..., 2], pred_xywh[..., 3], context_amount=context_amount) /
(sz(self.size[0], self.size[1], context_amount=context_amount)))
r_c = change((self.size[0] / self.size[1]) / (pred_xywh[..., 2] / pred_xywh[..., 3]))
penalty = np.exp(-(r_c * s_c - 1) * penalty_k)
pscore = penalty * score
wscore = (1. - window_influence) * pscore + window_influence * self.window
wscore_ = np.reshape(wscore, (-1,))
index = np.argmax(wscore_)
pred_xywh_ = np.reshape(pred_xywh, (-1, 4))
bbox_xywh = pred_xywh_[index, :]
bbox = np.concatenate([bbox_xywh[:2] - bbox_xywh[2:] / 2., bbox_xywh[:2] + bbox_xywh[2:] / 2.])
bbox_xywh[2:] = self.size * (1 - lr) + bbox_xywh[2:] * lr
self.center_pos = bbox_xywh[:2]
self.size = bbox_xywh[2:]
if self.video_name is not None:
box = list(map(int, bbox))
cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 2)
cv2.imshow(self.video_name, img)
k = cv2.waitKey(30) & 0xff
return bbox, bbox_xywh, k
else:
score_ = score.reshape((-1,))
outputs = {'bbox': [bbox[0], bbox[1], bbox_xywh[2], bbox_xywh[3]],
'best_score': score_[index]}
return outputs
def build_detect_area(img, lost_frame, lost_center, s_x, search_size, max_num=64):
sx_ratio = np.concatenate([np.logspace(np.log10(1.0), np.log10(1.04), num=15),
np.logspace(np.log10(1.04), np.log10(1.08), num=20),
np.logspace(np.log10(1.08), np.log10(1.12), num=20),
np.logspace(np.log10(1.12), np.log10(1.15), num=35)])
search_ratio = np.concatenate([np.logspace(np.log10(2.), np.log10(3.), num=15),
np.logspace(np.log10(3.), np.log10(4.), num=20),
np.logspace(np.log10(4.), np.log10(5.), num=20),
np.logspace(np.log10(5.), np.log10(8.), num=35)])
num_patches = [5, 6, 7, 8]
ih, iw = img.shape[:2]
img_area = np.array([5., 5., iw - 5., ih - 5.], dtype=np.float32)
ix1, iy1, ix2, iy2 = img_area
"""
随着丢失时间的增长,不断扩大搜索范围,直到在整幅图像中进行检测搜索
"""
if lost_frame <= 15:
num_patch = num_patches[0]
elif lost_frame <= 35:
num_patch = num_patches[1]
elif lost_frame <= 55:
num_patch = num_patches[2]
elif lost_frame <= 90:
num_patch = num_patches[3]
else:
num_patch = None
if lost_frame >= 90:
search_area = img_area
s_x = s_x * sx_ratio[-1]
else:
s_x = s_x * sx_ratio[lost_frame]
S_X = s_x * search_ratio[lost_frame]
search_area_xywh = np.array([lost_center[0], lost_center[1], S_X, S_X], dtype=np.float32)
x1, y1, x2, y2 = center2corner(search_area_xywh)
x1_, y1_, x2_, y2_ = x1, y1, x2, y2
# if the detection area crosses an image border, compensate by extending it on the opposite (inner) side
if x1 < ix1 and x2 < ix2:
x2_ = x2 + (ix1 - x1) * 0.4
if x1 > ix1 and x2 > ix2:
x1_ = x1 + (x2 - ix2) * 0.4
if y1 < iy1 and y2 < iy2:
y2_ = y2 + (iy1 - y1) * 0.4
if y1 > iy1 and y2 > iy2:
y1_ = y1 + (y2 - iy2) * 0.4
search_area_ = np.array([x1_, y1_, x2_, y2_], dtype=np.float32)
search_area = np.concatenate \
([np.maximum(search_area_[:2], img_area[:2]), np.minimum(search_area_[2:], img_area[2:])])
return create_search_patches(img, s_x, search_area, search_size, num_patch=num_patch, max_num=max_num)
def find_center_pos(length, s_x, num=None):
if num is None:
num = np.ceil(length / s_x)
center = np.linspace(start=s_x / 2 + 5., stop=length - s_x / 2 - 5., num=int(num))
return center
def fix_temp_area(bbox):
ratio = max(bbox[2], bbox[3]) / min(bbox[2], bbox[3])
area = bbox[2] * bbox[3]
rate = 1.0
if ratio < 1.05:
if 250 ** 2 <= area < 270 ** 2:
rate = 0.6
elif ratio <= 1.16:
if area < 30 ** 2:
rate = 0.6
elif 1.30 <= ratio < 1.50:
if area >= 100 ** 2:
rate = 0.65
elif 1.80 <= ratio < 2.20:
if area < 100 ** 2:
rate = 0.6
elif 2.20 <= ratio <= 2.70:
if area >= 100 ** 2:
rate = 0.6
return rate
def create_search_patches(img, s_x, search_area, search_size, num_patch, max_num):
x_center = find_center_pos(search_area[2] - search_area[0], s_x, num_patch) + search_area[0]
y_center = find_center_pos(search_area[3] - search_area[1], s_x, num_patch) + search_area[1]
x_center, y_center = np.meshgrid(x_center, y_center)
center = np.concatenate((x_center[..., None], y_center[..., None]), axis=-1)
center = np.reshape(center, (-1, 2))
num = center.shape[0]
if num > max_num:
slt = np.arange(num)
random.shuffle(slt)
slt = slt[:max_num]
center = center[slt]
num = max_num
size = np.ones((num, 2)) * s_x
box_center = np.concatenate([center, size], axis=-1)
box = center2corner(box_center).astype(np.int32)
box = clip_bbox_corner(box, img.shape[:2])
search_patches_ = []
for i in range(num):
search_patch = img[int(box[i, 1]): int(box[i, 3]), int(box[i, 0]): int(box[i, 2]), :]
search_patch = cv2.resize(search_patch, (search_size, search_size), interpolation=cv2.INTER_CUBIC)
search_patches_.append(search_patch)
search_patches = np.array(search_patches_).astype(np.float32)
search_patches = rgb_normalize(search_patches, mobilenet=False)
scale = s_x / search_size
offset = np.concatenate([box[:, :2], box[:, :2]], axis=-1)[:, None, None, :]
return search_patches, offset, scale, num
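# Minimal usage sketch of the re-detection helpers above (values are
# illustrative, not from the original code): once the tracker has been lost
# for a while, candidate search patches are generated and later mapped back
# to image coordinates.
#
#   patches, offset, scale, num = build_detect_area(
#       img, lost_frame=20, lost_center=np.array([320., 240.]),
#       s_x=160., search_size=255, max_num=64)
#   # patches: (num, search_size, search_size, 3) normalized crops for the
#   # network; offset/scale convert per-patch predictions back to image coords.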
|
# Template project parameter file VLBAContPipe
# Generate parameter file using VLBACal.VLBAMakeParmFile
#
# Substitutions surrounded by 'at' characters
# PROJECT Project name (up to 12 char)
# SESSION Session code
# BAND Band code
# UVFITS Name of uvfits file in $FITS
# IDIFITS List of IDIFITS files in $FITS
# CALINT CL table interval in min
# DESTDIR Output directory
# ARCHFILEID Archive file ID
# DOLOADIDI Load IDI FITS files
# DOLOADUVF Load UV FITS files
#--------------------------------------------------------------------------------------------
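# Example after substitution (illustrative values only):
#   project = "BX999" ; session = "A" ; band = "C"
#   dataInUVF = "BX999A_C.uvfits" ; calInt = 0.5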
project = "@PROJECT@" # Project name (12 char or less, used as AIPS Name)
session = "@SESSION@" # Project session code
band = "@BAND@" # Observing band
archFileID = @ARCHFILEID@ # Archive File ID
logFile = project+"_"+session+"_"+band+".log" # Processing log file
# Destination directory for copying output files. empty string -> do not copy
parms["copyDestDir"] = "@DESTDIR@/" + project + session + band
# NOTE: these files must be in $FITS directory!
dataInUVF = "@UVFITS@" # UVFITS data file name
dataInIDI = @IDIFITS@ # List of IDIFITS data files
calInt = @CALINT@ # Calibration table interval in min.
Compress = True # Use compressed UV data?
# Quantization correction
parms["QuantFlag"] = 0.8 # If >0, flag solutions < QuantFlag (use 0.9 for 1 bit, 0.8 for 2 bit)
# Specify calibration/target sources. (When initialized, both parameters
# default to all sources, so leave commented here until ready to use.)
#parms["contCals"] = [] # list of calibrators
#parms["targets"] = [] # targets, empty = all
parms["contCalModel"] = None # List of models
parms["refAnts"] = [2,4,5,8,9] # List of acceptable reference antennas for fringe fitting
# Final Image/Clean
parms["Stokes"] = "I" # Stokes to image
# Control
T = True
F = False
check = F # Only check script, don't execute tasks
debug = F # run tasks debug
doLoadIDI = @DOLOADIDI@ # Load data from IDI FITS?, else already in AIPS?
doLoadUVF = @DOLOADUVF@ # Load the "AIPS Friendly" UV FITS version?
parms["doClearTab"] = T # Clear cal/edit tables
parms["doCopyFG"] = T # Copy FG 1 to FG 2
parms["doEditList"] = T # Edit using editList?
parms["doQuack"] = T # Quack data?
parms["doQuantCor"] = T # Quantization correction/flagging?
parms["doPACor"] = T # Make parallactic angle correction?
parms["doOpacCor"] = T # Make Opacity/Tsys/gain correction?
parms["doFindOK"] = T # Search for OK cals if contCals not given
parms["doFindCal"] = T # Search for best calibration/reference antenna
parms["doPCcor"] = T # Apply PC table?
parms["doManPCal"] = T # Determine and apply manual phase cals?
parms["doBPCal"] = T # Determine Bandpass calibration
parms["doImgCal"] = T # Image calibrators
parms["doDelayCal"] = T # Determine/apply delays from contCals
parms["doAmpCal"] = T # Determine/smooth/apply amplitudes from contCals
parms["doCalAvg"] = T # calibrate and average
parms["doImgTarget"] = T # Image targets?
parms["doPhaseCal"] = T # Phase calibrate all data with self-cal?
parms["doImgFullTarget"] = T # Final Image/Clean/selfcal ?
parms["doSaveUV"] = T # Save UV (calibrated and averaged) results?
parms["doSaveImg"] = T # Save image results?
parms["doSaveTab"] = T # Save calibration and editing tables?
parms["doCleanup"] = T # Cleanup AIPS directories?
# diagnostics/reports
parms["doSNPlot"] = T # Plot SN tables etc
parms["doPCPlot"] = T # Plot PC results?
parms["doSpecPlot"] = T # Plot the amp. and phase across the spectrum
parms["doDiagPlots"] = T # Source plots
parms["doKntrPlots"] = T # Contour plots
parms["doMetadata"] = T # Generate metadata dictionaries
parms["doVOTable"] = T # VOTable report
parms["doHTML"] = T # Generate HTML report?
|
# angr-management/angrmanagement/logic/threads.py
import thread
import threading
from PySide.QtCore import QEvent, QCoreApplication
from . import GlobalInfo
class ExecuteCodeEvent(QEvent):
def __init__(self, callable, args=None):
super(ExecuteCodeEvent, self).__init__(QEvent.User)
self.callable = callable
self.args = args
self.event = threading.Event()
self.result = None
self.exception = None
def execute(self):
if self.args is None:
self.callable()
else:
self.callable(*self.args)
class GUIObjProxy(object):
"""
Derived from http://code.activestate.com/recipes/496741-object-proxying/
"""
__slots__ = ["_obj", "__weakref__"]
def __init__(self, obj):
object.__setattr__(self, "_obj", obj)
#
# proxying (special cases)
#
def __getattribute__(self, name):
result = gui_thread_schedule(lambda: getattr(object.__getattribute__(self, "_obj"), name))
if result is None:
return result
if type(result) in [int, float, str, bool]:
return result
return GUIObjProxy(result)
def __delattr__(self, name):
gui_thread_schedule(lambda: delattr(object.__getattribute__(self, "_obj"), name))
def __setattr__(self, name, value):
gui_thread_schedule(lambda: setattr(object.__getattribute__(self, "_obj"), name, value))
def __nonzero__(self):
return gui_thread_schedule(lambda: bool(object.__getattribute__(self, "_obj")))
def __str__(self):
return gui_thread_schedule(lambda: str(object.__getattribute__(self, "_obj")))
def __repr__(self):
return gui_thread_schedule(lambda: repr(object.__getattribute__(self, "_obj")))
#
# factories
#
_special_names = [
'__abs__', '__add__', '__and__', '__call__', '__cmp__', '__coerce__',
'__contains__', '__delitem__', '__delslice__', '__div__', '__divmod__',
'__eq__', '__float__', '__floordiv__', '__ge__', '__getitem__',
'__getslice__', '__gt__', '__hash__', '__hex__', '__iadd__', '__iand__',
'__idiv__', '__idivmod__', '__ifloordiv__', '__ilshift__', '__imod__',
'__imul__', '__int__', '__invert__', '__ior__', '__ipow__', '__irshift__',
'__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__', '__len__',
'__long__', '__lshift__', '__lt__', '__mod__', '__mul__', '__ne__',
'__neg__', '__oct__', '__or__', '__pos__', '__pow__', '__radd__',
'__rand__', '__rdiv__', '__rdivmod__', '__reduce__', '__reduce_ex__',
'__repr__', '__reversed__', '__rfloordiv__', '__rlshift__', '__rmod__',
'__rmul__', '__ror__', '__rpow__', '__rrshift__', '__rshift__', '__rsub__',
'__rtruediv__', '__rxor__', '__setitem__', '__setslice__', '__sub__',
'__truediv__', '__xor__', 'next',
]
@classmethod
def _create_class_proxy(cls, theclass):
"""
Creates a proxy for the given class.
"""
def make_method(name):
def method(self, *args, **kw):
return gui_thread_schedule(lambda: getattr(object.__getattribute__(self, "_obj"), name)(*args, **kw))
return method
namespace = {}
for name in cls._special_names:
if hasattr(theclass, name):
namespace[name] = make_method(name)
return type("%s(%s)" % (cls.__name__, theclass.__name__), (cls,), namespace)
def __new__(cls, obj, *args, **kwargs):
"""
creates an proxy instance referencing `obj`. (obj, *args, **kwargs) are
passed to this class' __init__, so deriving classes can define an
__init__ method of their own.
note: _class_proxy_cache is unique per deriving class (each deriving
class must hold its own cache)
"""
try:
cache = cls.__dict__["_class_proxy_cache"]
except KeyError:
cls._class_proxy_cache = cache = {}
try:
theclass = cache[obj.__class__]
except KeyError:
cache[obj.__class__] = theclass = cls._create_class_proxy(obj.__class__)
ins = object.__new__(theclass)
theclass.__init__(ins, obj, *args, **kwargs)
return ins
def is_gui_thread():
return thread.get_ident() == GlobalInfo.gui_thread
def gui_thread_schedule(callable, args=None):
if is_gui_thread():
if args is None:
return callable()
else:
return callable(*args)
event = ExecuteCodeEvent(callable, args)
QCoreApplication.postEvent(GlobalInfo.main_window, event)
event.event.wait() # TODO: unsafe. to be fixed later.
if event.exception is not None:
raise event.exception[0], event.exception[1], event.exception[2]
return event.result
def gui_thread_schedule_async(callable, args=None):
if is_gui_thread():
if args is None:
callable()
else:
callable(*args)
return  # already on the GUI thread; do not also post an event
event = ExecuteCodeEvent(callable, args)
QCoreApplication.postEvent(GlobalInfo.main_window, event)
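# Minimal usage sketch (assumes the application has set GlobalInfo.gui_thread
# and GlobalInfo.main_window at startup):
#
#   title = gui_thread_schedule(lambda: GlobalInfo.main_window.windowTitle())
#   gui_thread_schedule_async(GlobalInfo.main_window.setWindowTitle,
#                             args=("angr management",))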
|
"""
raven.handlers.logging
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import datetime
import logging
import sys
import traceback
from raven.base import Client
from raven.utils.encoding import to_string
from raven.utils.stacks import iter_stack_frames
class SentryHandler(logging.Handler, object):
def __init__(self, *args, **kwargs):
client = kwargs.get('client_cls', Client)
if len(args) == 1:
arg = args[0]
if isinstance(arg, basestring):
self.client = client(dsn=arg)
elif isinstance(arg, Client):
self.client = arg
else:
raise ValueError('The first argument to %s must be either a Client instance or a DSN, got %r instead.' % (
self.__class__.__name__,
arg,
))
elif 'client' in kwargs:
self.client = kwargs['client']
elif len(args) == 2 and not kwargs:
servers, key = args
self.client = client(servers=servers, key=key)
else:
self.client = client(*args, **kwargs)
logging.Handler.__init__(self)
def emit(self, record):
# from sentry.client.middleware import SentryLogMiddleware
# # Fetch the request from a threadlocal variable, if available
# request = getattr(SentryLogMiddleware.thread, 'request', None)
self.format(record)
# Avoid typical config issues by overriding loggers behavior
if record.name.startswith('sentry.errors'):
print >> sys.stderr, to_string(record.message)
return
try:
return self._emit(record)
except Exception:
print >> sys.stderr, "Top level Sentry exception caught - failed creating log record"
print >> sys.stderr, to_string(record.msg)
print >> sys.stderr, to_string(traceback.format_exc())
try:
self.client.capture('Exception')
except Exception:
pass
def _emit(self, record, **kwargs):
data = {}
for k, v in record.__dict__.iteritems():
if '.' not in k and k not in ('culprit',):
continue
data[k] = v
stack = getattr(record, 'stack', None)
if stack is True:
stack = iter_stack_frames()
if stack:
frames = []
started = False
last_mod = ''
for item in stack:
if isinstance(item, (list, tuple)):
frame, lineno = item
else:
frame, lineno = item, item.f_lineno
if not started:
f_globals = getattr(frame, 'f_globals', {})
module_name = f_globals.get('__name__', '')
if last_mod.startswith('logging') and not module_name.startswith('logging'):
started = True
else:
last_mod = module_name
continue
frames.append((frame, lineno))
stack = frames
extra = getattr(record, 'data', {})
# Add in all of the data from the record that we aren't already capturing
for k in record.__dict__.keys():
if k in ('stack', 'name', 'args', 'msg', 'levelno', 'exc_text', 'exc_info', 'data', 'created', 'levelname', 'msecs', 'relativeCreated'):
continue
if k.startswith('_'):
continue
extra[k] = record.__dict__[k]
date = datetime.datetime.utcfromtimestamp(record.created)
# If there's no exception being processed, exc_info may be a 3-tuple of None
# http://docs.python.org/library/sys.html#sys.exc_info
if record.exc_info and all(record.exc_info):
handler = self.client.get_handler('raven.events.Exception')
data.update(handler.capture(exc_info=record.exc_info))
data['checksum'] = handler.get_hash(data)
data['level'] = record.levelno
data['logger'] = record.name
return self.client.capture('Message', message=record.msg, params=record.args,
stack=stack, data=data, extra=extra,
date=date, **kwargs)
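# Minimal usage sketch: attach the handler to a logger with a DSN string
# (the DSN below is illustrative only).
#
#   import logging
#   logger = logging.getLogger(__name__)
#   logger.addHandler(SentryHandler('http://public:secret@example.com/1'))
#   logger.error('something broke', exc_info=True)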
|
"""
Adapted from https://github.com/kuangliu/pytorch-cifar/blob/master/models/preact_resnet.py
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
import pandas as pd
from numbers import Number
from .layers import Conv2d
from .base import RegressionModel, ClassificationModel
from .dense_nets import LinearPrior
from .. import prior
__all__ = ('Conv2dPrior', 'PreActResNet18', 'PreActResNet34', 'ClassificationConvNet', 'CorrelatedClassificationConvNet', 'ThinPreActResNet18', 'DataDrivenPreActResNet18')
def Conv2dPrior(in_channels, out_channels, kernel_size=3, stride=1,
padding=0, dilation=1, groups=1, padding_mode='zeros',
prior_w=prior.Normal, loc_w=0., std_w=1., prior_b=prior.Normal,
loc_b=0., std_b=1., scaling_fn=None, weight_prior_params={}, bias_prior_params={}):
if scaling_fn is None:
def scaling_fn(std, dim):
return std/dim**0.5
in_dim = in_channels * kernel_size**2
kernel_size = nn.modules.utils._pair(kernel_size)
bias_prior = prior_b((out_channels,), 0., std_b, **bias_prior_params) if prior_b is not None else None
return Conv2d(weight_prior=prior_w((out_channels, in_channels//groups, kernel_size[0], kernel_size[1]),
loc_w, scaling_fn(std_w, in_channels), # TODO: use `in_dim` here to prevent the variance from blowing up
**weight_prior_params),
bias_prior=bias_prior,
stride=stride, padding=padding, dilation=dilation,
groups=groups, padding_mode=padding_mode)
class Reshape(nn.Module):
def __init__(self, *args):
super().__init__()
self.shape = args
def forward(self, x):
return x.view(self.shape)
def ClassificationConvNet(in_channels, img_height, out_features, width, depth=3, softmax_temp=1.,
prior_w=prior.Normal, loc_w=0., std_w=2**.5,
prior_b=prior.Normal, loc_b=0., std_b=1.,
scaling_fn=None, weight_prior_params={}, bias_prior_params={}):
assert depth >= 2, "We can't have less than two layers"
layers = [Reshape(-1, in_channels, img_height, img_height),
Conv2dPrior(in_channels, width, kernel_size=3, padding=1, prior_w=prior_w, loc_w=loc_w,
std_w=std_w, prior_b=prior_b, loc_b=loc_b, std_b=std_b,
scaling_fn=scaling_fn, weight_prior_params=weight_prior_params,
bias_prior_params=bias_prior_params),
nn.ReLU(), nn.MaxPool2d(2)]
for _ in range(depth-2):
layers.append(Conv2dPrior(width, width, kernel_size=3, padding=1, prior_w=prior_w, loc_w=loc_w,
std_w=std_w, prior_b=prior_b, loc_b=loc_b, std_b=std_b,
scaling_fn=scaling_fn, weight_prior_params=weight_prior_params,
bias_prior_params=bias_prior_params))
layers.append(nn.ReLU())
layers.append(nn.MaxPool2d(2))
layers.append(nn.Flatten())
reshaped_size = width*(img_height//2**(depth-1))**2
layers.append(LinearPrior(reshaped_size, out_features, prior_w=prior_w, loc_w=loc_w,
std_w=std_w, prior_b=prior_b, loc_b=loc_b, std_b=std_b,
scaling_fn=scaling_fn, weight_prior_params=weight_prior_params,
bias_prior_params=bias_prior_params))
return ClassificationModel(nn.Sequential(*layers), softmax_temp)
def CorrelatedClassificationConvNet(in_channels, img_height, out_features, width, depth=3, softmax_temp=1.,
prior_w=prior.Normal, loc_w=0., std_w=2**.5,
prior_b=prior.Normal, loc_b=0., std_b=1.,
scaling_fn=None, weight_prior_params={}, bias_prior_params={}):
# This is the same as `ClassificationConvNet`, but with the `ConvCorrelatedNormal` prior. The scaling is chosen
# to be such that the same prior is obtained when no correlation is given.
assert depth >= 2, "We can't have less than two layers"
# TODO: ideally we should be able to specify different priors for conv weight and dense weight
# But for now it probably suffices to be able to change the conv weight prior to try different ones
conv_prior_w = prior_w
prior_w = prior.Normal
conv_weight_prior_params_1 = {}
if "lengthscale_1" in weight_prior_params:
conv_weight_prior_params_1["lengthscale"] = weight_prior_params["lengthscale_1"]
conv_weight_prior_params_2 = {}
if "lengthscale_2" in weight_prior_params:
conv_weight_prior_params_2["lengthscale"] = weight_prior_params["lengthscale_2"]
weight_prior_params = {k: v for k, v in weight_prior_params.items()
if k not in ["lengthscale_1", "lengthscale_2"]}
layers = [Reshape(-1, in_channels, img_height, img_height),
Conv2dPrior(in_channels, width, kernel_size=3, padding=1, prior_w=conv_prior_w, loc_w=loc_w,
std_w=std_w, prior_b=prior_b, loc_b=loc_b, std_b=std_b,
scaling_fn=scaling_fn, weight_prior_params=conv_weight_prior_params_1,
bias_prior_params=bias_prior_params),
nn.ReLU(), nn.MaxPool2d(2)]
for _ in range(depth-2):
layers.append(Conv2dPrior(width, width, kernel_size=3, padding=1, prior_w=conv_prior_w, loc_w=loc_w,
std_w=std_w, prior_b=prior_b, loc_b=loc_b, std_b=std_b,
scaling_fn=scaling_fn, weight_prior_params=conv_weight_prior_params_2,
bias_prior_params=bias_prior_params))
layers.append(nn.ReLU())
layers.append(nn.MaxPool2d(2))
layers.append(nn.Flatten())
reshaped_size = width*(img_height//2**(depth-1))**2
layers.append(LinearPrior(reshaped_size, out_features, prior_w=prior_w, loc_w=loc_w,
std_w=std_w, prior_b=prior_b, loc_b=loc_b, std_b=std_b,
scaling_fn=scaling_fn, weight_prior_params=weight_prior_params,
bias_prior_params=bias_prior_params))
return ClassificationModel(nn.Sequential(*layers), softmax_temp)
class PreActBlock(nn.Module):
'''Pre-activation version of the BasicBlock.'''
expansion = 1
def __init__(self, in_planes, planes, stride=1, bn=True,
prior_w=prior.Normal, loc_w=0., std_w=2**.5,
prior_b=prior.Normal, loc_b=0., std_b=1.,
scaling_fn=None, weight_prior_params={}, bias_prior_params={}):
super(PreActBlock, self).__init__()
if bn:
batchnorm = nn.BatchNorm2d
else:
batchnorm = nn.Identity
self.bn1 = batchnorm(in_planes)
self.conv1 = Conv2dPrior(in_planes, planes, kernel_size=3, stride=stride, padding=1,
prior_w=prior_w, loc_w=loc_w, std_w=std_w,
prior_b=None, scaling_fn=scaling_fn, weight_prior_params=weight_prior_params,
bias_prior_params=bias_prior_params)
self.bn2 = batchnorm(planes)
self.conv2 = Conv2dPrior(planes, planes, kernel_size=3, stride=1, padding=1,
prior_w=prior_w, loc_w=loc_w, std_w=std_w,
prior_b=None, scaling_fn=scaling_fn, weight_prior_params=weight_prior_params,
bias_prior_params=bias_prior_params)
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
Conv2dPrior(in_planes, self.expansion*planes, kernel_size=1, stride=stride,
prior_w=prior_w, loc_w=loc_w, std_w=std_w,
prior_b=None, scaling_fn=scaling_fn, weight_prior_params=weight_prior_params,
bias_prior_params=bias_prior_params)
)
else:
self.shortcut = (lambda x: x)
def forward(self, x):
out = F.relu(self.bn1(x))
shortcut = self.shortcut(out)
out = self.conv1(out)
out = self.conv2(F.relu(self.bn2(out)))
out += shortcut
return out
class PreActResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10, bn=True,
prior_w=prior.Normal, loc_w=0., std_w=2**.5,
prior_b=prior.Normal, loc_b=0., std_b=1.,
in_planes=64, scaling_fn=None,
weight_prior_params={}, bias_prior_params={}):
super(PreActResNet, self).__init__()
self.in_planes = in_planes
self.bn = bn
self.prior_w = prior_w
self.loc_w = loc_w
self.std_w = std_w
self.prior_b = prior_b
self.loc_b = loc_b
self.std_b = std_b
self.scaling_fn = scaling_fn
self.weight_prior_params = weight_prior_params
self.bias_prior_params = bias_prior_params
if prior_w in [prior.ConvCorrelatedNormal, prior.FixedCovNormal]:
dense_prior_w = prior.Normal
elif prior_w == prior.FixedCovGenNorm:
dense_prior_w = prior.GenNorm
else:
dense_prior_w = prior_w
# `self.in_planes` gets modified, so we use `in_planes`.
self.conv1 = Conv2dPrior(3, in_planes, kernel_size=3, stride=1, padding=1, prior_b=None,
prior_w=self.prior_w, loc_w=self.loc_w, std_w=self.std_w,
scaling_fn=self.scaling_fn, weight_prior_params=self.weight_prior_params,
bias_prior_params=self.bias_prior_params)
self.layer1 = self._make_layer(block, in_planes, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 2 * in_planes, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 4 * in_planes, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 8 * in_planes, num_blocks[3], stride=2)
self.linear = LinearPrior(8 * in_planes, num_classes,
prior_w=dense_prior_w, loc_w=self.loc_w, std_w=self.std_w,
prior_b=self.prior_b, loc_b=self.loc_b, std_b=self.std_b,
scaling_fn=self.scaling_fn, weight_prior_params=self.weight_prior_params,
bias_prior_params=self.bias_prior_params)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride, bn=self.bn,
prior_w=self.prior_w, loc_w=self.loc_w, std_w=self.std_w,
prior_b=self.prior_b, loc_b=self.loc_b, std_b=self.std_b,
scaling_fn=self.scaling_fn, weight_prior_params=self.weight_prior_params,
bias_prior_params=self.bias_prior_params))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def PreActResNet18(softmax_temp=1., width=64,
prior_w=prior.Normal, loc_w=0., std_w=2**.5,
prior_b=prior.Normal, loc_b=0., std_b=1.,
scaling_fn=None, bn=True, weight_prior_params={}, bias_prior_params={}):
load_file_keys = ['lengthscale_dict_file']
load_file = {k: v for k, v in weight_prior_params.items() if k in load_file_keys}
weight_prior_params = {k: v for k, v in weight_prior_params.items() if k not in load_file_keys}
model = ClassificationModel(PreActResNet(PreActBlock,
[2,2,2,2], bn=bn,
prior_w=prior_w,
loc_w=loc_w,
std_w=std_w,
prior_b=prior_b,
loc_b=loc_b,
std_b=std_b,
scaling_fn=scaling_fn, in_planes=width,
weight_prior_params=weight_prior_params,
bias_prior_params=bias_prior_params), softmax_temp)
if 'lengthscale_dict_file' in load_file:
lengthscale_dict = pd.read_pickle(load_file['lengthscale_dict_file'])
sd = model.state_dict()
for k, v in lengthscale_dict.items():
assert k.startswith("net.module.") and k.endswith(".p")
new_k = "net." + k[len("net.module."):-len(".p")] + ".lengthscale"
sd[ new_k ][...] = v
model.load_state_dict(sd)
return model
def DataDrivenPreActResNet18(softmax_temp=1., width=64,
prior_w=prior.Normal, loc_w=0., std_w=2**.5,
prior_b=prior.Normal, loc_b=0., std_b=1.,
scaling_fn=None, bn=True, weight_prior_params={}, bias_prior_params={}):
assert scaling_fn is None
scaling_fn = (lambda std, dim: std)
load_file_keys = ['mean_covs_file', 'fits_dict_file']
load_file = {k: v for k, v in weight_prior_params.items() if k in load_file_keys}
weight_prior_params = {k: v for k, v in weight_prior_params.items() if k not in load_file_keys}
model = ClassificationModel(PreActResNet(PreActBlock,
[2,2,2,2], bn=bn,
prior_w=prior_w,
loc_w=loc_w,
std_w=std_w,
prior_b=prior_b,
loc_b=loc_b,
std_b=std_b,
scaling_fn=scaling_fn, in_planes=width,
weight_prior_params=weight_prior_params,
bias_prior_params=bias_prior_params), softmax_temp)
if 'mean_covs_file' in load_file:
loaded_keys = set()
mean_covs = pd.read_pickle(load_file['mean_covs_file'])
prior_modules = {("net." + k[len("net.module."):-len(".p")]): k
for k in mean_covs.keys()}
for name, mod in model.named_modules():
if name not in prior_modules:
continue
key = prior_modules[name]
mean, cov = mean_covs[key]
loaded_keys.add(key)
if isinstance(mean, Number):
assert mod.loc.numel() == 1
mod.loc[...] = float(mean)
else:
assert mod.loc.shape == mean.shape
mod.loc[...] = torch.from_numpy(mean)
if isinstance(cov, Number):
assert mod.scale.numel() == 1
mod.scale[...] = cov**.5
else:
assert mod.scale.shape == cov.shape
mod.assign_cov(torch.from_numpy(cov))
assert loaded_keys == set(mean_covs.keys())
if 'fits_dict_file' in load_file:
assert prior_w == prior.FixedCovGenNorm
loaded_keys = set()
_, fits_dict = pd.read_pickle(load_file['fits_dict_file'])
prior_modules = {("net." + k[len("net.module."):-len(".p")]): k
for k in fits_dict.keys()}
for name, mod in model.named_modules():
if name not in prior_modules:
continue
key = prior_modules[name]
loaded_keys.add(key)
mod.beta[...] = fits_dict[key]["gennorm"][0]
if isinstance(mod, prior.FixedCovGenNorm):
mod.base_scale[...] = fits_dict[key]["gennorm"][2]
else:
assert isinstance(mod, prior.GenNorm)
mod.loc[...] = fits_dict[key]["gennorm"][1]
mod.scale[...] = fits_dict[key]["gennorm"][2]
assert loaded_keys == set(fits_dict.keys())
return model
def ThinPreActResNet18(softmax_temp=1.,
prior_w=prior.Normal, loc_w=0., std_w=2**.5,
prior_b=prior.Normal, loc_b=0., std_b=1.,
scaling_fn=None, bn=True,
weight_prior_params={}, bias_prior_params={}):
return ClassificationModel(PreActResNet(
PreActBlock, [2,2,2,2], bn=bn,
prior_w=prior_w, loc_w=loc_w, std_w=std_w,
prior_b=prior_b, loc_b=loc_b, std_b=std_b,
in_planes=16, scaling_fn=scaling_fn,
weight_prior_params=weight_prior_params, bias_prior_params=bias_prior_params),
softmax_temp)
def PreActResNet34(softmax_temp=1.,
prior_w=prior.Normal, loc_w=0., std_w=2**.5,
prior_b=prior.Normal, loc_b=0., std_b=1.,
scaling_fn=None, bn=True, weight_prior_params={}, bias_prior_params={}):
return ClassificationModel(PreActResNet(PreActBlock,
[3,4,6,3], bn=bn,
prior_w=prior_w,
loc_w=loc_w,
std_w=std_w,
prior_b=prior_b,
loc_b=loc_b,
std_b=std_b,
scaling_fn=scaling_fn,
weight_prior_params=weight_prior_params,
bias_prior_params=bias_prior_params), softmax_temp)
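# Minimal usage sketch (assumes the ClassificationModel wrapper is callable
# like a regular nn.Module and CIFAR-sized 3x32x32 inputs):
#
#   model = PreActResNet18(width=64, prior_w=prior.Normal, std_w=2**.5)
#   out = model(torch.randn(8, 3, 32, 32))  # assumed to return class scores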
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
import sys
import threading
from dataclasses import asdict
from pprint import pformat
from typing import Dict, List, Optional, Type
import torchx.specs as specs
from pyre_extensions import none_throws
from torchx.cli.cmd_base import SubCommand
from torchx.cli.cmd_log import get_logs
from torchx.runner import Runner, config, get_runner
from torchx.schedulers import get_default_scheduler_name, get_scheduler_factories
from torchx.specs import CfgVal
from torchx.specs.finder import (
ComponentNotFoundException,
ComponentValidationException,
_Component,
get_builtin_source,
get_components,
)
from torchx.util.types import to_dict
logger: logging.Logger = logging.getLogger(__name__)
def _convert_to_option_type(
value: str, option_type: Type[specs.CfgVal]
) -> specs.CfgVal:
if option_type == bool:
return value.lower() == "true"
elif option_type == List[str]:
return value.split(";")
else:
# pyre-ignore[19]
return option_type(value)
def _parse_run_config(arg: str, scheduler_opts: specs.runopts) -> Dict[str, CfgVal]:
conf: Dict[str, CfgVal] = {}
if not arg:
return conf
for key, value in to_dict(arg).items():
option = scheduler_opts.get(key)
if option is None:
raise ValueError(f"Unknown {key}, run `torchx runopts` for more info")
option_type = option.opt_type
typed_value = _convert_to_option_type(value, option_type)
conf[key] = typed_value
return conf
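# Example (a sketch): with `--scheduler_args cluster=foo,user=bar`, to_dict()
# yields {"cluster": "foo", "user": "bar"}; each value is then coerced through
# _convert_to_option_type using the option type declared in the scheduler's
# runopts (bool values compare lower-cased against "true", List[str] values
# are split on ";").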
class CmdBuiltins(SubCommand):
def add_arguments(self, subparser: argparse.ArgumentParser) -> None:
subparser.add_argument(
"--print",
type=str,
help="prints the builtin's component def to stdout",
)
def _builtins(self) -> Dict[str, _Component]:
return get_components()
def run(self, args: argparse.Namespace) -> None:
builtin_name = args.print
if not builtin_name:
builtin_components = self._builtins()
num_builtins = len(builtin_components)
print(f"Found {num_builtins} builtin components:")
for i, component in enumerate(builtin_components.values()):
print(f" {i + 1:2d}. {component.name}")
else:
print(get_builtin_source(builtin_name))
class CmdRun(SubCommand):
def __init__(self) -> None:
self._subparser: Optional[argparse.ArgumentParser] = None
def add_arguments(self, subparser: argparse.ArgumentParser) -> None:
scheduler_names = get_scheduler_factories().keys()
self._subparser = subparser
subparser.add_argument(
"-s",
"--scheduler",
type=str,
help=f"Name of the scheduler to use. One of: [{','.join(scheduler_names)}]",
default=get_default_scheduler_name(),
)
subparser.add_argument(
"-cfg",
"--scheduler_args",
type=str,
help="Arguments to pass to the scheduler (Ex:`cluster=foo,user=bar`)."
" For a list of scheduler run options run: `torchx runopts`"
"",
)
subparser.add_argument(
"--dryrun",
action="store_true",
default=False,
help="Does not actually submit the app,"
" just prints the scheduler request",
)
subparser.add_argument(
"--wait",
action="store_true",
default=False,
help="Wait for the app to finish before exiting.",
)
subparser.add_argument(
"--log",
action="store_true",
default=False,
help="Stream logs while waiting for app to finish.",
)
subparser.add_argument(
"conf_args",
nargs=argparse.REMAINDER,
)
def _run(self, runner: Runner, args: argparse.Namespace) -> None:
if args.scheduler == "local":
logger.warning(
"`local` scheduler is deprecated and will be"
" removed in the near future,"
" please use other variants of the local scheduler"
" (e.g. `local_cwd`)"
)
run_opts = get_runner().run_opts()
scheduler_opts = run_opts[args.scheduler]
cfg = _parse_run_config(args.scheduler_args, scheduler_opts)
config.apply(scheduler=args.scheduler, cfg=cfg)
if len(args.conf_args) < 1:
none_throws(self._subparser).error(
"the following arguments are required: conf_file, conf_args"
)
# Python argparse would remove `--` if it was the first argument. This
# does not work well for torchx, since torchx.specs.api uses another argparser to
# parse component arguments.
conf_file, conf_args = args.conf_args[0], args.conf_args[1:]
try:
if args.dryrun:
dryrun_info = runner.dryrun_component(
conf_file, conf_args, args.scheduler, cfg
)
logger.info(
"\n=== APPLICATION ===\n"
f"{pformat(asdict(dryrun_info._app), indent=2, width=80)}"
)
logger.info("\n=== SCHEDULER REQUEST ===\n" f"{dryrun_info}")
else:
app_handle = runner.run_component(
conf_file,
conf_args,
args.scheduler,
cfg,
)
# DO NOT delete this line. It is used by slurm tests to retrieve the app id
print(app_handle)
if args.scheduler.startswith("local"):
self._wait_and_exit(runner, app_handle, log=True)
else:
logger.info(f"Launched app: {app_handle}")
status = runner.status(app_handle)
logger.info(status)
logger.info(f"Job URL: {none_throws(status).ui_url}")
if args.wait:
self._wait_and_exit(runner, app_handle, log=args.log)
except (ComponentValidationException, ComponentNotFoundException) as e:
error_msg = f"\nFailed to run component `{conf_file}` got errors: \n {e}"
logger.error(error_msg)
sys.exit(1)
except specs.InvalidRunConfigException as e:
error_msg = (
f"Scheduler arg is incorrect or missing required option: `{e.cfg_key}`\n"
f"Run `torchx runopts` to check configuration for `{args.scheduler}` scheduler\n"
f"Use `-cfg` to specify run cfg as `key1=value1,key2=value2` pair\n"
"of setup `.torchxconfig` file, see: https://pytorch.org/torchx/main/experimental/runner.config.html"
)
logger.error(error_msg)
sys.exit(1)
def run(self, args: argparse.Namespace) -> None:
os.environ["TORCHX_CONTEXT_NAME"] = os.getenv("TORCHX_CONTEXT_NAME", "cli_run")
with get_runner() as runner:
self._run(runner, args)
def _wait_and_exit(self, runner: Runner, app_handle: str, log: bool) -> None:
logger.info("Waiting for the app to finish...")
log_thread = self._start_log_thread(runner, app_handle) if log else None
status = runner.wait(app_handle, wait_interval=1)
if not status:
raise RuntimeError(f"unknown status, wait returned {status}")
logger.info(f"Job finished: {status.state}")
if log_thread:
log_thread.join()
if status.state != specs.AppState.SUCCEEDED:
logger.error(status)
sys.exit(1)
else:
logger.debug(status)
def _start_log_thread(self, runner: Runner, app_handle: str) -> threading.Thread:
thread = threading.Thread(
target=get_logs,
kwargs={
"file": sys.stderr,
"runner": runner,
"identifier": app_handle,
"regex": None,
"should_tail": True,
},
)
thread.daemon = True
thread.start()
return thread
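# Example invocation (builtin component name and flags are illustrative):
#   torchx run --scheduler local_cwd utils.echo --msg "hello world"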
|
import pytest
import pandas as pd
from primrose.transformers.combine import left_merge_dataframe_on_validated_join_keys
from testfixtures import LogCapture
from primrose.transformers.combine import LeftJoinDataCombiner
def test_left_merge_dataframe_on_validated_join_keys():
corpus = pd.read_csv("test/minimal.csv")
data_out = left_merge_dataframe_on_validated_join_keys(
corpus, right_df=None, join_keys=None
)
assert data_out is corpus
def test_left_merge_dataframe_on_validated_join_keys2():
corpus = pd.read_csv("test/minimal.csv")
with pytest.raises(Exception) as e:
left_merge_dataframe_on_validated_join_keys(
corpus, right_df=corpus, join_keys=["JUNK"]
)
assert (
"Join key JUNK not in left Index(['first', 'last'], dtype='object'). Aborting merge."
in str(e)
)
def test_left_merge_dataframe_on_validated_join_keys3():
corpus = pd.read_csv("test/minimal.csv")
right_df = pd.read_csv("test/merge_right.csv")
with pytest.raises(Exception) as e:
left_merge_dataframe_on_validated_join_keys(
corpus, right_df=right_df, join_keys=["first"]
)
assert (
"Join key first not in right Index(['col1', 'col2'], dtype='object'). Aborting merge."
in str(e)
)
def test_left_merge_dataframe_on_validated_join_keys4():
right_df = pd.read_csv("test/minimal.csv")
left_df = pd.read_csv("test/merge_right2.csv")
with pytest.raises(Exception) as e:
left_merge_dataframe_on_validated_join_keys(
left_df, right_df=right_df, join_keys=["first"]
)
assert "Cannot cast join key first as int64" in str(e)
def test_left_merge_dataframe_on_validated_join_keys5():
left_df = pd.read_csv("test/minimal.csv")
right_df = pd.read_csv("test/merge_right3.csv")
out = left_merge_dataframe_on_validated_join_keys(
left_df, right_df=right_df, join_keys=["first"]
)
assert out.shape[0] == 2
assert list(out.T.to_dict().values())[0] == {
"first": "joe",
"last": "doe",
"age": 47,
}
assert list(out.T.to_dict().values())[1] == {
"first": "mary",
"last": "poppins",
"age": 42,
}
def test_left_merge_dataframe_on_validated_join_keys_fanout():
left_df = pd.read_csv("test/minimal.csv")
right_df = pd.read_csv("test/merge_right4.csv")
with LogCapture() as l:
out = left_merge_dataframe_on_validated_join_keys(
left_df, right_df=right_df, join_keys=["first"]
)
l.check(("root", "WARNING", "Merge increased data size by 1 rows."))
assert out.shape[0] == 3
assert list(out.T.to_dict().values())[0] == {
"first": "joe",
"last": "doe",
"age": 47,
}
assert list(out.T.to_dict().values())[1] == {
"first": "joe",
"last": "doe",
"age": 48,
}
assert list(out.T.to_dict().values())[2] == {
"first": "mary",
"last": "poppins",
"age": 42,
}
def test_LeftJoinDataCombiner():
combiner = LeftJoinDataCombiner(["first"])
# does nothing
combiner.fit(None)
corpus = pd.read_csv("test/minimal.csv")
with LogCapture() as l:
out = combiner.transform([corpus])
l.check(
(
"root",
"WARNING",
"Combiner needs at least two reader inputs, passing unchanged data.",
)
)
assert out == [corpus]
def test_LeftJoinDataCombiner2():
with pytest.raises(Exception) as e:
LeftJoinDataCombiner(["first"]).transform("JUNK")
assert "In this transformer, data needs to be a list of dataframes" in str(e)
def test_LeftJoinDataCombiner3():
with pytest.raises(Exception) as e:
LeftJoinDataCombiner(["first"]).transform(["JUNK", "JUNK2"])
assert (
"LeftJoinDataCombiner must operate on an iterable of pandas.DataFrame objects."
in str(e)
)
def test_LeftJoinDataCombiner4():
left_df = pd.read_csv("test/minimal.csv")
right_df = pd.read_csv("test/merge_right3.csv")
data_list = [left_df, right_df]
out = LeftJoinDataCombiner(["first"]).transform(data_list)
assert out.shape[0] == 2
assert list(out.T.to_dict().values())[0] == {
"first": "joe",
"last": "doe",
"age": 47,
}
assert list(out.T.to_dict().values())[1] == {
"first": "mary",
"last": "poppins",
"age": 42,
}
|
import re
from terra_sdk.client.lcd import LCDClient
from terra_sdk.client.lcd.params import PaginationOptions
from terra_sdk.key.mnemonic import MnemonicKey
terra = LCDClient(
url="https://pisco-lcd.terra.dev/",
chain_id="pisco-1",
)
pagOpt = PaginationOptions(limit=1, count_total=True)
mk1 = MnemonicKey(
mnemonic="nut praise glare false post crane clinic nothing happy effort loyal point parent few series task base maximum insect glass twice inflict tragic cancel"
)
mk2 = MnemonicKey(
mnemonic="invite tape senior armor tragic punch actor then patrol mother version impact floor begin fitness tool street lava evidence lemon oval width soda actual"
)
test1_address = terra.wallet(mk1).key.acc_address
test2_address = terra.wallet(mk2).key.acc_address
validator1_address = "terravaloper1thuj2a8sgtxr7z3gr39egng3syqqwas4hmvvlg"
validator2_address = "terravaloper1q33jd4t8788ckkq8u935wtxstjnphcsdne3gud"
def test_delegations():
result = terra.staking.delegations(
validator=validator1_address,
delegator=None,
params=pagOpt,
)
assert result is not None
result = terra.staking.delegations(
validator=None,
delegator=test1_address,
params=pagOpt,
)
assert result is not None
result = terra.staking.delegations(
validator=validator1_address,
delegator=test1_address,
)
assert result is not None
result = terra.staking.delegation(
validator=validator1_address,
delegator=test1_address,
)
assert result is not None
# Blocked : unbond on testnet
# def test_unbonding():
# result = terra.staking.unbonding_delegations(
# validator=validator1_address,
# delegator=None
# )
# assert len(result[0]) >0
# result = terra.staking.unbonding_delegations(
# validator=None,
# delegator=test1_address,
# params=pagOpt,
# )
# assert len(result[0]) >0
# result = terra.staking.unbonding_delegation(
# validator=validator_address,
# delegator=test1_address
# )
# assert result is not None
def test_validators():
_pagOpt = PaginationOptions(limit=3, count_total=True, reverse=False)
result = terra.staking.validators(_pagOpt)
assert result is not None
result = terra.staking.validator(validator1_address)
assert result is not None
# Blocked : due to completion_time
# def test_redelagations():
# _pagOpt = PaginationOptions(limit=1, count_total=True, reverse=False)
# result = terra.staking.redelegations(
# test1_address, params=_pagOpt
# )
# assert result[0] is not None
# result = terra.staking.redelegations(
# test1_address,
# validator_src=validator1_address,
# params=_pagOpt
# )
# assert(result[0] is not None)
# result = terra.staking.redelegations(
# test1_address,
# validator_dst=validator2_address,
# params=_pagOpt
# )
# assert(result[0] is not None)
# result = terra.staking.redelegations(
# test1_address,
# validator_src=validator1_address,
# validator_dst=validator2_address
# )
# assert(result is not None)
# def test_bonded_validators():
# result = terra.staking.bonded_validators(
# test1_address, pagOpt
# )
# assert result is not None
def test_pool():
result = terra.staking.pool()
assert result is not None
def test_parameters():
result = terra.staking.parameters()
assert result.get("unbonding_time")
assert result.get("max_validators")
assert result.get("max_entries")
assert result.get("historical_entries")
assert result.get("bond_denom")
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
from ax.core.observation import ObservationData, ObservationFeatures
from ax.core.parameter import ParameterType, RangeParameter
from ax.core.parameter_constraint import ParameterConstraint
from ax.core.search_space import SearchSpace, RobustSearchSpace
from ax.exceptions.core import UnsupportedError
from ax.modelbridge.transforms.base import Transform
from ax.models.types import TConfig
if TYPE_CHECKING:
# import as module to make sphinx-autodoc-typehints happy
from ax import modelbridge as modelbridge_module # noqa F401 # pragma: no cover
class UnitX(Transform):
"""Map X to [0, 1]^d for RangeParameter of type float and not log scale.
Uses bounds l <= x <= u, sets x_tilde_i = (x_i - l_i) / (u_i - l_i).
Constraints wTx <= b are converted to gTx_tilde <= h, where
g_i = w_i (u_i - l_i) and h = b - wTl.
Transform is done in-place.
"""
target_lb: float = 0.0
target_range: float = 1.0
def __init__(
self,
search_space: SearchSpace,
observation_features: List[ObservationFeatures],
observation_data: List[ObservationData],
modelbridge: Optional["modelbridge_module.base.ModelBridge"] = None,
config: Optional[TConfig] = None,
) -> None:
# Identify parameters that should be transformed
self.bounds: Dict[str, Tuple[float, float]] = {}
for p_name, p in search_space.parameters.items():
if (
isinstance(p, RangeParameter)
and p.parameter_type == ParameterType.FLOAT
and not p.log_scale
):
self.bounds[p_name] = (p.lower, p.upper)
def transform_observation_features(
self, observation_features: List[ObservationFeatures]
) -> List[ObservationFeatures]:
for obsf in observation_features:
for p_name, (l, u) in self.bounds.items():
if p_name in obsf.parameters:
# pyre: param is declared to have type `float` but is used
# pyre-fixme[9]: as type `Optional[typing.Union[bool, float, str]]`.
param: float = obsf.parameters[p_name]
obsf.parameters[p_name] = self._normalize_value(param, (l, u))
return observation_features
def _transform_search_space(self, search_space: SearchSpace) -> SearchSpace:
for p_name, p in search_space.parameters.items():
if p_name in self.bounds and isinstance(p, RangeParameter):
p.update_range(
lower=self.target_lb,
upper=self.target_lb + self.target_range,
)
if p.target_value is not None:
p._target_value = self._normalize_value(
p.target_value, self.bounds[p_name] # pyre-ignore[6]
)
new_constraints: List[ParameterConstraint] = []
for c in search_space.parameter_constraints:
constraint_dict: Dict[str, float] = {}
bound = float(c.bound)
for p_name, w in c.constraint_dict.items():
# p is RangeParameter, but may not be transformed (Int or log)
if p_name in self.bounds:
l, u = self.bounds[p_name]
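# Rescale the weight and shift the bound so that the original constraint w * x <= b
# is equivalent to the transformed constraint on x_tilde (the g, h formulas in the
# class docstring, generalized to target_lb / target_range).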
new_w = w * (u - l) / self.target_range
constraint_dict[p_name] = new_w
bound += self.target_lb * new_w - w * l
else:
constraint_dict[p_name] = w
new_constraints.append(
ParameterConstraint(constraint_dict=constraint_dict, bound=bound)
)
search_space.set_parameter_constraints(new_constraints)
return search_space
def untransform_observation_features(
self, observation_features: List[ObservationFeatures]
) -> List[ObservationFeatures]:
for obsf in observation_features:
for p_name, (l, u) in self.bounds.items():
# pyre: param is declared to have type `float` but is used as
# pyre-fixme[9]: type `Optional[typing.Union[bool, float, str]]`.
param: float = obsf.parameters[p_name]
obsf.parameters[p_name] = (
param - self.target_lb
) / self.target_range * (u - l) + l
return observation_features
def _transform_parameter_distributions(self, search_space: SearchSpace) -> None:
"""Transform the parameter distributions of the given search space, in-place.
This method should be called in transform_search_space before parameters
are transformed.
"""
if not isinstance(search_space, RobustSearchSpace):
return
distributions = search_space.parameter_distributions
for dist in distributions:
if dist.multiplicative:
# TODO: Transforming multiplicative distributions is a bit more
# complicated. Will investigate further and implement as needed.
raise NotImplementedError(
f"{self.__class__.__name__} transform of multiplicative "
"distributions is not yet implemented."
)
if len(dist.parameters) != 1:
# Ignore if the ranges of all parameters are same as the target range.
if (
all(
self.bounds[p_name][1] - self.bounds[p_name][0]
== self.target_range
for p_name in dist.parameters
)
and not search_space.is_environmental
):
continue
# TODO: Support transforming multivariate distributions.
raise UnsupportedError(
f"{self.__class__.__name__} transform of multivariate "
"distributions is not supported. Consider manually normalizing "
"the parameter and the corresponding distribution."
)
bounds = self.bounds[dist.parameters[0]]
p_range = bounds[1] - bounds[0]
if p_range == self.target_range and (
not search_space.is_environmental or bounds[0] == self.target_lb
):
# NOTE: This helps avoid raising the error below if using a discrete
# distribution in cases where we do not need to transform.
continue
loc = dist.distribution_parameters.get("loc", 0.0)
if search_space.is_environmental:
loc = self._normalize_value(loc, bounds)
else:
loc = loc / p_range * self.target_range
dist.distribution_parameters["loc"] = loc
dist.distribution_parameters["scale"] = (
dist.distribution_parameters.get("scale", 1.0)
/ p_range
* self.target_range
)
# Check that the distribution is valid after the transform.
try:
dist.distribution
except TypeError:
raise UnsupportedError(
f"The distribution {str(dist)} does not support transforming via "
"`loc` and `scale` arguments. Consider manually normalizing the "
"parameter and the corresponding distribution."
)
def _normalize_value(self, value: float, bounds: Tuple[float, float]) -> float:
"""Normalize the given value - bounds pair to
[self.target_lb, self.target_lb + self.target_range].
"""
lower, upper = bounds
return (value - lower) / (upper - lower) * self.target_range + self.target_lb
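
# A minimal, self-contained sketch of the mapping in the UnitX docstring with
# target_lb=0 and target_range=1 (illustrative values only, not part of the Ax API):
if __name__ == "__main__":
    l, u = 2.0, 10.0              # parameter bounds
    w, b = 3.0, 18.0              # constraint w * x <= b
    x = 6.0
    x_tilde = (x - l) / (u - l)   # 0.5
    g = w * (u - l)               # transformed weight: 24.0
    h = b - w * l                 # transformed bound: 12.0
    # w * x <= b holds exactly when g * x_tilde <= h.
    assert abs(w * x - (g * x_tilde + w * l)) < 1e-12
    print(f"x_tilde={x_tilde}, g={g}, h={h}, satisfied={g * x_tilde <= h}")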
|
<reponame>RizaXudayi/VarNet
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 31 17:04:47 2018
-------------------------------------------------------------------------------
=============================== VarNet Library ================================
-------------------------------------------------------------------------------
Authors: <NAME> and <NAME>
<EMAIL>
http://people.duke.edu/~rk157/
Department of Mechanical Engineering and Materials Science,
Duke University, Durham, NC 27708, USA.
Copyright (c) 2019 <NAME> - licensed under the MIT License
For a full copyright statement see the accompanying LICENSE.md file.
For theoretical derivations as well as numerical experiment results, see:
<NAME> and <NAME>. VarNet: Variational neural networks
for the solution of partial differential equations, 2019.
https://arxiv.org/pdf/1912.07443.pdf
To examine the functionalities of the VarNet library, see the accompanying
Operator files.
The code is fully functional with the following module versions:
- tensorflow: 1.10.0
- numpy: 1.16.4
- scipy: 1.2.1
- matplotlib: 3.0.3
-------------------------------------------------------------------------------
This file contains the main class for variational Neural Networks that solve PDEs.
"""
#%% Modules:
import os
import math
import time
import warnings
import scipy.io as spio
import numpy as np
shape = np.shape
reshape = np.reshape
size = np.size
from scipy import interpolate
import matplotlib.pyplot as plt
import tensorflow as tf
from TFModel import TFNN
from VarNetUtility import RNNData, FIXData, ManageTrainData, TrainResult
from FiniteElement import FE
from ContourPlot import ContourPlot
from UtilityFunc import UF
uf = UF()
#%% Training class:
class VarNet():
"""Class to construct the relevant input and perform training for the NN model."""
def __init__(self,
PDE,
layerWidth=[20],
modelId='MLP',
activationFun=None,
discNum=20,
bDiscNum=[],
tDiscNum=[],
MORdiscScheme=None,
processors=None,
controller=None,
integPnum=2,
optimizer='adam',
learning_rate=0.001):
"""
Function to initialize the attributes of the class.
Note that every time the VarNet class is instantiated, a new TensorFlow
graph is constructed and populated with computational nodes. This means
that repeating this process many times slows down the code.
Inputs:
PDE: instance of ADPDE to be solved
layerWidth [lNum x 1]: widths of the hidden layers
modelId: indicator for the sequential TensorFlow model to be trained:
'MLP': multi-layer perceptron with 'sigmoid' activation
'RNN': recurrent network with 'gru' nodes
activationFun: activation function used in the NN - options: 'sigmoid' or 'tanh'
discNum [dim x 1]: list of training points at each dimension
bDiscNum [bIndNum x 1]: list of training point density per unit length
tDiscNum: time discretization number
MORdiscScheme [list]: list of argument discretization schemes for each
function, each item in the list could be:
- a scalar specifying the same number of discretizations for
all variable arguments
- a column vector specifying the number of discretizations per
argument (used to select training points)
- a function handle to discretize arguments whose values are
defined in relation to each other (i.e., they are dependent),
e.g., for the source support, the upper bound in each
dimension is strictly larger than the lower bound.
The function returns a matrix of all combinations of the
discretized values for all arguments stored in columns
processors: processor(s) to be used for training (GPUs or CPU)
data is split between processors if more than one is specified
should be specified as 'CPU:i' or 'GPU:i' where i is
the index of the processor
controller (CPU or GPU): processor to contain the training data and
perform optimization in the parallel setting
integPnum: number of integration points per dimension for each element
optimizer: to be used for training: Adam, RMSprop
learning_rate: learning rate for Adam optimizer
Attributes:
dim
feDim: Finite Element integration dimension (one larger than 'dim'
for time dependent problems)
fixData: fix data used throughout the implementation
PDE
layerWidth
modelId
discNum
bDiscNum
tDiscNum
ht: time element size
t_coord: time discretization
graph: TensorFlow computational graph that stores the data
inpDim: number of the NN inputs
model: NN model
processor: processor to be used for training (GPU or CPU)
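Example (illustrative; assumes an ADPDE instance 'pde' for a 2D, time-dependent
    problem has been constructed elsewhere, and 'results/run1' is a hypothetical path):
    varnet = VarNet(pde, layerWidth=[20, 20], discNum=[40, 40],
                    bDiscNum=20, tDiscNum=30)
    varnet.train(folderpath='results/run1', weight=[10., 10., 1.])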
"""
# Data:
dim = PDE.dim
timeDependent = PDE.timeDependent
MORvar = PDE.MORvar
# Error handling:
if size(discNum)!=1 and size(discNum)!=dim:
raise ValueError('dimension of the number of discretizations does not' +
' match dimension of the domain!')
elif size(discNum)==1:
discNum = [discNum]*dim
if size(bDiscNum)!=1:
raise ValueError('density of boundary discretizations must be a scalar!')
if not timeDependent and modelId=='RNN':
raise ValueError('recurrent neural net structure cannot be used for ' +
'time-independent PDEs!')
elif timeDependent and uf.isempty(tDiscNum):
raise ValueError('time discretization number must be provided for ' +
'time-dependent PDEs!')
if uf.isnone(activationFun):
if modelId=='MLP': activationFun = 'sigmoid'
else: activationFun = 'tanh'
if type(layerWidth) is not list:
raise ValueError('layer widths should be given in a list!')
if not uf.isnone(MORvar) and uf.isnone(MORdiscScheme):
raise ValueError('\'MORdiscScheme\' must be given for MOR!')
# Determine the input dimension to the NN:
inpDim = dim
if PDE.timeDependent:
inpDim += 1
if not uf.isnone(MORvar):
MORvarNum = MORvar.varNum
for f in range(len(MORvarNum)):
inpDim += MORvarNum[f]
if integPnum==2: lossOpt = {'integWflag': False}
else: lossOpt = {'integWflag': True}
if hasattr(PDE,'source') and PDE.source==0.0: lossOpt['isSource']=False
else: lossOpt['isSource']=True
self.dim = dim
self.discNum = discNum
self.bDiscNum = bDiscNum
self.tDiscNum = tDiscNum
self.MORdiscScheme = MORdiscScheme
self.modelId = modelId
self.PDE = PDE
if modelId=='MLP':
self.fixData = FIXData(self, integPnum) # store fixed data to save computation
self.fixData.setInputData(self) # fixed PDE input data
RNNdata = None
elif modelId=='RNN':
self.FixDataRNN(inpDim, integPnum) # generate the FE data for numerical integration
RNNdata = self.RNNdata # RNN data including sequence length
self.tfData = TFNN(dim, inpDim, layerWidth,
modelId, activationFun, timeDependent, RNNdata,
processors, controller, lossOpt, optimizer, learning_rate)
def FixDataRNN(self, inpDim, integPnum=2):
"""
Function to generate the FE basis function data for RNNs.
Input:
integPnum: number of integration points in each dimension
"""
# Data:
dim = self.dim
discNum = self.discNum
bDiscNum = self.bDiscNum
PDE = self.PDE
timeDependent = PDE.timeDependent
BCtype = PDE.BCtype
domain = PDE.domain
bIndNum = domain.bIndNum
# Error handling:
if not timeDependent:
raise ValueError('the NN is not recurrent!')
# Get the mesh for the time coordinate:
tDiscNum = self.tDiscNum
ht, seqLen, t_coord, tDiscInd, tIntegInd = self.timeDiscRNN(integPnum)
# Get the mesh for the domain:
mesh = domain.getMesh(discNum, bDiscNum)
dof = mesh.dof # number of training points inside domain
bdof = mesh.bdof # number of all boundary segment dofs
he = mesh.he # node sizes
hVec = np.vstack([he, ht]) # element sizes in space-time
coord = mesh.coordinates # mesh coordinates for inner domain
# Total boundary nodes (over space):
bDof = 0
for bInd in range(bIndNum):
if BCtype[bInd] == 'Dirichlet':
bDof += bdof[bInd]
# Input to the NN for residual computations:
# (smaller than training Input and uniform for easier interation):
uniform_input = uf.pairMats(coord, t_coord)
# Exact solution over the grid for error computation:
if not uf.isnone(PDE.cEx):
Coord = uniform_input[:,:dim]
tCoord = uniform_input[:, dim:dim+1]
cEx = PDE.cEx(Coord, tCoord)
else:
cEx = None
# Fold into RNN input shape:
uniform_input = reshape(uniform_input, [dof, seqLen, dim+1])
# Uniform boundary input data:
uniform_biInput, biDof = self.biTrainPoints(mesh, t_coord)
# Total (Dirichlet) boundary discretization:
bDofsum = np.sum(biDof[:-1]) # exclude the initial condition dof
# Processed FE basis data for RNNs:
integNum, integW, delta, N, nablaPhi, deltaRNN = self.sortRNN(integPnum)
nRNN = ((2*integPnum)**dim)*dof # number of spatial points for RNN input
nt = dof*tDiscNum # total number of training points in space-time
nT = nt*integNum # total number of integration points
N = np.tile(N, reps=[nt, 1]) # repeat basis values for training points
detJ = np.prod(0.5*hVec) # Jacobian scaling of the integral
# Basis derivatives in physical domain:
dN = 2/hVec*nablaPhi # derivative of the bases at integration points
dN = np.tile(dN, reps=[1,nt]).T
# Split spatial and temporal derivative values:
dNx = dN[:,0:dim]
dNt = dN[:,dim:dim+1]
# Store data:
self.fixData = FIXData(dim=dim, feDim=dim+1, integPnum=integPnum, integNum=integNum,
dof=dof, bdof=bdof, biDof=biDof, bDofsum=bDofsum,
nt=nt, nT=nT, delta=delta, uniform_input=uniform_input,
uniform_biInput=uniform_biInput, cEx=cEx, N=N, dNx=dNx,
dNt=dNt, integW=integW, hVec=hVec, detJ=detJ)
self.RNNdata = RNNData(dim, inpDim, integPnum, integNum, dof, bDof, tDiscNum,
nRNN, seqLen, nt, nT, t_coord, tDiscInd,
tIntegInd, deltaRNN)
def timeDisc(self, tdof=None, rfrac=0, sortflg=True, discTol=None):
"""
Time discretization for time-dependent PDE.
Inputs:
tdof: number of time discretization points (use default if None)
rfrac: fraction of samples that are drawn randomly
sortflg: if True sort the randomly drawn samples
discTol: minimum distance of discretization points from time lower bound
Outputs:
ht: time element size
t_coord: time discretization
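Example (illustrative): for PDE.tInterval=[0, 1], tdof=10, and rfrac=0, the
element size is ht=0.1 and t_coord contains 10 uniformly spaced points in [0.1, 1].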
"""
# Data:
PDE = self.PDE
# Error handling:
if not PDE.timeDependent:
raise Exception('The problem is time-independent!')
# Project the random sampling fraction back to [0,1] interval:
if rfrac<0: rfrac = 0
elif rfrac>1: rfrac = 1
if uf.isnone(tdof):
tdof = self.tDiscNum
tlim = PDE.tInterval
ht = (tlim[1] - tlim[0])/tdof # time element size
if uf.isnone(discTol): tol = ht # discretization tolerance
else: tol = np.asscalar(discTol)
dof1 = math.floor(tdof*rfrac) # random grid
dof2 = tdof - dof1 # uniform grid
t_coord1 = np.random.uniform(tlim[0]+tol, tlim[1], dof1) # random test function locations
t_coord2 = np.linspace(tlim[0]+tol, tlim[1], dof2) # uniform test function locations
t_coord = uf.hstack([t_coord1, t_coord2])
if rfrac>0 and sortflg: t_coord = np.sort(t_coord)
t_coord = reshape(t_coord, [tdof,1])
return ht, t_coord
def timeDiscRNN(self, integPnum):
"""
The RNN uses a fixed time discretization with inquiry points for the requested
discrete times as well as for the numerical integration points. This function
calculates the total sequence length for these inquiries, which is also the
length of the RNN's time dimension, and constructs the index sequence for
extracting the numerical integration data from the RNN output. It also returns
the discrete time values corresponding to the data that the RNN is trained on.
Inputs:
integPnum: number of integration points in the time dimension
Outputs:
ht: time element size
seqLen: total RNN sequence length
t_coord: time values of the RNN sequence
discInd: indices of the discretization points within the sequence
integInd: indices of the numerical integration points within the sequence
"""
# Data:
tDiscNum = self.tDiscNum
# Error handling:
if not self.modelId=='RNN':
raise Exception('The NN is not recurrent!')
# Construct the FE data for time dimension:
fixData = FE(dim=1, integPnum=integPnum)
delta = fixData.delta[0][0,:]
# Add the interval end (discrete times) to the query points if it is not part of the integration points:
if np.sum(delta==0)==0:
intDiscNum = integPnum+1
delta = np.hstack([0, delta])
else:
intDiscNum = integPnum
# Time discretization:
ht, t_coord = self.timeDisc()
t_coord = np.vstack([[0], t_coord]) # include t=0 into the discretized vector
tDiscNum += 1
# Total number of time discretizations (sequence length):
seqLen = intDiscNum*tDiscNum
# Time values for the RNN sequence:
delta = np.tile(delta, reps=[tDiscNum,1])
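# Broadcast: add the scaled integration offsets to every discrete time to build the full RNN time sequence.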
t_coord = t_coord + ht*delta
t_coord = reshape(t_coord, newshape=[seqLen, 1])
# Indices of discretization points:
discInd = np.arange(intDiscNum, seqLen, intDiscNum)
# Numerical integration over time involves copying the relevant nodes for spatial discretization points:
integInd = []
for i in range(1,integPnum+1): # loop over integration points
start = intDiscNum+i # skip the first element
stop = (tDiscNum-1)*intDiscNum # skip the last element
step = intDiscNum
integInd.append( np.arange(start, stop, step) )
integInd = np.array(integInd).T
integInd = np.hstack([integInd, integInd]) # copy for each element
integInd = reshape(integInd, newshape=[(tDiscNum-2)*integPnum*2, 1])
# Include the first and last elements for numerical integration:
ind = np.arange(1, integPnum+1)[np.newaxis].T
integInd = np.vstack([ind, integInd])
ind = np.arange(stop+1, stop+integPnum+1)[np.newaxis].T
integInd = np.vstack([integInd, ind])
integInd = reshape(integInd, newshape=len(integInd))
return ht, seqLen, t_coord, discInd, integInd
def sortRNN(self, integPnum):
"""Function to sort the FE basis data for RNNs."""
# Data:
dim = self.dim
# FE basis data:
feDim = dim+1 # FE dimension for time-dependent problem
feData = FE(feDim) # construct the Finite Element basis functions
basisNum = feData.basisNum # number of basis functions (equivalently elements)
IntegPnum = feData.IntegPnum # number of integration points
integNum = basisNum*IntegPnum # summation bound for numerical integration at each point
integW = feData.integW # numerical integration weights
delta = reshape(feData.delta, [feDim, integNum])
basVal = reshape(feData.basVal, newshape=[integNum, 1]) # basis values at integration points
basDeriVal = reshape(feData.basDeriVal, newshape=[feDim, integNum])
# Sort coordinates and update the corresponding basis values and derivatives:
ind = np.lexsort( [ delta[i,:] for i in reversed(range(feDim)) ] )
delta = delta[:,ind]
basVal = basVal[ind]
basDeriVal = basDeriVal[:,ind]
# Extract the corresponding spatial translation vector:
# (this only works if ordering in FE class is such that the last dimension is flipped first)
ind = np.arange(0, integNum, step=IntegPnum, dtype=int)
deltaRNN = delta[0:dim, ind]
return integNum, integW, delta, basVal, basDeriVal, deltaRNN
def trainingPointsRNN(self, mesh):
"""
Function to construct the input to RNN corresponding to the training
data provided from the other functions in the class.
"""
# Error handling:
if not self.modelId=='RNN':
raise ValueError('the NN is not recurrent!')
# Data:
dim = self.dim
RNNdata = self.RNNdata
nRNN = RNNdata.ns
deltaRNN = RNNdata.deltaRNN
PDE = self.PDE
domain = PDE.domain
BCtype = PDE.BCtype
bIndNum = domain.bIndNum
he = mesh.he
coord = mesh.coordinates
# Spatial integration points:
coord2 = np.zeros([nRNN,dim]) # store spatial coordinates of spatial integration points
for d in range(dim):
coordTmp = coord[:,d:d+1] + he[d]*deltaRNN[d,:]
coord2[:,d] = reshape(coordTmp, nRNN)
# Corresponding RNN input:
Coord = RNNdata.buildInput(coord2)
# Update the mesh struct to construct the initial coordinates over all points:
mesh.dof = nRNN
mesh.coordinates = coord2
# Dirichlet boundary conditions:
b_coord = mesh.bCoordinates # mesh coordinates for boundaries
bInput = []
for bInd in range(bIndNum):
if BCtype[bInd] == 'Dirichlet':
bInpuTmp = RNNdata.buildInput(b_coord[bInd])
bInput.append(bInpuTmp) # append boundary input
bInput = uf.vstack(bInput)
# Put together the boundary and inner-domain inputs:
InputRNN = uf.vstack([bInput, Coord])
return InputRNN, mesh
def trainingPoints(self, smpScheme='uniform', frac=0.5, addTrainPts=True, suppFactor=1.0):
"""
Function to generate the training points and update the training points.
Input:
smpScheme: sampling scheme
uniform: constant uniform samples
random: randomly sample the space-time with a uniform distribution
optimal: use feedback from PDE-residual to select optimal training points
frac: fraction of training points selected optimally
addTrainPts: if True add optimal training points when 'smpScheme=optimal',
i.e., refine the mesh, o.w., keep the total number of training points constant
suppFactor: support scaling for optimal training points if they are
added to already existing training points
"""
# If optimal sampling scheme is requested, invoke a different function:
if smpScheme=='optimal':
return self.optTrainPoints(frac, addTrainPts, suppFactor)
elif smpScheme=='random':
rfrac = frac
else:
rfrac = 0.
# Data:
dim = self.dim
discNum = self.discNum
bDiscNum = self.bDiscNum
modelId = self.modelId
PDE = self.PDE
timeDependent = PDE.timeDependent
domain = PDE.domain
fixData = self.fixData
dof = fixData.dof
nt = fixData.nt
nT = fixData.nT
delta = fixData.delta
# Get the mesh for the time coordinate:
if modelId=='RNN':
ht = fixData.hVec[-1]
RNNdata = self.RNNdata
tDiscNum = RNNdata.tDiscNum
tDiscInd = RNNdata.tDiscInd
t_coord = RNNdata.t_coord[tDiscInd] # keep time-instances corresponding to numerical integration
elif timeDependent:
tDiscNum = self.tDiscNum
ht, t_coord = self.timeDisc(rfrac=rfrac)
else:
tDiscNum = 1 # temporary value to create the integration points
t_coord = []
# Get the mesh for the domain:
mesh = domain.getMesh(discNum, bDiscNum, rfrac=rfrac)
if smpScheme=='random' and mesh.dof<dof: # ensure that in the presence of obstacles exactly 'dof' samples are drawn
dofTmp = mesh.dof
coord = mesh.coordinates
while dofTmp<dof: # append random samples to reach dof discretization points
meshTmp = domain.getMesh(discNum, bDiscNum, rfrac=1.)
coord = uf.vstack([coord, meshTmp.coordinates])
dofTmp += meshTmp.dof
# Update the mesh attributes:
mesh.dof = dof # number of nodes in the inner domain
mesh.coordinates = coord[:dof,:] # mesh coordinates for inner domain
# Load mesh data:
he = mesh.he # node sizes
coord = mesh.coordinates # mesh coordinates for inner domain
# Integration points:
Coord = np.zeros([nT,dim]) # store spatial coordinates of integration points
for d in range(dim): # add integration points to spatial coordinates of each training point
coordTmp = np.repeat(coord[:,d], repeats=tDiscNum)
coordTmp = reshape(coordTmp, [nt, 1]) + he[d]*delta[d,:]
Coord[:,d] = np.reshape(coordTmp, nT) # re-arrange into a column
if timeDependent: # add integration points to time-coordinate of each node
tCoord = np.tile(t_coord, reps=[dof,1])
tCoord = tCoord + ht*delta[-1,:]
tCoord = np.reshape(tCoord, [nT, 1]) # re-arrange into a column
Input = np.concatenate([Coord, tCoord], axis=1)
else:
Input = Coord
# Construct the input to RNN:
if modelId=='RNN':
t_coord = RNNdata.t_coord # for RNN use all time-discretization for BCs
InputRNN, mesh = self.trainingPointsRNN(mesh) # secondary mesh only used to generate initial condition
else:
InputRNN = []
# Initial and boundary training data:
biInput, biDof = self.biTrainPoints(mesh, t_coord)
return Input, InputRNN, biInput, biDof
def biTrainPoints(self, mesh, t_coord):
"""
Function to prepare the training points for the boundary and initial
conditions.
Inputs:
mesh: mesh object containing the discretization information, including
the inner-domain coordinates 'coordinates' and the list of boundary
coordinates 'bCoordinates'
t_coord: time discretization
"""
# Data:
PDE = self.PDE
timeDependent = PDE.timeDependent
domain = PDE.domain
BCtype = PDE.BCtype
bIndNum = domain.bIndNum
coord = mesh.coordinates # mesh coordinates for inner domain
dof = mesh.dof
b_coord = mesh.bCoordinates # mesh coordinates for boundaries
# Initial condition:
if timeDependent:
iInput = np.concatenate([coord, np.zeros([dof,1])], axis=1)
else:
iInput = []
# Dirichlet boundary conditions:
bInput = []
biDof = []
for bInd in range(bIndNum):
if BCtype[bInd] == 'Dirichlet':
bInpuTmp = uf.pairMats(b_coord[bInd], t_coord)
biDof.append(len(bInpuTmp)) # number of boundary nodes
bInput.append(bInpuTmp) # append boundary input
bInput = uf.vstack(bInput)
if timeDependent: biDof.append(dof) # add nodes corresponding to initial condition
biInput = uf.vstack([bInput, iInput])
return biInput, biDof
def biTrainData(self, biInput, biDof, biArg=[], biLabel0=[]):
"""
Function to prepare the training data for the boundary and initial
conditions.
Inputs:
biInput: input value to the NN for boundary and initial condition
biDof: length of each boundary-initial input
biArg [list]: contains dictionaries for MOR variable arguments of
the boundary-initial condition functions
Note: BICs will be computed without extra MOR arguments if biArg is empty;
if an individual term in biArg is None, the corresponding BIC
will not be recomputed
biLabel0: array containing the previous set of computed boundary-initial
labels
"""
# Data:
dim = self.dim
PDE = self.PDE
timeDependent = PDE.timeDependent
domain = PDE.domain
BCtype = PDE.BCtype
BCs = PDE.BCs
bIndNum = domain.bIndNum
# Default for MOR variable arguments:
if uf.isempty(biArg):
biArg = [{} for i in range(bIndNum)]
if timeDependent: biArg.append({})
# Dirichlet boundary conditions:
bLabel = []
indp = 0
for bInd in range(bIndNum):
ind = indp + biDof[bInd] # current end index
# Check if the computation of current boundary function is required:
if not BCtype[bInd]=='Dirichlet':
continue
elif uf.isnone(biArg[bInd]) and uf.isempty(biLabel0):
raise ValueError('\'biLabel0\' must be provided for \'None\' arguments!')
elif uf.isnone(biArg[bInd]):
bLabel.append(biLabel0[indp:ind,:])
continue
# Dirichlet BC data:
beta = BCs[bInd][1]
g = BCs[bInd][2]
# Arguments:
bCoord = biInput[indp:ind, :dim]
if timeDependent:
tCoord = [ biInput[indp:ind, dim][np.newaxis].T ]
else:
tCoord = []
bLab = g(bCoord, *tCoord, **biArg[bInd]) # compute the boundary function
bLabel.append(bLab/beta) # append corresponding label
indp = ind # update the previous index
bLabel = uf.vstack(bLabel)
# Initial condition:
if timeDependent and uf.isnone(biArg[-1]) and uf.isempty(biLabel0):
raise ValueError('\'biLabel0\' must be provided for \'None\' arguments!')
elif timeDependent and uf.isnone(biArg[-1]):
iLabel = biLabel0[ind:,:]
elif timeDependent:
coord = biInput[ind:, :dim]
iLabel = PDE.IC(coord, **biArg[-1])
else:
iLabel = []
return uf.vstack([bLabel, iLabel])
def PDEinpData(self, Input, inpArg=[]):
"""
Function to specify the parameterized PDE input data.
Inputs:
Input: coordinates of the numerical integration points in space-time
inpArg [list]: containing the following dictionaries:
diffArg [dict]: dictionary containing MOR variables for diffusivity function
velArg [dict]: dictionary containing MOR variables for velocity function
sourceArg [dict]: dictionary containing MOR variables for source function
Note: input data will be computed without extra MOR arguments if inpArg is empty;
if an individual term in inpArg is None, the corresponding field will not
be recomputed
"""
# Data:
dim = self.dim
PDE = self.PDE
timeDependent = PDE.timeDependent
diffFun = PDE.diffFun
velFun = PDE.velFun
sourceFun = PDE.sourceFun
# Default for MOR variable arguments:
if uf.isempty(inpArg):
diffArg, velArg, sourceArg = [{} for i in range(3)]
else:
diffArg, velArg, sourceArg = inpArg
if timeDependent:
tCoord = [ Input[:,-1][np.newaxis].T ]
else:
tCoord = []
if uf.isnone(diffArg):
diff = None
else:
diff = diffFun(Input[:,0:dim], *tCoord, **diffArg)
if uf.isnone(velArg):
vel = None
else:
vel = velFun(Input[:,0:dim], *tCoord, **velArg)
if uf.isnone(sourceArg):
sourceVal = None
else:
sourceVal = sourceFun(Input[:,0:dim], *tCoord, **sourceArg)
return diff, vel, sourceVal
def trainData(self, batch, MORdiscArg, tData, resCalc=False):
"""
Function to update the PDE input and boundary-initial data in a smart
way for computational savings. The function only updates segments of
the data that have to change due to an update of the space-time sampling
and/or a change in the variable arguments of MOR.
Inputs:
batch [int]: current MOR batch number
MORdiscArg [list]: list of discretized variable arguments for MOR
tData: instance of 'ManageTrainData' class to store training data
resCalc [bool]: determines if the data is being prepared for residual computations
"""
# Default: return the data without update if space-time discretization
# has not changed and MOR variables do not exist:
if not tData.inputUpdated and uf.isnone(MORdiscArg):
return tData
# Avoid recomputation if MOR data are stored:
elif tData.MORdataSaved:
tData.loadMORData(batch)
return tData
# Data:
tfData = self.tfData
Input, biInput, _, _, biLabel0, _, _, diff0, vel0 = tData.getAllData()
fixData = self.fixData
if not resCalc:
nT = fixData.nT
else:
nT = shape(Input)[0]
biDof = fixData.biDof
N = fixData.N
dNx = fixData.dNx
# Update all data if the space-time discretization has been updated:
if tData.inputUpdated:
if not batch==0 and not resCalc:
raise ValueError('\'batch\' variable must be reset when the ' +
'space-time discretization is updated!')
elif uf.isnone(MORdiscArg):
biArg, inpArg = [[]]*2
MORinpNN = None
else:
biArg, inpArg, MORinpNN = self.MORargExtract(batch, MORdiscArg, defArg={})
# Boundary-initial data:
if not resCalc:
biLabel = self.biTrainData(biInput, biDof, biArg)
else:
biLabel = [] # not required for residual calculations
# PDE-input data:
if resCalc and uf.isnone(MORdiscArg) and shape(fixData.uniform_input)[0]==nT:
diff, vel, sourceVal = fixData.uniform_inpData
else:
diff, vel, sourceVal = self.PDEinpData(Input, inpArg)
if not resCalc:
gcoef = diff*dNx + vel*N
else:
gcoef = [] # not required for residual calculations
# Construct the total input containing space-time as well as MOR discretizations:
if not uf.isnone(MORdiscArg):
InpMOR = np.tile(MORinpNN, reps=[nT,1])
InpuTot = np.hstack([Input, InpMOR])
if not resCalc:
InpMOR = np.tile(MORinpNN, reps=[np.sum(biDof),1])
biInpuTot = np.hstack([biInput, InpMOR])
else:
biInpuTot = [] # not required for residual calculations
else:
InpuTot, biInpuTot = Input, biInput
# Update the training data and return:
tData.updateData(InpuTot, biInpuTot, biLabel, gcoef, sourceVal, diff, vel, MORinpNN)
if not resCalc:
tData.trainDicts(fixData, tfData) # generate training dictionaries
return tData
# Extract the relevant MOR arguments:
biArg, inpArg, MORinpNN = self.MORargExtract(batch, MORdiscArg)
# Update the boundary-initial condition data:
funInd = self.PDE.MORfunInd # mapping between PDE data and MOR functions
if funInd['biData'] and not resCalc:
biLabel = self.biTrainData(biInput, biDof, biArg, biLabel0)
elif not resCalc:
biLabel = None
else:
biLabel = []
# Update the PDE input data:
if funInd['inpData']:
diff, vel, sourceVal = self.PDEinpData(Input, inpArg)
if resCalc: gcoef = [] # not required for residual calculations
elif uf.isnone(diff) and uf.isnone(vel):
gcoef, diff, vel = [None]*3 # none of the fields are updated
elif uf.isnone(diff): gcoef = diff0*dNx + vel*N
elif uf.isnone(vel): gcoef = diff*dNx + vel0*N
else: gcoef = diff*dNx + vel*N
else:
gcoef, sourceVal, diff, vel = [None]*4
# Construct the total input containing space-time as well as MOR discretizations:
InpMOR = np.tile(MORinpNN, reps=[nT,1])
InpuTot = np.hstack([Input, InpMOR])
if not resCalc:
InpMOR = np.tile(MORinpNN, reps=[np.sum(biDof),1])
biInpuTot = np.hstack([biInput, InpMOR])
else:
biInpuTot = [] # not required for residual calculations
# Update the training data and return:
tData.updateData(InpuTot, biInpuTot, biLabel, gcoef, sourceVal, diff, vel, MORinpNN)
return tData
def MORargExtract(self, batch, MORdiscArg, defArg=None):
"""
Extract the relevant arguments for MOR functions.
Inputs:
batch[int]: current batch number
MORdiscArg [list]: list of discretized variable arguments for MOR
defArg: default argument to be passed to functions that are not part
of the MOR:
None: function will not be recomputed and the stored data is used
[]: function will be computed but without extra MOR arguments
Note: this argument is not part of the user input; it is set internally
and should be used in agreement with PDEinpData() and biTrainData()
Outputs:
biArg: arguments to be sent to biTrainData() function
inpArg: arguments to be sent to PDEinpData() function
inpNN: extra inputs to the NN due to MOR parameters
"""
# Data:
PDE = self.PDE
BCtype = PDE.BCtype
bIndNum = PDE.domain.bIndNum
funInd = PDE.MORfunInd # mapping between PDE data and MOR functions
MORvar = PDE.MORvar
argNames = MORvar.ArgNames # names of variable arguments for each function
argInd = self.fixData.MORargInd # index combinations of all functions (batch loop)
inpNN = [] # corresponding MOR input to the NN
# Boundary-initial data:
biArg = []
if funInd['biData']:
BCind = funInd['BCs']
for bInd in range(bIndNum): # loop over boundary conditions
if not BCtype[bInd]=='Dirichlet' or uf.isnone(BCind[bInd]):
biArg.append(None)
continue
# Avoid recomputation if the argument for current function has not changed:
find = BCind[bInd] # MOR function corresponding to current BC function 'g'
ind = argInd[batch, find] # argument index for function specified by find
values = MORdiscArg[find][ind,:]
if batch>0:
indp = argInd[batch-1, find] # argument index for function specified by find
valprev = MORdiscArg[find][indp,:] # previous argument
if uf.l2Err(values, valprev)<1.e-10:
biArg.append(None)
continue
# Construct the dictionary of variable arguments:
biArg.append(uf.buildDict(argNames[find], values))
inpNN.extend(values)
# Initial condition
if not uf.isnone(funInd['IC']):
find = funInd['IC']
ind = argInd[batch, find] # argument index for function specified by find
values = MORdiscArg[find][ind,:]
# Avoid recomputation if the argument for current function has not changed:
if batch==0:
biArg.append(uf.buildDict(argNames[find], values))
inpNN.extend(values)
else:
indp = argInd[batch-1, find] # argument index for function specified by find
valprev = MORdiscArg[find][indp,:] # previous argument
if uf.l2Err(values, valprev)<1.e-10:
biArg.append(None)
else:
biArg.append(uf.buildDict(argNames[find], values))
inpNN.extend(values)
else:
biArg = [] # assign the default if MOR does not involve boundary-initial data
# PDE input data:
if funInd['inpData'] and not uf.isnone(funInd['diff']):
find = funInd['diff']
ind = argInd[batch, find] # argument index for function specified by funInd['diff']
values = MORdiscArg[find][ind,:]
# Avoid recomputation if the argument for current function has not changed:
if batch==0:
diffArg = uf.buildDict(argNames[find], values)
inpNN.extend(values)
else:
indp = argInd[batch-1, find] # argument index for function specified by find
valprev = MORdiscArg[find][indp,:] # previous argument
if uf.l2Err(values, valprev)<1.e-10:
diffArg = defArg
else:
diffArg = uf.buildDict(argNames[find], values)
inpNN.extend(values)
else:
diffArg = defArg # assign the default argument if 'diff' is not part of MOR
if funInd['inpData'] and not uf.isnone(funInd['vel']):
find = funInd['vel']
ind = argInd[batch, find] # argument index for function specified by funInd['vel']
values = MORdiscArg[find][ind,:]
# Avoid recomputation if the argument for current function has not changed:
if batch==0:
velArg = uf.buildDict(argNames[find], values)
inpNN.extend(values)
else:
indp = argInd[batch-1, find] # argument index for function specified by find
valprev = MORdiscArg[find][indp,:] # previous argument
if uf.l2Err(values, valprev)<1.e-10:
velArg = defArg
else:
velArg = uf.buildDict(argNames[find], values)
inpNN.extend(values)
else:
velArg = defArg # assign the default argument if 'vel' is not part of MOR
if funInd['inpData'] and not uf.isnone(funInd['source']):
find = funInd['source']
ind = argInd[batch, find] # argument index for function specified by funInd['source']
values = MORdiscArg[find][ind,:]
# Avoid recomputation if the argument for current function has not changed:
if batch==0:
sourceArg = uf.buildDict(argNames[find], values)
inpNN.extend(values)
else:
indp = argInd[batch-1, find] # argument index for function specified by find
valprev = MORdiscArg[find][indp,:] # previous argument
if uf.l2Err(values, valprev)<1.e-10:
sourceArg = defArg
else:
sourceArg = uf.buildDict(argNames[find], values)
inpNN.extend(values)
else:
sourceArg = defArg # assign the default argument if 'source' is not part of MOR
if funInd['inpData']:
inpArg = [diffArg, velArg, sourceArg] # group the arguments together
else:
inpArg = [] # assign default argument if PDE-data is not part of MOR
inpNN = reshape(inpNN, [1, size(inpNN)]) # store the NN input in a row
return biArg, inpArg, inpNN
def splitLoss(self, tData, fixData=None, MORdiscArg=None, W=None):
"""
Function to compute the components of the loss function for a given
input data and MOR discretization. The 'W' argument defines arbitrary
combination of weights on individual terms of the loss function.
Inputs:
fixData: global fixed data used throughout the class
tData: instance of 'ManageTrainData' class
MORdiscArg: discretization of the MOR arguments
W [nx3]: each row corresponds to a combination of weights
"""
# Error handling:
if not (uf.isnone(W) or type(W)==np.ndarray):
raise ValueError('\'W\' must be an array with shape (,3)!')
# If no weight is supplied, get contribution of each term to loss function:
if uf.isnone(W): W = np.eye(3)
# Data:
if uf.isnone(fixData):
fixData = self.fixData
MORbatchNum = fixData.MORbatchNum
lossVecflag = fixData.lossVecflag
tfData = self.tfData
if uf.isnone(MORdiscArg): MORdiscArg = fixData.MORdiscArg
lossComp = 0.
if lossVecflag: lossVec = []
else: lossVec = None
for batch in range(MORbatchNum): # loop over MOR batches
tData = self.trainData(batch, MORdiscArg, tData)
BCloss, ICloss, varLoss, lossVecTmp = tData.splitLoss(tfData, lossVecflag) # individual loss components
lossComp += np.array([[BCloss, ICloss, varLoss]]).T # column vector
if lossVecflag: lossVec.append(lossVecTmp) # append loss field for current batch
lossVal = np.matmul(W, lossComp) # apply the weights
return lossVal, tData, lossVec
def trainWeight(self, weight, tData, MORdiscArg, normalizeW, useOriginalW, lossTot=1.e6):
"""
Function to determine the penalty weights for training. The function
adjusts the weights so that different terms have contributions specified
by 'weight'. The maximum value of objective is set to 1.e6 so that
there is a general sense of convergence for the training process.
Inputs:
weight: requested relative weights of the loss function terms
normalizeW: whether to normalize the weights so that the individual terms
have the contributions specified by 'weight' at the start of the iterations
useOriginalW: if True use 'weight' directly as the training weights 'trainW'
lossTot: target value of the total weighted loss (default 1.e6)
Outputs:
trainW: training weights for the loss function terms
tData: instance of training data management class passed back to save
computation on PDE input data
lossVal: values of the individual loss terms
"""
# Data:
timeDependent = self.PDE.timeDependent
trainRes = self.trainRes
# Compute the individual loss terms:
lossVal, tData, _ = self.splitLoss(tData, MORdiscArg=MORdiscArg)
lossVal = reshape(lossVal,3)
if not timeDependent:
lossTmp = [lossVal[0], lossVal[2]] # remove initial condition
else:
lossTmp = lossVal
# Compute the training weights:
if useOriginalW:
trainW = weight
elif normalizeW:
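# Choose trainW so that trainW_i * loss_i = lossTot * weight_i / sum(weight), i.e.,
# each term contributes in the requested proportion and the weighted total equals lossTot.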
nw = len(weight)
W = np.tile(weight, [nw, 1])
W = W/reshape(weight, [nw, 1])
W = np.sum(W, axis=1, keepdims=True)*uf.vstack(lossTmp)
trainW = reshape(lossTot/W, nw)
else:
loss = np.sum(np.array(weight)*np.array(lossTmp))
trainW = lossTot/loss*np.array(weight)
if not timeDependent: trainW = np.array([trainW[0], 0., trainW[1]])
trainW = np.array(trainW)
# Print weight data to output and case file:
string = 'Training weight information:\n'
string += '\tboundary condition loss value: ' + np.array2string(lossVal[0], precision=4) + '\n'
string += '\tinitial condition loss value: ' + np.array2string(lossVal[1], precision=4) + '\n'
string += '\tintegral loss value: ' + np.array2string(lossVal[2], precision=4) + '\n'
string += '\trequested weight on each term: ' + str(weight) + '\n'
string += '\tcorresponding training weights: ' + np.array2string(trainW, precision=4) + '\n\n'
print(string)
trainRes.writeCase(string)
return trainW, tData, lossVal
def weightUpdate(self, trainW, tData, MORdiscArg):
"""
Function to periodically update the training weights so that the individual
terms in the loss function keep their desired relative contributions.
The desired weights are preset to [10, 10, 1], meaning that the weighted
loss value of the boundary-initial terms is kept ten times higher than that
of the integral term. If this balance is violated, the function incrementally
adds 10% to the under-represented weights so that after a set of weight
updates the balance is restored.
Output:
trainW: current training weight values
tData: instance of training data management class passed back to save
computation on PDE input data
"""
# Data:
timeDependent = self.PDE.timeDependent
trainRes = self.trainRes
# Compute the individual loss terms and total loss:
lossVal, tData, _ = self.splitLoss(tData, MORdiscArg=MORdiscArg)
lossVal = reshape(lossVal, 3) # flatten the (3,1) column returned by splitLoss()
lossTot = np.sum(trainW*lossVal) # total loss value
if not timeDependent:
weight = np.array([10, 1.0]) # desired weights
lossVal = lossVal[[0,2]] # remove initial condition
else:
weight = np.array([10, 10, 1.0]) # desired weights
effLoss = trainW*weight*lossVal # effective loss
wBar = effLoss/min(effLoss) # normalize the weights
wBar = 0.1*(wBar>2.0) + 1 # add 10% to under-represented terms
trainW = wBar*trainW # update the weights
alpha = lossTot/np.sum(trainW*lossVal) # coefficient to reset the total loss to current value
trainW = alpha*trainW # reset the total loss to current value
if not timeDependent: trainW = np.array([trainW[0], 0., trainW[1]])
# Print weight data to output and case file:
string = '\nupdated training weights: ' + np.array2string(trainW, precision=4) + '\n'
print(string)
trainRes.writeCase(string)
return trainW, tData
def train(self, folderpath, weight=None, smpScheme='uniform', epochNum=500000,
tol=1.e-1, verbose=True, saveFreq=100, pltReplace=True, saveMORdata=False,
frac=None, addTrainPts=True, suppFactor=1.0, multiTrainUpd=False,
trainUpdelay=2e4, tolUpd = 0.01, reinitrain = True,
updateWeights=False, normalizeW=False, adjustWeight=False, useOriginalW=False,
batchNum=None, batchLen=None, shuffleData=False, shuffleFreq=1):
"""
Function to train the VarNet. This function is the only one to run its
own session since here we prefer efficiency over clarity.
Inputs:
folderpath: path to a folder to store the training data.
weight: penalty weights for loss function terms, including the
boundary, initial, and integral terms
smpScheme: sampling scheme
uniform: constant uniform samples
random: randomly sample the space-time with a uniform distribution
optimal: use feedback from PDE-residual to select optimal training points
epochNum: number of training epochs
tol: stopping tolerance for objective (max objective value: 1.e6)
verbose: output training results if True
saveFreq: frequency of saving and reporting the training results
pltReplace: if True replace the training result plots, o.w., keep old plots
saveMORdata: save data corresponding to MOR argument combinations for faster training
frac: for non-uniform sampling, the fraction of training points that is drawn non-uniformly
addTrainPts: if True add optimal training points when 'smpScheme=optimal',
i.e., refine the mesh, o.w., keep the total number of training points constant
Note: multiple loops of optimal sampling only change the location of
the optimal samples and do not generate new samples indefinitely
suppFactor: if optimal training points are added to existing training
points, this factor scales the support of these new optimal test functions
i.e., each dimension is multiplied by 'suppFactor<1.0'
multiTrainUpd: training points are periodically updated if True
trainUpdelay: minimum number of epochs allowed before updating training points
tolUpd: tolerance on the relative loss improvement below which non-uniform training points are added
reinitrain: if True reinitialize the trainable variables after the addition
of new training points (otherwise optimization often gets stuck)
updateWeights: if True, periodically update the training weights to
ensure that the individual terms keep a desired balance in the objective
normalizeW: whether to normalize the weights so that the individual terms have
the contributions specified by the 'weight' argument at the start of the iterations
adjustWeight: increase weight on BICs after adding non-uniform samples
useOriginalW: if True use 'weight' directly as training weights 'trainW'
batchNum: number of batches used for training
batchLen: length of training batches (recommended: 32-512 for ADAM optimizer)
shuffleData: shuffle data before dividing them into batches
shuffleFreq: shuffle data every 'shuffleFreq' epochs
"""
# Error handling:
if uf.isnone(folderpath) or uf.isempty(folderpath):
raise ValueError('a folder path must be provided to backup the trained model!')
self.folderpath = folderpath # store folder path for later use
PDE = self.PDE
timeDependent = PDE.timeDependent
if uf.isnone(weight) and timeDependent:
weight = [1., 1., 1.]
elif uf.isnone(weight):
weight = [1., 1.]
elif not uf.isnone(weight) and timeDependent and not len(weight)==3:
raise ValueError('weight dimension does not match!')
elif not uf.isnone(weight) and not timeDependent and not len(weight)==2:
raise ValueError('weight dimension does not match!')
if not (smpScheme=='uniform' or smpScheme=='random' or smpScheme=='optimal'):
raise ValueError('sampling scheme is not valid!')
self.smpScheme = smpScheme
if uf.isnone(frac):
if addTrainPts: frac=0.50
else: frac=0.25
if not addTrainPts and np.abs(suppFactor-1.0)>1.e-15:
warnings.warn('\'suppFactor\' is set to 1.0 since the number of training points does not change!')
suppFactor = 1.0
if uf.isnone(batchNum) and uf.isnone(batchLen) and shuffleData:
warnings.warn('shuffling data is only possible for batch optimization, setting \'shuffleData\' to False!')
shuffleData = False
# Store local variables (input arguments to this function) to be written to case file:
argDict = locals()
# Load data:
modelId = self.modelId
MORvar = self.PDE.MORvar # MOR class instance for the PDE
self.fixData.setFEdata() # generate FE data corresponding to initial uniform sampling
fixData = self.fixData
integPnum = fixData.integPnum
MORbatchNum = fixData.MORbatchNum # number of MOR batches
MORbatchRange = np.arange(MORbatchNum)
tfData = self.tfData
graph = tfData.graph
saver = tfData.saver
sess = tfData.sess
# Initialize tensorflow variables:
# with graph.as_default():
# sess.run(tf.global_variables_initializer()) # initialize all variables
# Training data:
Input, InputRNN, biInput, biDof = self.trainingPoints() # first set of training points are always uniformly sampled
if uf.isnone(MORvar):
MORdiscArg = None
saveMORdata = False
else:
MORdiscScheme = self.MORdiscScheme # discretization scheme for MOR variables
MORdiscArg = MORvar.discretizeArg(MORdiscScheme) # discretize the arguments
if modelId=='RNN': Input = InputRNN
tData = ManageTrainData(Input, biInput, batchNum, batchLen, # instance of training data management class
saveMORdata, MORbatchNum)
# Construct an instance of TrainResult class to manage training results:
if not uf.isnone(fixData.cEx):
cExFlg = True
else:
cExFlg = False
trainRes = TrainResult(folderpath, cExFlg, verbose, saveFreq, pltReplace)
trainRes.initializeCase(self, argDict) # write training data to text file for later reference
self.trainRes = trainRes
# Weight determination:
trainW, tData, lossVal = self.trainWeight(weight, tData, MORdiscArg, normalizeW, useOriginalW)
# if not uf.isnone(MORvar): tData.inputUpdated = True # use already computed PDE-data if possible
trainRes.trainWeight = trainW
tData.updateDictFields('trainW', trainW) # update the weight in training data
trainRes.lossComp.append(lossVal) # store the initial loss components
# Store initial uniform input data for universal cost comparison:
tData0 = tData
if smpScheme=='optimal' and addTrainPts:
fixData0 = FIXData(self, integPnum) # create an independent fixed data object for universal cost comparison
fixData0.setFEdata() # FE data corresponding to initial uniform training points
fixData0.removeInputData() # remove uniform input data to minimize memory usage
else:
fixData0 = None
# Training variables:
min_loss = float('inf')
epoch_time = 0
tp_epoch = 1
tp_updates = 0
resVal, err, lossComp, lossVec = [None]*4
filepath2 = os.path.join(folderpath, 'best_model')
# =================================================================== #
# Training loop:
for epoch in range(1,epochNum+1): # loop over training epochs
t = time.clock()
current_loss = 0
for batch in MORbatchRange: # loop over MOR batches
tData = self.trainData(batch, MORdiscArg, tData)
current_loss += tData.optimIter(tfData)
epoch_time += time.clock()-t
if shuffleData and epoch%shuffleFreq==0:
tData.shuffleTrainData(fixData) # shuffle training points
# do not shuffle MOR batches and shuffle training points once for all MOR batches.
if epoch%saveFreq==0:
if min_loss>current_loss:
min_loss = current_loss
saver.save(sess, filepath2, global_step=epoch) # save best model so far
resVal, _, err, _ = self.residual()
# if not uf.isnone(MORvar): tData0.inputUpdated = True # use already computed PDE-data if possible
lossComp, _, lossVec = self.splitLoss(tData0, fixData0)
# Output training results:
trainRes.iterOutput(epoch, current_loss, min_loss, epoch_time,
resVal, err, lossComp, lossVec)
# Weight update:
if updateWeights and (epoch-tp_epoch)>=(trainUpdelay-1) and epoch%1000==0:
trainW, tData = self.weightUpdate(trainW, tData, MORdiscArg)
if not uf.isnone(MORvar): tData.inputUpdated = True # use already computed PDE-data if possible
tData.updateDictFields('trainW', trainW) # update the weight in training data
# Convergence:
if current_loss<tol:
string = 'Training completed!'
print(string)
trainRes.writeCase(string)
break
# Regenerate the training data:
if not smpScheme=='uniform' and (multiTrainUpd or tp_updates==0) and (epoch-tp_epoch)>=(trainUpdelay-1):
t_loss = np.array(self.trainRes.loss[-5:])
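# Sum the recent decreases in the loss; a small decrease relative to the current loss indicates convergence.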
tp_conv = t_loss[:-1]-t_loss[1:]
tp_conv = np.sum(tp_conv[tp_conv>0])
if tp_conv/t_loss[-1]<tolUpd: # if iterations have converged
min_loss = float('inf') # reset the best model loss value
tp_epoch = epoch # reset training points update epoch
tp_updates += 1 # increment training points update counts
trainRes.inpIter.append(epoch) # store the corresponding epoch
Input, _, biInput, biDof = self.trainingPoints(smpScheme, frac, addTrainPts, suppFactor)
if addTrainPts: fixData = self.fixData # update the fixed training data
tData = ManageTrainData(Input, biInput, # space-time discretization updated
batchNum, batchLen, saveMORdata, MORbatchNum)
string = '\n\n==========================================================\n'
string += 'Training points updated.\n\n'
print(string)
trainRes.writeCase(string)
if reinitrain:
string = 'trainable variables reinitialized.\n\n'
print(string)
trainRes.writeCase(string)
# Initialize tensorflow variables:
with graph.as_default():
sess.run(tf.global_variables_initializer()) # initialize all variables
# Weight determination:
if adjustWeight:
wInteg = weight[-1]
weight = [5*w for w in weight[:-1]] # add more weights on BICs
weight.append(wInteg) # append the integral weight back
trainW, tData, _ = self.trainWeight(weight, tData, MORdiscArg, normalizeW, useOriginalW)
# if not uf.isnone(MORvar): tData.inputUpdated = True # use already computed PDE-data if possible
tData.updateDictFields('trainW', trainW) # update the weight in training data
def loadModel(self, iterNum=None, folderpath=None):
"""
Function to restore the training data to a stored checkpoint.
Note: there are four types of checkpoint files:
- meta: stores the computational graph and is identical across checkpoints
- index: stores the metadata for each variable in the graph
- data: stores the values of variables at each checkpoint
- checkpoint: text file with addresses of the saver files
(should be edited to store different checkpoints)
Inputs:
iterNum: iteration number for which the checkpoint should be restored
folderpath: path to the folder that contains the checkpoints
"""
# Error handling:
if not hasattr(self, 'trainRes') and uf.isnone(folderpath):
raise ValueError('\'folderpath\' must be provided!')
elif uf.isnone(folderpath):
folderpath = self.trainRes.folderpath
else:
trainRes = TrainResult(folderpath)
trainRes.loadData()
plotpath = os.path.join(folderpath, 'plots')
if not os.path.exists(plotpath):
os.makedirs(plotpath)
trainRes.plotpath = plotpath
self.trainRes = trainRes
# Load tensorFlow variables:
tfData = self.tfData
sess = tfData.sess
graph = tfData.graph
# Function to create the relevant checkpoint file:
check_path = os.path.join(folderpath, 'checkpoint')
def checkpoint(modelid):
"""Construct the file paths and write the checkpoint file."""
modelpath = repr(os.path.join(folderpath, 'best_model-' + str(modelid)))
string = 'model_checkpoint_path: ' + modelpath
string += '\nall_model_checkpoint_paths: ' + modelpath + '\n'
with open(check_path, 'w') as myfile: myfile.write(string)
# Extract the stored iteration numbers:
if uf.isnone(iterNum):
iterNum = []
for file in os.listdir(folderpath):
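# Parse the iteration number from checkpoint filenames of the form 'best_model-<iter>.index'.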
if '.index' in file:
ind1 = file.rfind('-') + 1
ind2 = file.rfind('.')
iNum = int(file[ind1:ind2])
iterNum.append(iNum)
iterNum.sort(reverse=True)
else:
iterNum = [iterNum] # convert to list for consistency
# Load the desired model from folder:
meta_path = os.path.join(folderpath, 'best_model-' + str(iterNum[-1]) + '.meta')
errFlg = True
for iNum in iterNum:
# Check if all relevant checkpoint data are available:
filename = 'best_model-' + str(iNum)
filepath = os.path.join(folderpath, filename + '.index')
if not os.path.isfile(filepath): continue
filepath = os.path.join(folderpath, filename + '.data-00000-of-00001')
if not os.path.isfile(filepath): continue
# Restore the data:
checkpoint(iNum) # create the appropriate checkpoint data
with graph.as_default():
saver = tf.train.import_meta_graph(meta_path, clear_devices=True)
saver.restore(sess, tf.train.latest_checkpoint(folderpath))
errFlg = False
break
if errFlg: raise ValueError('no restorable checkpoint data found!')
# Save data:
tfData.saver = saver
def evaluate(self, x=None, t=None, batch=None, MORarg=None):
"""
Function to approximate the solution of the PDE via the NN.
Inputs:
x [n x dim]: vector of spatial discretizations
t [n x 1]: corresponding vector of temporal coordinates
batch [int]: index of the batch of MOR parameters
MORarg [n x (inpDim-feDim)]: corresponding vector of MOR arguments
"""
# Data:
dim = self.dim
modelId = self.modelId
PDE = self.PDE
timeDependent = PDE.timeDependent
MORvar = PDE.MORvar # MOR class instance for the PDE
fixData = self.fixData
feDim = fixData.feDim
MORbatchNum = fixData.MORbatchNum # number of MOR batches
MORdiscArg = fixData.MORdiscArg # discretization of MOR arguments
# TensorFlow model data:
tfData = self.tfData
inpDim = tfData.inpDim
# Error handling:
if uf.isnone(x):
if (timeDependent and (uf.isnone(t) or modelId=='RNN')) or not timeDependent:
dof = fixData.nt
Input = fixData.uniform_input # extract the whole input
else:
dof = fixData.dof
x = fixData.uniform_input[:dof, :dim] # only extract the spatial discretization
elif not shape(x)[1]==dim:
raise ValueError('spatial coordinates dimension does not match domain!')
else:
dof = shape(x)[0]
if timeDependent and not uf.isnone(t) and not (size(t)==1 or shape(t)[0]==dof):
raise ValueError('temporal discretization does not match spatial discretization!')
elif timeDependent and not modelId=='RNN' and not uf.isnone(t) and size(t)==1:
t = t*np.ones([dof,1])
if not uf.isnone(MORvar) and uf.isnone(batch) and uf.isnone(MORarg):
raise ValueError('batch number or argument values must be given for MOR!')
elif not uf.isnone(MORvar) and uf.isnone(batch) and not shape(MORarg)[1]==inpDim-feDim:
raise ValueError('MOR argument dimension does not match the NN input size!')
elif not uf.isnone(MORvar) and uf.isnone(batch) and not (shape(MORarg)[0]==1 or shape(MORarg)[0]==dof):
raise ValueError('MOR argument number does not match \'x\' dimension!')
elif not uf.isnone(MORvar) and not uf.isnone(batch) and uf.isnumber(batch) and batch>MORbatchNum-1:
raise ValueError('requested batch number is higher than total available batches!')
elif not uf.isnone(MORvar) and uf.isnone(batch) and shape(MORarg)[0]==1:
MORarg = np.tile(MORarg, [dof,1])
if modelId=='RNN': RNNdata = self.RNNdata
# Construct the input:
if modelId=='RNN':
Input = RNNdata.buildInput(x)
elif timeDependent and not uf.isnone(t):
Input = np.concatenate([x, t], axis=1)
elif not uf.isnone(x):
Input = x
# Add MOR arguments:
if not uf.isnone(MORvar) and uf.isnone(batch):
tData = ManageTrainData(Input, biInput=[])
InpuTot = np.hstack([Input, MORarg])
tData.updateData(InpuTot=InpuTot)
else:
tData = ManageTrainData(Input, biInput=[])
tData = self.trainData(batch, MORdiscArg, tData, resCalc=True)
# Evaluate the NN:
cApp = tData.runSession(['model'], tfData)[0]
# Extract the relevant RNN output:
if modelId=='RNN':
cApp = RNNdata.flatten(cApp) # flatten the output
tDiscIND = RNNdata.timeIndex(len(x), t) # extract relevant time indices
cApp = cApp[tDiscIND,:] # extract the corresponding values
return cApp
def residual(self, Input=None, tDiscIND=None, batch=None):
"""
Function to compute the residual of the PDE.
Inputs:
Input: input to NN (also used to compute the exact solution)
tDiscIND: determines the indices of entries to the RNN that are of
interest (the rest of the output is discarded)
batch: batch number for which the residual should be calculated
"""
# Data:
noInput = uf.isnone(Input)
dim = self.dim
modelId = self.modelId
PDE = self.PDE
timeDependent = PDE.timeDependent
d_diffFun = PDE.d_diffFun
if modelId=='RNN':
RNNdata = self.RNNdata
if not noInput:
Input = RNNdata.flatten(Input) # flatten the input for field computations
if uf.isnone(tDiscIND):
tDiscIND = np.ones(len(Input), dtype=bool)
fixData = self.fixData
hVec = fixData.hVec
elemSize = np.prod(hVec)
MORbatchNum = fixData.MORbatchNum # number of MOR batches
MORdiscArg = fixData.MORdiscArg # discretization of MOR arguments
if uf.isnumber(batch) and batch>MORbatchNum-1:
raise ValueError('requested batch number is higher than total available batches!')
elif uf.isnumber(batch):
batchRange = range(batch,batch+1)
MORbatchNum = 1
elif uf.isnone(batch):
batchRange = range(MORbatchNum)
if noInput:
Input = fixData.uniform_input
cEx = fixData.cEx
if modelId=='RNN':
Input = RNNdata.flatten(Input) # flatten the default input for field computations
tDiscIND = RNNdata.tDiscIND
elif not uf.isnone(PDE.cEx) and timeDependent:
cEx = PDE.cEx(Input[:, :dim], Input[:, dim:dim+1])
elif not uf.isnone(PDE.cEx):
cEx = PDE.cEx(Input[:, :dim])
# TensorFlow variables:
tfData = self.tfData
# Gradient of the diffusivity field (needs to be moved into trainData() function):
if noInput:
diff_dx = fixData.d_diff
elif timeDependent:
diff_dx = d_diffFun(Input[:,:dim], Input[:, dim:dim+1])
else:
diff_dx = d_diffFun(Input[:,:dim])
# Pre-process the input for the RNN:
if modelId=='RNN':
Input = RNNdata.fold(Input) # fold the input for RNN computations
# Compute the AD-PDE residual:
res = 0
err = 0
tData = ManageTrainData(Input, biInput=None)
for batch in batchRange: # loop over MOR batches
tData = self.trainData(batch, MORdiscArg, tData, resCalc=True)
(cApp, resVec) = tData.runSession(['model', 'residual'], tfData, diff_dx=diff_dx)
# Post-processing for the RNN:
if modelId=='RNN':
cApp = RNNdata.flatten(cApp)
cApp = cApp[tDiscIND,:]
cEx = cEx[tDiscIND,:]
resVec = resVec[tDiscIND,:]
if not uf.isnone(PDE.cEx):
                # err += np.sqrt(np.sum((cEx-cApp)**2)*elemSize) # integration over domain
err += uf.l2Err(cEx, cApp)
else:
err = None
            res += np.sqrt(np.sum(resVec**2)*elemSize) # integration over domain
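            # (With uniform element sizes hVec and elemSize = prod(hVec), the line above
            #  is a Riemann-sum estimate of the L2 norm of the residual over the domain.)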
# Average the values for all batches:
res = res/MORbatchNum
if not uf.isnone(PDE.cEx):
err = err/MORbatchNum
return res, resVec, err, cApp
def optTrainPoints(self, frac=0.25, addTrainPts=True, suppFactor=1.0):
"""
Function to generate the training points.
Input:
frac: fraction of points to be selected via optimal sampling
            addTrainPts: if True, add optimal training points when 'smpScheme=optimal',
                i.e., refine the mesh; otherwise, keep the total number of training points constant
            suppFactor: support scaling for optimal training points if they are
                added to already existing training points
"""
# Data:
dim = self.dim
discNum = self.discNum
bDiscNum = self.bDiscNum
PDE = self.PDE
timeDependent = PDE.timeDependent
domain = PDE.domain
fixData = self.fixData
feDim = fixData.feDim
integNum = fixData.integNum
nt = fixData.nt0
nT = fixData.nT
hVec = fixData.hVec
delta = fixData.delta
# Fraction of samples that are drawn uniformly across dimensions:
if addTrainPts:
frac2 = 1 # keep uniform samples fixed
else:
frac2 = (1-frac)**(1/feDim)
# Get the mesh for the time coordinate:
if timeDependent:
tDiscNum2 = math.ceil(frac2*self.tDiscNum)
_, t_coord = self.timeDisc(tDiscNum2)
else:
tDiscNum2 = 1
t_coord = []
# Get the mesh for the domain:
discNum2 = [math.ceil(frac2*disc2) for disc2 in discNum]
mesh = domain.getMesh(discNum2, bDiscNum)
dof = mesh.dof
coord = mesh.coordinates # mesh coordinates for inner domain
# Pair samples in space-time:
input2 = uf.pairMats(coord, t_coord)
# Generate the optimal samples using rejection sampling (see Wikipedia):
if addTrainPts: # number of optimal samples
nt1 = math.ceil(frac*nt) # add optimal samples
else:
nt1 = nt - dof*tDiscNum2 # keep total number of samples constant
if np.abs(suppFactor-1.0)<1.e-15: tolt, tole = [None]*2
else: # adjust discretization tolerance if added points have smaller support
tole = suppFactor*hVec[:dim]
if timeDependent: tolt = suppFactor*hVec[-1]
def resfun(inpuT=None):
"""Residual function to generate the samples in inner-domain according to."""
_, resVec, _, _ = self.residual(inpuT)
return np.abs(resVec)
def smpfun():
# Mesh over time coordinate:
if timeDependent:
_, t_coord = self.timeDisc(rfrac=1, sortflg=False, discTol=tolt)# random drawings over time
else: t_coord = []
# Mesh for the inner-domain:
mesh = domain.getMesh(discNum, bDiscNum, rfrac=1, sortflg=False, # random drawings over space
discTol=tole)
coord = mesh.coordinates # mesh coordinates for inner domain
return uf.pairMats(coord, t_coord) # pair samples in space-time
input1 = uf.rejectionSampling(resfun, smpfun, nt1) # rejection sampling algorithm
# Update total number of discretization points:
if addTrainPts:
nt = nt1 + nt # new total number of samples
nT = nt*integNum
# Put uniform and optimal samples together and sort them according to time:
# (do not sort if the optimal training points have different support size)
inpuT = np.vstack([input1, input2])
coord = inpuT[:, :dim]
if timeDependent and np.abs(suppFactor-1.0)<1.e-15:
t_coord = inpuT[:, dim:dim+1]
ind = np.argsort(t_coord, axis=0)
ind = reshape(ind, nt)
coord = coord[ind]
t_coord = t_coord[ind]
elif timeDependent:
t_coord = inpuT[:, dim:dim+1]
# Initial and boundary training data:
biInput, biDof, biInput1, biInput2 = self.optBiTrainPoints(frac, addTrainPts)
# Plot samples:
if dim==1 or (dim==2 and not timeDependent):
def resFun(x,t=None):
"""Residual plot function handle."""
if timeDependent:
if size(t)==1: t = t*np.ones([len(x),1])
Input = np.hstack([x,t])
else:
Input = x
_, resVec, _, _ = self.residual(Input)
return np.abs(resVec)
# Contour plot:
contPlot = ContourPlot(domain, PDE.tInterval)
contPlot.conPlot(resFun)
# Save the plot:
plotpath = self.trainRes.plotpath
epoch = self.trainRes.inpIter[-1]
filename = '{}'.format(epoch) + '_resField'
filepath = os.path.join(plotpath, filename + '.png') # image of the plot
plt.savefig(filepath, dpi=300)
filepath = os.path.join(plotpath, filename + '.eps') # eps file
plt.savefig(filepath, dpi=300)
# Add training points:
plt.plot(input2[:,0], input2[:,1], 'y.', markersize=1)
plt.plot(input1[:,0], input1[:,1], 'w.', markersize=4) # optimal points
plt.plot(biInput2[:,0], biInput2[:,1], 'y.', markersize=1)
plt.plot(biInput1[:,0], biInput1[:,1], 'w.', markersize=4) # optimal boundary points
# Save the plot:
filename = '{}'.format(epoch) + '_optimalSmp'
filepath = os.path.join(plotpath, filename + '.png') # image of the plot
plt.savefig(filepath, dpi=300)
filepath = os.path.join(plotpath, filename + '.eps') # eps file
plt.savefig(filepath, dpi=300)
plt.show()
# Integration points:
Coord = np.zeros([nT,dim]) # store spatial coordinates of integration points
he = hVec[:dim] # element sizes
if np.abs(suppFactor-1.0)<1.e-15:
suppScale = 1.0
else:
suppScale = np.ones([nt,1])
suppScale[:nt1,:] = suppFactor*suppScale[:nt1,:]
for d in range(dim): # add integration points to spatial coordinates of each training point
coordTmp = reshape(coord[:,d], [nt, 1]) + he[d]*delta[d,:]*suppScale
Coord[:,d] = np.reshape(coordTmp, nT) # re-arrange into a column
if timeDependent: # add integration points to time-coordinate of each node
ht = hVec[-1]
tCoord = t_coord + ht*delta[-1,:]*suppScale
tCoord = np.reshape(tCoord, [nT, 1]) # re-arrange into a column
Input = np.concatenate([Coord, tCoord], axis=1)
else:
Input = Coord
# Update the fixed data if the number of training points has changed:
if addTrainPts:
self.fixData.updateOptimData(frac, suppFactor)
InputRNN = [] # redundant output returned for global compatibility
return Input, InputRNN, biInput, biDof
def optBiTrainPoints(self, frac=0.25, addTrainPts=True):
"""
Function to generate optimal boundary-initial training points.
Input:
            frac: fraction of points to be selected via optimal sampling
            addTrainPts: if True, add optimal boundary-initial training points;
                otherwise keep the total number of points constant
        """
# Data:
dim = self.dim
discNum = self.discNum
bDiscNum = self.bDiscNum
PDE = self.PDE
timeDependent = PDE.timeDependent
domain = PDE.domain
fixData = self.fixData
feBiDim = fixData.feDim-1 # dimension of boundary-initial conditions (one less than space-time)
biDof = fixData.biDof0
uniform_biInput = fixData.uniform_biInput
tfData = self.tfData
model = tfData.model
Input_tf = tfData.compTowers[0].Input
sess = tfData.sess
# Fraction of samples that are drawn uniformly across dimensions:
if addTrainPts:
frac2 = 1 # keep uniform samples fixed
else:
frac2 = (1-frac)**(1/feBiDim)
# Get the mesh for the time coordinate:
if timeDependent:
tDiscNum2 = math.ceil(frac2*self.tDiscNum)
_, t_coord = self.timeDisc(tDiscNum2)
else:
t_coord = []
# Get the mesh for the domain:
discNum2 = [math.ceil(frac2*disc2) for disc2 in discNum]
bDiscNum2 = math.ceil(frac2*bDiscNum)
mesh = domain.getMesh(discNum2, bDiscNum2)
# Initial and boundary training data:
biInput2, biDof2 = self.biTrainPoints(mesh, t_coord)
# Generate the optimal samples using rejection sampling (see Wikipedia):
if addTrainPts: # number of optimal samples
biDof1 = [math.ceil(frac*bidof1) for bidof1 in biDof] # add optimal samples
else:
biDof1 = np.array(biDof) - np.array(biDof2) # keep total number of samples constant
def resfun(biInpuT=None):
"""Least-square function to generate the samples for boundary-initial conditions."""
if uf.isnone(biInpuT): biInpuT = uniform_biInput
val = sess.run(model(Input_tf), {Input_tf: biInpuT})
biLab = self.biTrainData(biInpuT, biDof)
return (val-biLab)**2
def smpfun():
if timeDependent:
_, t_coord = self.timeDisc(rfrac=1, sortflg=False) # random drawings over time
else: t_coord = []
mesh = domain.getMesh(discNum, bDiscNum, rfrac=1, sortflg=False) # random drawings over space
biInput, _ = self.biTrainPoints(mesh, t_coord)
return biInput
biInput1 = uf.rejectionSampling(resfun, smpfun, biDof1, biDof) # rejection sampling algorithm
        # Update total number of discretization points on each boundary:
if addTrainPts:
biDofNew = np.array(biDof1) + np.array(biDof2) # new total samples: biDof1 + biDof2
else:
biDofNew = biDof
# Put uniform and optimal samples together and sort them according to time:
biInput1new = uf.listSegment(biInput1, biDof1)
biInput2new = uf.listSegment(biInput2, biDof2)
biInput = []
for i in range(len(biDof1)):
biInput.append( uf.vstack([biInput1new[i], biInput2new[i]]) )
if timeDependent:
coord = biInput[i][:, :dim]
t_coord = biInput[i][:, dim:dim+1]
ind = np.argsort(t_coord, axis=0)
ind = reshape(ind, biDofNew[i])
coord = coord[ind]
t_coord = t_coord[ind]
biInput[i] = uf.hstack([coord, t_coord])
biInput = uf.vstack(biInput)
return biInput, biDofNew, biInput1, biInput2
def simRes(self, batch=None, tcoord=None, plotpath=None, pltFrmt='png'):
"""
Function to plot the simulation results.
Inputs:
batch [int]: index of the batch of MOR parameters
tcoord [list]: time values for which the plots are generated
plotpath: path to store the plots
pltFrmt: plot format
Note: this function needs to be extended for more general cases!
"""
# Error handling:
if not hasattr(self, 'trainRes') and uf.isnone(plotpath):
raise ValueError('\'plotpath\' must be provided!')
elif uf.isnone(plotpath):
plotpath = self.trainRes.plotpath
if not (pltFrmt=='png' or pltFrmt=='jpg' or pltFrmt=='pdf' or pltFrmt=='eps'):
raise ValueError('invalid plot format!')
elif uf.isnone(batch):
pltFrmt = '.' + pltFrmt
else:
pltFrmt = '-b=' + str(int(batch)) + '.' + pltFrmt
# Data:
dim = self.dim
modelId = self.modelId
PDE = self.PDE
timeDependent = PDE.timeDependent
tInterval = PDE.tInterval
cExact = PDE.cEx
domain = PDE.domain
if modelId=='RNN': RNNdata = self.RNNdata
        # Time snapshots for plotting:
if timeDependent and uf.isnone(tcoord):
tcoord = np.linspace(tInterval[0], tInterval[1], num=5)
elif not timeDependent:
tcoord = [0.]
# Contour plot class:
contPlot = ContourPlot(domain, tInterval)
# Function handles:
cAppFun = lambda x, t=None: self.evaluate(x, t, batch)
def resFun(x, t=None):
"""Residual plot function handle."""
if modelId=='RNN':
Input = RNNdata.buildInput(x)
tDiscIND = RNNdata.timeIndex(len(x), t)
elif timeDependent:
Input = np.concatenate([x, t*np.ones([len(x),1])], axis=1)
tDiscIND = None
else:
Input = x
tDiscIND = None
_, resVec, _, _ = self.residual(Input, tDiscIND, batch)
return resVec
# Exact solution and the corresponding error function:
if not uf.isnone(cExact):
if timeDependent:
cExFun = lambda x,t: cExact(x, t*np.ones([len(x),1]))
else:
cExFun = lambda x,t=None: cExact(x)
cErrFun = lambda x,t=None: cExFun(x,t)-cAppFun(x,t)
# Loss field:
if hasattr(self, 'trainRes') and not uf.isnone(self.trainRes.lossVec):
if uf.isnone(batch): lossVec = self.trainRes.lossVec[0]
else: lossVec = self.trainRes.lossVec[batch]
uniform_input = self.fixData.uniform_input
lossField = interpolate.LinearNDInterpolator(uniform_input, lossVec, fill_value=0.0)
def lossFun(x, t=None):
"""Loss plot function handle."""
if timeDependent:
Input = uf.hstack([x, t*np.ones([len(x),1])])
else:
Input = x
return lossField(Input)
else: lossFun = None
title = None # default title for plots
if dim==1:
# Plot snapshots of the exact and approximate solutions:
if uf.isnone(cExact):
Legend = []
plt.figure(1)
if pltFrmt=='.png': title = 'approximate solution'
for t in tcoord:
contPlot.snap1Dt(cAppFun, t, figNum=1, title=title)
Legend.append('t={0:.2f}s'.format(t))
plt.ylabel('solution')
plt.legend(Legend)
filename = 'cApp' + pltFrmt
filepath = os.path.join(plotpath, filename)
plt.savefig(filepath, dpi=300)
plt.show()
else:
for t in tcoord:
plt.figure(1)
contPlot.snap1Dt(cExFun, t, figNum=1, lineOpt='b')
if pltFrmt=='.png': title = 't={0:.2f}s'.format(t)
contPlot.snap1Dt(cAppFun, t, figNum=1, lineOpt='r', title=title)
plt.ylabel('solution')
plt.legend(['exact solution', 'approximate solution'])
filename = 'cApp-' + 't={0:.2f}s'.format(t) + pltFrmt
filepath = os.path.join(plotpath, filename)
plt.savefig(filepath, dpi=300)
plt.show()
# Plot snapshots of the solution error:
if not uf.isnone(cExact):
Legend = []
plt.figure(1)
if pltFrmt=='.png': title = 'solution error vs time'
for t in tcoord:
contPlot.snap1Dt(cErrFun, t, figNum=1, title=title)
Legend.append('t={0:.2f}s'.format(t))
plt.ylabel('error')
plt.legend(Legend)
filename = 'cErr' + pltFrmt
filepath = os.path.join(plotpath, filename)
plt.savefig(filepath, dpi=300)
plt.show()
# Plot snapshots of the residual:
Legend = []
plt.figure(1)
if pltFrmt=='.png': title = 'residual vs time'
for t in tcoord:
contPlot.snap1Dt(resFun, t, figNum=1, title=title)
Legend.append('t={0:.2f}s'.format(t))
plt.ylabel('residual')
plt.legend(Legend)
filename = 'residual' + pltFrmt
filepath = os.path.join(plotpath, filename)
plt.savefig(filepath, dpi=300)
plt.show()
# Plot snapshots of the loss function for training points:
if not uf.isnone(lossFun):
Legend = []
plt.figure(1)
if pltFrmt=='.png': title = 'loss field vs time'
for t in tcoord:
contPlot.snap1Dt(lossFun, t, figNum=1, title=title)
Legend.append('t={0:.2f}s'.format(t))
plt.ylabel('loss')
plt.legend(Legend)
filename = 'lossField' + pltFrmt
filepath = os.path.join(plotpath, filename)
plt.savefig(filepath, dpi=300)
plt.show()
elif dim==2:
# Plot snapshots of the exact and approximate solutions and the residual:
for t in tcoord:
if pltFrmt=='.png': title = 'approximate solution - t={0:.2f}s'.format(t)
cApp1 = contPlot.conPlot(cAppFun, t, title=title)
filename = 'cApp-' + 't={0:.2f}s'.format(t) + pltFrmt
filepath = os.path.join(plotpath, filename)
plt.savefig(filepath, dpi=300)
plt.show()
if not uf.isnone(cExact):
if pltFrmt=='.png': title = 'exact solution - t={0:.2f}s'.format(t)
cEx1 = contPlot.conPlot(cExFun, t, title=title)
filename = 'cEx-' + 't={0:.2f}s'.format(t) + pltFrmt
filepath = os.path.join(plotpath, filename)
plt.savefig(filepath, dpi=300)
plt.show()
if pltFrmt=='.png': title = 'error field - t={0:.2f}s'.format(t)
contPlot.conPlot(cErrFun, t, title=title)
filename = 'cErr-' + 't={0:.2f}s'.format(t) + pltFrmt
filepath = os.path.join(plotpath, filename)
plt.savefig(filepath, dpi=300)
plt.show()
if pltFrmt=='.png': title = 'residual - t={0:.2f}s'.format(t)
contPlot.conPlot(resFun, t, figNum=1, title=title)
filename = 'res-' + 't={0:.2f}s'.format(t) + pltFrmt
filepath = os.path.join(plotpath, filename)
plt.savefig(filepath, dpi=300)
plt.show()
# Plot snapshots of the loss function for training points:
if not uf.isnone(lossFun):
if pltFrmt=='.png': title = 'loss field - t={0:.2f}s'.format(t)
contPlot.conPlot(lossFun, t, title=title)
filename = 'lossField-' + 't={0:.2f}s'.format(t) + pltFrmt
filepath = os.path.join(plotpath, filename)
plt.savefig(filepath, dpi=300)
plt.show()
if not uf.isnone(cExact):
print('\napproximation error for t=%.2fs: %2.5f' % (t, uf.l2Err(cEx1, cApp1)) )
def saveNNparam(self, dpOut=False, matOut=False, verbose=True, timeFirst=False):
"""
        Function to save the NN parameters to numpy arrays and, upon request, output
        them in .mat format to be loaded into MATLAB.
        The weight matrices are arranged such that, when multiplied with a column
        input vector, they give the output. The biases are given as column
        vectors: o = W*i + b
Inputs:
dpOut: if True output matrices in diffPack readable '.m' format
matOut: if True output matrices in MATLAB '.mat' format
verbose: output text to screen if True
timeFirst: if True adjust the weight matrix of the first layer
so that temporal input is placed before spatial input
output: list of lists that contain the weight and bias of each layer
"""
# Data:
tfData = self.tfData
depth = tfData.depth
sess = tfData.sess
graph = tfData.graph
with graph.as_default():
trVar = tf.trainable_variables() # load trainable variables
PDE = self.PDE
if not PDE.timeDependent: timeFirst = False
else: dim = PDE.domain.dim
# Path to store the variables:
if dpOut or matOut:
folderpath = self.trainRes.folderpath
folderpath = os.path.join(folderpath, 'NN_parameters')
if not os.path.exists(folderpath):
os.makedirs(folderpath)
if len(trVar)%(2*(depth+1)): # '1' accounts for last non-activated layer
            # (every time the model is loaded, new copies of the variables are added)
print(len(trVar))
print(depth)
            raise ValueError('number of weights and biases does not match the number of layers!')
else:
trVar = trVar[:2*(depth+1)]
layers = []
lnum = 0
for i in np.arange(0,len(trVar),2):
if verbose: print('Layer ' + str(lnum) + ':')
lnum += 1
layer = []
if verbose: print('\tsaving weight matrix: ' + trVar[i].name)
W = sess.run(trVar[i]).T
if i==0 and timeFirst: # switch columns corresponding to spatial and temporal inputs
Wtmp = np.copy(W)
W[:,0] = Wtmp[:,dim]
W[:,1:dim+1] = Wtmp[:,:dim]
layer.append(W)
if verbose: print('\tsaving bias vector: ' + trVar[i+1].name + '\n')
b = sess.run(trVar[i+1])[np.newaxis].T
layer.append(b)
layers.append(layer)
if dpOut:
fieldname = 'W' + str(lnum)
filepath = os.path.join(folderpath, fieldname + '.m')
uf.mat2diffpack(filepath, fieldname, W)
fieldname = 'B' + str(lnum)
filepath = os.path.join(folderpath, fieldname + '.m')
uf.mat2diffpack(filepath, fieldname, b)
if matOut:
filename = 'W' + str(lnum)
filepath = os.path.join(folderpath, filename + '.mat')
spio.savemat(filepath, {filename: W})
filename = 'B' + str(lnum)
filepath = os.path.join(folderpath, filename + '.mat')
spio.savemat(filepath, {filename: b})
return layers
|
<gh_stars>0
"""
Copyright 2018 <NAME>
The University of California, Berkeley
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
import os
import json
# Path to the LED position definitions (JSON), resolved relative to this module
led_position_json_filename = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "resources/led_positions.json"
)
# Load the LED position dictionary
with open(led_position_json_filename) as f:
_led_positions_dict = json.load(f)
def get_available_led_arrays():
"""Get list of available LED arrays."""
return tuple(_led_positions_dict.keys())
def get_positions_na(device_name):
"""Get positions in the format [na_x, na_y]."""
if device_name in _led_positions_dict:
led_positons = [
(pos["x"], pos["y"], pos["z"]) for pos in _led_positions_dict[device_name]
]
return cartToNa(led_positons)
else:
raise ValueError("%s is not a valid device name")
def get_positions_cart(device_name):
"""Get positions in the format [x, y, z]."""
if device_name in _led_positions_dict:
return [
(pos["x"], pos["y"], pos["z"]) for pos in _led_positions_dict[device_name]
]
else:
raise ValueError("%s is not a valid device name")
def get_board_indicies(device_name):
"""Get positions in the format [board_index]."""
if device_name in _led_positions_dict:
return [pos["board_index"] for pos in _led_positions_dict[device_name]]
else:
raise ValueError("%s is not a valid device name")
def get_positions(device_name):
"""Get positions in the format [index, x, y, z, board_index]."""
if device_name in _led_positions_dict:
return [
(pos["index"], pos["x"], pos["y"], pos["z"], pos["board_index"])
for pos in _led_positions_dict[device_name]
]
else:
raise ValueError("%s is not a valid device name")
def cart_to_na(point_list_cart, z_offset=0):
"""Function which converts a list of cartesian points to numerical aperture (NA)
    Args:
        point_list_cart: List of (x, y, z) positions relative to the sample (origin)
        z_offset: Optional offset of the LED array in z, in mm
    Returns:
        A 2D numpy array where the first dimension is the number of LEDs loaded and the second is (NA_x, NA_y)
"""
point_list_cart = (
np.asarray(point_list_cart)
if np.ndim(point_list_cart) == 2
else np.asarray([point_list_cart])
)
yz = np.sqrt(point_list_cart[:, 1] ** 2 + (point_list_cart[:, 2] + z_offset) ** 2)
xz = np.sqrt(point_list_cart[:, 0] ** 2 + (point_list_cart[:, 2] + z_offset) ** 2)
result = np.zeros((np.size(point_list_cart, 0), 2))
result[:, 0] = np.sin(np.arctan(point_list_cart[:, 0] / yz))
result[:, 1] = np.sin(np.arctan(point_list_cart[:, 1] / xz))
return result
def reloadLedPositionsFile():
"""Reload the LED positions .json file from the disk."""
global _led_positions_dict
with open(led_position_json_filename) as f:
_led_positions_dict = json.load(f)
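# Minimal usage sketch (illustrative only; the Cartesian points below are made-up
# values and the available device names depend on resources/led_positions.json):
if __name__ == "__main__":
    print("Available LED arrays:", get_available_led_arrays())
    # Three hypothetical LEDs located 50 mm above the sample:
    demo_points = [(0.0, 0.0, 50.0), (5.0, 0.0, 50.0), (0.0, 5.0, 50.0)]
    print("NA coordinates:\n", cart_to_na(demo_points))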
|
<gh_stars>1-10
import json
import math
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
import numpy
from pyproj import Proj
from shapely.geometry.point import Point
from ncdjango.exceptions import ConfigurationError
from ncdjango.utils import project_geometry
from ncdjango.views import ServiceView, NetCdfDatasetMixin
from .classify import jenks, quantile, equal
from .forms import PointForm
MAX_UNIQUE_VALUES = getattr(settings, 'NC_MAX_UNIQUE_VALUES', 100)
CLASSIFY_METHODS = {
'jenks': jenks,
'quantile': quantile,
'equal': equal
}
class DataViewBase(NetCdfDatasetMixin, ServiceView):
def get_service_name(self, request, *args, **kwargs):
return kwargs['service_name']
def get_variable(self):
return get_object_or_404(self.service.variable_set.all(), name=self.kwargs.get('variable_name'))
class RangeView(DataViewBase):
"""Returns value ranges for a variable in a service"""
def handle_request(self, request, **kwargs):
variable = self.get_variable()
dataset = self.open_dataset(self.service)
try:
variable_data = dataset.variables[variable.variable][:]
min_value = float(numpy.min(variable_data))
max_value = float(numpy.max(variable_data))
data = {
'min': int(min_value) if min_value.is_integer() else min_value,
'max': int(max_value) if max_value.is_integer() else max_value
}
return HttpResponse(json.dumps(data), content_type='application/json')
finally:
self.close_dataset()
class ClassifyView(DataViewBase):
"""Generates classbreaks for a variable in a service"""
def handle_request(self, request, **kwargs):
if kwargs.get('method', '').lower() in ('jenks', 'quantile', 'equal'):
method = kwargs['method'].lower()
else:
raise ConfigurationError('Invalid method')
try:
num_breaks = int(kwargs.get('breaks'))
except (ValueError, TypeError):
raise ConfigurationError('Invalid number of breaks')
variable = self.get_variable()
dataset = self.open_dataset(self.service)
try:
variable_data = dataset.variables[variable.variable][:].ravel()
min_value = float(numpy.min(variable_data))
classes = CLASSIFY_METHODS[method](variable_data, num_breaks)
classes = [int(x) if float(x).is_integer() else x for x in classes]
data = {
'breaks': classes,
'min': int(min_value) if min_value.is_integer() else min_value
}
return HttpResponse(json.dumps(data), content_type='application/json')
finally:
self.close_dataset()
class UniqueValuesView(DataViewBase):
"""Returns unique values for a variable"""
def handle_request(self, request, **kwargs):
variable = self.get_variable()
dataset = self.open_dataset(self.service)
try:
unique_data = numpy.unique(dataset.variables[variable.variable][:])
data = {
'num_values': len(unique_data)
}
if len(unique_data) > MAX_UNIQUE_VALUES:
unique_data = unique_data[:MAX_UNIQUE_VALUES]
data['values'] = [
x for x in
(int(x) if float(x).is_integer() else float(x) for x in unique_data)
if not math.isnan(x)
]
return HttpResponse(json.dumps(data), content_type='application/json')
finally:
self.close_dataset()
class ValuesAtPointView(DataViewBase):
"""Returns all values (through time) at a given point"""
form_class = PointForm
def handle_request(self, request, **kwargs):
variable = self.get_variable()
form_params = {'projection': Proj(str(variable.projection))}
form_params.update(kwargs)
form = self.form_class(form_params)
if form.is_valid():
form_data = form.cleaned_data
else:
raise ConfigurationError
point = project_geometry(
Point(form_data['x'], form_data['y']), form_data['projection'], Proj(str(variable.projection))
)
data = {'values': []}
dataset = self.open_dataset(self.service)
try:
dataset_variable = dataset.variables[variable.variable]
dimensions = dataset_variable.dimensions
shape = [dimensions.index(variable.y_dimension), dimensions.index(variable.x_dimension)]
if variable.time_dimension:
shape.append(dimensions.index(variable.time_dimension))
skip_dimensions = 0
for i, dimension in enumerate(dimensions):
if dimension not in (variable.y_dimension, variable.x_dimension, variable.time_dimension):
shape.insert(0, i)
skip_dimensions += 1
variable_data = dataset.variables[variable.variable][:].transpose(*shape)
for __ in range(skip_dimensions):
variable_data = variable_data[0]
cell_size = (
float(variable.full_extent.width) / variable_data.shape[1],
float(variable.full_extent.height) / variable_data.shape[0]
)
cell_index = [
int(float(point.x - variable.full_extent.xmin) / cell_size[0]),
int(float(point.y - variable.full_extent.ymin) / cell_size[1])
]
if not self.is_y_increasing(variable):
cell_index[1] = variable_data.shape[0] - cell_index[1] - 1
if variable_data.shape[1] > cell_index[0] >= 0 and variable_data.shape[0] > cell_index[1] >= 0:
variable_data = variable_data[cell_index[1], cell_index[0]]
data['values'] = [
None if math.isnan(x) else x for x in
(int(x) if float(x).is_integer() else float(x) for x in variable_data)
]
return HttpResponse(json.dumps(data), content_type='application/json')
finally:
self.close_dataset()
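# Standalone sketch (made-up extent, grid shape, and query point) of the
# point-to-cell-index mapping used by ValuesAtPointView above:
if __name__ == "__main__":
    xmin, ymin, width, height = 0.0, 0.0, 100.0, 50.0  # hypothetical full extent
    rows, cols = 25, 50                                 # hypothetical (y, x) grid shape
    x, y = 42.3, 17.8                                   # hypothetical query point
    cell_size = (width / cols, height / rows)
    cell_index = [int((x - xmin) / cell_size[0]), int((y - ymin) / cell_size[1])]
    y_increasing = False
    if not y_increasing:  # mirror the row index when the y axis decreases
        cell_index[1] = rows - cell_index[1] - 1
    print('cell (col, row):', cell_index)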
|
<filename>py/moma/effectors/cartesian_6d_velocity_effector.py
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cartesian 6D velocity (linear and angular) effector."""
import dataclasses
from typing import Optional, Sequence, Tuple
from absl import logging
from dm_control import mjcf
from dm_control import mujoco
from dm_control.mujoco.wrapper.mjbindings.enums import mjtJoint
from dm_env import specs
from dm_robotics.controllers import cartesian_6d_to_joint_velocity_mapper
from dm_robotics.geometry import geometry
from dm_robotics.geometry import mujoco_physics
from dm_robotics.moma import effector
from dm_robotics.moma.effectors import constrained_actions_effectors
import numpy as np
_MjcfElement = mjcf.element._ElementImpl # pylint: disable=protected-access
_CartesianVelocityMapper = (cartesian_6d_to_joint_velocity_mapper.Mapper)
_CartesianVelocityMapperParams = (
cartesian_6d_to_joint_velocity_mapper.Parameters)
def _get_joint_ids(mj_model: mujoco.wrapper.MjModel,
joints: Sequence[_MjcfElement]):
"""Returns the (unsorted) IDs for a list of joints. Joints must be 1 DoF."""
joint_ids = []
for joint in joints:
joint_id = mj_model.name2id(joint.full_identifier, 'joint')
joint_type = mj_model.jnt_type[joint_id]
if not (joint_type == mjtJoint.mjJNT_HINGE or
joint_type == mjtJoint.mjJNT_SLIDE):
raise ValueError(
'Only 1 DoF joints are supported at the moment. Joint with name '
f'[{joint.full_identifier}] is not a 1 DoF joint.')
joint_ids.append(joint_id)
return joint_ids
def _get_element_type(element: _MjcfElement):
"""Returns the MuJoCo enum corresponding to the element type."""
if element.tag == 'body':
return mujoco.wrapper.mjbindings.enums.mjtObj.mjOBJ_BODY
elif element.tag == 'geom':
return mujoco.wrapper.mjbindings.enums.mjtObj.mjOBJ_GEOM
elif element.tag == 'site':
return mujoco.wrapper.mjbindings.enums.mjtObj.mjOBJ_SITE
else:
raise ValueError('Element must be a MuJoCo body, geom, or site. Got '
f'[{element.tag}].')
def _scale_cartesian_6d_velocity(cartesian_6d_vel: np.ndarray,
max_lin_vel: float, max_rot_vel: float):
"""Scales down the linear and angular magnitudes of the cartesian_6d_vel."""
lin_vel = cartesian_6d_vel[:3]
rot_vel = cartesian_6d_vel[3:]
lin_vel_norm = np.linalg.norm(lin_vel)
rot_vel_norm = np.linalg.norm(rot_vel)
if lin_vel_norm > max_lin_vel:
lin_vel = lin_vel * max_lin_vel / lin_vel_norm
if rot_vel_norm > max_rot_vel:
rot_vel = rot_vel * max_rot_vel / rot_vel_norm
return np.concatenate((lin_vel, rot_vel))
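# Worked example (illustrative numbers): for cartesian_6d_vel = [0.6, 0.8, 0, 0.3, 0.4, 0]
# with max_lin_vel=0.5 and max_rot_vel=1.0, the linear norm is 1.0 > 0.5, so the linear
# part is rescaled to [0.3, 0.4, 0]; the rotational norm is 0.5 <= 1.0 and is unchanged.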
@dataclasses.dataclass
class ModelParams:
"""Helper class for the model parameters of Cartesian6dVelocityEffector.
Attributes:
element: the `mjcf.Element` being controlled. Cartesian velocity commands
are expressed about the element's origin in the world orientation, unless
overridden by `control_frame`. Only elements with tags `body`, `geom`, and
`site` are supported.
joints: sequence of `mjcf.Element` joint entities of the joints being
controlled. Every element must correspond to a valid MuJoCo joint in
`mjcf_model`. Only 1 DoF joints are supported. Velocity limits,
acceleration limits, and nullspace references must be in the same order as
this sequence.
    control_frame: `geometry.Frame` in which to interpret the Cartesian 6D
velocity command. If `None`, assumes that the control command is expressed
about the element's origin in the world orientation. Note that this is
different than expressing the Cartesian 6D velocity about the element's
own origin and orientation.
"""
element: _MjcfElement
joints: Sequence[_MjcfElement]
control_frame: Optional[geometry.Frame] = None
def set_qp_params(self, mjcf_model: mjcf.RootElement,
qp_params: _CartesianVelocityMapperParams):
xml_string = mjcf_model.to_xml_string()
assets = mjcf_model.get_assets()
qp_params.model = mujoco.wrapper.MjModel.from_xml_string(
xml_string, assets=assets)
qp_params.joint_ids = _get_joint_ids(qp_params.model, self.joints)
qp_params.object_type = _get_element_type(self.element)
qp_params.object_name = self.element.full_identifier
@dataclasses.dataclass
class ControlParams:
"""Helper class for the control parameters of Cartesian6dVelocityEffector.
Attributes:
control_timestep_seconds: expected amount of time that the computed joint
velocities will be held by the effector. If unsure, higher values are more
conservative.
max_lin_vel: (optional) linear velocity maximum magnitude.
max_rot_vel: (optional) rotational velocity maximum magnitude.
enable_joint_position_limits: (optional) whether to enable active joint
limit avoidance. Joint limits are deduced from the mjcf_model passed to
the after_compose function.
joint_position_limit_velocity_scale: (optional) value (0,1] that defines how
fast each joint is allowed to move towards the joint limits in each
iteration. Values lower than 1 are safer but may make the joints move
slowly. 0.95 is usually enough since it is not affected by Jacobian
linearization. Ignored if `enable_joint_position_limits` is false.
minimum_distance_from_joint_position_limit: (optional) offset in meters
(slide joints) or radians (hinge joints) to be added to the limits.
Positive values decrease the range of motion, negative values increase it
(i.e. negative values allow penetration). Ignored if
`enable_joint_position_limits` is false.
joint_velocity_limits: (optional) array of maximum allowed magnitudes of
joint velocities for each joint, in m/s (slide joints) or rad/s (hinge
joints). Must be ordered according to the `joints` parameter passed to the
Cartesian6dVelocityEffector during construction. If not specified, joint
velocity magnitudes will not be limited. Tune this if you see the robot
trace non-linear Cartesian paths for a constant Cartesian velocity
command.
joint_acceleration_limits: (optional) array of maximum allowed magnitudes of
joint acceleration for each controllable joint, in m/s^2 (slide joints) or
rad/s^2 (hinge joints). Must be ordered according to the `joints`
parameter passed to the Cartesian6dVelocityEffector during construction.
If limits are specified, the user must ensure that the `physics` object
used by the Cartesian6dVelocityEffector has accurate joint velocity
information at every timestep. If None, the joint acceleration will not be
limited. Note that collision avoidance and joint position limits, if
enabled, take precedence over these limits. This means that the joint
acceleration limits may be violated if it is necessary to come to an
immediate full-stop in order to avoid collisions.
regularization_weight: (optional) scalar regularizer for damping the
Jacobian solver.
nullspace_joint_position_reference: preferred joint positions, if
unspecified then the mid point of the joint ranges is used. Must be
ordered according to the `joints` parameter passed to the
Cartesian6dVelocityEffector during construction.
nullspace_gain: (optional) a gain (0, 1] for the secondary control
objective. Scaled by a factor of `1/control_timestep_seconds` internally.
Nullspace control will be disabled if the gain is None.
max_cartesian_velocity_control_iterations: maximum number of iterations that
the internal LSQP solver is allowed to spend on the Cartesian velocity
optimization problem (first hierarchy). If the internal solver is unable
to find a feasible solution to the first hierarchy (i.e. without
nullspace) within the specified number of iterations, it will set the
joint effector command to zero.
max_nullspace_control_iterations: maximum number of iterations that the
internal LSQP solver is allowed to spend on the nullspace optimization
problem (second hierarchy). If the internal solver is unable to find a
feasible solution to the second hierarchy within the specified number of
iterations, it will set the joint effector command to the solution of the
first hierarchy. Ignored if nullspace control is disabled.
"""
control_timestep_seconds: float
max_lin_vel: float = 0.5
max_rot_vel: float = 0.5
enable_joint_position_limits: bool = True
joint_position_limit_velocity_scale: float = 0.95
minimum_distance_from_joint_position_limit: float = 0.01
joint_velocity_limits: Optional[np.ndarray] = None
joint_acceleration_limits: Optional[np.ndarray] = None
regularization_weight: float = 0.01
nullspace_joint_position_reference: Optional[np.ndarray] = None
nullspace_gain: Optional[float] = 0.025
max_cartesian_velocity_control_iterations: int = 300
max_nullspace_control_iterations: int = 300
def set_qp_params(self, qp_params: _CartesianVelocityMapperParams):
"""Configures `qp_params` with the ControlParams fields.
Args:
qp_params: QP parameters structure on which to set the parameters. The
`model` and `joint_ids` must have been set.
"""
joint_argsort = np.argsort(qp_params.joint_ids)
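    # Example (illustrative): if qp_params.joint_ids == [7, 2, 5], then
    # joint_argsort == [1, 2, 0], so limits supplied in the order of `joints`
    # are re-emitted below in ascending joint-ID order, i.e. for IDs [2, 5, 7].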
qp_params.integration_timestep = self.control_timestep_seconds
# Set joint limit avoidance if enabled.
if self.enable_joint_position_limits:
qp_params.enable_joint_position_limits = True
qp_params.joint_position_limit_velocity_scale = (
self.joint_position_limit_velocity_scale)
qp_params.minimum_distance_from_joint_position_limit = (
self.minimum_distance_from_joint_position_limit)
else:
qp_params.enable_joint_position_limits = False
# Set velocity limits, if enabled.
# Note that we have to pass them in joint-ID ascending order to the mapper.
if self.joint_velocity_limits is not None:
qp_params.enable_joint_velocity_limits = True
qp_params.joint_velocity_magnitude_limits = (
self.joint_velocity_limits[joint_argsort].tolist())
else:
qp_params.enable_joint_velocity_limits = False
# Set acceleration limits, if enabled.
# Note that we have to pass them in joint-ID ascending order to the mapper.
if self.joint_acceleration_limits is not None:
qp_params.enable_joint_acceleration_limits = True
qp_params.remove_joint_acceleration_limits_if_in_conflict = True
qp_params.joint_acceleration_magnitude_limits = (
self.joint_acceleration_limits[joint_argsort].tolist())
else:
qp_params.enable_joint_acceleration_limits = False
# We always check the solution validity, and return a zero-vector if no
# valid solution was found.
qp_params.check_solution_validity = True
# Set Cartesian control iterations.
qp_params.max_cartesian_velocity_control_iterations = (
self.max_cartesian_velocity_control_iterations)
# Set regularization weight to prevent high joint velocities near singular
# configurations.
qp_params.regularization_weight = self.regularization_weight
# We always set our tolerance to 1.0e-3, as any value smaller than that is
# unlikely to make a difference.
qp_params.solution_tolerance = 1.0e-3
# Set nullspace control if gain is valid. If reference is None, set to
# middle of joint range. If nullspace fails, we simply return the
# minimum-norm least-squares solution to the Cartesian problem.
# Note that the nullspace reference is not sorted in ascending order yet, as
# the nullspace bias needs to be sorted after computing the velocities.
if self.nullspace_gain is not None and self.nullspace_gain > 0.0:
qp_params.enable_nullspace_control = True
qp_params.return_error_on_nullspace_failure = False
qp_params.nullspace_projection_slack = 1.0e-4
if self.nullspace_joint_position_reference is None:
self.nullspace_joint_position_reference = 0.5 * np.sum(
qp_params.model.jnt_range[qp_params.joint_ids, :], axis=1)
qp_params.max_nullspace_control_iterations = (
self.max_nullspace_control_iterations)
else:
qp_params.enable_nullspace_control = False
@dataclasses.dataclass
class CollisionParams:
"""Helper class for the collision parameters of Cartesian6dVelocityEffector.
Attributes:
collision_pairs: (optional) a sequence of collision pairs in which to
perform active collision avoidance. A collision pair is defined as a tuple
of two geom groups. A geom group is a sequence of geom names. For each
collision pair, the controller will attempt to avoid collisions between
every geom in the first pair with every geom in the second pair. Self
collision is achieved by adding a collision pair with the same geom group
in both tuple positions.
collision_avoidance_normal_velocity_scale: (optional) value between (0, 1]
that defines how fast each geom is allowed to move towards another in each
iteration. Values lower than 1 are safer but may make the geoms move
slower towards each other. In the literature, a common starting value is
0.85. Ignored if collision_pairs is None.
minimum_distance_from_collisions: (optional) defines the minimum distance
that the solver will attempt to leave between any two geoms. A negative
distance would allow the geoms to penetrate by the specified amount.
collision_detection_distance: (optional) defines the distance between two
geoms at which the active collision avoidance behaviour will start. A
large value will cause collisions to be detected early, but may incur high
computational costs. A negative value will cause the geoms to be detected
only after they penetrate by the specified amount.
"""
collision_pairs: Optional[Sequence[Tuple[Sequence[str],
Sequence[str]]]] = None
collision_avoidance_normal_velocity_scale: float = 0.85
minimum_distance_from_collisions: float = 0.05
collision_detection_distance: float = 0.5
def set_qp_params(self, qp_params: _CartesianVelocityMapperParams):
"""Configures `qp_params` with the CollisionParams fields."""
if self.collision_pairs:
qp_params.enable_collision_avoidance = True
qp_params.collision_avoidance_normal_velocity_scale = (
self.collision_avoidance_normal_velocity_scale)
qp_params.minimum_distance_from_collisions = (
self.minimum_distance_from_collisions)
      qp_params.collision_detection_distance = (
          self.collision_detection_distance)
qp_params.collision_pairs = self.collision_pairs
else:
qp_params.enable_collision_avoidance = False
class Cartesian6dVelocityEffector(effector.Effector):
"""A Cartesian 6D velocity effector interface for a robot arm."""
def __init__(self,
robot_name: str,
joint_velocity_effector: effector.Effector,
model_params: ModelParams,
control_params: ControlParams,
collision_params: Optional[CollisionParams] = None,
log_nullspace_failure_warnings: bool = False):
"""Initializes a QP-based 6D Cartesian velocity effector.
Args:
robot_name: name of the robot the Cartesian effector controls.
joint_velocity_effector: `Effector` on the joint velocities being
controlled to achieve the target Cartesian velocity. This class takes
ownership of this effector, i.e. it will call `initialize_episode`
automatically.
model_params: parameters that describe the object being controlled.
control_params: parameters that describe how the element should be
controlled.
collision_params: parameters that describe the active collision avoidance
behaviour, if any.
log_nullspace_failure_warnings: if true, a warning will be logged
if the internal LSQP solver is unable to solve the nullspace
optimization problem (second hierarchy). Ignored if nullspace control is
disabled.
"""
self._effector_prefix = f'{robot_name}_twist'
self._joint_velocity_effector = joint_velocity_effector
self._joints = model_params.joints
self._model_params = model_params
self._control_params = control_params
self._collision_params = collision_params
self._control_frame = model_params.control_frame
self._log_nullspace_failure_warnings = log_nullspace_failure_warnings
# These are created in after_compose, once the mjcf_model is finalized.
self._qp_mapper = None
self._qp_frame = None
self._joints_argsort = None
def after_compile(self, mjcf_model: mjcf.RootElement) -> None:
# Construct the QP-based mapper.
qp_params = _CartesianVelocityMapperParams()
self._model_params.set_qp_params(mjcf_model, qp_params)
self._control_params.set_qp_params(qp_params)
if self._collision_params:
self._collision_params.set_qp_params(qp_params)
qp_params.log_nullspace_failure_warnings = (
self._log_nullspace_failure_warnings)
self._qp_mapper = _CartesianVelocityMapper(qp_params)
# Array of indices that would sort the joints in ascending order.
# This is necessary because the mapper's inputs and outputs are always in
# joint-ID ascending order, but the effector control should be passed
# in the same order as the joints.
self._joints_argsort = np.argsort(qp_params.joint_ids)
# The mapper always expects the Cartesian velocity target to be expressed in
# the element's origin in the world's orientation.
self._qp_frame = geometry.HybridPoseStamped(
pose=None,
frame=self._model_params.element,
quaternion_override=geometry.PoseStamped(None, None))
def initialize_episode(self, physics, random_state) -> None:
# Initialize the joint velocity effector.
self._joint_velocity_effector.initialize_episode(physics, random_state)
def action_spec(self, physics: mjcf.Physics) -> specs.BoundedArray:
lin = abs(self._control_params.max_lin_vel)
rot = abs(self._control_params.max_rot_vel)
max_6d_vel = np.asarray([lin, lin, lin, rot, rot, rot])
actuator_names = [(self.prefix + str(i)) for i in range(6)]
return specs.BoundedArray(
shape=(6,),
dtype=np.float32,
minimum=-1.0 * max_6d_vel,
maximum=max_6d_vel,
name='\t'.join(actuator_names))
def set_control(self, physics: mjcf.Physics, command: np.ndarray) -> None:
"""Sets a 6 DoF Cartesian velocity command at the current timestep.
Args:
physics: `mjcf.Physics` object with the updated environment state at the
current timestep.
command: array of size 6 describing the desired 6 DoF Cartesian target
[(lin_vel), (ang_vel)].
"""
if command.size != 6:
raise ValueError('set_control: command must be an np.ndarray of size 6. '
f'Got {command.size}.')
cartesian_6d_target = np.copy(command)
# If `control_frame` is None, we assume its frame to be the same as the
# QP, and thus no transformation is needed.
if self._control_frame is not None:
# Transform the command from the target frame to the QP frame.
stamped_command = geometry.TwistStamped(cartesian_6d_target,
self._control_frame)
cartesian_6d_target = stamped_command.get_relative_twist(
self._qp_frame, mujoco_physics.wrap(physics)).full
# Scale the Cartesian 6D velocity target if outside of Cartesian velocity
# limits.
cartesian_6d_target = _scale_cartesian_6d_velocity(
cartesian_6d_target, self._control_params.max_lin_vel,
self._control_params.max_rot_vel)
# Compute the joint velocities and set the control on underlying velocity
# effector.
self._joint_velocity_effector.set_control(
physics,
self._compute_joint_velocities(
physics=physics, cartesian_6d_target=cartesian_6d_target))
@property
def prefix(self) -> str:
return self._effector_prefix
@property
def control_frame(self) -> geometry.Frame:
"""Returns the frame in which actions are expected."""
return self._control_frame or self._qp_frame
def _compute_joint_velocities(self, physics: mjcf.Physics,
cartesian_6d_target: np.ndarray) -> np.ndarray:
"""Maps a Cartesian 6D target velocity to joint velocities.
Args:
physics: `mjcf.Physics` object with the updated environment state at the
current timestep.
cartesian_6d_target: array of size 6 describing the desired 6 DoF
Cartesian target [(lin_vel), (ang_vel)]. Must be expressed about the
element's origin in the world orientation.
Returns:
Computed joint velocities in the same order as the `joints` sequence
passed during construction.
"""
joints_binding = physics.bind(self._joints)
if joints_binding is None:
raise ValueError(
'_compute_joint_velocities: could not bind the joint elements passed '
'on construction to the physics object.')
joint_velocities = np.empty(len(self._joints), dtype=np.float32)
# Compute nullspace bias if gain is positive.
qdot_nullspace = None
if (self._control_params.nullspace_gain is not None and
self._control_params.nullspace_gain > 0.0 and
self._control_params.nullspace_joint_position_reference is not None):
qdot_nullspace = self._control_params.nullspace_gain * (
self._control_params.nullspace_joint_position_reference -
joints_binding.qpos) / self._control_params.control_timestep_seconds
# Reorder qdot_nullspace such that the nullspace bias is in ascending
# order relative to the joint IDs.
qdot_nullspace = qdot_nullspace[self._joints_argsort]
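      # Worked example (illustrative numbers): with nullspace_gain=0.025,
      # control_timestep_seconds=0.05, a reference of 0.0 rad and a current qpos of
      # 0.2 rad, the bias for that joint is 0.025 * (0.0 - 0.2) / 0.05 = -0.1 rad/s.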
# Compute joint velocities. The Python bindings throw an exception whenever
# the mapper fails to find a solution, in which case we set the joint
# velocities to zero.
# We need to catch a general exception because the StatusOr->Exception
# conversion can result in a wide variety of different exceptions.
# The only special case is when the user calls CTRL-C, in which case we
# re-raise the KeyboardInterrupt exception arising from SIGINT.
try:
# Note that we need to make sure that the joint velocities are in the same
      # order as the joints sequence, which may be different from the order the QP
      # returns, i.e. ascending joint-ID order.
joint_velocities[self._joints_argsort] = np.array(
self._qp_mapper.compute_joint_velocities(physics.data,
cartesian_6d_target.tolist(),
qdot_nullspace),
dtype=np.float32)
except KeyboardInterrupt:
logging.warning('_compute_joint_velocities: Computation interrupted!')
raise
except Exception as e: # pylint: disable=broad-except
joint_velocities.fill(0.0)
logging.warning(
('_compute_joint_velocities: Failed to compute joint velocities. '
'Setting joint velocities to zero. Error: [%s]'), str(e))
return joint_velocities
def limit_to_workspace(
cartesian_effector: Cartesian6dVelocityEffector,
element: _MjcfElement,
min_workspace_limits: np.ndarray,
max_workspace_limits: np.ndarray,
) -> effector.Effector:
"""Returns an effector that restricts the 6D actions to a workspace.
Constraining the rotation of the end effector is currently not
supported.
Args:
cartesian_effector: 6D cartesian effector.
element: `mjcf.Element` that defines the Cartesian frame about which the
Cartesian velocity is defined.
min_workspace_limits: Lower bound of the Cartesian workspace. Must be 3D.
max_workspace_limits: Upper bound of the Cartesian workspace. Must be 3D.
"""
if len(min_workspace_limits) != 3 or len(max_workspace_limits) != 3:
raise ValueError('The workspace limits must be 3D (X, Y, Z). Provided '
f'min: {min_workspace_limits} and max: '
f'{max_workspace_limits}')
def state_getter(physics):
pos = physics.bind(element).xpos
# Even when no wrist limits are provided, we need to supply a 6D state to
# match the action spec of the cartesian effector.
wrist_state = [0.0] * 3
return np.concatenate((pos, wrist_state))
# Provide unused wrist limits. They will be compared to a constant 0.0.
min_limits = np.concatenate((min_workspace_limits, [-1.] * 3))
max_limits = np.concatenate((max_workspace_limits, [1.] * 3))
return constrained_actions_effectors.ConstrainedActionEffector(
delegate=cartesian_effector,
min_limits=min_limits,
max_limits=max_limits,
state_getter=state_getter)
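# Usage sketch (hypothetical names; assumes a constructed Cartesian6dVelocityEffector
# and the mjcf element it controls):
#   safe_effector = limit_to_workspace(
#       cartesian_effector=cartesian_effector,
#       element=arm.wrist_site,
#       min_workspace_limits=np.array([0.2, -0.3, 0.05]),
#       max_workspace_limits=np.array([0.6, 0.3, 0.5]))
# The returned ConstrainedActionEffector restricts the translational part of the
# command when the element leaves the box; rotation is not constrained.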
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import shutil
import sys
import mock
import unittest
from ebcli.lib import aws, elasticbeanstalk
from ..test_helper import EnvVarSubstitutor
if sys.platform.startswith('win32'):
HOME = [
environ
for environ in os.environ
if os.environ[environ] == os.path.expanduser('~')
][0]
else:
HOME = 'HOME'
NOT_FOUND_ERROR_TEMPLATE = (
    '{missing_object} does not exist. '
    'It is possible that the implementation of related logic, and '
    'not just the method name, has changed in a recent version '
    'of `botocore`.'
)
try:
from botocore.endpoint import Endpoint
Endpoint._get_response
except (ModuleNotFoundError, AttributeError):
raise NotImplementedError(
NOT_FOUND_ERROR_TEMPLATE.format(missing_object='botocore.endpoint.Endpoint._get_response')
)
try:
from botocore.httpsession import URLLib3Session
URLLib3Session.send
except (ModuleNotFoundError, AttributeError):
raise NotImplementedError(
NOT_FOUND_ERROR_TEMPLATE.format(missing_object='botocore.httpsession.URLLib3Session.send')
)
class CredentialsEnvVarSubstituter(object):
def __init__(self, access_id, secret_key):
self.access_id = access_id
self.secret_key = secret_key
def __call__(self, func):
with EnvVarSubstitutor('AWS_ACCESS_KEY_ID', self.access_id):
with EnvVarSubstitutor('AWS_SECRET_ACCESS_KEY', self.secret_key):
func()
@unittest.skipIf(
    bool(os.environ.get('JENKINS_HOME')) and sys.platform.startswith('win32'),
reason='There are issues being able to find the `~` directory '
'when run by the Jenkins service on Windows'
)
class TestProfileSelection(unittest.TestCase):
"""
Class hosts integration tests that interact with botocore to ensure
the right instance profile will be used. Order is defined in the following
link:
https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/eb-cli3-configuration.html#eb-cli3-credentials
"""
def assertCorrectProfileWasUsed(self, _get_response_mock, access_id='access_id'):
self.assertIn(
'AWS4-HMAC-SHA256 Credential={}'.format(access_id),
self._request_header_authorization(_get_response_mock)
)
def run(self, result=None):
aws._flush()
aws._profile_env_var = 'AWS_EB_PROFILE'
aws._region_name = 'us-west-2'
self.root_dir = os.getcwd()
if os.path.exists('testDir'):
shutil.rmtree('testDir', ignore_errors=True)
os.mkdir('testDir')
os.chdir('testDir')
try:
with EnvVarSubstitutor(HOME, os.getcwd()):
super(TestProfileSelection, self).run(result=result)
finally:
aws._flush()
os.chdir(self.root_dir)
shutil.rmtree('testDir', ignore_errors=True)
def _generic_response(self):
http_object = mock.MagicMock(status_code=200)
response_object_mock = {
'PlatformSummaryList': [],
'ResponseMetadata': {
'HTTPStatusCode': 200
}
}
return (http_object, response_object_mock), None
def _generate_profile_with_prefix(
self,
credentials_dir=None,
credentials_file_name='credentials',
profile='default',
access_id='access_id',
secret_key='secret_key'
):
self._generate_profile(
add_profile_prefix=True,
credentials_dir=credentials_dir,
credentials_file_name=credentials_file_name,
profile=profile,
access_id=access_id,
secret_key=secret_key,
)
def _generate_profile(
self,
add_profile_prefix=False,
credentials_dir=None,
credentials_file_name='credentials',
profile='default',
access_id='access_id',
secret_key='secret_key'
):
credentials_dir = credentials_dir or os.path.join(os.environ[HOME], '.aws')
os.mkdir(credentials_dir)
credentials_file = os.path.join(credentials_dir, credentials_file_name)
with open(credentials_file, 'w') as file:
file.write(
"""[{profile_prefix}{profile}]
aws_access_key_id = {access_id}
aws_secret_access_key = {secret_key}
""".format(
profile_prefix='profile ' if add_profile_prefix else '',
profile=profile,
access_id=access_id,
secret_key=secret_key,
)
)
def _request_header_authorization(self, _get_response_mock):
aws_prepared_request = _get_response_mock.call_args[0][0]
request_headers = aws_prepared_request.headers
return str(request_headers['Authorization'])
@mock.patch('botocore.endpoint.Endpoint._get_response')
def test_environment_variables_used(
self,
_get_response_mock
):
"""
- `--profile` is not passed and is not set in config.yml
- environment variables `AWS_SECRET_ACCESS_KEY` and `AWS_ACCESS_KEY_ID` are found
"""
_get_response_mock.return_value = self._generic_response()
@CredentialsEnvVarSubstituter('access_id', 'secret_key')
def invoke_api():
elasticbeanstalk.list_platform_versions()
self.assertCorrectProfileWasUsed(_get_response_mock)
@mock.patch('botocore.httpsession.URLLib3Session.send')
def test_environment_variables_not_found(
self,
send_mock
):
"""
- `--profile` is not passed and is not set in config.yml
- environment variables `AWS_SECRET_ACCESS_KEY` and `AWS_ACCESS_KEY_ID` are not found
- attempt is made to communicate with IAM to assume role
"""
@CredentialsEnvVarSubstituter(None, None)
def invoke_api():
try:
elasticbeanstalk.list_platform_versions()
except Exception:
pass
iam_role_verification_request = send_mock.call_args[0][0]
self.assertIn(
'latest/meta-data/iam/security-credentials/',
iam_role_verification_request.url
)
def test_aws_eb_profile_environment_variable_found_but_profile_does_not_exist(self):
"""
- `--profile` is not passed and is not set in config.yml
- environment variable `AWS_EB_PROFILE` found, but profile does not exist
- environment variables `AWS_SECRET_ACCESS_KEY` and `AWS_ACCESS_KEY_ID` found but not used
"""
with EnvVarSubstitutor('AWS_EB_PROFILE', 'some_profile'):
with self.assertRaises(aws.InvalidProfileError) as context_manager:
@CredentialsEnvVarSubstituter('access_id', 'secret_key')
def invoke_api():
elasticbeanstalk.list_platform_versions()
self.assertEqual(
'The config profile (some_profile) could not be found',
str(context_manager.exception)
)
@mock.patch('botocore.endpoint.Endpoint._get_response')
def test_aws_eb_profile_environment_variable_found__profile_exists_in_credentials_file(
self,
_get_response_mock
):
"""
- `--profile` is not passed and is not set in config.yml
- environment variable `AWS_EB_PROFILE` found, profile exists in credentials file
- environment variables `AWS_SECRET_ACCESS_KEY` and `AWS_ACCESS_KEY_ID` found but not used
"""
self._generate_profile(profile='some_profile')
with EnvVarSubstitutor('AWS_EB_PROFILE', 'some_profile'):
_get_response_mock.return_value = self._generic_response()
@CredentialsEnvVarSubstituter('access_id', 'secret_key')
def invoke_api():
elasticbeanstalk.list_platform_versions()
self.assertCorrectProfileWasUsed(_get_response_mock)
@mock.patch('botocore.endpoint.Endpoint._get_response')
def test_aws_eb_profile_environment_variable_found__profile_exists_in_config_file(
self,
_get_response_mock
):
"""
- `--profile` is not passed and is not set in config.yml
- environment variable `AWS_EB_PROFILE` found, profile exists in config file
- environment variables `AWS_SECRET_ACCESS_KEY` and `AWS_ACCESS_KEY_ID` found but not used
"""
self._generate_profile_with_prefix(profile='some_profile', credentials_file_name='config')
with EnvVarSubstitutor('AWS_EB_PROFILE', 'some_profile'):
_get_response_mock.return_value = self._generic_response()
@CredentialsEnvVarSubstituter('access_id', 'secret_key')
def invoke_api():
elasticbeanstalk.list_platform_versions()
self.assertCorrectProfileWasUsed(_get_response_mock)
@mock.patch('botocore.endpoint.Endpoint._get_response')
def test_default_profile_is_found_in_credentials_file(
self,
_get_response_mock
):
"""
- `--profile` is not passed and is not set in config.yml
- environment variables `AWS_SECRET_ACCESS_KEY` and `AWS_ACCESS_KEY_ID` are not found
- `default` profile is assumed and found in `$HOME/.aws/credentials` file
"""
self._generate_profile()
_get_response_mock.return_value = self._generic_response()
@CredentialsEnvVarSubstituter(None, None)
def invoke_api():
elasticbeanstalk.list_platform_versions()
self.assertCorrectProfileWasUsed(_get_response_mock)
@mock.patch('botocore.endpoint.Endpoint._get_response')
def test_default_profile_is_found_in_config_file__profile_prefix_is_not_added(
self,
_get_response_mock
):
"""
- `--profile` is not passed and is not set in config.yml
- environment variables `AWS_SECRET_ACCESS_KEY` and `AWS_ACCESS_KEY_ID` are not found
- `default` profile is assumed and found in `$HOME/.aws/config` file; profile is identified as `[default]`
"""
self._generate_profile(credentials_file_name='config')
_get_response_mock.return_value = self._generic_response()
@CredentialsEnvVarSubstituter(None, None)
def invoke_api():
elasticbeanstalk.list_platform_versions()
self.assertCorrectProfileWasUsed(_get_response_mock)
@mock.patch('botocore.endpoint.Endpoint._get_response')
def test_default_profile_is_found_in_config_file__profile_prefix_is_added(
self,
_get_response_mock
):
"""
- `--profile` is not passed and is not set in config.yml
- environment variables `AWS_SECRET_ACCESS_KEY` and `AWS_ACCESS_KEY_ID` are not found
- `default` profile is found in `$HOME/.aws/config` file; profile is identified as `[profile default]`
"""
self._generate_profile_with_prefix(credentials_file_name='config')
_get_response_mock.return_value = self._generic_response()
@CredentialsEnvVarSubstituter(None, None)
def invoke_api():
elasticbeanstalk.list_platform_versions()
self.assertCorrectProfileWasUsed(_get_response_mock)
def test_profile_is_explicitly_passed_but_is_invalid(self):
"""
        - `--profile` is passed as `some_profile` through the command line (or is set in config.yml)
- environment variables `AWS_SECRET_ACCESS_KEY` and `AWS_ACCESS_KEY_ID` are found
- `some_profile` profile is not found in `$HOME/.aws/credentials`
"""
aws._profile = 'some_profile'
with self.assertRaises(aws.InvalidProfileError) as context_manager:
@CredentialsEnvVarSubstituter('access_id', 'secret_key')
def invoke_api():
elasticbeanstalk.list_platform_versions()
self.assertEqual(
'The config profile (some_profile) could not be found',
str(context_manager.exception)
)
@mock.patch('botocore.endpoint.Endpoint._get_response')
def test_profile_is_found_in_credentials_file(
self,
_get_response_mock
):
"""
        - `--profile` is passed as `some_profile` through the command line (or is set in config.yml)
- environment variables `AWS_SECRET_ACCESS_KEY` and `AWS_ACCESS_KEY_ID` are found
- `some_profile` profile is found in `$HOME/.aws/credentials`
"""
aws._profile = 'some_profile'
self._generate_profile(profile='some_profile')
_get_response_mock.return_value = self._generic_response()
@CredentialsEnvVarSubstituter('some_other_access_id', 'secret_key')
def invoke_api():
elasticbeanstalk.list_platform_versions()
self.assertCorrectProfileWasUsed(_get_response_mock)
@mock.patch('botocore.endpoint.Endpoint._get_response')
def test_profile_is_found_in_config_file(
self,
_get_response_mock
):
"""
        - `--profile` is passed as `some_profile` through the command line (or is set in config.yml)
- environment variables `AWS_SECRET_ACCESS_KEY` and `AWS_ACCESS_KEY_ID` are found
- `some_profile` profile is found in `$HOME/.aws/config`
"""
aws._profile = 'some_profile'
self._generate_profile_with_prefix(credentials_file_name='config', profile='some_profile')
_get_response_mock.return_value = self._generic_response()
@CredentialsEnvVarSubstituter('access_id', 'secret_key')
def invoke_api():
elasticbeanstalk.list_platform_versions()
self.assertCorrectProfileWasUsed(_get_response_mock)
def test_profile_is_found_in_config_file_but_profile_prefix_is_absent(self):
"""
        - `--profile` is passed as `some_profile` through the command line (or is set in config.yml)
- environment variables `AWS_SECRET_ACCESS_KEY` and `AWS_ACCESS_KEY_ID` are found
- `some_profile` profile is found in `$HOME/.aws/config` but profile prefix is absent
"""
aws._profile = 'some_profile'
self._generate_profile(credentials_file_name='config', profile='some_profile')
with self.assertRaises(aws.InvalidProfileError) as context_manager:
@CredentialsEnvVarSubstituter('access_id', 'secret_key')
def invoke_api():
elasticbeanstalk.list_platform_versions()
self.assertEqual(
'The config profile (some_profile) could not be found',
str(context_manager.exception)
)
|
<reponame>emilhe/dash
import json
import operator
import pytest
from dash import Dash, Input, Output, html, dcc, callback_context
from dash.exceptions import PreventUpdate, MissingCallbackContextException
import dash.testing.wait as wait
from selenium.webdriver.common.action_chains import ActionChains
def test_cbcx001_modified_response(dash_duo):
app = Dash(__name__)
app.layout = html.Div([dcc.Input(id="input", value="ab"), html.Div(id="output")])
@app.callback(Output("output", "children"), [Input("input", "value")])
def update_output(value):
callback_context.response.set_cookie("dash_cookie", value + " - cookie")
return value + " - output"
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#output", "ab - output")
input1 = dash_duo.find_element("#input")
input1.send_keys("cd")
dash_duo.wait_for_text_to_equal("#output", "abcd - output")
cookie = dash_duo.driver.get_cookie("dash_cookie")
# cookie gets json encoded
assert cookie["value"] == '"abcd - cookie"'
assert not dash_duo.get_logs()
def test_cbcx002_triggered(dash_duo):
app = Dash(__name__)
btns = ["btn-{}".format(x) for x in range(1, 6)]
app.layout = html.Div(
[html.Div([html.Button(btn, id=btn) for btn in btns]), html.Div(id="output")]
)
@app.callback(Output("output", "children"), [Input(x, "n_clicks") for x in btns])
def on_click(*args):
if not callback_context.triggered:
raise PreventUpdate
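        # each entry of callback_context.triggered is a dict with a "prop_id" of the form
        # "<component id>.<property>" and the new "value" of that property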
trigger = callback_context.triggered[0]
return "Just clicked {} for the {} time!".format(
trigger["prop_id"].split(".")[0], trigger["value"]
)
dash_duo.start_server(app)
for i in range(1, 5):
for btn in btns:
dash_duo.find_element("#" + btn).click()
dash_duo.wait_for_text_to_equal(
"#output", "Just clicked {} for the {} time!".format(btn, i)
)
def test_cbcx003_no_callback_context():
for attr in ["inputs", "states", "triggered", "response"]:
with pytest.raises(MissingCallbackContextException):
getattr(callback_context, attr)
def test_cbcx004_triggered_backward_compat(dash_duo):
app = Dash(__name__)
app.layout = html.Div([html.Button("click!", id="btn"), html.Div(id="out")])
@app.callback(Output("out", "children"), [Input("btn", "n_clicks")])
def report_triggered(n):
triggered = callback_context.triggered
bool_val = "truthy" if triggered else "falsy"
split_propid = json.dumps(triggered[0]["prop_id"].split("."))
full_val = json.dumps(triggered)
return "triggered is {}, has prop/id {}, and full value {}".format(
bool_val, split_propid, full_val
)
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal(
"#out",
'triggered is falsy, has prop/id ["", ""], and full value '
'[{"prop_id": ".", "value": null}]',
)
dash_duo.find_element("#btn").click()
dash_duo.wait_for_text_to_equal(
"#out",
'triggered is truthy, has prop/id ["btn", "n_clicks"], and full value '
'[{"prop_id": "btn.n_clicks", "value": 1}]',
)
@pytest.mark.DASH1350
def test_cbcx005_grouped_clicks(dash_duo):
class context:
calls = 0
callback_contexts = []
clicks = dict()
app = Dash(__name__)
app.layout = html.Div(
[
html.Button("Button 0", id="btn0"),
html.Div(
[
html.Button("Button 1", id="btn1"),
html.Div(
[html.Div(id="div3"), html.Button("Button 2", id="btn2")],
id="div2",
style=dict(backgroundColor="yellow", padding="50px"),
),
],
id="div1",
style=dict(backgroundColor="blue", padding="50px"),
),
],
id="div0",
style=dict(backgroundColor="red", padding="50px"),
)
@app.callback(
Output("div3", "children"),
[
Input("div1", "n_clicks"),
Input("div2", "n_clicks"),
Input("btn0", "n_clicks"),
Input("btn1", "n_clicks"),
Input("btn2", "n_clicks"),
],
prevent_initial_call=True,
)
def update(div1, div2, btn0, btn1, btn2):
context.calls = context.calls + 1
context.callback_contexts.append(callback_context.triggered)
context.clicks["div1"] = div1
context.clicks["div2"] = div2
context.clicks["btn0"] = btn0
context.clicks["btn1"] = btn1
context.clicks["btn2"] = btn2
def click(target):
ActionChains(dash_duo.driver).move_to_element_with_offset(
target, 5, 5
).click().perform()
dash_duo._wait_for_callbacks()
dash_duo.start_server(app)
click(dash_duo.find_element("#btn0"))
assert context.calls == 1
keys = list(map(operator.itemgetter("prop_id"), context.callback_contexts[-1:][0]))
assert len(keys) == 1
assert "btn0.n_clicks" in keys
assert context.clicks.get("btn0") == 1
assert context.clicks.get("btn1") is None
assert context.clicks.get("btn2") is None
assert context.clicks.get("div1") is None
assert context.clicks.get("div2") is None
click(dash_duo.find_element("#div1"))
assert context.calls == 2
keys = list(map(operator.itemgetter("prop_id"), context.callback_contexts[-1:][0]))
assert len(keys) == 1
assert "div1.n_clicks" in keys
assert context.clicks.get("btn0") == 1
assert context.clicks.get("btn1") is None
assert context.clicks.get("btn2") is None
assert context.clicks.get("div1") == 1
assert context.clicks.get("div2") is None
click(dash_duo.find_element("#btn1"))
assert context.calls == 3
keys = list(map(operator.itemgetter("prop_id"), context.callback_contexts[-1:][0]))
assert len(keys) == 2
assert "btn1.n_clicks" in keys
assert "div1.n_clicks" in keys
assert context.clicks.get("btn0") == 1
assert context.clicks.get("btn1") == 1
assert context.clicks.get("btn2") is None
assert context.clicks.get("div1") == 2
assert context.clicks.get("div2") is None
click(dash_duo.find_element("#div2"))
assert context.calls == 4
keys = list(map(operator.itemgetter("prop_id"), context.callback_contexts[-1:][0]))
assert len(keys) == 2
assert "div1.n_clicks" in keys
assert "div2.n_clicks" in keys
assert context.clicks.get("btn0") == 1
assert context.clicks.get("btn1") == 1
assert context.clicks.get("btn2") is None
assert context.clicks.get("div1") == 3
assert context.clicks.get("div2") == 1
click(dash_duo.find_element("#btn2"))
assert context.calls == 5
keys = list(map(operator.itemgetter("prop_id"), context.callback_contexts[-1:][0]))
assert len(keys) == 3
assert "btn2.n_clicks" in keys
assert "div1.n_clicks" in keys
assert "div2.n_clicks" in keys
assert context.clicks.get("btn0") == 1
assert context.clicks.get("btn1") == 1
assert context.clicks.get("btn2") == 1
assert context.clicks.get("div1") == 4
assert context.clicks.get("div2") == 2
@pytest.mark.DASH1350
def test_cbcx006_initial_callback_predecessor(dash_duo):
class context:
calls = 0
callback_contexts = []
app = Dash(__name__)
app.layout = html.Div(
[
html.Div(
style={"display": "block"},
children=[
html.Div(
[
html.Label("ID: input-number-1"),
dcc.Input(id="input-number-1", type="number", value=0),
]
),
html.Div(
[
html.Label("ID: input-number-2"),
dcc.Input(id="input-number-2", type="number", value=0),
]
),
html.Div(
[
html.Label("ID: sum-number"),
dcc.Input(
id="sum-number", type="number", value=0, disabled=True
),
]
),
],
),
html.Div(id="results"),
]
)
@app.callback(
Output("sum-number", "value"),
[Input("input-number-1", "value"), Input("input-number-2", "value")],
)
def update_sum_number(n1, n2):
context.calls = context.calls + 1
context.callback_contexts.append(callback_context.triggered)
return n1 + n2
@app.callback(
Output("results", "children"),
[
Input("input-number-1", "value"),
Input("input-number-2", "value"),
Input("sum-number", "value"),
],
)
def update_results(n1, n2, nsum):
context.calls = context.calls + 1
context.callback_contexts.append(callback_context.triggered)
return [
"{} + {} = {}".format(n1, n2, nsum),
html.Br(),
"ctx.triggered={}".format(callback_context.triggered),
]
dash_duo.start_server(app)
# Initial Callbacks
wait.until(lambda: context.calls == 2, 2)
wait.until(lambda: len(context.callback_contexts) == 2, 2)
keys0 = list(map(operator.itemgetter("prop_id"), context.callback_contexts[0]))
# Special case present for backward compatibility
assert len(keys0) == 1
assert "." in keys0
keys1 = list(map(operator.itemgetter("prop_id"), context.callback_contexts[1]))
assert len(keys1) == 1
assert "sum-number.value" in keys1
# User action & followup callbacks
dash_duo.find_element("#input-number-1").click()
dash_duo.find_element("#input-number-1").send_keys("1")
wait.until(lambda: context.calls == 4, 2)
wait.until(lambda: len(context.callback_contexts) == 4, 2)
keys0 = list(map(operator.itemgetter("prop_id"), context.callback_contexts[2]))
# Special case present for backward compatibility
assert len(keys0) == 1
assert "input-number-1.value" in keys0
keys1 = list(map(operator.itemgetter("prop_id"), context.callback_contexts[3]))
assert len(keys1) == 2
assert "sum-number.value" in keys1
assert "input-number-1.value" in keys1
dash_duo.find_element("#input-number-2").click()
dash_duo.find_element("#input-number-2").send_keys("1")
wait.until(lambda: context.calls == 6, 2)
wait.until(lambda: len(context.callback_contexts) == 6, 2)
keys0 = list(map(operator.itemgetter("prop_id"), context.callback_contexts[4]))
# Special case present for backward compatibility
assert len(keys0) == 1
assert "input-number-2.value" in keys0
keys1 = list(map(operator.itemgetter("prop_id"), context.callback_contexts[5]))
assert len(keys1) == 2
assert "sum-number.value" in keys1
assert "input-number-2.value" in keys1
|
<reponame>Tharun24/IRLI<gh_stars>0
from config import train_config as config
import tensorflow as tf
import glob
import argparse
import time
import numpy as np
import logging
from utils import _parse_function, _parse_function_dense
try:
from util import topK
except ImportError:
print('**********************CANNOT IMPORT topK***************************')
exit()
parser = argparse.ArgumentParser()
parser.add_argument("--repetition", help="which repetition?", default=0)
parser.add_argument("--gpu", default='0')
# parser.add_argument("--gpu_usage", default=0.22, type=float)
parser.add_argument("--load_epoch", default=0, type=int)
parser.add_argument("--k2", default=10, type=int, help='take top-k2 buckets of accumulated label vectors and assign it to the least occupied')
parser.add_argument("--n_epochs", default=30, type=int)
args = parser.parse_args()
import os
if not args.gpu == 'all':
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
r = int(args.repetition) # which repetition
############################## Test code from here ################################
lookup = tf.Variable(np.load(config.lookups_loc+'epoch_'+str(args.load_epoch)+'/'+'bucket_order_'+str(r)+'.npy'))
train_files = glob.glob(config.tfrecord_loc+'*train*.tfrecords')
dataset = tf.data.TFRecordDataset(train_files)
dataset = dataset.apply(tf.contrib.data.map_and_batch(
map_func=_parse_function, batch_size=config.batch_size))
dataset = dataset.prefetch(buffer_size=100)
dataset = dataset.shuffle(buffer_size=100)
iterator = dataset.make_initializable_iterator()
next_y_idxs, next_y_vals, next_x_idxs, next_x_vals = iterator.get_next()
###############
x_idxs = tf.stack([next_x_idxs.indices[:,0], next_x_idxs.values], axis=-1)
x_vals = next_x_vals.values
x = tf.SparseTensor(x_idxs, x_vals, [config.batch_size, config.inp_dim])
####
y_idxs = tf.stack([next_y_idxs.indices[:,0], tf.gather(lookup, next_y_idxs.values)], axis=-1)
y_vals = next_y_vals.values
y = tf.SparseTensor(y_idxs, y_vals, [config.batch_size, config.B])
y_ = tf.sparse_tensor_to_dense(y, validate_indices=False)
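# `lookup` holds the current class-to-bucket assignment, so gathering through it converts the
# sparse label ids into bucket ids; `y_` is then a dense [batch_size, B] multi-hot target over
# buckets rather than over the raw label space.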
###############
if args.load_epoch>0:
params=np.load(config.model_save_loc+'r_'+str(r)+'_epoch_'+str(args.load_epoch)+'.npz')
#
W1_tmp = tf.placeholder(tf.float32, shape=[config.inp_dim, config.hidden_dim])
b1_tmp = tf.placeholder(tf.float32, shape=[config.hidden_dim])
W1 = tf.Variable(W1_tmp)
b1 = tf.Variable(b1_tmp)
hidden_layer = tf.nn.relu(tf.sparse_tensor_dense_matmul(x,W1)+b1)
#
W2_tmp = tf.placeholder(tf.float32, shape=[config.hidden_dim, config.B])
b2_tmp = tf.placeholder(tf.float32, shape=[config.B])
W2 = tf.Variable(W2_tmp)
b2 = tf.Variable(b2_tmp)
logits = tf.matmul(hidden_layer,W2)+b2
else:
W1 = tf.Variable(tf.truncated_normal([config.inp_dim, config.hidden_dim], stddev=0.05, dtype=tf.float32))
b1 = tf.Variable(tf.truncated_normal([config.hidden_dim], stddev=0.05, dtype=tf.float32))
hidden_layer = tf.nn.relu(tf.sparse_tensor_dense_matmul(x,W1)+b1)
#
W2 = tf.Variable(tf.truncated_normal([config.hidden_dim, config.B], stddev=0.05, dtype=tf.float32))
b2 = tf.Variable(tf.truncated_normal([config.B], stddev=0.05, dtype=tf.float32))
logits = tf.matmul(hidden_layer,W2)+b2
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=y_))
train_op = tf.train.AdamOptimizer().minimize(loss)
sess = tf.Session(config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False,
gpu_options=tf.GPUOptions(allow_growth=True))) # , per_process_gpu_memory_fraction=args.gpu_usage
if args.load_epoch==0:
sess.run(tf.global_variables_initializer())
else:
sess.run(tf.global_variables_initializer(),
feed_dict = {
W1_tmp:params['W1'],
b1_tmp:params['b1'],
W2_tmp:params['W2'],
b2_tmp:params['b2']})
del params
begin_time = time.time()
total_time = 0
logging.basicConfig(filename = config.logfolder+'logs_'+str(r), level=logging.INFO)
n_check = 1000
for curr_epoch in range(args.load_epoch+1,args.load_epoch+args.n_epochs+1):
sess.run(iterator.initializer)
count = 0
while True:
try:
sess.run(train_op)
count += 1
if count%n_check==0:
_, train_loss = sess.run([train_op, loss])
time_diff = time.time()-begin_time
total_time += time_diff
logging.info('finished '+str(count)+' steps. Time elapsed for last '+str(n_check)+' steps: '+str(time_diff)+' s')
logging.info('train_loss: '+str(train_loss))
begin_time = time.time()
count+=1
except tf.errors.OutOfRangeError:
break
logging.info('###################################')
logging.info('finished epoch '+str(curr_epoch))
logging.info('total time elapsed so far: '+str(total_time))
logging.info('###################################')
if curr_epoch%5==0:
params = sess.run([W1,b1,W2,b2])
np.savez_compressed(config.model_save_loc+'r_'+str(r)+'_epoch_'+str(curr_epoch)+'.npz',
W1=params[0],
b1=params[1],
W2=params[2],
b2=params[3])
del params
################
begin_time = time.time()
sess.run(iterator.initializer)
aff_mat = np.zeros([config.n_classes,config.B], dtype=np.float16)
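# Second pass over the training data to build a label-to-bucket affinity matrix: for every
# example, its (scaled) predicted bucket logits are added to the rows of `aff_mat` belonging
# to that example's true labels. `next_y_idxs` evaluates to a SparseTensorValue, so index 0
# is the indices array, index 1 the label values and index 2 the dense shape; `temp` marks
# the boundaries between consecutive examples' label lists.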
while True:
try:
logits_, y_idxs_ = sess.run([logits, next_y_idxs])
temp = np.where(y_idxs_[0][:,1]==0)[0]
temp = np.concatenate([temp,y_idxs_[1].shape])
for j in range(y_idxs_[2][0]):
aff_mat[y_idxs_[1][temp[j]:temp[j+1]]] += np.tile(logits_[j].astype(np.float16)/100, (temp[j+1]-temp[j],1))
#######
except tf.errors.OutOfRangeError:
break
###
print('finished re-assigning labels')
print('time_elapsed for re-assignment:',time.time()-begin_time)
top_preds = np.zeros([config.n_classes,args.k2], dtype=int)
overall_count = 0
###
for i in range(aff_mat.shape[0]//config.batch_size):
start_idx = overall_count
end_idx = start_idx+config.batch_size
topK(aff_mat[start_idx:end_idx].astype(np.float32), top_preds[start_idx:end_idx], config.B, config.batch_size, args.k2, 2)
overall_count = end_idx
###
if overall_count<config.n_classes:
start_idx = overall_count
end_idx = config.n_classes
topK(aff_mat[start_idx:end_idx].astype(np.float32), top_preds[start_idx:end_idx], config.B, end_idx-start_idx, args.k2, 2)
overall_count = end_idx
###
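# Greedy re-assignment: each class is moved to whichever of its top-k2 candidate buckets
# currently holds the fewest classes, which keeps bucket occupancy balanced.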
counts = np.zeros(config.B+1, dtype=int)
bucket_order = np.zeros(config.n_classes, dtype=int)
for i in range(config.n_classes):
bucket = top_preds[i, np.argmin(counts[top_preds[i]+1])]
bucket_order[i] = bucket
counts[bucket+1] += 1
###
nothing = sess.run(tf.assign(lookup,bucket_order))
###
counts = np.cumsum(counts)
rolling_counts = np.zeros(config.B, dtype=int)
class_order = np.zeros(config.n_classes,dtype=int)
for i in range(config.n_classes):
temp = bucket_order[i]
class_order[counts[temp]+rolling_counts[temp]] = i
rolling_counts[temp] += 1
###
folder_path = config.lookups_loc+'epoch_'+str(curr_epoch)
if not os.path.isdir(folder_path):
os.system('mkdir '+folder_path)
np.save(folder_path+'/class_order_'+str(r)+'.npy', class_order)
np.save(folder_path+'/counts_'+str(r)+'.npy', counts)
np.save(folder_path+'/bucket_order_'+str(r)+'.npy', bucket_order)
################
begin_time = time.time()
|
<reponame>walsidalw/opencast-stats-app<filename>influxclient.py
"""
The Apereo Foundation licenses this file to you under the Educational
Community License, Version 2.0 (the "License"); you may not use this file
except in compliance with the License. You may obtain a copy of the License
at:
http://opensource.org/licenses/ecl2.txt
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
"""
from influxdb import DataFrameClient, InfluxDBClient
import pandas as pd
import json
import time
def get_dataset_client(cfg):
return InfluxDBClient(cfg['host'], cfg['port'], cfg['user'], cfg['password'], cfg['database'])
def get_dataframe_client(cfg):
return DataFrameClient(cfg['host'], cfg['port'], cfg['user'], cfg['password'], cfg['database'])
def get_views(client: DataFrameClient, rp, measurement, resource, res_id, orga_id):
"""
For a given resource type and Id, request number of plays, visitors and finishes grouped
by day.
:param client: InfluxDB DataFrame client for the query request
:param rp: Retention Policy
:param measurement: Name of the measurement under which the data is stored
:param resource: Type of resource. Can be "organizationId", "seriesId" or "eventId"
:param res_id: Unique identifier of resource
:param orga_id: OrganizationId of the requested resource
:return: DataFrame containing indexes for dates and columns for plays, visitors and finishes
"""
params = {'val1': orga_id,
'val2': res_id}
q = 'SELECT sum("finishes") AS "finishes", sum("plays") AS "plays", sum("visitors") AS "visitors" ' \
'FROM "{}"."{}" WHERE "organizationId"=$val1 AND "{}"=$val2 GROUP BY time(1d) FILL(0)'\
.format(rp, measurement, resource)
r = client.query(q, bind_params=params)
if r:
return r[measurement]
return pd.DataFrame()
def get_views_combined(client: DataFrameClient, rp, measurement, orga_id, events):
"""
For given list of episodes, request daily number of visitors. Join the resulting DataFrames
and fill NaNs with 0s.
:param client: InfluxDB DataFrame client for the query request
:param rp: Retention Policy
:param measurement: Name of the measurement under which the data is stored
:param orga_id: OrganizationId of the requested episodes
:param events: List of episodes, containing episodeIds and titles
:return: Combined DataFrame for all episodes in list, where indexes are dates and columns represent
visitors of each episode on given date
"""
df = pd.DataFrame()
val = []
col = []
for idx, name in events:
temp = get_views(client, rp, measurement, 'eventId', idx, orga_id)
if not temp.empty:
temp = temp.drop(columns=['plays', 'finishes'])
temp = temp.rename(columns={'visitors': idx})
df = df.join(temp, how='outer')
col.append(name)
df = df.fillna(0)
for column in df.columns:
val.append(list(df[column]))
return df.index, col, val
def get_totals(client: InfluxDBClient, rp, measurement, series_id, orga_id, events):
"""
For given seriesId, request all points from InfluxDB summed up and grouped by eventId.
Filter the result set by eventIds and build indexes and data tuples for each episode in given list.
:param client: Simple InfluxDB client for the query request
:param rp: Retention Policy
:param measurement: Name of the measurement under which the data is stored
:param series_id: Unique identifier for series
:param orga_id: OrganizationId of the requested series
:param events: List of episodes, containing episodeIds and titles
:return: Tuple of episode titles as indexes and data list containing aggregated numbers of plays, visitors
and finishes of corresponding episode
"""
params = {'val1': orga_id,
'val2': series_id}
q = 'SELECT sum("finishes") AS "finishes", sum("plays") AS "plays", sum("visitors") AS "visitors" ' \
'FROM "{}"."{}" WHERE "organizationId"=$val1 AND "seriesId"=$val2 GROUP BY "eventId"'\
.format(rp, measurement)
r = client.query(q, bind_params=params)
index = []
data = []
for idx, name in events:
points = list(r.get_points(measurement=measurement, tags={"eventId": idx}))
if points:
for point in points:
index.append(name)
data.append((point['plays'], point['visitors'], point['finishes']))
return index, data
def get_segments(client: InfluxDBClient, rp, measurement, event_id, orga_id):
"""
For given episode, request segment data from InfluxDB. Parse the returned JSON into human
readable form and build lists of indexes (time strings) and play rates for each segment.
:param client: Simple InfluxDB client for the query request
:param rp: Retention Policy
:param measurement: Name of the measurement under which the data is stored
:param event_id: Unique identifier for episode
:param orga_id: OrganizationId of the requested episode
:return: Pair of indexes (time segments) and play rates
"""""
params = {'val1': orga_id,
'val2': event_id}
q = 'SELECT "segments" FROM "{}"."{}" WHERE "organizationId"=$val1 AND "eventId"=$val2'.format(rp, measurement)
r = client.query(q, bind_params=params)
points = list(r.get_points())
if points:
index = []
data = []
r_json = json.loads((points[0])['segments'])
if "15" in (r_json[0])['label']:
seconds = 15
else:
seconds = 30
for i, elem in enumerate(r_json):
sec = (i + 1) * seconds
index.append((time.strftime("%H:%M:%S", time.gmtime(sec))))
data.append(elem['play_rate'])
return index, data
return []
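# A minimal usage sketch, not part of the application itself: the config keys mirror what
# get_dataset_client()/get_dataframe_client() expect above, while the retention policy,
# measurement name and ids below are placeholder values that depend on the deployment.
if __name__ == '__main__':
    example_cfg = {
        'host': 'localhost',
        'port': 8086,
        'user': 'admin',
        'password': 'admin',
        'database': 'opencast',
    }
    df_client = get_dataframe_client(example_cfg)
    daily_views = get_views(df_client, 'infinite', 'impressions_daily',
                            'eventId', 'some-event-id', 'mh_default_org')
    print(daily_views if not daily_views.empty else 'no data for the given event')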
|
import cairo
import colorsys
TITLE_HEIGHT = 25
EVENT_LABEL_HEIGHT = 20
SAMPLE_HEIGHT = 40
COLOUR_BLACK = (0.1,0.1,0.1)
COLOUR_WHITE = (1,1,1)
LABEL_X_OFFSET = 4
LABEL_OFFSET_Y = 4
TEXT_SIZE_LABEL = 13
TEXT_SIZE_TITLE = 17
TEXT_SIZE_DURATION = 10
TEXT_LABEL_DURATION_OFFSET_Y = 20
TEXT_SIZE_EVENT_LABEL = 10
EVENT_LABEL_OFFSET_X = 1
EVENT_LABEL_OFFSET_Y = 1
COUNTER_ROW_HEIGHT = 100
class RenderContext:
def __init__(self, cr, width, height, start_time, finish_time, offset_x, offset_y):
self.cr = cr
self.width = float(width)
self.height = float(height)
self.start_time = start_time
self.finish_time = finish_time
self.offset_x = offset_x
self.offset_y = offset_y
self._duration = max(0.001, float(finish_time - start_time))
def get_x_for_time(self, time):
if time <= self.start_time:
return 0
elif time >= self.finish_time:
return self.width
else:
return (time-self.start_time) * self.width / self._duration
def is_sample_visible(self, sample):
return not((sample.get_finish_time() < self.start_time) or (sample.get_start_time() > self.finish_time))
def is_sample_off_right_of_screen(self, sample):
return sample.get_start_time() > self.finish_time
def is_event_visible(self, event_sample):
time = event_sample.get_time()
return (time > self.start_time) and (time < self.finish_time)
def is_event_off_right_of_screen(self, event_sample):
time = event_sample.get_time()
return (time > self.finish_time)
def render_text(cr, label, font_size, x, y, width = None):
# render label using x,y as top-left co-ords
cr.select_font_face("Arial", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
cr.set_font_size(font_size)
(label_x, label_y, label_width, label_height, label_dx, label_dy) = cr.text_extents(label)
if width and (label_width > (width-LABEL_X_OFFSET)):
return
cr.move_to(x + LABEL_X_OFFSET,y + LABEL_OFFSET_Y + label_height)
cr.show_text(label)
return (label_width, label_height)
def render_sample(render_context, sample, y):
if not render_context.is_sample_visible(sample):
return not render_context.is_sample_off_right_of_screen(sample)
cr = render_context.cr
start_time = sample.get_start_time()
finish_time = sample.get_finish_time()
start_x = render_context.get_x_for_time(start_time)
finish_x = render_context.get_x_for_time(finish_time)
width = finish_x - start_x
# make sure we always render at least something for a sample
width = max(width,0.5)
if width < 4:
        # filled rectangle for this sample + all of its children
call_stack_depth = sample.get_child_call_stack_depth() + 1
cr.set_source_rgb(*render_context.sample_colour)
cr.rectangle(start_x,y, width, SAMPLE_HEIGHT * call_stack_depth)
cr.fill()
else:
# filled rectangle
cr.set_source_rgb(*render_context.sample_colour)
cr.rectangle(start_x,y, width, SAMPLE_HEIGHT)
cr.fill()
# black outline
cr.set_source_rgb(*COLOUR_BLACK)
cr.rectangle(start_x,y, width, SAMPLE_HEIGHT)
cr.stroke()
if width > 10:
# function name
label = sample.get_function().get_label()
render_text(cr, label, TEXT_SIZE_LABEL, start_x, y, width)
duration = sample.get_duration()
duration_label = '%.3fms' % ( sample.get_duration() / 1000.0 )
render_text(cr, duration_label, TEXT_SIZE_DURATION, start_x, y + TEXT_LABEL_DURATION_OFFSET_Y, width)
# recursive calls
children = sample.get_children()
for child in children:
if not render_sample( render_context, child, y+SAMPLE_HEIGHT):
return False
return True
def render_event(render_context, event_sample, y, height):
if not render_context.is_event_visible(event_sample):
return not render_context.is_event_off_right_of_screen(event_sample)
cr = render_context.cr
time = event_sample.get_time()
x = render_context.get_x_for_time(time)
cr.set_source_rgb(*COLOUR_WHITE)
cr.move_to(x,y)
cr.line_to(x,y+height)
cr.set_line_width(3)
cr.stroke()
cr.set_source_rgb(*COLOUR_BLACK)
cr.move_to(x,y)
cr.line_to(x,y+height)
cr.set_line_width(2)
cr.stroke()
event = event_sample.get_event()
label = event.get_label()
(label_width, label_height) = render_text(cr, label, TEXT_SIZE_EVENT_LABEL, x + EVENT_LABEL_OFFSET_X, y + EVENT_LABEL_OFFSET_Y)
cr.move_to(x,y)
cr.line_to(x + EVENT_LABEL_OFFSET_X + label_width, y)
cr.stroke()
class ProfileRenderCounter:
def __init__(self, counter_data, colour, background_colour):
self._counter_data = counter_data
self._colour = colour
self._background_colour = background_colour
self._height = TITLE_HEIGHT + COUNTER_ROW_HEIGHT
def render(self, render_context):
cr = render_context.cr
counter_data = self._counter_data
# render background colour
cr.set_source_rgb(*self._background_colour)
cr.rectangle(0, 0, render_context.width, self._height)
cr.fill()
# render title
title = "Counter: " + counter_data.get_label()
cr.set_source_rgb(*COLOUR_BLACK)
render_text(cr, title, TEXT_SIZE_TITLE, 0, 0)
# render values
samples = counter_data.get_samples()
if len(samples) > 0:
cr.set_source_rgb(*self._colour)
max_value = counter_data.get_max_value()
min_value = counter_data.get_min_value()
y_scale = float(COUNTER_ROW_HEIGHT) / (max_value-min_value)
min_value_height = -min_value * y_scale
cr.translate(0, TITLE_HEIGHT)
def render_sample(cr, last_x, x, value):
value_height = float(value) * y_scale
if value >= 0:
cr.rectangle(last_x, COUNTER_ROW_HEIGHT - min_value_height - value_height, 1+x-last_x, value_height)
else:
value_height = -value_height
cr.rectangle(last_x, COUNTER_ROW_HEIGHT - min_value_height, 1+x-last_x, value_height)
cr.fill()
last_sample = samples[0]
for sample in samples:
last_x = render_context.get_x_for_time(last_sample.get_time())
x = render_context.get_x_for_time(sample.get_time())
value = last_sample.get_value()
render_sample(cr, last_x, x, value)
last_sample = sample
end_x = render_context.get_x_for_time(render_context.finish_time)
# render the last sample
last_x = render_context.get_x_for_time(last_sample.get_time())
value = last_sample.get_value()
render_sample(cr, last_x, end_x, value)
# render the x-axis
line_y = COUNTER_ROW_HEIGHT - min_value_height
cr.set_line_width(1)
cr.move_to(0, line_y)
cr.line_to(end_x, line_y)
cr.stroke()
def get_height(self):
return self._height
class ProfileRenderThread:
def __init__(self, thread_data, colour, background_colour):
self._thread_data = thread_data
self._colour = colour
self._background_colour = background_colour
self._height = TITLE_HEIGHT + EVENT_LABEL_HEIGHT + (self._thread_data.get_max_stack_depth() * SAMPLE_HEIGHT)
def render(self, render_context):
cr = render_context.cr
# render background colour
cr.set_source_rgb(*self._background_colour)
cr.rectangle(0, 0, render_context.width, self._height)
cr.fill()
# render title
title = "Thread: " + self._thread_data.get_label()
cr.set_source_rgb(*COLOUR_BLACK)
render_text(cr, title, TEXT_SIZE_TITLE, 0, 0)
# render samples
render_context.sample_colour = self._colour
samples = self._thread_data.get_samples()
for sample in samples:
if not render_sample(render_context, sample, TITLE_HEIGHT + EVENT_LABEL_HEIGHT):
break
# render events
event_samples = self._thread_data.get_event_samples()
event_height = self.get_height()
for event_sample in event_samples:
if not render_event(render_context, event_sample, TITLE_HEIGHT, event_height):
break
def get_height(self):
""" return the height of this thread on screen, in pixels """
return self._height
class ProfileRenderObjects:
def __init__(self, profile_data):
self._counters = []
self._threads = []
num_counters = profile_data.get_num_counters()
num_threads = profile_data.get_num_threads()
num_rows = num_counters + num_threads
row_index_mutable = [0]
def get_row_colours():
row_index = row_index_mutable[0]
background_colour = (1.0,1.0,1.0) if (row_index % 2) else (243.0/255.0,245.0/255.0,220.0/255.0)
colour = colorsys.hls_to_rgb(float(row_index+1) / float(num_rows), 0.5, 0.5)
row_index_mutable[0] += 1
return (background_colour, colour)
for i in range(num_counters):
counter_data = profile_data.get_counter(i)
(background_colour, colour) = get_row_colours()
render_counter = ProfileRenderCounter(counter_data, colour, background_colour)
self._counters.append( render_counter )
for i in range(num_threads):
thread_data = profile_data.get_thread(i)
(background_colour, colour) = get_row_colours()
render_thread = ProfileRenderThread(thread_data, colour, background_colour)
self._threads.append( render_thread )
self._render_height = self._calculate_render_height()
def _render_background(self, render_context):
# Fill the background with white
cr = render_context.cr
cr.set_source_rgb(1.0, 1.0, 1.0)
cr.rectangle(0, 0, render_context.width, render_context.height)
cr.fill()
def render(self, render_context):
cr = render_context.cr
self._render_background(render_context)
self._render_counters(render_context)
self._render_threads(render_context)
def _render_counters(self, render_context):
cr = render_context.cr
offset_x = render_context.offset_x
offset_y = render_context.offset_y
for render_counter in self._counters:
if offset_y > render_context.height:
break
if (offset_y + render_counter.get_height()) > 0:
cr.save()
cr.translate(offset_x,offset_y)
render_counter.render(render_context)
cr.restore()
offset_y += render_counter.get_height()
render_context.offset_x = offset_x
render_context.offset_y = offset_y
def _render_threads(self, render_context):
cr = render_context.cr
offset_x = render_context.offset_x
offset_y = render_context.offset_y
for render_thread in self._threads:
if offset_y > render_context.height:
break
if (offset_y + render_thread.get_height()) > 0:
cr.save()
cr.translate(offset_x,offset_y)
render_thread.render(render_context)
cr.restore()
offset_y += render_thread.get_height()
render_context.offset_x = offset_x
render_context.offset_y = offset_y
def _calculate_render_height(self):
# get the combined height of all the render counters & threads
render_height = 0
for counter in self._counters:
render_height += counter.get_height()
for thread in self._threads:
render_height += thread.get_height()
return render_height
def get_render_height(self):
return self._render_height
class ProfileRender:
""" Render the data for a profiling session """
def __init__(self, profile_data):
self._width = 0.0
self._height = 0.0
self._profile_data = profile_data
self._profile_data_objects = ProfileRenderObjects(profile_data)
self._offset_y = 0
# initialise times at the left + right edges of the window
self._start_time = profile_data.get_start_time()
self._finish_time = profile_data.get_finish_time()
def render(self, cr):
offset_y = self._offset_y
offset_x = 0
render_context = RenderContext( cr, self._width, self._height, self._start_time, self._finish_time, offset_x, offset_y)
self._profile_data_objects.render(render_context )
def render_pointer(self, cr, pointer):
(x,y) = pointer
t = self._get_time_at_x(x)
cr.set_source_rgb(0.0, 0.0, 0.0)
cr.move_to(x,0)
cr.line_to(x, self._height)
cr.stroke()
def resize(self, width, height):
self._width = float(width)
self._height = float(height)
self._validate_viewport()
def pan_by(self, dx, dy):
dt = self._get_dt_for_dx( dx )
if dt > 0:
dt = min(dt, self._start_time - self._profile_data.get_start_time())
else:
dt = max(dt, self._finish_time - self._profile_data.get_finish_time())
self._start_time -= dt
self._finish_time -= dt
self._offset_y += dy
self._validate_viewport()
def scale_at(self, scale_factor, x, y):
x = float(x)
x_time = self._get_time_at_x(x)
self._start_time = x_time - ((x_time - self._start_time) / scale_factor)
self._finish_time = x_time + ((self._finish_time - x_time) / scale_factor)
self._validate_viewport()
def _get_time_at_x(self, x):
if x <= 0:
return self._start_time
elif x >= self._width:
return self._finish_time
else:
duration = self._finish_time - self._start_time
return ((x/self._width) * duration) + self._start_time
def _get_dt_for_dx(self, dx):
time_per_pixel = (self._finish_time - self._start_time) / self._width
dt = dx * time_per_pixel
return dt
def _validate_viewport(self):
# validate start / finish time
profile_start_time = self._profile_data.get_start_time()
profile_finish_time = self._profile_data.get_finish_time()
if self._start_time < profile_start_time:
self._start_time = profile_start_time
if self._finish_time > profile_finish_time:
self._finish_time = profile_finish_time
# validate offset_y
profile_render_height = self._profile_data_objects.get_render_height()
offset_y = self._offset_y
bottom = self._offset_y + profile_render_height
if bottom < self._height:
offset_bottom = self._height - bottom
offset_y += offset_bottom
offset_y = min(0, offset_y)
self._offset_y = offset_y
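# A minimal off-screen rendering sketch, not part of the library itself. The stub profile
# below is an assumption: it implements only the accessors that ProfileRender and
# ProfileRenderObjects call, which is enough to render the empty (all-white) background.
class _EmptyProfileData:
    def get_num_counters(self):
        return 0
    def get_num_threads(self):
        return 0
    def get_start_time(self):
        return 0.0
    def get_finish_time(self):
        return 1000.0
if __name__ == '__main__':
    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 800, 600)
    cr = cairo.Context(surface)
    renderer = ProfileRender(_EmptyProfileData())
    renderer.resize(800, 600)
    renderer.render(cr)
    surface.write_to_png('profile.png')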
|
<gh_stars>1-10
from selectolax.parser import Node as sNode
from selectolax.parser import HTMLParser
__version__ = '0.0.3'
html_tags = [
"figcaption",
"blockquote",
"textarea",
"progress",
"optgroup",
"noscript",
"fieldset",
"datalist",
"colgroup",
"summary",
"section",
"details",
"command",
"caption",
"article",
"address",
"submit",
"strong",
"source",
"select",
"script",
"output",
"option",
"legend",
"keygen",
"iframe",
"hgroup",
"header",
"footer",
"figure",
"canvas",
"button",
"video",
"track",
"title",
"thead",
"tfoot",
"tbody",
"table",
"style",
"small",
"param",
"meter",
"label",
"input",
"audio",
"aside",
"applet",
"object",
"basefont",
"center",
"embed",
"isindex",
"listing",
"menuitem",
"plaintext",
"strike",
"template",
"picture",
"dialog",
"time",
"span",
"samp",
"ruby",
"meta",
"menu",
"mark",
"link",
"html",
"head",
"form",
"font",
"code",
"cite",
"body",
"base",
"area",
"abbr",
"main",
"dir",
"wbr",
"var",
"sup",
"sub",
"nav",
"map",
"kbd",
"ins",
"img",
"div",
"dfn",
"del",
"col",
"bdo",
"bdi",
"pre",
"xmp",
"ul",
"tr",
"th",
"td",
"rt",
"rp",
"ol",
"li",
"hr",
"h6",
"h5",
"h4",
"h3",
"h2",
"h1",
"em",
"dt",
"dl",
"dd",
"br",
"u",
"s",
"q",
"p",
"i",
"b",
"a",
]
# big, blink, bold, tt, var, frameset
html_attributes = [
"accept",
"accesskey",
"loading",
"action",
"align",
"alt",
"async",
"autocomplete",
"autofocus",
"autoplay",
"bgcolor",
"border",
"charset",
"checked",
"cite",
"class",
"color",
"cols",
"colspan",
"content",
"contenteditable",
"controls",
"coords",
"data",
"datetime",
"default",
"defer",
"dir",
"dirname",
"disabled",
"download",
"draggable",
"enctype",
"for",
"form",
"formaction",
"headers",
"height",
"hidden",
"high",
"href",
"hreflang",
"id",
"ismap",
"kind",
"label",
"lang",
"list",
"loop",
"low",
"max",
"maxlength",
"media",
"method",
"min",
"multiple",
"muted",
"name",
"novalidate",
"onabort",
"onafterprint",
"onbeforeprint",
"onbeforeunload",
"onblur",
"oncanplay",
"oncanplaythrough",
"onchange",
"onclick",
"oncontextmenu",
"oncopy",
"oncuechange",
"oncut",
"ondblclick",
"ondrag",
"ondragend",
"ondragenter",
"ondragleave",
"ondragover",
"ondragstart",
"ondrop",
"ondurationchange",
"onemptied",
"onended",
"onerror",
"onfocus",
"onhashchange",
"oninput",
"oninvalid",
"onkeydown",
"onkeypress",
"onkeyup",
"onload",
"onloadeddata",
"onloadedmetadata",
"onloadstart",
"onmousedown",
"onmousemove",
"onmouseout",
"onmouseover",
"onmouseup",
"onmousewheel",
"onoffline",
"ononline",
"onpagehide",
"onpageshow",
"onpaste",
"onpause",
"onplay",
"onplaying",
"onpopstate",
"onprogress",
"onratechange",
"onreset",
"onresize",
"onscroll",
"onsearch",
"onseeked",
"onseeking",
"onselect",
"onstalled",
"onstorage",
"onsubmit",
"onsuspend",
"ontimeupdate",
"ontoggle",
"onunload",
"onvolumechange",
"onwaiting",
"onwheel",
"open",
"optimum",
"pattern",
"placeholder",
"poster",
"preload",
"readonly",
"rel",
"required",
"reversed",
"rows",
"rowspan",
"sandbox",
"scope",
"selected",
"shape",
"size",
"sizes",
"span",
"spellcheck",
"src",
"srcdoc",
"srclang",
"srcset",
"start",
"step",
"style",
"tabindex",
"target",
"title",
"translate",
"type",
"usemap",
"value",
"width",
"wrap",
"property",
"integrity",
"crossorigin",
"nonce",
"autocapitalize",
"enterkeyhint",
"inputmode",
"is",
"itemid",
"itemprop",
"itemref",
"itemscope",
"itemtype",
"part",
"slot",
"spellcheck",
"alink",
"nowrap",
"vlink",
"vspace",
"language",
"clear",
"hspace",
"xmlns",
"about",
"allowtransparency",
"datatype",
"inlist",
"prefix",
"resource",
"rev",
"typeof",
"vocab", # rdfa
"playsinline",
"autopictureinpicture",
"buffered",
"controlslist",
"disableremoteplayback", # video
]
class Node():
ELEMENT_NODE: int = 1
TEXT_NODE: int = 3
CDATA_SECTION_NODE: int = 4
PROCESSING_INSTRUCTION_NODE: int = 7
COMMENT_NODE: int = 8
DOCUMENT_NODE: int = 9
DOCUMENT_TYPE_NODE: int = 10
DOCUMENT_FRAGMENT_NODE: int = 11
'''
def __format__(self, format_spec):
# return f"<{self.name}{self.__attributes}>{self.content}</{self.name}>"
# http://tidy.sourceforge.net/docs/quickref.html
BASE_OPTIONS = {
# "indent": 1, # Pretty; not too much of a performance hit
# "tidy-mark": 0, # No tidy meta tag in output
# "wrap": 0, # No wrapping
# "alt-text": "", # Help ensure validation
# "doctype": 'strict', # Little sense in transitional for tool-generated markup...
# "force-output": 1, # May not get what you expect but you will get something
# HTML, XHTML, XML Options Reference
# 'anchor-as-name': 0, #?,
'doctype': 'auto',
'drop-empty-paras': 0,
'fix-backslash': 0,
'fix-bad-comments': 0,
'fix-uri':0,
'hide-endtags': 1, #?,
'input-xml': 1, #?,
'join-styles': 0,
'literal-attributes': 1,
'lower-literals': 0,
'merge-divs': 0,
# 'merge-spans': 0,
'output-html': 1,
# 'preserve-entities': 1,
'quote-ampersand': 0,
'quote-nbsp': 0,
# 'show-body-only': 'auto',
# Diagnostics Options Reference
'show-errors': 0,
'show-warnings': 0,
# Pretty Print Options Reference
'break-before-br': 1,
'indent': 1,
'indent-attributes': 0, #default,
'indent-spaces': 4,
'tab-size': 4,
'wrap': 132,
'wrap-asp': 0,
'wrap-jste': 0,
'wrap-php': 0,
'wrap-sections': 0,
# Character Encoding Options Reference
'char-encoding': 'utf8',
# Miscellaneous Options Reference
'force-output': 1,
'quiet': 1,
'tidy-mark': 0,
}
# from tidylib import tidy_document
# import tidylib
# tidylib.BASE_OPTIONS = BASE_OPTIONS
# tidylib.BASE_OPTIONS = {}
# print('BEFORE::', self._node.html)
# document, errors = tidy_document(self._node.html,
# options=BASE_OPTIONS)
# print document
# print errors
# https://github.com/nijel/utidylib
import tidy
doc = tidy.parseString(
self._node.html,
output_html=1,
# output_xhtml=1,
# add_xml_decl=1,
indent=1,
tidy_mark=0,
doctype="auto", #'html5' - fails?
)
# https://github.com/Kijewski/pytidyhtml5/blob/master/basic-sanity-test.py
return str(doc)
'''
def __str__(self):
# return f"<{self.name}{self.__attributes}>{self.content}</{self.name}>"
# return self._node.html
return self._node.html
def __init__(self, *args, **kwargs) -> None:
self.args = args
self.kwargs = kwargs
if getattr(self, 'name', None) is None:
self.name = ''
        # if the user doesn't prefix attribute kwargs with an underscore, add it for them (don't advertise this, as it still has issues)
new_kwargs = {}
for k, v in kwargs.items():
if k[0] != "_":
new_kwargs[f"_{k}"] = v
else:
new_kwargs[k] = v
self.kwargs = new_kwargs
try:
self.content = ''.join([each.__str__() for each in args])
self.__attributes__ = ''.join([''' %s="%s"''' % (key.split('_', 1)[1], value) for key, value in self.kwargs.items()])
except IndexError as e:
# from domonic.html import TemplateError
# raise TemplateError(e)
print('template error!')
raise Exception(e)
self._doc = HTMLParser(f"<{self.name}{self.__attributes__}>{self.content}</{self.name}>")#.root
if self.name == 'html':
self._node = self._doc
# elif self.name == 'head':
# self._node = self._doc
else:
self._node = self._doc.tags(self.name)[0]
# if self._doc.body.child is not None:
# self._node = self._doc.body.child
# print(dir(self._node))
# super().__init__(*args, **kwargs)
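# A hypothetical usage sketch (the element types such as `div`, `p` and `b` are generated
# further down in this module): positional arguments become children, keyword arguments
# prefixed with an underscore become HTML attributes, and str() returns the markup that
# selectolax produces for the assembled fragment, e.g.
#
#     card = div(p('Hello, ', b('world'), '!'), _class='card')
#     str(card)  # roughly '<div class="card"><p>Hello, <b>world</b>!</p></div>'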
# class closed_tag(Node):
# def __str__(self):
# return f"<{self.name}{self.__attributes__}/>"
class Element(Node):
# __slots__ = ('_id')
def __init__(self, *args, **kwargs):
# self.content = None
# self.attributes = None
# if self.hasAttribute('id'):
# self.id = self.id # ''#None
# self.lang = None
# self.tabIndex = None
# if self.hasAttribute('title'):
# self.title = self.title
# if self.hasAttribute('class'):
# self.className = self.className
# self.classList = self.classList
# self.tagName
self.style = None # Style(self) # = #'test'#Style()
# self.shadowRoot = None
# self.dir = None
super().__init__(*args, **kwargs)
class Document(Element,):
URL = None
def __init__(self, *args, **kwargs):
""" Constructor for Document objects """
self.args = args
self.kwargs = kwargs
# self.documentURI = uri
# self.documentElement = self
self.stylesheets = None
self.doctype = None
super().__init__(*args, **kwargs)
# try:
# global document
# document = self
# except Exception as e:
# print('failed to set document', e)
# document can be set manually but will get set each time a new Document is created.
# global document
# document = Document()
class HTMLElement(Element): # TODO - check
name = ''
class HTMLAnchorElement(HTMLElement): # TODO - check
name = 'a'
# def __init__(self, *args, **kwargs):
# self.url = urllib.parse.urlsplit(url)
# self.href = url # self.url.geturl()
# self.protocol = self.url.scheme
# self.hostname = self.url.hostname
# self.port = self.url.port
# self.host = self.url.hostname
# self.pathname = self.url.path
# self.hash = ''
# self.search = self.url.query
# self._searchParams = URLSearchParams(self.url.query)
# super().__init__(*args, **kwargs)
class HTMLAreaElement(HTMLElement): # TODO - check
name = 'area'
class HTMLAudioElement(HTMLElement):
name = 'audio'
class HTMLBRElement(HTMLElement):
name = 'br'
class HTMLBaseElement(HTMLElement):
name = 'base'
class HTMLBaseFontElement(HTMLElement): # TODO - check
name = 'basefont'
class HTMLBodyElement(HTMLElement):
name = 'body'
class HTMLButtonElement(HTMLElement):
name = 'button'
class HTMLCanvasElement(HTMLElement):
name = 'canvas'
class HTMLContentElement(HTMLElement): # TODO - check
name = 'content'
class HTMLDListElement(HTMLElement):
name = 'dl'
class HTMLDataElement(HTMLElement):
name = 'data'
class HTMLDataListElement(HTMLElement):
name = 'datalist'
class HTMLDialogElement(HTMLElement):
name = 'dialog'
class HTMLDivElement(HTMLElement):
name = 'div'
class HTMLDocument(Document):
name = 'html'
class HTMLEmbedElement(HTMLElement):
name = 'embed'
class HTMLFieldSetElement(HTMLElement): # TODO - check
name = 'fieldset'
class HTMLFormControlsCollection(HTMLElement): # TODO - check
name = 'formcontrols'
class HTMLFormElement(HTMLElement):
name = 'form'
class HTMLFrameSetElement(HTMLElement): # TODO - check
name = 'frameset'
class HTMLHRElement(HTMLElement):
name = 'hr'
class HTMLHeadElement(HTMLElement):
name = 'head'
class HTMLHeadingElement(HTMLElement):
name = 'h1'
class HTMLIFrameElement(HTMLElement):
name = 'iframe'
class HTMLImageElement(HTMLElement):
name = 'img'
class HTMLInputElement(HTMLElement):
name = 'input'
class HTMLIsIndexElement(HTMLElement): # TODO - check
name = ''
class HTMLKeygenElement(HTMLElement):
name = 'keygen'
class HTMLLIElement(HTMLElement):
name = 'li'
class HTMLLabelElement(HTMLElement):
name = 'label'
class HTMLLegendElement(HTMLElement):
name = 'legend'
class HTMLLinkElement(HTMLElement):
name = 'link'
class HTMLMapElement(HTMLElement): # TODO - check
name = 'map'
class HTMLMediaElement(HTMLElement): # TODO - check
name = 'media'
class HTMLMetaElement(HTMLElement):
name = 'meta'
class HTMLMeterElement(HTMLElement):
name = 'meter'
class HTMLModElement(HTMLElement): # TODO - check
name = 'mod'
class HTMLOListElement(HTMLElement):
name = 'ol'
class HTMLObjectElement(HTMLElement):
name = 'object'
class HTMLOptGroupElement(HTMLElement):
name = 'optgroup'
class HTMLOptionElement(HTMLElement):
name = 'option'
# class HTMLOptionsCollection(HTMLElement): # TODO - check
# name = 'options'
class HTMLOutputElement(HTMLElement):
name = 'output'
class HTMLParagraphElement(HTMLElement):
name = 'p'
class HTMLParamElement(HTMLElement): # TODO - check
name = 'param'
class HTMLPictureElement(HTMLElement):
name = 'picture'
class HTMLPreElement(HTMLElement):
name = 'pre'
class HTMLProgressElement(HTMLElement):
name = 'progress'
class HTMLQuoteElement(HTMLElement): # TODO - check
name = 'q'
class HTMLScriptElement(HTMLElement):
name = 'script'
class HTMLSelectElement(HTMLElement):
name = 'select'
class HTMLShadowElement(HTMLElement): # TODO - check
name = 'shadow'
class HTMLSourceElement(HTMLElement): # TODO - check
name = 'source'
class HTMLSpanElement(HTMLElement):
name = 'span'
class HTMLStyleElement(HTMLElement):
name = 'style'
class HTMLTableCaptionElement(HTMLElement): # TODO - check
name = 'caption'
class HTMLTableCellElement(HTMLElement): # TODO - check
name = 'td'
class HTMLTableColElement(HTMLElement):
name = 'col'
class HTMLTableDataCellElement(HTMLElement): # TODO - check
name = 'td'
class HTMLTableElement(HTMLElement):
name = 'table'
class HTMLTableHeaderCellElement(HTMLElement):
name = 'th'
class HTMLTableRowElement(HTMLElement):
name = 'tr'
class HTMLTableSectionElement(HTMLElement):
name = 'tbody'
class HTMLTemplateElement(HTMLElement): # TODO - check
name = 'template'
class HTMLTextAreaElement(HTMLElement):
name = 'textarea'
class HTMLTimeElement(HTMLElement):
name = 'time'
class HTMLTitleElement(HTMLElement):
name = 'title'
class HTMLTrackElement(HTMLElement):
name = 'track'
class HTMLUListElement(HTMLElement):
name = 'ul'
class HTMLUnknownElement(HTMLElement):
name = 'unknown'
class HTMLVideoElement(HTMLElement):
name = 'video'
html = type('html', (Document,), {'name': 'html'})
body = type('body', (Element,), {'name': 'body'})
head = type('head', (Element,), {'name': 'head'})
script = type('script', (Element,), {'name': 'script'})
style = type('style', (Element,), {'name': 'style'})
h1 = type('h1', (Element,), {'name': 'h1'})
h2 = type('h2', (Element,), {'name': 'h2'})
h3 = type('h3', (Element,), {'name': 'h3'})
h4 = type('h4', (Element,), {'name': 'h4'})
h5 = type('h5', (Element,), {'name': 'h5'})
h6 = type('h6', (Element,), {'name': 'h6'})
p = type('p', (Element,), {'name': 'p'})
i = type('i', (Element,), {'name': 'i'})
b = type('b', (Element,), {'name': 'b'})
a = type('a', (HTMLAnchorElement,), {'name': 'a'})
ul = type('ul', (Element,), {'name': 'ul'})
ol = type('ol', (Element,), {'name': 'ol'})
li = type('li', (Element,), {'name': 'li'})
hr = type('hr', (Element,), {'name': 'hr'})
img = type('img', (Element,), {'name': 'img'})
div = type('div', (Element,), {'name': 'div'})
span = type('span', (Element,), {'name': 'span'})
strong = type('strong', (Element,), {'name': 'strong'})
blockquote = type('blockquote', (Element,), {'name': 'blockquote'})
table = type('table', (Element,), {'name': 'table'})
tr = type('tr', (Element,), {'name': 'tr'})
td = type('td', (Element,), {'name': 'td'})
title = type('title', (Element,), {'name': 'title'})
# meta = type('meta', (Element,), {'name': 'meta'})
form = type('form', (Element,), {'name': 'form'})
label = type("label", (Element,), {"name": "label"})
submit = type("submit", (Element,), {"name": "submit"})
# title = type("title", (Element,), {"name": "title"})
noscript = type("noscript", (Element,), {"name": "noscript"})
section = type("section", (Element,), {"name": "section"})
nav = type("nav", (Element,), {"name": "nav"})
article = type("article", (Element,), {"name": "article"})
aside = type("aside", (Element,), {"name": "aside"})
hgroup = type("hgroup", (Element,), {"name": "hgroup"})
address = type("address", (Element,), {"name": "address"})
pre = type("pre", (Element,), {"name": "pre"})
dl = type("dl", (Element,), {"name": "dl"})
dt = type("dt", (Element,), {"name": "dt"})
dd = type("dd", (Element,), {"name": "dd"})
figure = type("figure", (Element,), {"name": "figure"})
figcaption = type("figcaption", (Element,), {"name": "figcaption"})
em = type("em", (Element,), {"name": "em"})
small = type("small", (Element,), {"name": "small"})
s = type("s", (Element,), {"name": "s"})
cite = type("cite", (Element,), {"name": "cite"})
q = type("q", (Element,), {"name": "q"})
dfn = type("dfn", (Element,), {"name": "dfn"})
abbr = type("abbr", (Element,), {"name": "abbr"})
code = type("code", (Element,), {"name": "code"})
var = type("var", (Element,), {"name": "var"})
samp = type("samp", (Element,), {"name": "samp"})
kbd = type("kbd", (Element,), {"name": "kbd"})
sub = type("sub", (Element,), {"name": "sub"})
sup = type("sup", (Element,), {"name": "sup"})
u = type("u", (Element,), {"name": "u"})
mark = type("mark", (Element,), {"name": "mark"})
ruby = type("ruby", (Element,), {"name": "ruby"})
rt = type("rt", (Element,), {"name": "rt"})
rp = type("rp", (Element,), {"name": "rp"})
bdi = type("bdi", (Element,), {"name": "bdi"})
bdo = type("bdo", (Element,), {"name": "bdo"})
span = type("span", (Element,), {"name": "span"})
ins = type("ins", (Element,), {"name": "ins"})
iframe = type("iframe", (Element,), {"name": "iframe"})
video = type("video", (Element,), {"name": "video"})
audio = type("audio", (Element,), {"name": "audio"})
canvas = type("canvas", (Element,), {"name": "canvas"})
caption = type("caption", (Element,), {"name": "caption"})
colgroup = type("colgroup", (Element,), {"name": "colgroup"})
tbody = type("tbody", (Element,), {"name": "tbody"})
thead = type("thead", (Element,), {"name": "thead"})
tfoot = type("tfoot", (Element,), {"name": "tfoot"})
th = type("th", (Element,), {"name": "th"})
fieldset = type("fieldset", (Element,), {"name": "fieldset"})
legend = type("legend", (Element,), {"name": "legend"})
button = type("button", (Element,), {"name": "button"})
select = type("select", (Element,), {"name": "select"})
datalist = type("datalist", (Element,), {"name": "datalist"})
optgroup = type("optgroup", (Element,), {"name": "optgroup"})
option = type("option", (Element,), {"name": "option"})
textarea = type("textarea", (Element,), {"name": "textarea"})
output = type("output", (Element,), {"name": "output"})
progress = type("progress", (Element,), {"name": "progress"})
meter = type("meter", (Element,), {"name": "meter"})
details = type("details", (Element,), {"name": "details"})
summary = type("summary", (Element,), {"name": "summary"})
menu = type("menu", (Element,), {"name": "menu"})
menuitem = type("menuitem", (Element,), {"name": "menuitem"}) # dead but may be used
font = type("font", (Element,), {"name": "font"})
header = type("header", (Element,), {"name": "header"})
footer = type("footer", (Element,), {"name": "footer"})
# CLOSED TAGS
base = type("base", (Element,), {"name": "base"})
link = type("link", (Element,), {"name": "link"}) # HTMLLinkElement TODO - closed tags
meta = type("meta", (Element,), {"name": "meta"}) # HTMLMetaElement TODO - closed tags
hr = type("hr", (Element,), {"name": "hr"})
br = type("br", (Element,), {"name": "br"})
wbr = type("wbr", (Element,), {"name": "wbr"})
img = type("img", (Element,), {"name": "img"}) # HTMLImageElement TODO - closed tags
param = type("param", (Element,), {"name": "param"})
source = type("source", (Element,), {"name": "source"})
track = type("track", (Element,), {"name": "track"})
area = type("area", (Element,), {"name": "area"})
col = type("col", (Element,), {"name": "col"})
input = type("input", (Element,), {"name": "input"})
keygen = type("keygen", (Element,), {"name": "keygen"})
command = type("command", (Element,), {"name": "command"})
main = type("main", (Element,), {"name": "main"})
# obsolete
applet = type("applet", (Element,), {"name": "applet"})
# object = type('object', (Element,), {'name': 'object'})
basefont = type("basefont", (Element,), {"name": "basefont"})
center = type("center", (Element,), {"name": "center"})
# dir = type('dir', (Element,), {'name': 'dir'})
embed = type("embed", (Element,), {"name": "embed"})
isindex = type("isindex", (Element,), {"name": "isindex"})
listing = type("listing", (Element,), {"name": "listing"})
plaintext = type("plaintext", (Element,), {"name": "plaintext"})
s = type("s", (Element,), {"name": "s"})
u = type("u", (Element,), {"name": "u"})
strike = type("strike", (Element,), {"name": "strike"})
xmp = type("xmp", (Element,), {"name": "xmp"})
template = type("template", (Element,), {"name": "template"})
picture = type("picture", (Element,), {"name": "picture"})
dialog = type("dialog", (Element,), {"name": "dialog"})
class Comment(Node):
nodeType: int = Node.COMMENT_NODE
    __slots__ = ('data',)
def __init__(self, data) -> None:
self.data = data
super().__init__()
def toString(self) -> str:
return f'<!--{self.data}-->'
__str__ = toString
class CDATASection(Node):
nodeType: int = Node.CDATA_SECTION_NODE
    __slots__ = ('data',)
    def __init__(self, data) -> None:
        self.data = data
        super().__init__()  # run the base Node initialization, matching Comment above
def toString(self) -> str:
return f'<![CDATA[{self.data}]]>'
__str__ = toString
class DocumentType(Node):
nodeType = Node.DOCUMENT_TYPE_NODE
__slots__ = ('name', 'publicId', 'systemId')
def __init__(self, name: str = "html", publicId: str = "", systemId: str = "") -> None:
self.name: str = name # A DOMString, eg "html" for <!DOCTYPE HTML>.
self.publicId: str = publicId # eg "-//W3C//DTD HTML 4.01//EN", empty string for HTML5.
self.systemId: str = systemId # eg "http://www.w3.org/TR/html4/strict.dtd", empty string for HTML5.
super().__init__()
def internalSubset(self):
''' A DOMString of the internal subset, or None. Eg "<!ELEMENT foo (bar)>".'''
if self.systemId:
return self.systemId
else:
return None
# def notations(self) -> NamedNodeMap:
# """ A NamedNodeMap with notations declared in the DTD. """
# nnm = NamedNodeMap()
# for item in self.ownerDocument.args:
# if item.nodeType == Node.NOTATION_NODE:
# nnm.append(item)
# return nnm
# @property
# def nodeType(self):
# return Node.DOCUMENT_TYPE_NODE
def __str__(self) -> str:
return f"<!DOCTYPE {self.name} {self.publicId} {self.systemId}>"
doctype = DocumentType
comment = Comment
def create_element(name="custom_tag", *args, **kwargs):
"""
A method for creating custom tags
tag name needs to be set due to custom tags with hyphens can't be classnames.
i.e. hypenated tags <some-custom-tag></some-custom-tag>
"""
# checks if already exists
if name in html_tags:
return globals()[name](*args, **kwargs)
custom_tag = type("custom_tag", (Element,), {"name": name})
new_tag = custom_tag(*args, **kwargs)
new_tag.name = name
return new_tag
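# Illustrative usage sketch (not part of the original module); it assumes, as the
# lookup above implies, that 'div' is listed in html_tags and that Element subclasses
# can be instantiated with no arguments:
#
#   >>> widget = create_element('my-widget')   # hyphenated name, so a fresh type is built
#   >>> widget.name
#   'my-widget'
#   >>> tag = create_element('div')            # known tag, so the existing div class is reused
#   >>> isinstance(tag, div)
#   True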
|
<gh_stars>1-10
from __future__ import print_function
from sympy import symbols, sin, cos, sinh, cosh, trigsimp, S
from galgebra.printer import Format, xpdf, Get_Program, Print_Function
from galgebra.ga import Ga
def Product_of_Rotors():
Print_Function()
(na,nb,nm,alpha,th,th_a,th_b) = symbols('n_a n_b n_m alpha theta theta_a theta_b',\
real = True)
g = [[na, 0, alpha],[0, nm, 0],[alpha, 0, nb]] #metric tensor
"""
Values of metric tensor components
[na,nm,nb] = [+1/-1,+1/-1,+1/-1] alpha = ea|eb
"""
(g3d, ea, em, eb) = Ga.build('e_a e_m e_b', g=g)
print('g =',g3d.g)
print(r'%n_{a} = \bm{e}_{a}^{2}\;\;n_{b} = \bm{e}_{b}^{2}\;\;n_{m} = \bm{e}_{m}^{2}'+\
r'\;\;\alpha = \bm{e}_{a}\cdot\bm{e}_{b}')
(ca,cb,sa,sb) = symbols('c_a c_b s_a s_b',real=True)
Ra = ca + sa*ea*em # Rotor for ea^em plane
Rb = cb + sb*em*eb # Rotor for em^eb plane
print(r'%\mbox{Rotor in }\bm{e}_{a}\bm{e}_{m}\mbox{ plane } R_{a} =',Ra)
print(r'%\mbox{Rotor in }\bm{e}_{m}\bm{e}_{b}\mbox{ plane } R_{b} =',Rb)
Rab = Ra*Rb # Compound Rotor
"""
Show that compound rotor is scalar plus bivector
"""
print(r'%R_{a}R_{b} = S+\bm{B} =', Rab)
Rab2 = Rab.get_grade(2)
print(r'%\bm{B} =',Rab2)
Rab2sq = Rab2*Rab2 # Square of compound rotor bivector part
Ssq = (Rab.scalar())**2 # Square of compound rotor scalar part
Bsq = Rab2sq.scalar()
print(r'%S^{2} =',Ssq)
print(r'%\bm{B}^{2} =',Bsq)
Dsq = (Ssq-Bsq).expand().simplify()
print('%S^{2}-B^{2} =', Dsq)
Dsq = Dsq.subs(nm**2,S(1)) # (e_m)**4 = 1
print('%S^{2}-B^{2} =', Dsq)
Cases = [S(-1),S(1)] # -1/+1 squares for each basis vector
print(r'#Consider all combinations of $\bm{e}_{a}^{2}$, $\bm{e}_{b}^{2}$'+\
r' and $\bm{e}_{m}^2$:')
for Na in Cases:
for Nb in Cases:
for Nm in Cases:
Ba_sq = -Na*Nm
Bb_sq = -Nb*Nm
if Ba_sq < 0:
Ca_th = cos(th_a)
Sa_th = sin(th_a)
else:
Ca_th = cosh(th_a)
Sa_th = sinh(th_a)
if Bb_sq < 0:
Cb_th = cos(th_b)
Sb_th = sin(th_b)
else:
Cb_th = cosh(th_b)
Sb_th = sinh(th_b)
print(r'%\left [ \bm{e}_{a}^{2},\bm{e}_{b}^{2},\bm{e}_{m}^2\right ] =',\
[Na,Nb,Nm])
Dsq_tmp = Dsq.subs({ca:Ca_th,sa:Sa_th,cb:Cb_th,sb:Sb_th,na:Na,nb:Nb,nm:Nm})
print(r'%S^{2}-\bm{B}^{2} =',Dsq_tmp,' =',trigsimp(Dsq_tmp))
print(r'#Thus we have shown that $R_{a}R_{b} = S+\bm{D} = e^{\bm{C}}$ where $\bm{C}$'+\
r' is a bivector blade.')
return
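# A brief restatement of what the loop above checks (illustrative comment, not part of
# the original script): after substituting circular functions when a rotor's bivector
# squares to -1 and hyperbolic functions when it squares to +1, the printed quantity
# S^2 - B^2 collapses to unity for every signature choice, as the concluding print
# statement asserts; that is the normalization which lets the compound rotor
# R_a R_b = S + B be rewritten as exp(C) with C a bivector blade.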
def dummy():
return
def main():
Get_Program()
Format()
Product_of_Rotors()
# xpdf(paper=(8.5,11))
xpdf(pdfprog=None, paper=(8.5,11))
return
if __name__ == "__main__":
main()
|
<gh_stars>1-10
'''
Custom tags. Make sure you register new custom tags at the bottom.
'''
import bs4
from inline_markdown import inline_markdown_parser, soup
src_options = ["url", "href", "src", ""]
def _get_src(tagline):
opts = tagline["options"]
link = ""
for key in src_options:
if key in opts:
link = opts[key]
opts['_' + key] = link
del opts[key]
return link
elif '_' + key in opts:
return opts['_' + key]
return link
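# Illustrative sketch of how _get_src consumes a tagline's options (not part of the
# original module): the first matching key is popped and cached under a leading
# underscore alias so that a second lookup on the same tagline resolves the same link.
#
#   >>> tagline = {"options": {"src": "cat.png"}}
#   >>> _get_src(tagline)
#   'cat.png'
#   >>> tagline["options"]
#   {'_src': 'cat.png'}
#   >>> _get_src(tagline)   # second call is served from the '_src' cache
#   'cat.png'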
def background_video(tagline):
tag = soup.new_tag("video")
tag["autoplay"] = None
tag["loop"] = None
tag["muted"] = None
tag["class"] = ["background-video"]
source = soup.new_tag("source")
source['src'] = _get_src(tagline)
tag.append(source)
return tag
def background(tagline):
tag = soup.new_tag("span")
tag["class"] = ["background", ]
url = _get_src(tagline)
tag["style"] = '''background-image:url('{}')'''. format(url)
return tag
def unsplash(tagline):
key = _get_src(tagline)
tagline['options']["url"] = "https://source.unsplash.com/{}".format(key)
return background(tagline)
def figure(tagline):
tag = soup.new_tag("figure")
img = soup.new_tag("img")
img['src'] = _get_src(tagline)
img['style'] = []
    if 'height' in tagline["options"]:
        val = tagline["options"].pop('height')
        try:
            float(val)  # bare numbers are treated as percentages
            val += "%"
        except ValueError:  # values that already carry units pass through unchanged
            pass
        opt = "height:{}".format(val)
        img['style'].append(opt)
    if 'width' in tagline["options"]:
        val = tagline["options"].pop('width')
        try:
            float(val)
            val += "%"
        except ValueError:
            pass
        opt = "width:{}".format(val)
        img['style'].append(opt)
tag.append(img)
# Potential to add figure caption here!
if tagline["text"]:
caption = soup.new_tag("figcaption")
text = inline_markdown_parser(tagline["text"])
caption.append(bs4.BeautifulSoup(text, 'lxml'))
tagline["text"] = ""
tag.append(caption)
return tag
def img(tagline):
img = soup.new_tag("img")
img['src'] = _get_src(tagline)
img['style'] = []
    if 'height' in tagline["options"]:
        val = tagline["options"].pop('height')
        try:
            float(val)  # bare numbers are treated as percentages
            val += "%"
        except ValueError:  # values that already carry units pass through unchanged
            pass
        opt = "height:{}".format(val)
        img['style'].append(opt)
    if 'width' in tagline["options"]:
        val = tagline["options"].pop('width')
        try:
            float(val)
            val += "%"
        except ValueError:
            pass
        opt = "width:{}".format(val)
        img['style'].append(opt)
return img
def line(tagline):
return soup.new_tag("hr")
def button(tagline):
tag = soup.new_tag("a")
tag["class"] = ["button", ]
tag["href"] = _get_src(tagline)
return tag
def codeblock(tagline):
tag = soup.new_tag("pre")
tag["class"] = ["prettyprint", ]
tag.string = tagline["text"].replace('__CODE_BLOCK_SPACE', '\n').strip()
# tagline["text"] = None
tagline["text"] = ""
return tag
#
# Register new custom tags here
_registered_custom_tags = {
"background": background,
"unsplash": unsplash,
"background_video": background_video,
"line": line,
"button": button,
"codeblock": codeblock,
"figure": figure,
"img": img,
}
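# Illustrative dispatch sketch (not part of the original module): a caller such as the
# block-level parser in inline_markdown would presumably look the tag name up in
# _registered_custom_tags and hand the handler a parsed tagline dict, e.g.
#
#   tagline = {"options": {"src": "clip.mp4"}, "text": ""}
#   tag = _registered_custom_tags["background_video"](tagline)   # a <video class="background-video"> bs4 tag
#
# The exact shape of the tagline dict is an assumption inferred from how the handlers
# above read it ("options" mapping plus a "text" string).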
|
<filename>src/AB3DMOT/evaluation/evaluate_kitti3dmot.py
#!/usr/bin/env python
# encoding: utf-8
from __future__ import print_function
import matplotlib; matplotlib.use('Agg')
import sys, os, copy, math, numpy as np, matplotlib.pyplot as plt
from munkres import Munkres
from collections import defaultdict
try:
    from ordereddict import OrderedDict  # backport package, installable via pip for old Pythons
except ImportError:
    from collections import OrderedDict  # part of the standard library since Python 2.7
import mailpy
from box_util import boxoverlap, box3doverlap
num_sample_pts = 41.0
class tData:
"""
Utility class to load data.
"""
def __init__(self,frame=-1,obj_type="unset",truncation=-1,occlusion=-1,\
obs_angle=-10,x1=-1,y1=-1,x2=-1,y2=-1,w=-1,h=-1,l=-1,\
X=-1000,Y=-1000,Z=-1000,yaw=-10,score=-1000,track_id=-1):
"""
Constructor, initializes the object given the parameters.
"""
# init object data
self.frame = frame
self.track_id = track_id
self.obj_type = obj_type
self.truncation = truncation
self.occlusion = occlusion
self.obs_angle = obs_angle
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.w = w
self.h = h
self.l = l
self.X = X
self.Y = Y
self.Z = Z
self.yaw = yaw
self.score = score
self.ignored = False
self.valid = False
self.tracker = -1
def __str__(self):
"""
Print read data.
"""
attrs = vars(self)
return '\n'.join("%s: %s" % item for item in attrs.items())
class trackingEvaluation(object):
""" tracking statistics (CLEAR MOT, id-switches, fragments, ML/PT/MT, precision/recall)
MOTA - Multi-object tracking accuracy in [0,100]
MOTP - Multi-object tracking precision in [0,100] (3D) / [td,100] (2D)
MOTAL - Multi-object tracking accuracy in [0,100] with log10(id-switches)
id-switches - number of id switches
fragments - number of fragmentations
MT, PT, ML - number of mostly tracked, partially tracked and mostly lost trajectories
recall - recall = percentage of detected targets
precision - precision = percentage of correctly detected targets
FAR - number of false alarms per frame
falsepositives - number of false positives (FP)
missed - number of missed targets (FN)
"""
def __init__(self, t_sha, gt_path="./evaluation", max_truncation = 0, min_height = 25, max_occlusion = 2, mail=None, cls="car", eval_3diou=True, eval_2diou=False):
# get number of sequences and
# get number of frames per sequence from test mapping
# (created while extracting the benchmark)
filename_test_mapping = os.path.join(gt_path, 'evaluate_tracking.seqmap.val')
self.n_frames = []
self.sequence_name = []
with open(filename_test_mapping, "r") as fh:
for i,l in enumerate(fh):
fields = l.split(" ")
self.sequence_name.append("%04d" % int(fields[0]))
self.n_frames.append(int(fields[3]) - int(fields[2])+1)
fh.close()
self.n_sequences = i+1
# mail object
self.mail = mail
# class to evaluate, i.e. pedestrian or car
self.cls = cls
# data and parameter
self.gt_path = os.path.join(gt_path, "label")
self.t_sha = t_sha
self.t_path = os.path.join("./results", t_sha, "data")
# statistics and numbers for evaluation
self.n_gt = 0 # number of ground truth detections minus ignored false negatives and true positives
self.n_igt = 0 # number of ignored ground truth detections
self.n_gts = [] # number of ground truth detections minus ignored false negatives and true positives PER SEQUENCE
self.n_igts = [] # number of ground ignored truth detections PER SEQUENCE
self.n_gt_trajectories = 0
self.n_gt_seq = []
self.n_tr = 0 # number of tracker detections minus ignored tracker detections
self.n_trs = [] # number of tracker detections minus ignored tracker detections PER SEQUENCE
self.n_itr = 0 # number of ignored tracker detections
self.n_itrs = [] # number of ignored tracker detections PER SEQUENCE
self.n_igttr = 0 # number of ignored ground truth detections where the corresponding associated tracker detection is also ignored
self.n_tr_trajectories = 0
self.n_tr_seq = []
self.MOTA = 0
self.MOTP = 0
self.MOTAL = 0
self.MODA = 0
self.MODP = 0
self.MODP_t = []
self.recall = 0
self.precision = 0
self.F1 = 0
self.FAR = 0
self.total_cost = 0
self.itp = 0 # number of ignored true positives
self.itps = [] # number of ignored true positives PER SEQUENCE
self.tp = 0 # number of true positives including ignored true positives!
self.tps = [] # number of true positives including ignored true positives PER SEQUENCE
self.fn = 0 # number of false negatives WITHOUT ignored false negatives
self.fns = [] # number of false negatives WITHOUT ignored false negatives PER SEQUENCE
self.ifn = 0 # number of ignored false negatives
self.ifns = [] # number of ignored false negatives PER SEQUENCE
self.fp = 0 # number of false positives
# a bit tricky, the number of ignored false negatives and ignored true positives
# is subtracted, but if both tracker detection and ground truth detection
# are ignored this number is added again to avoid double counting
self.fps = [] # above PER SEQUENCE
self.mme = 0
self.fragments = 0
self.id_switches = 0
self.MT = 0
self.PT = 0
self.ML = 0
self.eval_2diou = eval_2diou
self.eval_3diou = eval_3diou
if eval_2diou:
self.min_overlap = 0.5 # minimum bounding box overlap for 3rd party metrics
elif eval_3diou:
self.min_overlap = 0.25 # minimum bounding box overlap for 3rd party metrics
else: assert False
        # print('min overlap criterion is %f' % self.min_overlap)
self.max_truncation = max_truncation # maximum truncation of an object for evaluation
self.max_occlusion = max_occlusion # maximum occlusion of an object for evaluation
self.min_height = min_height # minimum height of an object for evaluation
self.n_sample_points = 500
# this should be enough to hold all groundtruth trajectories
# is expanded if necessary and reduced in any case
self.gt_trajectories = [[] for x in range(self.n_sequences)]
self.ign_trajectories = [[] for x in range(self.n_sequences)]
def loadGroundtruth(self):
"""
Helper function to load ground truth.
"""
try:
self._loadData(self.gt_path, cls=self.cls, loading_groundtruth=True)
except IOError:
return False
return True
def loadTracker(self):
"""
Helper function to load tracker data.
"""
try:
if not self._loadData(self.t_path, cls=self.cls, loading_groundtruth=False):
return False
except IOError:
return False
return True
def _loadData(self, root_dir, cls, min_score=-1000, loading_groundtruth=False):
"""
Generic loader for ground truth and tracking data.
Use loadGroundtruth() or loadTracker() to load this data.
Loads detections in KITTI format from textfiles.
"""
# construct objectDetections object to hold detection data
t_data = tData()
data = []
eval_2d = True
eval_3d = True
seq_data = []
n_trajectories = 0
n_trajectories_seq = []
for seq, s_name in enumerate(self.sequence_name):
i = 0
filename = os.path.join(root_dir, "%s.txt" % s_name)
f = open(filename, "r")
f_data = [[] for x in range(self.n_frames[seq])] # current set has only 1059 entries, sufficient length is checked anyway
ids = []
n_in_seq = 0
id_frame_cache = []
for line in f:
# KITTI tracking benchmark data format:
# (frame,tracklet_id,objectType,truncation,occlusion,alpha,x1,y1,x2,y2,h,w,l,X,Y,Z,ry)
line = line.strip()
fields = line.split(" ")
# classes that should be loaded (ignored neighboring classes)
if "car" in cls.lower():
classes = ["car","van"]
elif "pedestrian" in cls.lower():
classes = ["pedestrian","person_sitting"]
else:
classes = [cls.lower()]
classes += ["dontcare"]
if not any([s for s in classes if s in fields[2].lower()]):
continue
# get fields from table
t_data.frame = int(float(fields[0])) # frame
t_data.track_id = int(float(fields[1])) # id
t_data.obj_type = fields[2].lower() # object type [car, pedestrian, cyclist, ...]
t_data.truncation = int(float(fields[3])) # truncation [-1,0,1,2]
t_data.occlusion = int(float(fields[4])) # occlusion [-1,0,1,2]
t_data.obs_angle = float(fields[5]) # observation angle [rad]
t_data.x1 = float(fields[6]) # left [px]
t_data.y1 = float(fields[7]) # top [px]
t_data.x2 = float(fields[8]) # right [px]
t_data.y2 = float(fields[9]) # bottom [px]
t_data.h = float(fields[10]) # height [m]
t_data.w = float(fields[11]) # width [m]
t_data.l = float(fields[12]) # length [m]
t_data.X = float(fields[13]) # X [m]
t_data.Y = float(fields[14]) # Y [m]
t_data.Z = float(fields[15]) # Z [m]
t_data.yaw = float(fields[16]) # yaw angle [rad]
if not loading_groundtruth:
if len(fields) == 17:
t_data.score = -1
elif len(fields) == 18:
t_data.score = float(fields[17]) # detection score
else:
self.mail.msg("file is not in KITTI format")
return
# do not consider objects marked as invalid
                if t_data.track_id == -1 and t_data.obj_type != "dontcare":
continue
idx = t_data.frame
# check if length for frame data is sufficient
if idx >= len(f_data):
print("extend f_data", idx, len(f_data))
f_data += [[] for x in range(max(500, idx-len(f_data)))]
try:
id_frame = (t_data.frame,t_data.track_id)
if id_frame in id_frame_cache and not loading_groundtruth:
self.mail.msg("track ids are not unique for sequence %d: frame %d" % (seq,t_data.frame))
self.mail.msg("track id %d occured at least twice for this frame" % t_data.track_id)
self.mail.msg("Exiting...")
#continue # this allows to evaluate non-unique result files
return False
id_frame_cache.append(id_frame)
f_data[t_data.frame].append(copy.copy(t_data))
except:
print(len(f_data), idx)
raise
if t_data.track_id not in ids and t_data.obj_type!="dontcare":
ids.append(t_data.track_id)
n_trajectories +=1
n_in_seq +=1
# check if uploaded data provides information for 2D and 3D evaluation
                if not loading_groundtruth and eval_2d and (t_data.x1 == -1 or t_data.x2 == -1 or t_data.y1 == -1 or t_data.y2 == -1):
                    eval_2d = False
                if not loading_groundtruth and eval_3d and (t_data.X == -1000 or t_data.Y == -1000 or t_data.Z == -1000):
                    eval_3d = False
# only add existing frames
n_trajectories_seq.append(n_in_seq)
seq_data.append(f_data)
f.close()
if not loading_groundtruth:
self.tracker=seq_data
self.n_tr_trajectories=n_trajectories
self.eval_2d = eval_2d
self.eval_3d = eval_3d
self.n_tr_seq = n_trajectories_seq
if self.n_tr_trajectories==0:
return False
else:
# split ground truth and DontCare areas
self.dcareas = []
self.groundtruth = []
for seq_idx in range(len(seq_data)):
seq_gt = seq_data[seq_idx]
s_g, s_dc = [],[]
for f in range(len(seq_gt)):
all_gt = seq_gt[f]
g,dc = [],[]
for gg in all_gt:
if gg.obj_type=="dontcare":
dc.append(gg)
else:
g.append(gg)
s_g.append(g)
s_dc.append(dc)
self.dcareas.append(s_dc)
self.groundtruth.append(s_g)
self.n_gt_seq=n_trajectories_seq
self.n_gt_trajectories=n_trajectories
return True
def getThresholds(self, scores, num_gt, num_sample_pts=num_sample_pts):
        # discretize recall based on the scores of the matched true positives
        # recall levels the tracker never reaches have no data points (their precision
        # would be zero), so they are simply not sampled here
        # scores: the list of scores of the matched true positives
        # num_gt: the number of ground truth positives, used to compute recall
scores = np.array(scores)
scores.sort()
scores = scores[::-1]
current_recall = 0
thresholds = []
recalls = []
for i, score in enumerate(scores):
l_recall = (i + 1) / float(num_gt)
if i < (len(scores) - 1):
r_recall = (i + 2) / float(num_gt)
else:
r_recall = l_recall
if (((r_recall - current_recall) < (current_recall - l_recall)) and (i < (len(scores) - 1))):
continue
thresholds.append(score)
recalls.append(current_recall)
current_recall += 1 / (num_sample_pts - 1.0)
return thresholds[1:], recalls[1:] # throw the first one with 0 recall
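    # Worked example of the recall discretization above (illustrative comment, not part
    # of the original code): with num_gt = 4 and matched true-positive scores
    # [0.9, 0.8, 0.7, 0.6], each score advances recall by 0.25, far more than the
    # 1/(num_sample_pts - 1) = 0.025 sampling step, so no score is skipped; after the
    # first entry (recall 0) is dropped, the call returns thresholds [0.8, 0.7, 0.6]
    # paired with sampled recall targets [0.025, 0.05, 0.075]. With dense detections,
    # scores are instead skipped while the next score's recall is still closer to the
    # current sampling target.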
def reset(self):
self.n_gt = 0 # number of ground truth detections minus ignored false negatives and true positives
self.n_igt = 0 # number of ignored ground truth detections
self.n_tr = 0 # number of tracker detections minus ignored tracker detections
self.n_itr = 0 # number of ignored tracker detections
self.n_igttr = 0 # number of ignored ground truth detections where the corresponding associated tracker detection is also ignored
self.MOTA = 0
self.MOTP = 0
self.MOTAL = 0
self.MODA = 0
self.MODP = 0
self.MODP_t = []
self.recall = 0
self.precision = 0
self.F1 = 0
self.FAR = 0
self.total_cost = 0
self.itp = 0
self.tp = 0
self.fn = 0
self.ifn = 0
self.fp = 0
self.n_gts = [] # number of ground truth detections minus ignored false negatives and true positives PER SEQUENCE
self.n_igts = [] # number of ground ignored truth detections PER SEQUENCE
self.n_trs = [] # number of tracker detections minus ignored tracker detections PER SEQUENCE
self.n_itrs = [] # number of ignored tracker detections PER SEQUENCE
self.itps = [] # number of ignored true positives PER SEQUENCE
self.tps = [] # number of true positives including ignored true positives PER SEQUENCE
self.fns = [] # number of false negatives WITHOUT ignored false negatives PER SEQUENCE
self.ifns = [] # number of ignored false negatives PER SEQUENCE
self.fps = [] # above PER SEQUENCE
self.fragments = 0
self.id_switches = 0
self.MT = 0
self.PT = 0
self.ML = 0
self.gt_trajectories = [[] for x in range(self.n_sequences)]
self.ign_trajectories = [[] for x in range(self.n_sequences)]
return
def compute3rdPartyMetrics(self, threshold=-10000, recall_thres=1.0):
# def compute3rdPartyMetrics(self, threshold=3):
"""
Computes the metrics defined in
- Stiefelhagen 2008: Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics
MOTA, MOTAL, MOTP
- Nevatia 2008: Global Data Association for Multi-Object Tracking Using Network Flows
MT/PT/ML
"""
# construct Munkres object for Hungarian Method association
hm = Munkres()
max_cost = 1e9
self.scores = list()
# go through all frames and associate ground truth and tracker results
# groundtruth and tracker contain lists for every single frame containing lists of KITTI format detections
fr, ids = 0,0
for seq_idx in range(len(self.groundtruth)):
seq_gt = self.groundtruth[seq_idx]
seq_dc = self.dcareas[seq_idx] # don't care areas
seq_tracker_before = self.tracker[seq_idx]
# remove the tracks with low confidence for each frame
tracker_id_score = dict()
for frame in range(len(seq_tracker_before)):
tracks_tmp = seq_tracker_before[frame]
for index in range(len(tracks_tmp)):
trk_tmp = tracks_tmp[index]
id_tmp = trk_tmp.track_id
score_tmp = trk_tmp.score
if id_tmp not in tracker_id_score.keys():
tracker_id_score[id_tmp] = list()
tracker_id_score[id_tmp].append(score_tmp)
id_average_score = dict()
to_delete_id = list()
for track_id, score_list in tracker_id_score.items():
average_score = sum(score_list) / float(len(score_list))
id_average_score[track_id] = average_score
if average_score < threshold:
to_delete_id.append(track_id)
seq_tracker = list()
for frame in range(len(seq_tracker_before)):
seq_tracker_frame = list()
tracks_tmp = seq_tracker_before[frame]
for index in range(len(tracks_tmp)):
trk_tmp = tracks_tmp[index]
id_tmp = trk_tmp.track_id
average_score = id_average_score[id_tmp]
trk_tmp.score = average_score
if id_tmp not in to_delete_id:
seq_tracker_frame.append(trk_tmp)
seq_tracker.append(seq_tracker_frame)
seq_trajectories = defaultdict(list)
seq_ignored = defaultdict(list)
# statistics over the current sequence, check the corresponding
# variable comments in __init__ to get their meaning
seqtp = 0
seqitp = 0
seqfn = 0
seqifn = 0
seqfp = 0
seqigt = 0
seqitr = 0
last_ids = [[],[]]
n_gts = 0
n_trs = 0
for f in range(len(seq_gt)): # go through each frame
g = seq_gt[f]
dc = seq_dc[f]
t = seq_tracker[f]
# counting total number of ground truth and tracker objects
self.n_gt += len(g)
self.n_tr += len(t)
n_gts += len(g)
n_trs += len(t)
# use hungarian method to associate, using boxoverlap 0..1 as cost
# build cost matrix
# row is gt, column is det
cost_matrix = []
this_ids = [[],[]]
for gg in g:
# save current ids
this_ids[0].append(gg.track_id)
this_ids[1].append(-1)
gg.tracker = -1
gg.id_switch = 0
gg.fragmentation = 0
cost_row = []
for tt in t:
if self.eval_2diou:
c = 1 - boxoverlap(gg, tt)
elif self.eval_3diou:
c = 1 - box3doverlap(gg, tt)
else:
assert False, 'error'
# gating for boxoverlap
if c <= 1 - self.min_overlap:
cost_row.append(c)
else:
cost_row.append(max_cost) # = 1e9
cost_matrix.append(cost_row)
# all ground truth trajectories are initially not associated
# extend groundtruth trajectories lists (merge lists)
seq_trajectories[gg.track_id].append(-1)
seq_ignored[gg.track_id].append(False)
if len(g) == 0:
cost_matrix=[[]]
# associate
association_matrix = hm.compute(cost_matrix)
# tmp variables for sanity checks and MODP computation
tmptp = 0
tmpfp = 0
tmpfn = 0
tmpc = 0 # this will sum up the overlaps for all true positives
tmpcs = [0]*len(g) # this will save the overlaps for all true positives
                # the reason is that some true positives might be ignored
                # later, in which case the corresponding overlaps are
                # subtracted from tmpc for the MODP computation
# mapping for tracker ids and ground truth ids
for row,col in association_matrix:
# apply gating on boxoverlap
c = cost_matrix[row][col]
if c < max_cost:
g[row].tracker = t[col].track_id
this_ids[1][row] = t[col].track_id
t[col].valid = True
g[row].distance = c
self.total_cost += 1-c
tmpc += 1-c
tmpcs[row] = 1-c
seq_trajectories[g[row].track_id][-1] = t[col].track_id
# true positives are only valid associations
self.tp += 1
tmptp += 1
self.scores.append(t[col].score)
else:
g[row].tracker = -1
self.fn += 1
tmpfn += 1
# associate tracker and DontCare areas
# ignore tracker in neighboring classes
nignoredtracker = 0 # number of ignored tracker detections
ignoredtrackers = dict() # will associate the track_id with -1
# if it is not ignored and 1 if it is
# ignored;
# this is used to avoid double counting ignored
# cases, see the next loop
for tt in t:
ignoredtrackers[tt.track_id] = -1
# ignore detection if it belongs to a neighboring class or is
# smaller or equal to the minimum height
tt_height = abs(tt.y1 - tt.y2)
if ((self.cls=="car" and tt.obj_type=="van") or (self.cls=="pedestrian" and tt.obj_type=="person_sitting") or tt_height<=self.min_height) and not tt.valid:
nignoredtracker+= 1
tt.ignored = True
ignoredtrackers[tt.track_id] = 1
continue
for d in dc:
# as KITTI does not provide ground truth 3D box for DontCare objects, we have to use
# 2D IoU here and a threshold of 0.5 for 2D IoU.
overlap = boxoverlap(tt, d, "a")
if overlap > 0.5 and not tt.valid:
tt.ignored = True
nignoredtracker += 1
ignoredtrackers[tt.track_id] = 1
break
# check for ignored FN/TP (truncation or neighboring object class)
ignoredfn = 0 # the number of ignored false negatives
nignoredtp = 0 # the number of ignored true positives
nignoredpairs = 0 # the number of ignored pairs, i.e. a true positive
# which is ignored but where the associated tracker
# detection has already been ignored
gi = 0
for gg in g:
if gg.tracker < 0:
if gg.occlusion>self.max_occlusion or gg.truncation>self.max_truncation\
or (self.cls=="car" and gg.obj_type=="van") or (self.cls=="pedestrian" and gg.obj_type=="person_sitting"):
seq_ignored[gg.track_id][-1] = True
gg.ignored = True
ignoredfn += 1
elif gg.tracker>=0:
if gg.occlusion>self.max_occlusion or gg.truncation>self.max_truncation\
or (self.cls=="car" and gg.obj_type=="van") or (self.cls=="pedestrian" and gg.obj_type=="person_sitting"):
seq_ignored[gg.track_id][-1] = True
gg.ignored = True
nignoredtp += 1
# if the associated tracker detection is already ignored,
# we want to avoid double counting ignored detections
if ignoredtrackers[gg.tracker] > 0:
nignoredpairs += 1
# for computing MODP, the overlaps from ignored detections
# are subtracted
tmpc -= tmpcs[gi]
gi += 1
                # the counters below can be confusing; check the comments in __init__
                # to see what the individual statistics represent
                # nignoredtp: already associated, but should be ignored
                # ignoredfn: already missed, but should be ignored
# correct TP by number of ignored TP due to truncation
# ignored TP are shown as tracked in visualization
tmptp -= nignoredtp
# count the number of ignored true positives
self.itp += nignoredtp
# adjust the number of ground truth objects considered
# self.n_gt_adjusted = self.n_gt
self.n_gt -= (ignoredfn + nignoredtp)
# count the number of ignored ground truth objects
self.n_igt += ignoredfn + nignoredtp
# count the number of ignored tracker objects
self.n_itr += nignoredtracker
# count the number of ignored pairs, i.e. associated tracker and
# ground truth objects that are both ignored
self.n_igttr += nignoredpairs
                # false negatives = associated gt bboxes exceeding the association threshold + non-associated gt bboxes
                #
                # explanation of fn:
                # the fn counted above covers matched gt whose association cost was too high;
                # len(g) - len(association_matrix) adds the gt that the Hungarian method left unmatched;
                # subtracting ignoredfn removes the gt that are ignored anyway
tmpfn += len(g)-len(association_matrix)-ignoredfn
self.fn += len(g)-len(association_matrix)-ignoredfn
# self.fn += len(g)-len(association_matrix)
self.ifn += ignoredfn
# false positives = tracker bboxes - associated tracker bboxes
# mismatches (mme_t)
tmpfp += len(t) - tmptp - nignoredtracker - nignoredtp + nignoredpairs
self.fp += len(t) - tmptp - nignoredtracker - nignoredtp + nignoredpairs
#tmpfp = len(t) - tmptp - nignoredtp # == len(t) - (tp - ignoredtp) - ignoredtp
#self.fp += len(t) - tmptp - nignoredtp
# update sequence data
seqtp += tmptp
seqitp += nignoredtp
seqfp += tmpfp
seqfn += tmpfn
seqifn += ignoredfn
seqigt += ignoredfn + nignoredtp
seqitr += nignoredtracker
                # sanity checks
                # - the number of true positives minus ignored true positives
                #   should be greater than or equal to 0
                # - the number of false negatives should be greater than or equal to 0
                # - the number of false positives needs to be greater than or equal to 0,
                #   otherwise ignored detections might be double counted
                # - the number of counted true positives (plus ignored ones)
                #   and the number of counted false negatives (plus ignored ones)
                #   should match the total number of ground truth objects
                # - the number of counted true positives (plus ignored ones)
                #   and the number of counted false positives
                #   plus the number of ignored tracker detections should
                #   match the total number of tracker detections; note that
                #   nignoredpairs is subtracted here to avoid double counting
                #   of ignored detections in nignoredtp and nignoredtracker
if tmptp<0:
print(tmptp, nignoredtp)
raise NameError("Something went wrong! TP is negative")
if tmpfn<0:
print(tmpfn, len(g), len(association_matrix), ignoredfn, nignoredpairs)
raise NameError("Something went wrong! FN is negative")
if tmpfp<0:
print(tmpfp, len(t), tmptp, nignoredtracker, nignoredtp, nignoredpairs)
raise NameError("Something went wrong! FP is negative")
if tmptp + tmpfn != len(g)-ignoredfn-nignoredtp:
print("seqidx", seq_idx)
print("frame ", f)
print("TP ", tmptp)
print("FN ", tmpfn)
print("FP ", tmpfp)
print("nGT ", len(g))
print("nAss ", len(association_matrix))
print("ign GT", ignoredfn)
print("ign TP", nignoredtp)
raise NameError("Something went wrong! nGroundtruth is not TP+FN")
if tmptp+tmpfp+nignoredtp+nignoredtracker-nignoredpairs != len(t):
print(seq_idx, f, len(t), tmptp, tmpfp)
print(len(association_matrix), association_matrix)
raise NameError("Something went wrong! nTracker is not TP+FP")
                # check for id switches or fragmentations
                # fragmentations will always be at least as many as id switches: an id switch is
                # only counted when the id changes while the detection still exists, whereas a
                # fragmentation is counted when the id changes or the detection goes missing
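                # e.g. matched tracker ids [2, 2, 3, 3] for one gt track count one id switch
                # (and also one fragmentation, since the id changed), while [2, 2, -1, 2]
                # counts one fragmentation and no id switch because the detection merely
                # went missing (illustrative reading of the loop below, subject to the
                # truncation check; not part of the original code)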
for i,tt in enumerate(this_ids[0]):
# print(i)
# print(tt)
if tt in last_ids[0]:
idx = last_ids[0].index(tt)
tid = this_ids[1][i] # id in current tracker corresponding to the gt tt
lid = last_ids[1][idx] # id in last frame tracker corresponding to the gt tt
if tid != lid and lid != -1 and tid != -1:
if g[i].truncation<self.max_truncation:
g[i].id_switch = 1
ids +=1
if tid != lid and lid != -1:
if g[i].truncation<self.max_truncation:
g[i].fragmentation = 1
fr +=1
# save current index
last_ids = this_ids
# compute MOTP_t
MODP_t = 1
if tmptp!=0:
MODP_t = tmpc/float(tmptp)
self.MODP_t.append(MODP_t)
# remove empty lists for current gt trajectories
self.gt_trajectories[seq_idx] = seq_trajectories
self.ign_trajectories[seq_idx] = seq_ignored
# self.num_gt += n_gts
# gather statistics for "per sequence" statistics.
self.n_gts.append(n_gts)
self.n_trs.append(n_trs)
self.tps.append(seqtp)
self.itps.append(seqitp)
self.fps.append(seqfp)
self.fns.append(seqfn)
self.ifns.append(seqifn)
self.n_igts.append(seqigt)
self.n_itrs.append(seqitr)
# compute MT/PT/ML, fragments, idswitches for all groundtruth trajectories
n_ignored_tr_total = 0
for seq_idx, (seq_trajectories,seq_ignored) in enumerate(zip(self.gt_trajectories, self.ign_trajectories)):
if len(seq_trajectories)==0:
continue
tmpMT, tmpML, tmpPT, tmpId_switches, tmpFragments = [0]*5
n_ignored_tr = 0
for g, ign_g in zip(seq_trajectories.values(), seq_ignored.values()):
# all frames of this gt trajectory are ignored
if all(ign_g):
n_ignored_tr+=1
n_ignored_tr_total+=1
continue
# all frames of this gt trajectory are not assigned to any detections
if all([this==-1 for this in g]):
tmpML+=1
self.ML+=1
continue
# compute tracked frames in trajectory
last_id = g[0]
# first detection (necessary to be in gt_trajectories) is always tracked
tracked = 1 if g[0]>=0 else 0
lgt = 0 if ign_g[0] else 1
for f in range(1,len(g)):
if ign_g[f]:
last_id = -1
continue
lgt+=1
if last_id != g[f] and last_id != -1 and g[f] != -1 and g[f-1] != -1:
tmpId_switches += 1
self.id_switches += 1
if f < len(g)-1 and g[f-1] != g[f] and last_id != -1 and g[f] != -1 and g[f+1] != -1:
tmpFragments += 1
self.fragments += 1
if g[f] != -1:
tracked += 1
last_id = g[f]
# handle last frame; tracked state is handled in for loop (g[f]!=-1)
if len(g)>1 and g[f-1] != g[f] and last_id != -1 and g[f] != -1 and not ign_g[f]:
tmpFragments += 1
self.fragments += 1
# compute MT/PT/ML
tracking_ratio = tracked / float(len(g) - sum(ign_g))
if tracking_ratio > 0.8:
tmpMT += 1
self.MT += 1
elif tracking_ratio < 0.2:
tmpML += 1
self.ML += 1
else: # 0.2 <= tracking_ratio <= 0.8
tmpPT += 1
self.PT += 1
if (self.n_gt_trajectories-n_ignored_tr_total)==0:
self.MT = 0.
self.PT = 0.
self.ML = 0.
else:
self.MT /= float(self.n_gt_trajectories-n_ignored_tr_total)
self.PT /= float(self.n_gt_trajectories-n_ignored_tr_total)
self.ML /= float(self.n_gt_trajectories-n_ignored_tr_total)
# precision/recall etc.
if (self.fp+self.tp)==0 or (self.tp+self.fn)==0:
self.recall = 0.
self.precision = 0.
else:
self.recall = self.tp/float(self.tp+self.fn)
self.precision = self.tp/float(self.fp+self.tp)
if (self.recall+self.precision)==0:
self.F1 = 0.
else:
self.F1 = 2.*(self.precision*self.recall)/(self.precision+self.recall)
if sum(self.n_frames)==0:
self.FAR = "n/a"
else:
self.FAR = self.fp/float(sum(self.n_frames))
# compute CLEARMOT
if self.n_gt==0:
self.MOTA = -float("inf")
self.MODA = -float("inf")
self.sMOTA = -float("inf")
else:
self.MOTA = 1 - (self.fn + self.fp + self.id_switches)/float(self.n_gt)
self.MODA = 1 - (self.fn + self.fp) / float(self.n_gt)
self.sMOTA = min(1, max(0, 1 - (self.fn + self.fp + self.id_switches - (1 - recall_thres) * self.n_gt) / float(recall_thres * self.n_gt)))
if self.tp==0:
self.MOTP = 0
else:
self.MOTP = self.total_cost / float(self.tp)
if self.n_gt!=0:
if self.id_switches==0:
self.MOTAL = 1 - (self.fn + self.fp + self.id_switches)/float(self.n_gt)
else:
self.MOTAL = 1 - (self.fn + self.fp + math.log10(self.id_switches))/float(self.n_gt)
else:
self.MOTAL = -float("inf")
if sum(self.n_frames)==0:
self.MODP = "n/a"
else:
self.MODP = sum(self.MODP_t)/float(sum(self.n_frames))
self.num_gt = self.tp + self.fn
return True
def createSummary_details(self):
"""
        Generate a summary of the results and send it via the mail object.
        With the dummy mailpy module, the summary is simply printed instead of mailed.
"""
summary = ""
summary += "evaluation: best results with single threshold".center(80,"=") + "\n"
summary += self.printEntry("Multiple Object Tracking Accuracy (MOTA)", self.MOTA) + "\n"
summary += self.printEntry("Multiple Object Tracking Precision (MOTP)", float(self.MOTP)) + "\n"
summary += self.printEntry("Multiple Object Tracking Accuracy (MOTAL)", self.MOTAL) + "\n"
summary += self.printEntry("Multiple Object Detection Accuracy (MODA)", self.MODA) + "\n"
summary += self.printEntry("Multiple Object Detection Precision (MODP)", float(self.MODP)) + "\n"
summary += "\n"
summary += self.printEntry("Recall", self.recall) + "\n"
summary += self.printEntry("Precision", self.precision) + "\n"
summary += self.printEntry("F1", self.F1) + "\n"
summary += self.printEntry("False Alarm Rate", self.FAR) + "\n"
summary += "\n"
summary += self.printEntry("Mostly Tracked", self.MT) + "\n"
summary += self.printEntry("Partly Tracked", self.PT) + "\n"
summary += self.printEntry("Mostly Lost", self.ML) + "\n"
summary += "\n"
summary += self.printEntry("True Positives", self.tp) + "\n"
#summary += self.printEntry("True Positives per Sequence", self.tps) + "\n"
summary += self.printEntry("Ignored True Positives", self.itp) + "\n"
#summary += self.printEntry("Ignored True Positives per Sequence", self.itps) + "\n"
summary += self.printEntry("False Positives", self.fp) + "\n"
#summary += self.printEntry("False Positives per Sequence", self.fps) + "\n"
summary += self.printEntry("False Negatives", self.fn) + "\n"
#summary += self.printEntry("False Negatives per Sequence", self.fns) + "\n"
summary += self.printEntry("Ignored False Negatives", self.ifn) + "\n"
#summary += self.printEntry("Ignored False Negatives per Sequence", self.ifns) + "\n"
# summary += self.printEntry("Missed Targets", self.fn) + "\n"
summary += self.printEntry("ID-switches", self.id_switches) + "\n"
summary += self.printEntry("Fragmentations", self.fragments) + "\n"
summary += "\n"
summary += self.printEntry("Ground Truth Objects (Total)", self.n_gt + self.n_igt) + "\n"
#summary += self.printEntry("Ground Truth Objects (Total) per Sequence", self.n_gts) + "\n"
summary += self.printEntry("Ignored Ground Truth Objects", self.n_igt) + "\n"
#summary += self.printEntry("Ignored Ground Truth Objects per Sequence", self.n_igts) + "\n"
summary += self.printEntry("Ground Truth Trajectories", self.n_gt_trajectories) + "\n"
summary += "\n"
summary += self.printEntry("Tracker Objects (Total)", self.n_tr) + "\n"
#summary += self.printEntry("Tracker Objects (Total) per Sequence", self.n_trs) + "\n"
summary += self.printEntry("Ignored Tracker Objects", self.n_itr) + "\n"
#summary += self.printEntry("Ignored Tracker Objects per Sequence", self.n_itrs) + "\n"
summary += self.printEntry("Tracker Trajectories", self.n_tr_trajectories) + "\n"
#summary += "\n"
#summary += self.printEntry("Ignored Tracker Objects with Associated Ignored Ground Truth Objects", self.n_igttr) + "\n"
summary += "="*80
return summary
def createSummary_simple(self, threshold, recall):
"""
        Generate a summary of the results and send it via the mail object.
        With the dummy mailpy module, the summary is simply printed instead of mailed.
"""
summary = ""
summary += ("evaluation with confidence threshold %f, recall %f" % (threshold, recall)).center(80,"=") + "\n"
summary += ' sMOTA MOTA MOTP MT ML IDS FRAG F1 Prec Recall FAR TP FP FN\n'
summary += '{:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:5d} {:5d} {:.4f} {:.4f} {:.4f} {:.4f} {:5d} {:5d} {:5d}\n'.format( \
self.sMOTA, self.MOTA, self.MOTP, self.MT, self.ML, self.id_switches, self.fragments, \
self.F1, self.precision, self.recall, self.FAR, self.tp, self.fp, self.fn)
summary += "="*80
return summary
def printEntry(self, key, val,width=(70,10)):
"""
Pretty print an entry in a table fashion.
"""
s_out = key.ljust(width[0])
if type(val)==int:
s = "%%%dd" % width[1]
s_out += s % val
elif type(val)==float:
s = "%%%d.4f" % (width[1])
s_out += s % val
else:
s_out += ("%s"%val).rjust(width[1])
return s_out
def saveToStats(self, dump, threshold=None, recall=None):
"""
Save the statistics in a whitespace separate file.
"""
if threshold is None: summary = self.createSummary_details()
else: summary = self.createSummary_simple(threshold, recall)
        self.mail.msg(summary)  # mail or print the summary via the object's own mail handle
print(summary, file=dump)
class stat:
"""
Utility class to load data.
"""
def __init__(self, t_sha, cls, suffix, dump):
"""
Constructor, initializes the object given the parameters.
"""
# init object data
self.mota = 0
self.motp = 0
self.F1 = 0
self.precision = 0
self.fp = 0
self.fn = 0
self.sMOTA = 0
self.mota_list = list()
self.motp_list = list()
self.sMOTA_list = list()
self.f1_list = list()
self.precision_list = list()
self.fp_list = list()
self.fn_list = list()
self.recall_list = list()
self.t_sha = t_sha
self.cls = cls
self.suffix = suffix
self.dump = dump
def update(self, data):
self.mota += data['mota']
self.motp += data['motp']
self.F1 += data['F1']
# self.moda += data['moda']
# self.modp += data['modp']
self.precision += data['precision']
self.fp += data['fp']
self.fn += data['fn']
self.sMOTA += data['sMOTA']
self.mota_list.append(data['mota'])
self.sMOTA_list.append(data['sMOTA'])
self.motp_list.append(data['motp'])
self.f1_list.append(data['F1'])
self.precision_list.append(data['precision'])
self.fp_list.append(data['fp'])
self.fn_list.append(data['fn'])
self.recall_list.append(data['recall'])
def output(self):
self.sAMOTA = self.sMOTA / (num_sample_pts - 1)
self.amota = self.mota / (num_sample_pts - 1)
self.amotp = self.motp / (num_sample_pts - 1)
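    # Illustrative reading of the averaging above (not original documentation): update()
    # is called once per confidence threshold returned by getThresholds(), i.e. once per
    # sampled recall point that the tracker actually reaches, and output() divides the
    # accumulated sums by (num_sample_pts - 1) = 40 fixed recall steps; recall points the
    # tracker never reaches therefore contribute zero, matching the zero-precision
    # convention noted in getThresholds().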
def print_summary(self):
summary = ""
summary += ("evaluation: average over recall").center(80,"=") + "\n"
summary += ' sAMOTA AMOTA AMOTP \n'
summary += '{:.4f} {:.4f} {:.4f}\n'.format(self.sAMOTA, self.amota, self.amotp)
summary += "="*80
print(summary, file=self.dump)
return summary
def plot_over_recall(self, data_list, title, y_name, save_path):
# add extra zero at the end
largest_recall = self.recall_list[-1]
extra_zero = np.arange(largest_recall, 1, 0.01).tolist()
len_extra = len(extra_zero)
y_zero = [0] * len_extra
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(np.array(self.recall_list + extra_zero), np.array(data_list + y_zero))
# ax.set_title(title, fontsize=20)
ax.set_ylabel(y_name, fontsize=20)
ax.set_xlabel('Recall', fontsize=20)
ax.set_xlim(0.0, 1.0)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.tight_layout()
if y_name in ['sMOTA', 'MOTA', 'MOTP', 'F1', 'Precision']:
ax.set_ylim(0.0, 1.0)
else:
ax.set_ylim(0.0, max(data_list))
if y_name in ['MOTA', 'F1']:
max_ind = np.argmax(np.array(data_list))
# print(max_ind)
plt.axvline(self.recall_list[max_ind], ymax=data_list[max_ind], color='r')
plt.plot(self.recall_list[max_ind], data_list[max_ind], 'or', markersize=12)
plt.text(self.recall_list[max_ind]-0.05, data_list[max_ind]+0.03, '%.2f' % (data_list[max_ind] * 100), fontsize=20)
fig.savefig(save_path)
plt.close()
def plot(self):
save_dir = os.path.join("./results", self.t_sha)
self.plot_over_recall(self.mota_list, 'MOTA - Recall Curve', 'MOTA', os.path.join(save_dir, 'MOTA_recall_curve_%s_%s.pdf' % (self.cls, self.suffix)))
self.plot_over_recall(self.sMOTA_list, 'sMOTA - Recall Curve', 'sMOTA', os.path.join(save_dir, 'sMOTA_recall_curve_%s_%s.pdf' % (self.cls, self.suffix)))
self.plot_over_recall(self.motp_list, 'MOTP - Recall Curve', 'MOTP', os.path.join(save_dir, 'MOTP_recall_curve_%s_%s.pdf' % (self.cls, self.suffix)))
self.plot_over_recall(self.f1_list, 'F1 - Recall Curve', 'F1', os.path.join(save_dir, 'F1_recall_curve_%s_%s.pdf' % (self.cls, self.suffix)))
self.plot_over_recall(self.fp_list, 'False Positive - Recall Curve', 'False Positive', os.path.join(save_dir, 'FP_recall_curve_%s_%s.pdf' % (self.cls, self.suffix)))
self.plot_over_recall(self.fn_list, 'False Negative - Recall Curve', 'False Negative', os.path.join(save_dir, 'FN_recall_curve_%s_%s.pdf' % (self.cls, self.suffix)))
self.plot_over_recall(self.precision_list, 'Precision - Recall Curve', 'Precision', os.path.join(save_dir, 'precision_recall_curve_%s_%s.pdf' % (self.cls, self.suffix)))
def evaluate(result_sha,mail,eval_3diou,eval_2diou):
"""
Entry point for evaluation, will load the data and start evaluation for
CAR and PEDESTRIAN if available.
"""
    # start evaluation and instantiate the eval object
if eval_3diou:
mail.msg("Processing Result for KITTI 3D MOT Benchmark")
elif eval_2diou:
mail.msg("Processing Result for KITTI 2D MOT Benchmark")
else:
assert False, 'error'
classes = []
for c in ("car", "pedestrian", "cyclist"):
# for c in ("car"):
e = trackingEvaluation(t_sha=result_sha, mail=mail,cls=c,eval_3diou=eval_3diou,eval_2diou=eval_2diou)
# load tracker data and check provided classes
try:
if not e.loadTracker():
continue
mail.msg("Loading Results - Success")
mail.msg("Evaluate Object Class: %s" % c.upper())
classes.append(c)
except:
mail.msg("Feel free to contact us (<EMAIL>), if you receive this error message:")
mail.msg(" Caught exception while loading result data.")
break
# load groundtruth data for this class
if not e.loadGroundtruth():
raise ValueError("Ground truth not found.")
mail.msg("Loading Groundtruth - Success")
# sanity checks
if len(e.groundtruth) != len(e.tracker):
mail.msg("The uploaded data does not provide results for every sequence: %d vs %d" % (len(e.groundtruth), len(e.tracker)))
return False
mail.msg("Loaded %d Sequences." % len(e.groundtruth))
mail.msg("Start Evaluation...")
if eval_3diou: suffix = 'eval3D'
else: suffix = 'eval2D'
        filename = os.path.join(e.t_path, "../summary_%s_average_%s.txt" % (c, suffix))
        dump = open(filename, "w+")
stat_meter = stat(t_sha=result_sha, cls=c, suffix=suffix, dump=dump)
e.compute3rdPartyMetrics()
# evaluate the mean average metrics
best_mota, best_threshold = 0, -10000
threshold_list, recall_list = e.getThresholds(e.scores, e.num_gt)
for threshold_tmp, recall_tmp in zip(threshold_list, recall_list):
data_tmp = dict()
e.reset()
e.compute3rdPartyMetrics(threshold_tmp, recall_tmp)
data_tmp['mota'], data_tmp['motp'], data_tmp['moda'], data_tmp['modp'], data_tmp['precision'], \
data_tmp['F1'], data_tmp['fp'], data_tmp['fn'], data_tmp['recall'], data_tmp['sMOTA'] = \
e.MOTA, e.MOTP, e.MODA, e.MODP, e.precision, e.F1, e.fp, e.fn, e.recall, e.sMOTA
stat_meter.update(data_tmp)
mota_tmp = e.MOTA
if mota_tmp > best_mota:
best_threshold = threshold_tmp
best_mota = mota_tmp
e.saveToStats(dump, threshold_tmp, recall_tmp)
e.reset()
e.compute3rdPartyMetrics(best_threshold)
e.saveToStats(dump)
stat_meter.output()
summary = stat_meter.print_summary()
stat_meter.plot()
mail.msg(summary) # mail or print the summary.
dump.close()
# finish
if len(classes)==0:
mail.msg("The uploaded results could not be evaluated. Check for format errors.")
return False
mail.msg("Thank you for participating in our benchmark!")
return True
#########################################################################
# entry point of evaluation script
# input:
# - result_sha (unique key of results)
# - 2D or 3D (using 2D or 3D MOT evaluation system)
if __name__ == "__main__":
# check for correct number of arguments. if user_sha and email are not supplied,
# no notification email is sent (this option is used for auto-updates)
if len(sys.argv)!=2 and len(sys.argv)!=3:
print("Usage: python eval_kitti3dmot.py result_sha ?D(e.g. 2D or 3D)")
        sys.exit(1)
# get unique sha key of submitted results
result_sha = sys.argv[1]
mail = mailpy.Mail("")
#
if len(sys.argv)==3:
if sys.argv[2] == '2D':
eval_3diou, eval_2diou = False, True # eval 2d
elif sys.argv[2] == '3D':
eval_3diou, eval_2diou = True, False # eval 3d
else:
print("Usage: python eval_kitti3dmot.py result_sha ?D(e.g. 2D or 3D)")
            sys.exit(1)
else:
eval_3diou, eval_2diou = True, False # eval 3d
# evaluate results
success = evaluate(result_sha,mail,eval_3diou,eval_2diou)
|
<filename>codenerix_pos/urls.py
# -*- coding: utf-8 -*-
#
# django-codenerix-pos
#
# Codenerix GNU
#
# Project URL : http://www.codenerix.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.urls import re_path as url
from django.views.generic import TemplateView
from .views import POSZoneList, POSZoneCreate, POSZoneCreateModal, POSZoneUpdate, POSZoneUpdateModal, POSZoneDelete, POSZoneDetails
from .views import POSHardwareList, POSHardwareCreate, POSHardwareCreateModal, POSHardwareUpdate, POSHardwareUpdateModal, POSHardwareDelete, POSHardwareSubList, POSHardwareDetails, POSHardwareDetailModal
from .views import POSHardwareForeign, POSHardwareProfiles
from .views import POSList, POSCreate, POSCreateModal, POSUpdate, POSUpdateModal, POSDelete, POSSubList, POSDetails, POSDetailModal, POSCommits
from .views import POSSlotList, POSSlotCreate, POSSlotCreateModal, POSSlotUpdate, POSSlotUpdateModal, POSSlotDelete, POSSlotSubList, POSSlotDetails, POSSlotDetailModal
from .views import POSPlantList, POSPlantCreate, POSPlantCreateModal, POSPlantUpdate, POSPlantUpdateModal, POSPlantDelete, POSPlantDetails
from .views import POSProductList, POSProductCreate, POSProductCreateModal, POSProductUpdate, POSProductUpdateModal, POSProductDelete, POSProductSubList, POSProductDetails, POSProductDetailModal
from .views import POSLogList
from .views import POSOperatorList, POSOperatorCreate, POSOperatorCreateModal, POSOperatorUpdate, POSOperatorUpdateModal, POSOperatorDelete, POSOperatorSubList, POSOperatorDetails, POSOperatorDetailModal
from .views import POSSession
from .views import POSGroupProductList, POSGroupProductCreate, POSGroupProductCreateModal, POSGroupProductUpdate, POSGroupProductUpdateModal, POSGroupProductDelete, POSGroupProductSubList, POSGroupProductDetails, POSGroupProductDetailModal
from .views import POSSlotForeign
from .views import OpenCashRegister
class ExampleView(TemplateView):
template_name = "codenerix_pos/example.html"
def get_context_data(self, **kwargs):
context = super(ExampleView, self).get_context_data(**kwargs)
context['url'] = self.request.META.get("HTTP_HOST")
return context
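# Illustrative note (not part of the original module): every pattern below is named, so
# it can be resolved with Django's reverse(), e.g. reverse('CDNX_poszones_edit',
# kwargs={'pk': 3}) would presumably yield '/poszones/3/edit' relative to wherever this
# urls module is included in the project's root URLconf.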
urlpatterns = [
url(r'^example$', ExampleView.as_view(), name='CDNX_pos_example'),
url(r'^posplants$', POSPlantList.as_view(), name='CDNX_posplants_list'),
url(r'^posplants/add$', POSPlantCreate.as_view(), name='CDNX_posplants_add'),
url(r'^posplants/addmodal$', POSPlantCreateModal.as_view(), name='CDNX_posplants_addmodal'),
url(r'^posplants/(?P<pk>\w+)$', POSPlantDetails.as_view(), name='CDNX_posplants_details'),
url(r'^posplants/(?P<pk>\w+)/edit$', POSPlantUpdate.as_view(), name='CDNX_posplants_edit'),
url(r'^posplants/(?P<pk>\w+)/editmodal$', POSPlantUpdateModal.as_view(), name='CDNX_posplants_editmodal'),
url(r'^posplants/(?P<pk>\w+)/delete$', POSPlantDelete.as_view(), name='CDNX_posplants_delete'),
url(r'^poszones$', POSZoneList.as_view(), name='CDNX_poszones_list'),
url(r'^poszones/add$', POSZoneCreate.as_view(), name='CDNX_poszones_add'),
url(r'^poszones/addmodal$', POSZoneCreateModal.as_view(), name='CDNX_poszones_addmodal'),
url(r'^poszones/(?P<pk>\w+)$', POSZoneDetails.as_view(), name='CDNX_poszones_details'),
url(r'^poszones/(?P<pk>\w+)/edit$', POSZoneUpdate.as_view(), name='CDNX_poszones_edit'),
url(r'^poszones/(?P<pk>\w+)/editmodal$', POSZoneUpdateModal.as_view(), name='CDNX_poszones_editmodal'),
url(r'^poszones/(?P<pk>\w+)/delete$', POSZoneDelete.as_view(), name='CDNX_poszones_delete'),
url(r'^poshardwares$', POSHardwareList.as_view(), name='CDNX_poshardwares_list'),
url(r'^poshardwares/add$', POSHardwareCreate.as_view(), name='CDNX_poshardwares_add'),
url(r'^poshardwares/addmodal$', POSHardwareCreateModal.as_view(), name='CDNX_poshardwares_addmodal'),
url(r'^poshardwares/(?P<pk>\w+)$', POSHardwareDetails.as_view(), name='CDNX_poshardwares_details'),
url(r'^poshardwares/(?P<pk>\w+)/edit$', POSHardwareUpdate.as_view(), name='CDNX_poshardwares_edit'),
url(r'^poshardwares/(?P<pk>\w+)/editmodal$', POSHardwareUpdateModal.as_view(), name='CDNX_poshardwares_editmodal'),
url(r'^poshardwares/(?P<pk>\w+)/delete$', POSHardwareDelete.as_view(), name='CDNX_poshardwares_delete'),
url(r'^poshardwares/(?P<pk>\w+)/sublist$', POSHardwareSubList.as_view(), name='CDNX_poshardwares_sublist'),
url(r'^poshardwares/(?P<pk>\w+)/sublist/add$', POSHardwareCreateModal.as_view(), name='CDNX_poshardwares_sublist_add'),
url(r'^poshardwares/(?P<pk>\w+)/sublist/addmodal$', POSHardwareCreateModal.as_view(), name='CDNX_poshardwares_sublist_addmodal'),
url(r'^poshardwares/(?P<cpk>\w+)/sublist/(?P<pk>\w+)$', POSHardwareDetailModal.as_view(), name='CDNX_poshardwares_sublist_details'),
url(r'^poshardwares/(?P<cpk>\w+)/sublist/(?P<pk>\w+)/edit$', POSHardwareUpdateModal.as_view(), name='CDNX_poshardwares_sublist_edit'),
url(r'^poshardwares/(?P<cpk>\w+)/sublist/(?P<pk>\w+)/editmodal$', POSHardwareUpdateModal.as_view(), name='CDNX_poshardwares_sublist_editmodal'),
url(r'^poshardwares/(?P<cpk>\w+)/sublist/(?P<pk>\w+)/delete$', POSHardwareDelete.as_view(), name='CDNX_poshardwares_sublist_delete'),
url(r'^poshardwares/foreign/(?P<search>[\w\W]+|\*)$', POSHardwareForeign.as_view(), name='CDNX_poshardwares_foreign'),
url(r'^poshardwares/profiles/(?P<search>[\w\W]+|\*)$', POSHardwareProfiles.as_view(), name='CDNX_poshardwares_profiles'),
url(r'^poss$', POSList.as_view(), name='CDNX_poss_list'),
url(r'^poss/add$', POSCreate.as_view(), name='CDNX_poss_add'),
url(r'^poss/addmodal$', POSCreateModal.as_view(), name='CDNX_poss_addmodal'),
url(r'^poss/(?P<pk>\w+)$', POSDetails.as_view(), name='CDNX_poss_details'),
url(r'^poss/(?P<pk>\w+)/edit$', POSUpdate.as_view(), name='CDNX_poss_edit'),
url(r'^poss/(?P<pk>\w+)/editmodal$', POSUpdateModal.as_view(), name='CDNX_poss_editmodal'),
url(r'^poss/(?P<pk>\w+)/delete$', POSDelete.as_view(), name='CDNX_poss_delete'),
url(r'^poss/(?P<pk>\w+)/sublist$', POSSubList.as_view(), name='CDNX_poss_sublist'),
url(r'^poss/(?P<pk>\w+)/sublist/add$', POSCreateModal.as_view(), name='CDNX_poss_sublist_add'),
url(r'^poss/(?P<pk>\w+)/sublist/addmodal$', POSCreateModal.as_view(), name='CDNX_poss_sublist_addmodal'),
url(r'^poss/(?P<cpk>\w+)/sublist/(?P<pk>\w+)$', POSDetailModal.as_view(), name='CDNX_poss_sublist_details'),
url(r'^poss/(?P<cpk>\w+)/sublist/(?P<pk>\w+)/edit$', POSUpdateModal.as_view(), name='CDNX_poss_sublist_edit'),
url(r'^poss/(?P<cpk>\w+)/sublist/(?P<pk>\w+)/editmodal$', POSUpdateModal.as_view(), name='CDNX_poss_sublist_editmodal'),
url(r'^poss/(?P<cpk>\w+)/sublist/(?P<pk>\w+)/delete$', POSDelete.as_view(), name='CDNX_poss_sublist_delete'),
url(r'^poss/commits/(?P<search>[\w\W]+|\*)$', POSCommits.as_view(), name='CDNX_poss_commits'),
url(r'^posslots$', POSSlotList.as_view(), name='CDNX_posslots_list'),
url(r'^posslots/add$', POSSlotCreate.as_view(), name='CDNX_posslots_add'),
url(r'^posslots/addmodal$', POSSlotCreateModal.as_view(), name='CDNX_posslots_addmodal'),
url(r'^posslots/(?P<pk>\w+)$', POSSlotDetails.as_view(), name='CDNX_posslots_details'),
url(r'^posslots/(?P<pk>\w+)/edit$', POSSlotUpdate.as_view(), name='CDNX_posslots_edit'),
url(r'^posslots/(?P<pk>\w+)/editmodal$', POSSlotUpdateModal.as_view(), name='CDNX_posslots_editmodal'),
url(r'^posslots/(?P<pk>\w+)/delete$', POSSlotDelete.as_view(), name='CDNX_posslots_delete'),
url(r'^posslots/(?P<pk>\w+)/sublist$', POSSlotSubList.as_view(), name='CDNX_posslots_sublist'),
url(r'^posslots/(?P<pk>\w+)/sublist/add$', POSSlotCreateModal.as_view(), name='CDNX_posslots_sublist_add'),
url(r'^posslots/(?P<pk>\w+)/sublist/addmodal$', POSSlotCreateModal.as_view(), name='CDNX_posslots_sublist_addmodal'),
url(r'^posslots/(?P<cpk>\w+)/sublist/(?P<pk>\w+)$', POSSlotDetailModal.as_view(), name='CDNX_posslots_sublist_details'),
url(r'^posslots/(?P<cpk>\w+)/sublist/(?P<pk>\w+)/edit$', POSSlotUpdateModal.as_view(), name='CDNX_posslots_sublist_edit'),
url(r'^posslots/(?P<cpk>\w+)/sublist/(?P<pk>\w+)/editmodal$', POSSlotUpdateModal.as_view(), name='CDNX_posslots_sublist_editmodal'),
url(r'^posslots/(?P<cpk>\w+)/sublist/(?P<pk>\w+)/delete$', POSSlotDelete.as_view(), name='CDNX_posslots_sublist_delete'),
url(r'^posslots/foreign/(?P<search>[\w\W]+|\*)$', POSSlotForeign.as_view(), name='CDNX_posslots_foreign'),
url(r'^posproducts$', POSProductList.as_view(), name='CDNX_posproducts_list'),
url(r'^posproducts/add$', POSProductCreate.as_view(), name='CDNX_posproducts_add'),
url(r'^posproducts/addmodal$', POSProductCreateModal.as_view(), name='CDNX_posproducts_addmodal'),
url(r'^posproducts/(?P<pk>\w+)$', POSProductDetails.as_view(), name='CDNX_posproducts_details'),
url(r'^posproducts/(?P<pk>\w+)/edit$', POSProductUpdate.as_view(), name='CDNX_posproducts_edit'),
url(r'^posproducts/(?P<pk>\w+)/editmodal$', POSProductUpdateModal.as_view(), name='CDNX_posproducts_editmodal'),
url(r'^posproducts/(?P<pk>\w+)/delete$', POSProductDelete.as_view(), name='CDNX_posproducts_delete'),
url(r'^posproducts/(?P<pk>\w+)/sublist$', POSProductSubList.as_view(), name='CDNX_posproducts_sublist'),
url(r'^posproducts/(?P<pk>\w+)/sublist/add$', POSProductCreateModal.as_view(), name='CDNX_posproducts_sublist_add'),
url(r'^posproducts/(?P<pk>\w+)/sublist/addmodal$', POSProductCreateModal.as_view(), name='CDNX_posproducts_sublist_addmodal'),
url(r'^posproducts/(?P<cpk>\w+)/sublist/(?P<pk>\w+)$', POSProductDetailModal.as_view(), name='CDNX_posproducts_sublist_details'),
url(r'^posproducts/(?P<cpk>\w+)/sublist/(?P<pk>\w+)/edit$', POSProductUpdateModal.as_view(), name='CDNX_posproducts_sublist_edit'),
url(r'^posproducts/(?P<cpk>\w+)/sublist/(?P<pk>\w+)/editmodal$', POSProductUpdateModal.as_view(), name='CDNX_posproducts_sublist_editmodal'),
url(r'^posproducts/(?P<cpk>\w+)/sublist/(?P<pk>\w+)/delete$', POSProductDelete.as_view(), name='CDNX_posproducts_sublist_delete'),
url(r'^poslogs$', POSLogList.as_view(), name='CDNX_poslogs_list'),
url(r'^posoperators$', POSOperatorList.as_view(), name='CDNX_posoperators_list'),
url(r'^posoperators/add$', POSOperatorCreate.as_view(), name='CDNX_posoperators_add'),
url(r'^posoperators/addmodal$', POSOperatorCreateModal.as_view(), name='CDNX_posoperators_addmodal'),
url(r'^posoperators/(?P<pk>\w+)$', POSOperatorDetails.as_view(), name='CDNX_posoperators_details'),
url(r'^posoperators/(?P<pk>\w+)/edit$', POSOperatorUpdate.as_view(), name='CDNX_posoperators_edit'),
url(r'^posoperators/(?P<pk>\w+)/editmodal$', POSOperatorUpdateModal.as_view(), name='CDNX_posoperators_editmodal'),
url(r'^posoperators/(?P<pk>\w+)/delete$', POSOperatorDelete.as_view(), name='CDNX_posoperators_delete'),
url(r'^posoperators/(?P<pk>\w+)/sublist$', POSOperatorSubList.as_view(), name='CDNX_posoperators_sublist'),
url(r'^posoperators/(?P<pk>\w+)/sublist/add$', POSOperatorCreateModal.as_view(), name='CDNX_posoperators_sublist_add'),
url(r'^posoperators/(?P<pk>\w+)/sublist/addmodal$', POSOperatorCreateModal.as_view(), name='CDNX_posoperators_sublist_addmodal'),
url(r'^posoperators/(?P<cpk>\w+)/sublist/(?P<pk>\w+)$', POSOperatorDetailModal.as_view(), name='CDNX_posoperators_sublist_details'),
url(r'^posoperators/(?P<cpk>\w+)/sublist/(?P<pk>\w+)/edit$', POSOperatorUpdateModal.as_view(), name='CDNX_posoperators_sublist_edit'),
url(r'^posoperators/(?P<cpk>\w+)/sublist/(?P<pk>\w+)/editmodal$', POSOperatorUpdateModal.as_view(), name='CDNX_posoperators_sublist_editmodal'),
url(r'^posoperators/(?P<cpk>\w+)/sublist/(?P<pk>\w+)/delete$', POSOperatorDelete.as_view(), name='CDNX_posoperators_sublist_delete'),
url(r'^pos_session$', POSSession.as_view(), name='CDNX_pos_session'),
# POSGroupProduct
url(r'^posgroupproducts$', POSGroupProductList.as_view(), name='CDNX_posgroupproducts_list'),
url(r'^posgroupproducts/add$', POSGroupProductCreate.as_view(), name='CDNX_posgroupproducts_add'),
url(r'^posgroupproducts/addmodal$', POSGroupProductCreateModal.as_view(), name='CDNX_posgroupproducts_addmodal'),
url(r'^posgroupproducts/(?P<pk>\w+)$', POSGroupProductDetails.as_view(), name='CDNX_posgroupproducts_details'),
url(r'^posgroupproducts/(?P<pk>\w+)/edit$', POSGroupProductUpdate.as_view(), name='CDNX_posgroupproducts_edit'),
url(r'^posgroupproducts/(?P<pk>\w+)/editmodal$', POSGroupProductUpdateModal.as_view(), name='CDNX_posgroupproducts_editmodal'),
url(r'^posgroupproducts/(?P<pk>\w+)/delete$', POSGroupProductDelete.as_view(), name='CDNX_posgroupproducts_delete'),
url(r'^posgroupproducts/(?P<pk>\w+)/sublist$', POSGroupProductSubList.as_view(), name='CDNX_posgroupproducts_sublist'),
url(r'^posgroupproducts/(?P<pk>\w+)/sublist/add$', POSGroupProductCreateModal.as_view(), name='CDNX_posgroupproducts_sublist_add'),
url(r'^posgroupproducts/(?P<pk>\w+)/sublist/addmodal$', POSGroupProductCreateModal.as_view(), name='CDNX_posgroupproducts_sublist_addmodal'),
url(r'^posgroupproducts/(?P<cpk>\w+)/sublist/(?P<pk>\w+)$', POSGroupProductDetailModal.as_view(), name='CDNX_posgroupproducts_sublist_details'),
url(r'^posgroupproducts/(?P<cpk>\w+)/sublist/(?P<pk>\w+)/edit$', POSGroupProductUpdateModal.as_view(), name='CDNX_posgroupproducts_sublist_edit'),
url(r'^posgroupproducts/(?P<cpk>\w+)/sublist/(?P<pk>\w+)/editmodal$', POSGroupProductUpdateModal.as_view(), name='CDNX_posgroupproducts_sublist_editmodal'),
url(r'^posgroupproducts/(?P<cpk>\w+)/sublist/(?P<pk>\w+)/delete$', POSGroupProductDelete.as_view(), name='CDNX_posgroupproducts_sublist_delete'),
# Utilities
url(r'^open_cash_register$', OpenCashRegister.as_view(), name='CDNX_POS_open_cash_register'),
url(r'^open_cash_register/$', OpenCashRegister.as_view(), name='CDNX_POS_open_cash_register_'),
]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 3 17:44:51 2017
@author: <NAME>
"""
#from math import sqrt
#from mpl_toolkits.mplot3d import Axes3D
from pose_sim import *
if __name__ == '__main__':
#camera position in world coordinates
x = 0.
y = -0.8
z = 2.
# Create a camera
cam = Camera()
cam.set_K(fx = 800., fy = 800., cx = 320., cy = 240.) #Camera Matrix
cam.img_width = 320.*2.
cam.img_height = 240.*2.
#Camera looking straight down to the world center
cam.set_R_axisAngle(1.0, 0.0, 0.0, np.deg2rad(160.0))
#World position is defined after rotation matrix
cam.set_t(x,y,z,frame='world')
cam.set_P() # create projection matrix
    #Create a plane with an 8x8 grid of points
pl = Plane(origin=np.array([0, 0, 0]), normal = np.array([0, 0, 1]), size=(0.6,0.6), n = (8,8))
pl.uniform()
#pl.update_random(n = 16, r = 0.05, min_sep = 0.05)
objectPoints = pl.get_points()
#create_uniform_cam_poses()
#run_point_distribution_test(cam, objectPoints, False)
iters = 5000
n_range = np.arange(0,0.2,0.01)
ippe_tvec_error_avg = list()
ippe_rmat_error_avg = list()
pnp_tvec_error_avg = list()
pnp_rmat_error_avg = list()
for n in n_range:
ippe_tvec_error_sum = 0
ippe_rmat_error_sum = 0
pnp_tvec_error_sum = 0
pnp_rmat_error_sum = 0
for m in range(iters):
if n==0:
pl.uniform()
else:
pl.uniform_with_distortion(mean = 0, sd = n)
objectPoints = pl.get_points()
ippe_tvec_error, ippe_rmat_error, pnp_tvec_error, pnp_rmat_error = run_single(cam, objectPoints, noise = 0, quant_error = True, plot = False, debug = False)
ippe_tvec_error_sum += ippe_tvec_error
ippe_rmat_error_sum +=ippe_rmat_error
pnp_tvec_error_sum += pnp_tvec_error
pnp_rmat_error_sum += pnp_rmat_error
ippe_tvec_error_avg.append(ippe_tvec_error_sum/iters)
ippe_rmat_error_avg.append(ippe_rmat_error_sum/iters)
pnp_tvec_error_avg.append(pnp_tvec_error_sum/iters)
pnp_rmat_error_avg.append(pnp_rmat_error_sum/iters)
#%%
plt.figure()
plt.title("Effect of the distribution of points on the translation estimation")
plt.xlabel("Amount of deviation from uniform pattern (noise pixels)")
plt.ylabel("percent error in t")
#plt.plot(n_range, ippe_tvec_error_avg, label = "ippe")
plt.plot(n_range, pnp_tvec_error_avg, label = "solvepnp")
plt.legend()
plt.figure()
plt.title("Effect of the distribution of points on the rotation estimation")
plt.xlabel("Amount of deviation from uniform pattern (noise pixels)")
plt.ylabel("rotation angle error (degrees)")
#plt.plot(n_range, ippe_rmat_error_avg, label = "ippe")
plt.plot(n_range, pnp_rmat_error_avg, label = "solvepnp")
plt.legend()
plt.figure()
imagePoints = np.array(cam.project(objectPoints,quant_error = False))
cam.plot_image(imagePoints, points_color = 'blue')
#%%
plt.figure()
plt.title("Model plane points and deformation for n=0.1 (uniform)")
pl.uniform()
objectPoints = pl.get_points()
plt.plot(objectPoints[0,:], objectPoints[1,:],'o')
plt.xlim(-0.8,0.8)
plt.ylim(-0.8,0.8)
pl.uniform_with_distortion(mean = 0, sd = 0.1)
objectPoints = pl.get_points()
plt.plot(objectPoints[0,:], objectPoints[1,:],'rx')
|
# -*- coding: utf-8 -*-
"""
Holds the code for cleaning unwanted tags out of the lxml DOM.
"""
import copy
from .utils import ReplaceSequence
class DocumentCleaner(object):
def __init__(self, config):
"""Set appropriate tag names and regexes of tags to remove
from the HTML
"""
self.config = config
self.parser = self.config.get_parser()
self.remove_nodes_re = (
"^side$|combx|retweet|mediaarticlerelated|menucontainer|"
"navbar|storytopbar-bucket|utility-bar|inline-share-tools"
"|comment|PopularQuestions|contact|foot|footer|Footer|footnote"
"|cnn_strycaptiontxt|cnn_html_slideshow|cnn_strylftcntnt"
"|links|meta$|shoutbox|sponsor"
"|tags|socialnetworking|socialNetworking|cnnStryHghLght"
"|cnn_stryspcvbx|^inset$|pagetools|post-attributes"
"|welcome_form|contentTools2|the_answers"
"|communitypromo|runaroundLeft|subscribe(?!r-hider|-truncate)|vcard|articleheadings"
"|date(?!line-storybody)|^print$|popup|author-dropdown|tools|socialtools|byline"
"|konafilter|KonaFilter|breadcrumbs|^fn$|wp-caption-text"
"|legende|ajoutVideo|timestamp|js_replies"
)
self.regexp_namespace = "http://exslt.org/regular-expressions"
self.nauthy_ids_re = ("//*[re:test(@id, '%s', 'i')]" %
self.remove_nodes_re)
self.nauthy_classes_re = ("//*[re:test(@class, '%s', 'i')]" %
self.remove_nodes_re)
self.nauthy_names_re = ("//*[re:test(@name, '%s', 'i')]" %
self.remove_nodes_re)
self.div_to_p_re = r"<(a|blockquote|dl|div|img|ol|p|pre|table|ul)"
self.caption_re = "^caption$"
self.google_re = " google "
self.entries_re = "^[^entry-]more.*$"
self.facebook_re = "[^-]facebook"
self.facebook_broadcasting_re = "facebook-broadcasting"
self.twitter_re = "[^-]twitter"
self.tablines_replacements = ReplaceSequence()\
.create("\n", "\n\n")\
.append("\t")\
.append("^\\s+$")
self.contains_article = './/article|.//*[@id="article"]|.//*[@itemprop="articleBody"]'
def clean(self, doc_to_clean):
"""Remove chunks of the DOM as specified
"""
doc_to_clean = self.clean_body_classes(doc_to_clean)
doc_to_clean = self.clean_article_tags(doc_to_clean)
doc_to_clean = self.clean_em_tags(doc_to_clean)
doc_to_clean = self.remove_drop_caps(doc_to_clean)
doc_to_clean = self.remove_scripts_styles(doc_to_clean)
doc_to_clean = self.clean_bad_tags(doc_to_clean)
doc_to_clean = self.remove_nodes_regex(doc_to_clean, self.caption_re)
doc_to_clean = self.remove_nodes_regex(doc_to_clean, self.google_re)
doc_to_clean = self.remove_nodes_regex(doc_to_clean, self.entries_re)
doc_to_clean = self.remove_nodes_regex(doc_to_clean, self.facebook_re)
doc_to_clean = self.remove_nodes_regex(doc_to_clean,
self.facebook_broadcasting_re)
doc_to_clean = self.remove_nodes_regex(doc_to_clean, self.twitter_re)
doc_to_clean = self.clean_para_spans(doc_to_clean)
doc_to_clean = self.div_to_para(doc_to_clean, 'div')
doc_to_clean = self.div_to_para(doc_to_clean, 'span')
doc_to_clean = self.div_to_para(doc_to_clean, 'section')
return doc_to_clean
def clean_body_classes(self, doc):
"""Removes the `class` attribute from the <body> tag because
if there is a bad match, the entire DOM will be empty!
"""
elements = self.parser.getElementsByTag(doc, tag="body")
if elements:
self.parser.delAttribute(elements[0], attr="class")
return doc
def clean_article_tags(self, doc):
articles = self.parser.getElementsByTag(doc, tag='article')
for article in articles:
for attr in ['id', 'name', 'class']:
self.parser.delAttribute(article, attr=attr)
return doc
def clean_em_tags(self, doc):
ems = self.parser.getElementsByTag(doc, tag='em')
for node in ems:
images = self.parser.getElementsByTag(node, tag='img')
if len(images) == 0:
self.parser.drop_tag(node)
return doc
def remove_drop_caps(self, doc):
items = self.parser.css_select(doc, 'span[class~=dropcap], '
'span[class~=drop_cap]')
for item in items:
self.parser.drop_tag(item)
return doc
def remove_scripts_styles(self, doc):
# remove scripts
scripts = self.parser.getElementsByTag(doc, tag='script')
for item in scripts:
self.parser.remove(item)
# remove styles
styles = self.parser.getElementsByTag(doc, tag='style')
for item in styles:
self.parser.remove(item)
# remove comments
comments = self.parser.getComments(doc)
for item in comments:
self.parser.remove(item)
return doc
def clean_bad_tags(self, doc):
# ids
naughty_list = self.parser.xpath_re(doc, self.nauthy_ids_re)
for node in naughty_list:
if not node.xpath(self.contains_article):
self.parser.remove(node)
# class
naughty_classes = self.parser.xpath_re(doc, self.nauthy_classes_re)
for node in naughty_classes:
if not node.xpath(self.contains_article) and node.get('itemprop') != 'articleBody':
self.parser.remove(node)
# name
naughty_names = self.parser.xpath_re(doc, self.nauthy_names_re)
for node in naughty_names:
if not node.xpath(self.contains_article):
self.parser.remove(node)
return doc
def remove_nodes_regex(self, doc, pattern):
for selector in ['id', 'class']:
reg = "//*[re:test(@%s, '%s', 'i')]" % (selector, pattern)
naughty_list = self.parser.xpath_re(doc, reg)
for node in naughty_list:
self.parser.remove(node)
return doc
def clean_para_spans(self, doc):
spans = self.parser.css_select(doc, 'p span')
for item in spans:
self.parser.drop_tag(item)
return doc
def get_flushed_buffer(self, replacement_text, doc):
return self.parser.textToPara(replacement_text)
def replace_walk_left_right(self, kid, kid_text,
replacement_text, nodes_to_remove):
kid_text_node = kid
replace_text = self.tablines_replacements.replaceAll(kid_text)
if len(replace_text) > 1:
prev_node = self.parser.previousSibling(kid_text_node)
while prev_node is not None \
and self.parser.getTag(prev_node) == "a" \
and self.parser.getAttribute(
prev_node, 'grv-usedalready') != 'yes':
outer = " " + self.parser.outerHtml(prev_node) + " "
replacement_text.append(outer)
nodes_to_remove.append(prev_node)
self.parser.setAttribute(prev_node, attr='grv-usedalready',
value='yes')
prev_node = self.parser.previousSibling(prev_node)
replacement_text.append(replace_text)
next_node = self.parser.nextSibling(kid_text_node)
while next_node is not None \
and self.parser.getTag(next_node) == "a" \
and self.parser.getAttribute(
next_node, 'grv-usedalready') != 'yes':
outer = " " + self.parser.outerHtml(next_node) + " "
replacement_text.append(outer)
nodes_to_remove.append(next_node)
self.parser.setAttribute(next_node, attr='grv-usedalready',
value='yes')
next_node = self.parser.nextSibling(next_node)
def get_replacement_nodes(self, doc, div):
replacement_text = []
nodes_to_return = []
nodes_to_remove = []
kids = self.parser.childNodesWithText(div)
for kid in kids:
# The node is a <p> and already has some replacement text
if self.parser.getTag(kid) == 'p' and len(replacement_text) > 0:
new_node = self.get_flushed_buffer(
''.join(replacement_text), doc)
nodes_to_return.append(new_node)
replacement_text = []
nodes_to_return.append(kid)
# The node is a text node
elif self.parser.isTextNode(kid):
kid_text = self.parser.getText(kid)
self.replace_walk_left_right(kid, kid_text, replacement_text,
nodes_to_remove)
else:
nodes_to_return.append(kid)
# flush out anything still remaining
if(len(replacement_text) > 0):
new_node = self.get_flushed_buffer(''.join(replacement_text), doc)
nodes_to_return.append(new_node)
replacement_text = []
for n in nodes_to_remove:
self.parser.remove(n)
return nodes_to_return
def replace_with_para(self, doc, div):
self.parser.replaceTag(div, 'p')
def div_to_para(self, doc, dom_type):
bad_divs = 0
else_divs = 0
divs = self.parser.getElementsByTag(doc, tag=dom_type)
tags = ['a', 'blockquote', 'dl', 'div', 'img', 'ol', 'p',
'pre', 'table', 'ul']
for div in divs:
items = self.parser.getElementsByTags(div, tags)
if div is not None and len(items) == 0:
self.replace_with_para(doc, div)
bad_divs += 1
elif div is not None:
replace_nodes = self.get_replacement_nodes(doc, div)
replace_nodes = [n for n in replace_nodes if n is not None]
attrib = copy.deepcopy(div.attrib)
div.clear()
for i, node in enumerate(replace_nodes):
div.insert(i, node)
for name, value in attrib.items():
div.set(name, value)
else_divs += 1
return doc
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
import os
import sys
import json
import unittest
def isOneMesh(plyPath):
u"""
    Determine whether the ply is a one-mesh model
    """
    # if the filename has no hyphen, it is a one-mesh model
return plyPath[-6] != "-"
def getCharacterId(plyPath):
u"""
    Get the character ID portion of the ply filename
ex : p000
"""
filename = os.path.basename(plyPath)
return filename[0:4]
def getCharacterVersion(plyPath):
u"""
    Get the character version portion of the ply filename
    the "01" part of p002_01-2.ply
"""
filename = os.path.basename(plyPath)
return filename[5:7]
def getPartsIndexStr(plyPath):
u"""
    Get the parts index portion from the ply filename
    the X part of p005_03-X.ply
    returns "-1" for one-mesh models
"""
if isOneMesh(plyPath):
return "-1"
return plyPath[-5]
def getPartsIndexInt(plyPath):
u"""
    Get the parts index from the ply filename as an int
    the 0 part of p005_03-0.ply
"""
return int(getPartsIndexStr(plyPath))
def getCategoryShort(plyPath):
u"""
    Get the category portion from the ply filename
Returns :
p : player
e : enemy
n : npc
"""
id = getCharacterId(plyPath)
category = id[0]
return category
def getCategoryLong(plyPath):
longCategoryMap = {
"p" : "Player",
"e" : "Enemy",
"n" : "Npc",
}
shortCategory = getCategoryShort(plyPath)
longCategory = longCategoryMap[shortCategory] if shortCategory in longCategoryMap else "Others"
return longCategory
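# Illustration of the naming convention above (hypothetical path): for "p005_03-1.ply",
#   getCharacterId -> "p005", getCharacterVersion -> "03", getPartsIndexStr -> "1",
#   getCategoryShort -> "p", getCategoryLong -> "Player"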
def getPartsName(plyPath):
u""" plyのパスからパーツ名を取得
"""
filename = os.path.basename(plyPath)
indexfile = os.path.join(os.path.dirname(plyPath), "plyindex.json")
if os.path.exists(indexfile):
with open(indexfile) as f:
indexJson = json.load(f)
partsIndex = getPartsIndexStr(plyPath)
partsname = indexJson[partsIndex]
if partsname != "":
return normalizePartsName(partsname)
return getPartsNameDefault(getPartsIndexInt(plyPath))
def getPartsNameDefault(index):
partsNameList = ["Lower", "Upper", "Face", "Hair", "Accessory", "OneModel"]
    assert index < len(partsNameList), u"getPartsNameDefault : index must be less than {} : argument {}".format(len(partsNameList), index)
return partsNameList[index]
def normalizePartsName(partsName):
if partsName.lower() == "hut":
return "Accessory"
return partsName[0].upper() + partsName[1:].lower()
def makeContentsFileName(characterId, version, partsName, ext):
u"""
    Generate the fbx file name
"""
if partsName.lower() == "onemodel":
formatStr = "{}_{}_{}.{}"
else:
formatStr = "Parts{}_{}_{}.{}"
return formatStr.format(partsName, characterId, version, ext)
def makeContentsInfo(plyPath):
charId = getCharacterId(plyPath)
charVer = getCharacterVersion(plyPath)
partsName = getPartsName(plyPath)
return (charId, charVer, partsName)
def makeRelativeContentPath(plyPath, resType, ext):
u"""
Parts/Lower/[resType]/PartsLower_pl000_01.[ext]
OneModel/[resType]/[name].[ext]
"""
charId, charVer, partsName = makeContentsInfo(plyPath)
fileName = makeContentsFileName(charId, charVer, partsName, ext)
if partsName.lower() == "onemodel":
category = getCategoryLong(plyPath)
return os.path.join(partsName, category, resType, fileName)
else:
return os.path.join("Parts/", partsName, resType, fileName)
def makeRelativeFbxContentsPath(plyPath):
u"""
    Parts/Lower/Meshes/Lower_pl000_01.fbx
"""
return makeRelativeContentPath(plyPath, "Meshes", "fbx")
def makeRelativeTexContentsPath(plyPath):
u"""
Parts/Lower/Textures/Lower_pl000_01.png
"""
return makeRelativeContentPath(plyPath, "Textures", "png")
class TestPlyPath(unittest.TestCase):
def test_plyindex(self):
self.check("D:/prog/0_myprogram/bishrpg_resources/models/characters/p005_lingling/03_tbs2/p005_03-0.ply", 0, "Accessory")
self.check("D:/prog/0_myprogram/bishrpg_resources/models/characters/p005_lingling/03_tbs2/p005_03-1.ply", 1, "Lower")
self.check("D:/prog/0_myprogram/bishrpg_resources/models/characters/p005_lingling/03_tbs2/p005_03-2.ply", 2, "Upper")
self.check("D:/prog/0_myprogram/bishrpg_resources/models/characters/p005_lingling/03_tbs2/p005_03-3.ply", 3, "Face")
self.check("D:/prog/0_myprogram/bishrpg_resources/models/characters/p005_lingling/03_tbs2/p005_03-4.ply", 4, "Hair")
def test_default(self):
self.check("D:/prog/0_myprogram/bishrpg_resources/models/characters/p001_hug/01_otnk/p001_01-0.ply", 0, "Lower")
self.check("D:/prog/0_myprogram/bishrpg_resources/models/characters/p001_hug/01_otnk/p001_01-1.ply", 1, "Upper")
self.check("D:/prog/0_myprogram/bishrpg_resources/models/characters/p001_hug/01_otnk/p001_01-2.ply", 2, "Face")
self.check("D:/prog/0_myprogram/bishrpg_resources/models/characters/p001_hug/01_otnk/p001_01-3.ply", 3, "Hair")
def test_contents_fbx(self):
self.assertEqual(makeRelativeFbxContentsPath("D:/prog/0_myprogram/bishrpg_resources/models/characters/p005_lingling/03_tbs2/p005_03-1.ply"), os.path.normpath("Lower/Meshes/Lower_p005_03.fbx"))
self.assertEqual(makeRelativeFbxContentsPath("D:/prog/0_myprogram/bishrpg_resources/models/characters/p001_hug/01_otnk/p001_01-1.ply"), os.path.normpath("Upper/Meshes/Upper_p001_01.fbx"))
def test_contents_tex(self):
self.assertEqual(makeRelativeTexContentsPath("D:/prog/0_myprogram/bishrpg_resources/models/characters/p005_lingling/03_tbs2/p005_03-1.ply"), os.path.normpath("Lower/Textures/Lower_p005_03.png"))
self.assertEqual(makeRelativeTexContentsPath("D:/prog/0_myprogram/bishrpg_resources/models/characters/p001_hug/01_otnk/p001_01-1.ply"), os.path.normpath("Upper/Textures/Upper_p001_01.png"))
def check(self, path, index, partsName):
self.assertEqual(getPartsIndexInt(path), index)
self.assertEqual(getPartsName(path), partsName)
def main():
unittest.main()
return 0
if __name__ == "__main__":
exit(main())
|
from __future__ import print_function
import os
import argparse
import torch
import torch.backends.cudnn as cudnn
import numpy as np
from data import cfg
from layers.functions.prior_box import PriorBox
from utils.nms_wrapper import nms
# from utils.nms.py_cpu_nms import py_cpu_nms
import cv2
from models.faceboxes import FaceBoxes
from utils.box_utils import decode
from utils.timer import Timer
import time
import copy
parser = argparse.ArgumentParser(description='FaceBoxes')
parser.add_argument('-m', '--trained_model', default='/home/yana/FaceBoxes/weights_bak/FaceBoxes_epoch_295.pth',
type=str, help='Trained state_dict file path to open')
parser.add_argument('--save_folder', default='eval/', type=str, help='Dir to save results')
parser.add_argument('--cpu', action="store_true", default=False, help='Use cpu inference')
parser.add_argument('--dataset', default='PASCAL', type=str, choices=['AFW', 'PASCAL', 'FDDB'], help='dataset')
parser.add_argument('--confidence_threshold', default=0.5, type=float, help='confidence_threshold')
parser.add_argument('--top_k', default=100, type=int, help='top_k')
parser.add_argument('--nms_threshold', default=0.2, type=float, help='nms_threshold')
parser.add_argument('--keep_top_k', default=10, type=int, help='keep_top_k')
parser.add_argument('-s', '--show_image', action="store_true", default=False, help='show detection results')
parser.add_argument('--vis_thres', default=0.5, type=float, help='visualization_threshold')
args = parser.parse_args()
def check_keys(model, pretrained_state_dict):
ckpt_keys = set(pretrained_state_dict.keys())
model_keys = set(model.state_dict().keys())
used_pretrained_keys = model_keys & ckpt_keys
unused_pretrained_keys = ckpt_keys - model_keys
missing_keys = model_keys - ckpt_keys
print('Missing keys:{}'.format(len(missing_keys)))
print('Unused checkpoint keys:{}'.format(len(unused_pretrained_keys)))
print('Used keys:{}'.format(len(used_pretrained_keys)))
assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint'
return True
def remove_prefix(state_dict, prefix):
''' Old style model is stored with all names of parameters sharing common prefix 'module.' '''
print('remove prefix \'{}\''.format(prefix))
f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
return {f(key): value for key, value in state_dict.items()}
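# Illustrative mapping (keys not from a real checkpoint):
# {'module.conv1.weight': w, 'fc.bias': b} -> {'conv1.weight': w, 'fc.bias': b}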
def load_model(model, pretrained_path, load_to_cpu):
print('Loading pretrained model from {}'.format(pretrained_path))
if load_to_cpu:
pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage)
else:
device = torch.cuda.current_device()
pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(device))
if "state_dict" in pretrained_dict.keys():
pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')
else:
pretrained_dict = remove_prefix(pretrained_dict, 'module.')
check_keys(model, pretrained_dict)
model.load_state_dict(pretrained_dict, strict=False)
return model
if __name__ == '__main__':
torch.set_grad_enabled(False)
# net and model
net = FaceBoxes(phase='test', size=None, num_classes=3) # initialize detector
net = load_model(net, args.trained_model, args.cpu)
net.eval()
print('Finished loading model!')
print(net)
cudnn.benchmark = True
device = torch.device("cpu" if args.cpu else "cuda")
net = net.to(device)
resize = 1
# testing begin
# for i, img_name in enumerate(test_dataset):
image_path = '/home/yana/FaceBoxes/test/'
images = os.listdir(image_path)
for image in images:
impPath = os.path.join(image_path, image)
img_raw = cv2.imread(impPath, cv2.IMREAD_COLOR)
img_raw = cv2.resize(img_raw, (960, 960))
begin = time.time()
img = np.float32(img_raw)
if resize != 1:
img = cv2.resize(img, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
im_height, im_width, _ = img.shape
# print(im_height, im_width)
scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
img -= (104, 117, 123)
img = img.transpose(2, 0, 1)
img = torch.from_numpy(img).unsqueeze(0)
img = img.to(device)
scale = scale.to(device)
loc, conf = net(img) # forward pass
conf = conf.view(conf.shape[1], conf.shape[2])
index = conf.argmax(dim = 1)
background_scores = conf.squeeze(0).data.cpu().numpy()[:, 0]
background_scores = background_scores[np.newaxis,:]
face_scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
face_mask_scores = conf.squeeze(0).data.cpu().numpy()[:, 2]
target = np.maximum(face_scores,face_mask_scores)
target = target[np.newaxis,:]
conf = torch.tensor(np.concatenate((background_scores, target)).T)
resume = time.time()-begin
priorbox = PriorBox(cfg, image_size=(im_height, im_width))
priors = priorbox.forward()
priors = priors.to(device)
prior_data = priors.data
boxes = decode(loc.data.squeeze(0), prior_data, cfg['variance'])
boxes = boxes * scale / resize
boxes = boxes.cpu().numpy()
# mask
scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
# ignore low scores
inds = np.where(scores > args.confidence_threshold)[0]
index = index[inds]
boxes = boxes[inds]
scores = scores[inds]
# keep top-K before NMS
order = scores.argsort()[::-1][:args.top_k]
index = index.cpu().numpy()[order]
boxes = boxes[order]
scores = scores[order]
# do NMS
dets = np.hstack((boxes, scores[:, np.newaxis], index[:, np.newaxis])).astype(np.float32, copy=False)
# print(dets)
# keep = py_cpu_nms(dets, args.nms_threshold)
keep = nms(dets, args.nms_threshold)
dets = dets[keep, :]
# keep top-K faster NMS
dets = dets[:args.keep_top_k, :]
print('FPS: ', 1 / (time.time() - begin))
for det in dets:
index = det[5]
if index == 1:
# f.write('face ' + str(round(det[4],3)) + ' ' + str(det[0]) + ' ' + str(det[1]) + ' ' + str(det[2]) + ' ' + str(det[3]) + '\n')
                # cast to int: OpenCV drawing functions expect integer pixel coordinates
                cv2.rectangle(img_raw, (int(det[0]), int(det[1])), (int(det[2]), int(det[3])), (0, 0, 255), 2)
                cv2.putText(img_raw, 'No_mask' + str(round(det[4], 2)), (int(det[0]), int(det[1])), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
else:
# f.write('face_mask '+ str(round(det[4],3)) + ' ' + str(det[0]) + ' ' + str(det[1]) + ' ' + str(det[2]) + ' ' + str(det[3]) + '\n')
                cv2.rectangle(img_raw, (int(det[0]), int(det[1])), (int(det[2]), int(det[3])), (0, 255, 0), 2)
                cv2.putText(img_raw, 'Mask' + str(round(det[4], 2)), (int(det[0]), int(det[1])), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
cv2.imwrite(image, img_raw)
|
from jesse.helpers import get_candle_source, slice_candles
import talib
import numpy as np
from typing import Union
import math
from numba import njit
def ef(candles: np.ndarray, lp_per: int = 10, hp_per: int = 30, f_type: str = "Ehlers", normalize: bool = False, source_type: str = "close", sequential: bool = False) -> Union[
float, np.ndarray]:
# added to definition : use_comp: bool = False, comp_intensity: float = 90.0,
"""
https://www.tradingview.com/script/kPe86Nbc-Roofing-Filter-DW/
compression function not working
"""
candles = slice_candles(candles, sequential)
source = get_candle_source(candles, source_type=source_type)
if f_type == "Ehlers":
roof = erf( source, hp_per, lp_per)
elif f_type == "Gaussian":
roof = grf( source, hp_per, lp_per)
elif f_type == "Butterworth":
roof = brf( source, hp_per, lp_per)
    rms = RMS(source, roof, int(round((hp_per + lp_per) / 2)))  # talib expects an integer period
if roof[-1] > 0:
norm_roof = roof/rms
elif roof[-1] < 0:
norm_roof = -np.abs(roof)/rms
else:
norm_roof = 0
if normalize:
filt = norm_roof
else:
filt = roof
if sequential:
return filt
else:
return filt[-1]
#jesse backtest '2021-01-03' '2021-03-02'
@njit
def grf(x,t_hp, t_lp):
beta1 = (1 - np.cos(4*np.arcsin(1)/t_hp))/(np.sqrt(2)-1)
alpha1 = -beta1 + np.sqrt(np.power(beta1,2) + 2*beta1)
beta2 = (1 - np.cos(4*np.arcsin(1)/t_lp))/(np.sqrt(2)-1)
alpha2 = -(beta2) + np.sqrt(np.power(beta2,2) + 2*beta2)
ghp = np.zeros_like(x)
grf = np.zeros_like(x)
for i in range(0,x.shape[0]):
ghp[i] = (1 - np.power(alpha1, 2))*x[i] + 2*(1-alpha1) * (ghp[i-1] - x[i-1]) + np.power(1 - alpha1, 2) * (x[i-2] - ghp[i-2])
grf[i] = np.power(alpha2, 2)*ghp[i] + 2*(1 - alpha2)*grf[i-1] - np.power(1 - alpha2, 2)* grf[i-2]
return grf
@njit
def erf( x, t_hp, t_lp):
omega1 = 4*np.arcsin(1)/t_hp
omega2 = 4*np.arcsin(1)/t_lp
alpha = (np.cos((np.sqrt(2)/2)*omega1) + np.sin((np.sqrt(2)/2)*omega1) - 1)/np.cos((np.sqrt(2)/2)*omega1)
hp = np.zeros_like(x)
erf = np.zeros_like(x)
for i in range(0,x.shape[0]):
hp[i] = np.power(1 - alpha/2, 2)*(x[i] - 2*x[i-1] + x[i-2]) + 2*(1 - alpha) * (hp[i-1]) - np.power(1 - alpha,2) * (hp[i-2])
a1 = np.exp(-np.sqrt(2)*2*np.arcsin(1)/t_lp)
b1 = 2*a1*np.cos((np.sqrt(2)/2)*omega2)
c2 = b1
c3 = -np.power(a1,2)
c1 = 1 - c2 - c3
for i in range(x.shape[0]):
erf[i] = c1*hp[i]+c2*erf[i-1] + c3*erf[i-2]
return erf
@njit
def brf(x, t_hp, t_lp):
a1 = np.exp(-np.sqrt(2)*2*np.arcsin(1)/t_hp)
b1 = 2*a1*np.cos(np.sqrt(2)*2*np.arcsin(1)/t_hp)
c1 = np.power(a1, 2)
d1 = ((1 - b1 + c1)/4)
a2 = np.exp(-np.sqrt(2)*2*np.arcsin(1)/t_lp)
b2 = 2*a2*np.cos(np.sqrt(2)*2*np.arcsin(1)/t_lp)
c2 = np.power(a2, 2)
d2 = ((1 - b2 + c2)/4)
bhp = np.zeros_like(x)
brf = np.zeros_like(x)
for i in range(x.shape[0]):
bhp[i] = b1 * bhp[i-1] - c1*bhp[i-2] + (1 - d1)*x[i] - (b1 + 2*d1) * x[i-1] + (c1 - d1)*x[i-2]
brf[i] = b2 * brf[i-1] - c2*brf[i-2] + d2*(bhp[i] + 2*bhp[i-1] + bhp[i-2])
return brf
def RMS(source,x,t):
rms = np.full_like(source, 0)
rms = np.sqrt(talib.SMA(np.power(x,2),t))
return rms
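# Minimal usage sketch (not from the original file). It assumes candles follow
# jesse's row layout [timestamp, open, close, high, low, volume]; the values
# below are synthetic and only illustrate how `ef` is called.
if __name__ == "__main__":
    n = 300
    close = np.cumsum(np.random.randn(n)) + 100.0
    fake_candles = np.column_stack((
        np.arange(n) * 60000,  # timestamp in ms
        close,                 # open (reused for brevity)
        close,                 # close
        close + 0.5,           # high
        close - 0.5,           # low
        np.full(n, 1.0),       # volume
    ))
    # last value of the Ehlers roofing filter on the synthetic series
    print(ef(fake_candles, lp_per=10, hp_per=30, f_type="Ehlers", sequential=False))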
|
import os
import profile
import time
import threading
import configparser
import controller
controller = controller.Control()
working_path = os.path.dirname(os.path.abspath(__file__)) + "/queue"
class thread_working(threading.Thread):
def __init__(self, threadID, name, stopper):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.work_list = []
self.stopper = stopper
print("INIT THREADING")
self.profile_manager = profile.Profile()
def run(self):
self.process = thread_process(2, "Process", 0)
self.process.start()
self.process.join()
if os.path.exists(working_path + "/work.ini"):
self.work_list = self.profile_manager.read_config(working_path + "/work.ini")
print(self.name, "Threading")
all_round = int(self.work_list[0]) + int(self.work_list[1])
print("All Round : ", all_round)
# Loop in section 1
if int(self.work_list[0]) > 0:
# controller.motor1.change_duty(float(self.work_list[2]) / 2)
controller.motor1.l_drive(float(self.work_list[2]))
while (controller.sensor3.get_value() and controller.sensor4.get_value()):
if self.stopper.is_set():
controller.motor1.stop()
controller.motor2.stop()
break
controller.motor1.stop()
self.stopper.wait(1)
print("Thread Doing", self.work_list)
for i in range(int(self.work_list[0])):
if self.stopper.is_set():
controller.motor1.stop()
controller.motor2.stop()
break
finish = forward = backward = False
controller.motor1.r_drive(float(self.work_list[2]) / 4)
while finish is False:
if self.stopper.is_set():
controller.motor1.stop()
controller.motor2.stop()
break
if not(controller.sensor5.get_value() or controller.sensor6.get_value()):
if forward is False:
controller.motor1.stop()
self.stopper.wait(1)
controller.motor1.l_drive(float(self.work_list[2]))
forward = True
elif forward and not(controller.sensor3.get_value() or
controller.sensor4.get_value()):
backward = True
finish = forward and backward
print("End Round : ", i)
self.process = thread_process(
2, "Process", ((i + 1) / all_round) * 100)
self.process.start()
controller.motor1.stop()
self.stopper.wait(3)
controller.motor1.stop()
# Loop in section 2
if int(self.work_list[1]) > 0:
controller.motor2.r_drive()
while controller.sensor1.get_value():
if self.stopper.is_set():
controller.motor1.stop()
controller.motor2.stop()
break
controller.motor2.stop()
self.stopper.wait(1)
for i in range(int(self.work_list[1])):
if self.stopper.is_set():
controller.motor1.stop()
controller.motor2.stop()
break
finish = forward = backward = False
controller.motor2.l_drive()
while finish is False:
if self.stopper.is_set():
controller.motor1.stop()
controller.motor2.stop()
break
if not controller.sensor2.get_value():
if forward is False:
controller.motor2.stop()
self.stopper.wait(1)
controller.motor2.r_drive()
forward = True
elif forward and not controller.sensor1.get_value():
backward = True
finish = forward and backward
print("End Round : ", i)
self.process = thread_process(
2, "Process", ((i + 1) / all_round) * 100)
self.process.start()
controller.motor2.stop()
self.stopper.wait(3)
controller.motor2.stop()
if os.path.exists(working_path + "/work.ini"):
os.remove(working_path + "/work.ini")
if os.path.exists(working_path + "/process.ini"):
os.remove(working_path + "/process.ini")
if os.path.exists(working_path + "/status.ini"):
cfg = configparser.ConfigParser()
with open(working_path + "/status.ini", 'r') as process_file:
cfg.read_file(process_file)
cfg['status']['active'] = str(0)
cfg['status']['command'] = str(1)
with open(working_path + "/status.ini", 'w') as status_file:
cfg.write(status_file)
# Write Process File
class thread_process(threading.Thread):
def __init__(self, threadID, name, process):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.process = process
self.cfg = configparser.ConfigParser()
self.cfg['process'] = {}
self.cfg['process']['success'] = str(self.process)
def run(self):
print(self.name, "Threading", self.process)
with open(os.path.dirname(os.path.abspath(__file__)) +
"/queue/process.ini", 'w') as process_file:
self.cfg.write(process_file)
time.sleep(2)
print("FIN")
def main():
if os.path.exists(working_path + "/work.ini"):
os.remove(working_path + "/work.ini")
if os.path.exists(working_path + "/process.ini"):
os.remove(working_path + "/process.ini")
last_command = 100
cfg = configparser.ConfigParser()
if os.path.exists(working_path + "/status.ini"):
pass
else:
cfg['status'] = {}
cfg['status']['active'] = str(0)
cfg['status']['command'] = str(100)
with open(working_path + "/status.ini", 'w') as status_file:
cfg.write(status_file)
stopper = threading.Event()
while 1:
if os.path.exists(working_path + "/status.ini"):
with open(working_path + "/status.ini", 'r') as status_file:
cfg.read_file(status_file)
active = cfg['status']['active']
new_command = cfg['status']['command']
if new_command != last_command:
if new_command == '0':
print("Working")
stopper.clear()
working = thread_working(threadID=1, name="work",
stopper=stopper)
working.start()
# worker = StatusChecker(worker=working, stopper=stopper)
# worker.start()
elif new_command == '1':
                    if active == '1':  # 'active' is stored as a string in status.ini
print("Terminate")
stopper.set()
elif new_command == '2':
print("Reset")
stopper.clear()
if os.path.exists(working_path + "/work.ini"):
os.remove(working_path + "/work.ini")
os.remove(working_path + "/process.ini")
last_command = new_command
time.sleep(0.5)
else:
cfg['status'] = {}
cfg['status']['active'] = str(0)
cfg['status']['command'] = str(100)
with open(working_path + "/status.ini", 'w') as status_file:
cfg.write(status_file)
if __name__ == '__main__':
main()
|
import unittest
import mock
from ioteclabs_wrapper.core.access import get_labs_dal, LabsDAL
from ioteclabs_wrapper.core.exceptions import LabsBadRequest, LabsException, LabsNotAuthenticated, \
LabsPermissionDenied, LabsResourceNotFound, LabsAPIException
class TestLabsDalPrivateCall(unittest.TestCase):
def setUp(self):
self.dal = LabsDAL()
def test_bad_response_400(self):
self.dal.endpoint_url = '' # for safety
with mock.patch('ioteclabs_wrapper.core.access.LabsDAL.session', new_callable=mock.PropertyMock) as p:
p.return_value = p.get = p
p.status_code = 400
with self.assertRaises(LabsBadRequest):
self.dal._call('get', [])
def test_bad_response_401(self):
self.dal.endpoint_url = '' # for safety
with mock.patch('ioteclabs_wrapper.core.access.LabsDAL.session', new_callable=mock.PropertyMock) as p:
p.return_value = p.get = p
p.status_code = 401
with self.assertRaises(LabsNotAuthenticated):
self.dal._call('get', [])
def test_bad_response_403(self):
self.dal.endpoint_url = '' # for safety
with mock.patch('ioteclabs_wrapper.core.access.LabsDAL.session', new_callable=mock.PropertyMock) as p:
p.return_value = p.get = p
p.status_code = 403
with self.assertRaises(LabsPermissionDenied):
self.dal._call('get', [])
def test_bad_response_404(self):
self.dal.endpoint_url = '' # for safety
with mock.patch('ioteclabs_wrapper.core.access.LabsDAL.session', new_callable=mock.PropertyMock) as p:
p.return_value = p.get = p
p.status_code = 404
with self.assertRaises(LabsResourceNotFound):
self.dal._call('get', [])
def test_bad_response_500(self):
self.dal.endpoint_url = '' # for safety
with mock.patch('ioteclabs_wrapper.core.access.LabsDAL.session', new_callable=mock.PropertyMock) as p:
p.return_value = p.get = p
p.status_code = 500
with self.assertRaises(LabsAPIException):
self.dal._call('get', [])
def test_bad_response_unsupported_exception(self):
self.dal.endpoint_url = '' # for safety
with mock.patch('ioteclabs_wrapper.core.access.LabsDAL.session', new_callable=mock.PropertyMock) as p:
p.return_value = p.get = p
p.status_code = 418 # i'm a teapot
with self.assertRaises(LabsException):
self.dal._call('get', [])
def test_good_response(self):
self.dal.endpoint_url = ''
with mock.patch('ioteclabs_wrapper.core.access.LabsDAL.session', new_callable=mock.PropertyMock) as p:
p.return_value = p.get = p
p.status_code = 200
self.assertEqual(self.dal._call('get', []), p)
class TestLabsDalAuthenticate(unittest.TestCase):
def setUp(self):
self.dal = LabsDAL()
self.dal._call = mock.Mock()
self.dal._call.return_value = self.dal._call
# noinspection PyUnresolvedReferences
self.dal._call.json.return_value = {'token': 'token'}
def test_get_credentials(self):
with mock.patch('ioteclabs_wrapper.core.access.get_labs_credentials') as f:
self.dal.authenticate()
self.assertEqual(f.called, True)
# noinspection PyUnresolvedReferences
self.assertEqual(self.dal._call.called, True)
def test_not_get_credentials(self):
with mock.patch('ioteclabs_wrapper.core.access.get_labs_credentials') as f:
self.dal.authenticate('username', 'password')
self.assertEqual(f.called, False)
# noinspection PyUnresolvedReferences
self.assertEqual(self.dal._call.called, True)
class TestLabsDalCall(unittest.TestCase):
def setUp(self):
self.dal = LabsDAL()
self.dal.authenticate = mock.Mock()
def test_recurse(self):
self.dal._call = mock.Mock(side_effect=[LabsNotAuthenticated('Test'), None])
self.dal.call('method', [])
# noinspection PyUnresolvedReferences
self.assertEqual(self.dal.authenticate.called, True)
class TestGetLabsDal(unittest.TestCase):
def test_singleton(self):
dal1 = get_labs_dal()
dal2 = get_labs_dal()
self.assertIs(dal1, dal2)
|
# repo: Nicolas-Lefort/conv_neural_net_time_serie
# convolutional neural net, time series classification, stocks
# as expected, the model performed poorly (random walk ?), but we saw a
# possible way to treat a multivariate time series classification problem
# possible improvements: labeling, wavelet transform
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
import numpy as np
import joblib
from keras.preprocessing.sequence import TimeseriesGenerator
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Conv1D
from tensorflow.keras.layers import MaxPooling1D
from tensorflow.keras.layers import Flatten
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import confusion_matrix
from sklearn.metrics import ConfusionMatrixDisplay
from utils import clean, augment, split
from labeling import generate_labels
class Time_serie():
def __init__(self, df, train_size=0.9, val_size=0.1, sequence_length=101):
self.train_size = train_size
self.val_size = val_size
self.target = "signal"
self.df = df
self.sequence_length = sequence_length
        self.x_train = None
        self.y_train = None
        self.x_val = None
        self.y_val = None
        self.x_test = None
        self.y_test = None
# filename to save model
@property
def model_filename(self) -> str:
return "classif_multi.h5"
# number of different labels
@property
def number_of_class(self) -> int:
return len(np.unique(self.y_train))
# weight/count of each label in the training set
@property
def weights(self) -> dict:
unique, counts = np.unique(self.y_train, return_counts=True)
return dict(zip(unique, counts))
# weight correction for loss computation
@property
def balance_weights(self) -> dict:
balance_weights = {k: round(1 / v * 10000, 2) for k, v in self.weights.items()}
print("weights for loss correction : ", balance_weights)
return balance_weights
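    # e.g. label counts {0.0: 5000, 1.0: 2500} -> balance_weights {0.0: 2.0, 1.0: 4.0}
    # (illustrative numbers: the 1 / count * 10000 scaling gives rarer classes larger weights)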
# create train, val, test dataset
def preprocess_data(self) -> None:
# data augmentation
df_augment = augment(self.df)
# labeling
df_signal = generate_labels(df_augment.copy(), self.sequence_length)
# cleaning
df_final, df_signal = clean(df_augment.copy(), df_signal.copy())
# train dataframe
train_df_scaled = self.build_train_df(df_final, df_signal)
# validation dataframe
val_df_scaled = self.build_val_df(df_final, df_signal)
# test dataframe
test_df_scaled = self.build_test_df(df_final, df_signal)
        # transform dataframes into sequences of length sequence_length
self.x_train, self.y_train = self.make_dataset(train_df_scaled)
self.x_val, self.y_val = self.make_dataset(val_df_scaled)
self.x_test, self.y_test = self.make_dataset(test_df_scaled)
def build_train_df(self, df, df_signal) -> pd.DataFrame:
# split data
train_df, _, _ = split(df, train_size=self.train_size, val_size=self.val_size)
signal, _, _ = split(df_signal, train_size=self.train_size, val_size=self.val_size)
# scaling minmax
scaler = MinMaxScaler()
train_df_scaled = scaler.fit_transform(train_df)
# convert into pd.dataframe
train_df_scaled = pd.DataFrame(train_df_scaled, index=train_df.index, columns=train_df.columns)
# save scaler for val and test sets
joblib.dump(scaler, 'scaler.save')
# add signal
train_df_scaled = train_df_scaled.join(signal)
return train_df_scaled
def build_val_df(self, df, df_signal) -> pd.DataFrame:
# split data
_, val_df, _ = split(df, train_size=self.train_size, val_size=self.val_size)
_, signal, _ = split(df_signal, train_size=self.train_size, val_size=self.val_size)
# load scaler
scaler = joblib.load('scaler.save')
# fit val data
val_df_scaled = scaler.transform(val_df)
# convert into pd.dataframe
val_df_scaled = pd.DataFrame(val_df_scaled, index=val_df.index, columns=val_df.columns)
# add signal
val_df_scaled = val_df_scaled.join(signal)
return val_df_scaled
def build_test_df(self, df, df_signal) -> pd.DataFrame:
# split data
_, _, test_df = split(df, train_size=self.train_size, val_size=self.val_size)
_, _, signal = split(df_signal, train_size=self.train_size, val_size=self.val_size)
# load scaler
scaler = joblib.load('scaler.save')
# fit test data
test_df_scaled = scaler.transform(test_df)
# convert into pd.dataframe
test_df_scaled = pd.DataFrame(test_df_scaled, index=test_df.index, columns=test_df.columns)
# add signal
test_df_scaled = test_df_scaled.join(signal)
return test_df_scaled
def make_dataset(self, df) -> np.array:
df = df.copy()
# prepare dataframe for TimeseriesGenerator object
target = df[self.target]
df.drop(columns=[self.target], inplace=True)
# convert to numpy
data = np.array(df, dtype=np.float32)
target = np.array(target, dtype=np.float32)
# create sequences, output shape -> (n_example, n_time_step, n_features)
dataset = TimeseriesGenerator(
data=data,
targets=target,
length=self.sequence_length,
stride=1,
shuffle=True,
batch_size=len(data))
X, y = dataset[0]
return X, y
def build_nn(self) -> None:
        # conv net: the intuition is that the input (n_time_step, n_features) is like an image
        # but without channels. 1D convolution should capture the patterns if there are any
input_shape = (self.x_train.shape[1], self.x_train.shape[2])
inputs = Input(shape=input_shape)
x = Conv1D(filters=64, kernel_size=3, activation='relu')(inputs)
x = Conv1D(filters=64, kernel_size=3, activation='relu')(x)
x = Dropout(0.5)(x)
x = MaxPooling1D(pool_size=2)(x)
x = Flatten()(x)
x = Dense(128, activation="relu", kernel_initializer="he_normal")(x)
x = Dense(54, activation="relu", kernel_initializer="he_normal")(x)
        # size of output equal to the number of classes
outputs = Dense(units=self.number_of_class, activation="softmax")(x)
model = Model(inputs=inputs, outputs=outputs)
model.summary()
opt = keras.optimizers.Adam(learning_rate=0.00001)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)
model.compile(loss=loss,
optimizer=opt,
metrics=['accuracy'])
self.model = model
def train(self, epoch=5, batch_size=128) -> None:
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
patience=5,
mode='min')
self.history = self.model.fit(x=self.x_train,
y=self.y_train,
epochs=epoch,
validation_data=(self.x_val, self.y_val),
callbacks=[early_stopping],
class_weight=self.balance_weights,
batch_size=batch_size)
self.model.save(self.model_filename)
def test(self) -> None:
model = keras.models.load_model(self.model_filename)
y_pred_prob = model.predict(self.x_test)
y_pred = np.argmax(y_pred_prob, axis=1)
cm = confusion_matrix(self.y_test, y_pred)
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=np.unique(self.y_train))
disp.plot(cmap=plt.cm.Blues)
plt.show()
if __name__ == "__main__":
project_name = "time_serie"
pd.options.display.max_columns = 10
df = pd.read_csv('data_5_min.csv')
model = Time_serie(df=df.head(200000).copy(),
train_size=0.8,
val_size=0.1,
sequence_length=401, )
model.preprocess_data()
model.build_nn()
model.train(epoch=10, batch_size=128)
model.test()
|
from unittest.mock import patch
from .test_base import BaseTestCase
from .test_data import (invalid_facebook_token, invalid_google_token, invalid_twitter_tokens,
one_twitter_token, social_reg_data, social_reg_no_email_data)
from authors.apps.authentication.social_registration import register_social_user, settings
from authors.apps.authentication.tests.test_data import test_user_data
class TestSocialAuth(BaseTestCase):
def test_invalid_facebook_token(self):
response = self.client.post('/api/users/login/facebook', invalid_facebook_token, format='json')
self.assertEqual(response.data['auth_token'], 'Please provide a valid token')
def test_invalid_google_token(self):
response = self.client.post('/api/users/login/google', invalid_google_token, format='json')
self.assertEqual(response.data['auth_token'], 'Please provide a valid token')
def test_invalid_twitter_tokens(self):
response = self.client.post('/api/users/login/twitter', invalid_twitter_tokens, format='json')
self.assertEqual(response.data['auth_token'], 'Please provide valid tokens')
def test_one_twitter_token(self):
response = self.client.post('/api/users/login/twitter', one_twitter_token, format='json')
self.assertEqual(response.data['auth_token'], 'Please provide two tokens')
def test_with_valid_google_token_succeeds(self):
class MockGoogleAuth:
@classmethod
def verify_oauth2_token(cls, token, request):
return {
'iss': 'accounts.google.com',
'email': '<EMAIL>',
'given_name': 'testname'
}
with patch('authors.apps.authentication.serializers.id_token', new_callable=MockGoogleAuth):
response = self.client.post('/api/users/login/google', invalid_google_token, format='json')
self.assertEqual(response.data['auth_token']['email'], '<EMAIL>')
def test_with_invalid_iss_fails(self):
class MockGoogleAuth:
@classmethod
def verify_oauth2_token(cls, token, request):
return {
'iss': 'invalid_iss.com',
'email': '<EMAIL>',
'given_name': 'testname'
}
with patch('authors.apps.authentication.serializers.id_token', new_callable=MockGoogleAuth):
response = self.client.post('/api/users/login/google', invalid_google_token, format='json')
self.assertEqual(response.data['auth_token'], 'Please provide a valid token')
def test_with_valid_facebook_token_succeeds(self):
class MockFacebookAuth:
def __init__(self, *args, **kwargs):
pass
@classmethod
def request(cls, link):
return {
'email': '<EMAIL>',
'name': 'testfbname'
}
def __call__(self, *args, **kwargs):
return self
with patch('authors.apps.authentication.serializers.facebook.GraphAPI', new_callable=MockFacebookAuth):
response = self.client.post('/api/users/login/facebook', invalid_facebook_token, format='json')
self.assertEqual(response.data['auth_token']['email'], '<EMAIL>')
def test_with_valid_twitter_token_succeeds(self):
class MockTwitterAuth:
def __init__(self, *args, **kwargs):
pass
class VerifyCredentials:
def __init__(self, *args, **kwargs):
pass
@property
def __dict__(self):
return {
'email': '<EMAIL>',
'screen_name': 'testtwname'
}
def __call__(self, *args, **kwargs):
return self
with patch('authors.apps.authentication.serializers.twitter.Api', new_callable=MockTwitterAuth):
response = self.client.post('/api/users/login/twitter', invalid_twitter_tokens, format='json')
self.assertEqual(response.data['auth_token']['email'], '<EMAIL>')
class TestSocialRegistration(BaseTestCase):
def test_social_registration_with_no_email(self):
email = social_reg_no_email_data['email']
username = social_reg_no_email_data['username']
response = register_social_user(email, username)
self.assertEqual(response, 'no email provided')
def test_social_registration_with_no_SOCIAL_USER_PASS(self):
class MockSettings:
SOCIAL_USER_PASS = False
with patch('authors.apps.authentication.social_registration.settings', new_callable=MockSettings):
response = register_social_user('email', 'username')
self.assertEqual(response, 'password not provided')
def test_social_registration_with_an_existing_user(self):
self.create_user(test_user_data)
response = register_social_user(test_user_data['user']['email'], 'username')
self.assertEqual(response, 'A user with this email already exists, Please login using that account')
def test_social_registration(self):
email = social_reg_data['email']
username = social_reg_data['username']
response = register_social_user(email, username)
response2 = register_social_user(email, username)
self.assertEqual(response["email"], "<EMAIL>")
self.assertEqual(response2["username"], "testname")
|
# repo: bogatyy/cs224d
import numpy as np
import random
from q2_sigmoid import sigmoid, sigmoid_grad
# First implement a gradient checker by filling in the following functions
def gradcheck_naive(f_and_grad, x):
"""
Gradient check for a function f
- f_and_grad should be a function that takes a single argument and outputs the cost and its gradients
- x is the point (numpy array) to check the gradient at
"""
rndstate = random.getstate()
random.setstate(rndstate)
fx, grad = f_and_grad(x) # Evaluate function value at original point
y = np.copy(x)
# Iterate over all indexes in x
it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
### try modifying x[ix] with h defined above to compute numerical gradients
### make sure you call random.setstate(rndstate) before calling f(x) each time, this will make it
### possible to test cost functions with built in randomness later
reldiff = 1.0
for negative_log_h in xrange(2, 22):
h = 0.5 ** negative_log_h
y[ix] = x[ix] + h
random.setstate(rndstate)
fy, _ = f_and_grad(y)
y[ix] = x[ix]
numgrad = (fy - fx) / h
if fx != fy:
reldiff = min(reldiff, abs(numgrad - grad[ix]) / max((1.0, abs(numgrad), abs(grad[ix]))))
# Compare gradients
print 'reldiff', reldiff
if reldiff > 1e-5:
print "Gradient check failed."
print "First gradient error found at index %s" % str(ix)
print "Your gradient: %f \t Numerical gradient: %f" % (grad[ix], numgrad)
return
it.iternext() # Step to next dimension
print "Gradient check passed!"
def sanity_check():
"""
Some basic sanity checks.
"""
quad_and_grad = lambda x: (np.sum(x ** 2), x * 2)
print "Running sanity checks..."
gradcheck_naive(quad_and_grad, np.array(123.456)) # scalar test
gradcheck_naive(quad_and_grad, np.random.randn(3,)) # 1-D test
gradcheck_naive(quad_and_grad, np.random.randn(4,5)) # 2-D test
print ""
def your_sanity_checks():
"""
Use this space add any additional sanity checks by running:
python q2_gradcheck.py
This function will not be called by the autograder, nor will
your additional tests be graded.
"""
print "Running your sanity checks..."
sigmoid_and_grad = lambda x: (np.sum(sigmoid(x)), sigmoid_grad(sigmoid(x)))
gradcheck_naive(sigmoid_and_grad, np.array(1.23456)) # scalar test
gradcheck_naive(sigmoid_and_grad, np.random.randn(3,)) # 1-D test
gradcheck_naive(sigmoid_and_grad, np.random.randn(4,5)) # 2-D test
gradcheck_naive(sigmoid_and_grad, np.arange(-5.0, 5.0, 0.1)) # range test
sincos_and_grad = lambda x: (np.sin(x) + np.cos(x), np.cos(x) - np.sin(x))
gradcheck_naive(sincos_and_grad, np.array(1.0))
print
if __name__ == "__main__":
sanity_check()
your_sanity_checks()
|
"""
Ethereum Virtual Machine (EVM) Block Instructions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. contents:: Table of Contents
:backlinks: none
:local:
Introduction
------------
Implementations of the EVM block instructions.
"""
from ethereum.base_types import U256
from .. import Evm
from ..gas import GAS_BASE, GAS_BLOCK_HASH, subtract_gas
from ..stack import pop, push
def block_hash(evm: Evm) -> None:
"""
Push the hash of one of the 256 most recent complete blocks onto the
stack. The block number to hash is present at the top of the stack.
Parameters
----------
evm :
The current EVM frame.
Raises
------
:py:class:`~ethereum.frontier.vm.error.StackUnderflowError`
If `len(stack)` is less than `1`.
:py:class:`~ethereum.frontier.vm.error.OutOfGasError`
If `evm.gas_left` is less than `20`.
"""
evm.gas_left = subtract_gas(evm.gas_left, GAS_BLOCK_HASH)
block_number = pop(evm.stack)
if evm.env.number <= block_number or evm.env.number > block_number + 256:
# Default hash to 0, if the block of interest is not yet on the chain
# (including the block which has the current executing transaction),
# or if the block's age is more than 256.
hash = b"\x00"
else:
hash = evm.env.block_hashes[-(evm.env.number - block_number)]
push(evm.stack, U256.from_be_bytes(hash))
evm.pc += 1
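# Illustrative check of the window above: with evm.env.number == 1000, a stored hash is
# pushed only for block numbers 744..999 (the 256 most recent ancestors); block 1000
# itself, any newer number, and anything older than 256 ancestors push 0 instead.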
def coinbase(evm: Evm) -> None:
"""
Push the current block's beneficiary address (address of the block miner)
onto the stack.
Here the current block refers to the block in which the currently
executing transaction/call resides.
Parameters
----------
evm :
The current EVM frame.
Raises
------
:py:class:`~ethereum.frontier.vm.error.StackOverflowError`
If `len(stack)` is equal to `1024`.
:py:class:`~ethereum.frontier.vm.error.OutOfGasError`
If `evm.gas_left` is less than `2`.
"""
evm.gas_left = subtract_gas(evm.gas_left, GAS_BASE)
push(evm.stack, U256.from_be_bytes(evm.env.coinbase))
evm.pc += 1
def timestamp(evm: Evm) -> None:
"""
    Push the current block's timestamp onto the stack. The timestamp referred
    to here is the Unix timestamp in seconds.
Here the current block refers to the block in which the currently
executing transaction/call resides.
Parameters
----------
evm :
The current EVM frame.
Raises
------
:py:class:`~ethereum.frontier.vm.error.StackOverflowError`
If `len(stack)` is equal to `1024`.
:py:class:`~ethereum.frontier.vm.error.OutOfGasError`
If `evm.gas_left` is less than `2`.
"""
evm.gas_left = subtract_gas(evm.gas_left, GAS_BASE)
push(evm.stack, evm.env.time)
evm.pc += 1
def number(evm: Evm) -> None:
"""
Push the current block's number onto the stack.
Here the current block refers to the block in which the currently
executing transaction/call resides.
Parameters
----------
evm :
The current EVM frame.
Raises
------
:py:class:`~ethereum.frontier.vm.error.StackOverflowError`
If `len(stack)` is equal to `1024`.
:py:class:`~ethereum.frontier.vm.error.OutOfGasError`
If `evm.gas_left` is less than `2`.
"""
evm.gas_left = subtract_gas(evm.gas_left, GAS_BASE)
push(evm.stack, U256(evm.env.number))
evm.pc += 1
def difficulty(evm: Evm) -> None:
"""
Push the current block's difficulty onto the stack.
Here the current block refers to the block in which the currently
executing transaction/call resides.
Parameters
----------
evm :
The current EVM frame.
Raises
------
:py:class:`~ethereum.frontier.vm.error.StackOverflowError`
If `len(stack)` is equal to `1024`.
:py:class:`~ethereum.frontier.vm.error.OutOfGasError`
If `evm.gas_left` is less than `2`.
"""
evm.gas_left = subtract_gas(evm.gas_left, GAS_BASE)
push(evm.stack, U256(evm.env.difficulty))
evm.pc += 1
def gas_limit(evm: Evm) -> None:
"""
Push the current block's gas limit onto the stack.
Here the current block refers to the block in which the currently
executing transaction/call resides.
Parameters
----------
evm :
The current EVM frame.
Raises
------
:py:class:`~ethereum.frontier.vm.error.StackOverflowError`
If `len(stack)` is equal to `1024`.
:py:class:`~ethereum.frontier.vm.error.OutOfGasError`
If `evm.gas_left` is less than `2`.
"""
evm.gas_left = subtract_gas(evm.gas_left, GAS_BASE)
push(evm.stack, U256(evm.env.gas_limit))
evm.pc += 1
|
"""
Example demonstrating usage of :py:meth:`simulation.simulation_tumor_growth_brain`:
- forward simulation
- 2D test domain from brain atlas, 4 tissue subdomains + 'outside'
- spatially heterogeneous parameters, as defined in simulation.simulation_tumor_growth_brain
- no displacement bc between 'outside' and other subdomains
"""
import logging
import os
import test_cases.test_simulation_tumor_growth_brain.testing_config as test_config
from glimslib.simulation import TumorGrowthBrain
from glimslib import fenics_local as fenics, config
import glimslib.utils.file_utils as fu
import glimslib.utils.data_io as dio
# ==============================================================================
# Logging settings
# ==============================================================================
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
fenics.set_log_level(fenics.CRITICAL)
# ==============================================================================
# Load 2D Mesh from IMAGE
# ==============================================================================
path_to_hdf5_mesh = os.path.join(config.test_data_dir, 'brain_atlas_mesh_2d_reduced_domain.h5')
mesh, subdomains, boundaries = dio.read_mesh_hdf5(path_to_hdf5_mesh)
# ==============================================================================
# Problem Settings
# ==============================================================================
class Boundary(fenics.SubDomain):
def inside(self, x, on_boundary):
return on_boundary
tissue_id_name_map = { 1: 'CSF',
3: 'WM',
2: 'GM',
4: 'Ventricles'}
# Boundaries & BCs
boundary = Boundary()
boundary_dict = {'boundary_all': boundary}
dirichlet_bcs = {'clamped_0': {'bc_value': fenics.Constant((0.0, 0.0)),
'named_boundary': 'boundary_all',
'subspace_id': 0}
}
von_neumann_bcs = {}
# Initial Values
u_0_conc_expr = fenics.Expression('exp(-a*pow(x[0]-x0, 2) - a*pow(x[1]-y0, 2))', degree=1, a=0.5, x0=148, y0=-67)
u_0_disp_expr = fenics.Constant((0.0, 0.0))
# ==============================================================================
# Class instantiation & Setup
# ==============================================================================
sim_time = 20
sim_time_step = 1
sim = TumorGrowthBrain(mesh)
sim.setup_global_parameters(subdomains=subdomains,
domain_names=tissue_id_name_map,
boundaries=boundary_dict,
dirichlet_bcs=dirichlet_bcs,
                            von_neumann_bcs=von_neumann_bcs
)
ivs = {0:u_0_disp_expr, 1:u_0_conc_expr}
sim.setup_model_parameters(iv_expression=ivs,
sim_time=sim_time, sim_time_step=sim_time_step,
E_GM=3000E-6, E_WM=3000E-6, E_CSF=1000E-6, E_VENT=1000E-6,
nu_GM=0.45, nu_WM=0.45, nu_CSF=0.45, nu_VENT=0.3,
D_GM=0.01, D_WM=0.05,
rho_GM=0.05, rho_WM=0.05,
coupling=0.1)
# ==============================================================================
# Run Simulation
# ==============================================================================
output_path = os.path.join(test_config.output_path, 'test_case_simulation_tumor_growth_brain_2D_atlas_reduced_domain')
fu.ensure_dir_exists(output_path)
sim.run(save_method='vtk',plot=False, output_dir=output_path, clear_all=True)
print("============= FINISHED SIMULATION ================")
path_to_h5_file = os.path.join(output_path, 'solution_timeseries.h5')
sim.reload_from_hdf5(path_to_h5_file)
# ==============================================================================
# PostProcess
# ==============================================================================
# dio.merge_VTUs(output_path, sim_time_step, sim_time, remove=True, reference=None)
# selection = slice(0,-1,5)
sim.init_postprocess(os.path.join(output_path, 'postprocess', 'plots'))
# sim.postprocess.plot_all(deformed=False, selection=selection)
# sim.postprocess.plot_all(deformed=True, selection=selection)
import numpy as np
qmin, qmax = fenics.MeshQuality.radius_ratio_min_max(sim.mesh)
print('Minimal radius ratio:', qmin)
print('Maximal radius ratio:', qmax)
sim.postprocess.update_mesh_displacement(20)
qmin, qmax = fenics.MeshQuality.radius_ratio_min_max(sim.mesh)
print('Minimal radius ratio:', qmin)
print('Maximal radius ratio:', qmax)
last_recording_step = max(sim.results.get_recording_steps())
conc = sim.postprocess.get_solution_concentration(last_recording_step)
print("Min conc: ", np.min(conc.vector().array()))
print("Max conc: ", np.max(conc.vector().array())) |
#------------------------------------------------------------------------------#
# fortnet-python: Python Tools for the Fortnet Software Package #
# Copyright (C) 2021 - 2022 <NAME> #
# #
# See the LICENSE file for terms of usage and distribution. #
#------------------------------------------------------------------------------#
'''
Implements common functionalities for the fortnet-python regression testsuite.
'''
import warnings
import subprocess
import numpy as np
from ase import Atoms
from fortformat import Fnetout
ATOL = 1e-16
RTOL = 1e-14
class Hdf5:
'''Representation of an HDF5 file to process.'''
def __init__(self, fname):
'''Initializes an Hdf5 file object.
Args:
fname (str): path to the HDF5 file to process
'''
self._fname = fname
def equals(self, fname):
'''Checks equality with another reference instance
Args:
fname (str): path to HDF5 file to compare with
Returns:
equal (bool): True, if the two instances are equal
'''
process = subprocess.run(['h5diff', self._fname, fname], check=False)
equal = process.returncode == 0
return equal
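# Minimal usage sketch (the file names are illustrative): the comparison shells out to
# the external ``h5diff`` tool, so the HDF5 command line utilities must be on PATH.
#
#     assert Hdf5('fnetout.hdf5').equals('fnetout.ref.hdf5')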
def compare_fnetout_references(ref, fname, atol=ATOL, rtol=RTOL):
'''Compares the properties extracted by using the
Fnetout class with raw reference values.
Args:
ref (dict): expected content of the fnetout file
fname (str): path to HDF5 file to load and compare with
atol (float): required absolute tolerance
rtol (float): required relative tolerance
Returns:
equal (bool): true, if extracted properties match references
'''
fnetout = Fnetout(fname)
mode = fnetout.mode
ndatapoints = fnetout.ndatapoints
nglobaltargets = fnetout.nglobaltargets
natomictargets = fnetout.natomictargets
globaltargets = fnetout.globaltargets
atomictargets = fnetout.atomictargets
tforces = fnetout.tforces
forces = fnetout.forces
atomicpredictions = fnetout.atomicpredictions
globalpredictions = fnetout.globalpredictions
globalpredictions_atomic = fnetout.globalpredictions_atomic
equal = mode == ref['mode']
if not equal:
warnings.warn('Mismatch in running mode.')
return False
equal = ndatapoints == ref['ndatapoints']
if not equal:
warnings.warn('Mismatch in number of training datapoints.')
return False
equal = nglobaltargets == ref['nglobaltargets']
if not equal:
warnings.warn('Mismatch in number of system-wide targets.')
return False
equal = natomictargets == ref['natomictargets']
if not equal:
warnings.warn('Mismatch in number of atomic targets.')
return False
if ref['globaltargets'] is not None:
for ii, target in enumerate(globaltargets):
equal = np.allclose(target, ref['globaltargets'][ii],
rtol=rtol, atol=atol)
if not equal:
warnings.warn('Mismatch in global targets of datapoint ' \
+ str(ii + 1) + '.')
return False
if ref['atomictargets'] is not None:
for ii, target in enumerate(atomictargets):
equal = np.allclose(target, ref['atomictargets'][ii],
rtol=rtol, atol=atol)
if not equal:
warnings.warn('Mismatch in atomic targets of datapoint ' \
+ str(ii + 1) + '.')
return False
equal = tforces == ref['tforces']
if not equal:
warnings.warn('Mismatch in force specification.')
return False
if ref['forces'] is not None:
for idata in range(ndatapoints):
for itarget, force in enumerate(forces[idata]):
equal = np.allclose(force,
ref['forces'][idata][itarget],
rtol=rtol, atol=atol)
if not equal:
warnings.warn('Mismatch in forces of datapoint ' \
+ str(idata + 1) + ' and target ' + \
str(itarget + 1) + '.')
return False
if ref['atomicpredictions'] is not None:
for ii, prediction in enumerate(atomicpredictions):
equal = np.allclose(prediction, ref['atomicpredictions'][ii],
rtol=rtol, atol=atol)
if not equal:
warnings.warn('Mismatch in atomic predictions of datapoint ' \
+ str(ii + 1) + '.')
return False
if ref['globalpredictions'] is not None:
for ii, target in enumerate(globalpredictions):
equal = np.allclose(target, ref['globalpredictions'][ii],
rtol=rtol, atol=atol)
if not equal:
warnings.warn('Mismatch in global predictions' \
+ ' of datapoint ' + str(ii + 1) + '.')
return False
if ref['globalpredictions_atomic'] is not None:
for ii, target in enumerate(globalpredictions_atomic):
equal = np.allclose(target, ref['globalpredictions_atomic'][ii],
rtol=rtol, atol=atol)
if not equal:
warnings.warn('Mismatch in (atom-resolved) global predictions' \
+ ' of datapoint ' + str(ii + 1) + '.')
return False
return True
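# Illustrative reference dictionary (the keys mirror the checks above; the values are
# made up and not taken from any real fnetout file):
#
#     ref = {
#         'mode': 'validate',
#         'ndatapoints': 6,
#         'nglobaltargets': 1,
#         'natomictargets': 0,
#         'globaltargets': [[1.0]] * 6,
#         'atomictargets': None,
#         'tforces': False,
#         'forces': None,
#         'atomicpredictions': None,
#         'globalpredictions': [[0.99]] * 6,
#         'globalpredictions_atomic': None,
#     }
#     assert compare_fnetout_references(ref, 'fnetout.hdf5', atol=1e-8, rtol=1e-6)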
def get_mixed_geometries():
'''Generates six geometries with(out) periodic boundary conditions.'''
atoms = []
atoms += get_cluster_geometries()
atoms += get_bulk_geometries()
return atoms
def get_cluster_geometries():
'''Generates three molecules without periodic boundary conditions.'''
h2o = Atoms('H2O')
h2o.positions = np.array([[0.0, 0.0, 0.119262], [0.0, 0.763239, -0.477047],
[0.0, -0.763239, -0.477047]], dtype=float)
ch4 = Atoms('CH4')
ch4.positions = np.array([[0.0, 0.0, 0.0], [0.629118, 0.629118, 0.629118],
[-0.629118, -0.629118, 0.629118],
[0.629118, -0.629118, -0.629118],
[-0.629118, 0.629118, -0.629118]], dtype=float)
nh3 = Atoms('NH3')
nh3.positions = np.array([[0.0, 0.0, 0.116489], [0.0, 0.939731, -0.271808],
[0.813831, -0.469865, -0.271808],
[-0.813831, -0.469865, -0.271808]], dtype=float)
atoms = [h2o, ch4, nh3]
return atoms
def get_bulk_geometries():
'''Generates three crystals with periodic boundary conditions.'''
si = Atoms('Si64')
si.set_scaled_positions(np.array([
[0.8750000000E+00, 0.1250000000E+00, 0.1250000000E+00],
[0.8750000000E+00, 0.3750000000E+00, 0.3750000000E+00],
[0.8750000000E+00, 0.6250000000E+00, 0.6250000000E+00],
[0.8750000000E+00, 0.8750000000E+00, 0.8750000000E+00],
[0.6250000000E+00, 0.1250000000E+00, 0.3750000000E+00],
[0.6250000000E+00, 0.3750000000E+00, 0.6250000000E+00],
[0.6250000000E+00, 0.6250000000E+00, 0.8750000000E+00],
[0.6250000000E+00, 0.8750000000E+00, 0.1250000000E+00],
[0.3750000000E+00, 0.1250000000E+00, 0.6250000000E+00],
[0.3750000000E+00, 0.3750000000E+00, 0.8750000000E+00],
[0.3750000000E+00, 0.6250000000E+00, 0.1250000000E+00],
[0.3750000000E+00, 0.8750000000E+00, 0.3750000000E+00],
[0.1250000000E+00, 0.1250000000E+00, 0.8750000000E+00],
[0.1250000000E+00, 0.3750000000E+00, 0.1250000000E+00],
[0.1250000000E+00, 0.6250000000E+00, 0.3750000000E+00],
[0.1250000000E+00, 0.8750000000E+00, 0.6250000000E+00],
[0.6250000000E+00, 0.3750000000E+00, 0.1250000000E+00],
[0.6250000000E+00, 0.6250000000E+00, 0.3750000000E+00],
[0.6250000000E+00, 0.8750000000E+00, 0.6250000000E+00],
[0.6250000000E+00, 0.1250000000E+00, 0.8750000000E+00],
[0.3750000000E+00, 0.3750000000E+00, 0.3750000000E+00],
[0.3750000000E+00, 0.6250000000E+00, 0.6250000000E+00],
[0.3750000000E+00, 0.8750000000E+00, 0.8750000000E+00],
[0.3750000000E+00, 0.1250000000E+00, 0.1250000000E+00],
[0.1250000000E+00, 0.3750000000E+00, 0.6250000000E+00],
[0.1250000000E+00, 0.6250000000E+00, 0.8750000000E+00],
[0.1250000000E+00, 0.8750000000E+00, 0.1250000000E+00],
[0.1250000000E+00, 0.1250000000E+00, 0.3750000000E+00],
[0.8750000000E+00, 0.3750000000E+00, 0.8750000000E+00],
[0.8750000000E+00, 0.6250000000E+00, 0.1250000000E+00],
[0.8750000000E+00, 0.8750000000E+00, 0.3750000000E+00],
[0.8750000000E+00, 0.1250000000E+00, 0.6250000000E+00],
[0.0000000000E+00, 0.0000000000E+00, 0.0000000000E+00],
[0.0000000000E+00, 0.2500000000E+00, 0.2500000000E+00],
[0.0000000000E+00, 0.5000000000E+00, 0.5000000000E+00],
[0.0000000000E+00, 0.7500000000E+00, 0.7500000000E+00],
[0.7500000000E+00, 0.0000000000E+00, 0.2500000000E+00],
[0.7500000000E+00, 0.2500000000E+00, 0.5000000000E+00],
[0.7500000000E+00, 0.5000000000E+00, 0.7500000000E+00],
[0.7500000000E+00, 0.7500000000E+00, 0.0000000000E+00],
[0.5000000000E+00, 0.0000000000E+00, 0.5000000000E+00],
[0.5000000000E+00, 0.2500000000E+00, 0.7500000000E+00],
[0.5000000000E+00, 0.5000000000E+00, 0.0000000000E+00],
[0.5000000000E+00, 0.7500000000E+00, 0.2500000000E+00],
[0.2500000000E+00, 0.0000000000E+00, 0.7500000000E+00],
[0.2500000000E+00, 0.2500000000E+00, 0.0000000000E+00],
[0.2500000000E+00, 0.5000000000E+00, 0.2500000000E+00],
[0.2500000000E+00, 0.7500000000E+00, 0.5000000000E+00],
[0.7500000000E+00, 0.2500000000E+00, 0.0000000000E+00],
[0.7500000000E+00, 0.5000000000E+00, 0.2500000000E+00],
[0.7500000000E+00, 0.7500000000E+00, 0.5000000000E+00],
[0.7500000000E+00, 0.0000000000E+00, 0.7500000000E+00],
[0.5000000000E+00, 0.2500000000E+00, 0.2500000000E+00],
[0.5000000000E+00, 0.5000000000E+00, 0.5000000000E+00],
[0.5000000000E+00, 0.7500000000E+00, 0.7500000000E+00],
[0.5000000000E+00, 0.0000000000E+00, 0.0000000000E+00],
[0.2500000000E+00, 0.2500000000E+00, 0.5000000000E+00],
[0.2500000000E+00, 0.5000000000E+00, 0.7500000000E+00],
[0.2500000000E+00, 0.7500000000E+00, 0.0000000000E+00],
[0.2500000000E+00, 0.0000000000E+00, 0.2500000000E+00],
[0.0000000000E+00, 0.2500000000E+00, 0.7500000000E+00],
[0.0000000000E+00, 0.5000000000E+00, 0.0000000000E+00],
[0.0000000000E+00, 0.7500000000E+00, 0.2500000000E+00],
[0.0000000000E+00, 0.0000000000E+00, 0.5000000000E+00]], dtype=float))
si.set_cell(np.array([
[0.8764470533E+01, 0.0000000000E+00, 0.0000000000E+00],
[0.0000000000E+00, 0.8764470533E+01, 0.0000000000E+00],
[0.0000000000E+00, 0.0000000000E+00, 0.8764470533E+01]], dtype=float))
sic = Atoms('Si32C32')
sic.set_scaled_positions(np.array([
[0.8750000000E+00, 0.1250000000E+00, 0.1250000000E+00],
[0.8750000000E+00, 0.3750000000E+00, 0.3750000000E+00],
[0.8750000000E+00, 0.6250000000E+00, 0.6250000000E+00],
[0.8750000000E+00, 0.8750000000E+00, 0.8750000000E+00],
[0.6250000000E+00, 0.1250000000E+00, 0.3750000000E+00],
[0.6250000000E+00, 0.3750000000E+00, 0.6250000000E+00],
[0.6250000000E+00, 0.6250000000E+00, 0.8750000000E+00],
[0.6250000000E+00, 0.8750000000E+00, 0.1250000000E+00],
[0.3750000000E+00, 0.1250000000E+00, 0.6250000000E+00],
[0.3750000000E+00, 0.3750000000E+00, 0.8750000000E+00],
[0.3750000000E+00, 0.6250000000E+00, 0.1250000000E+00],
[0.3750000000E+00, 0.8750000000E+00, 0.3750000000E+00],
[0.1250000000E+00, 0.1250000000E+00, 0.8750000000E+00],
[0.1250000000E+00, 0.3750000000E+00, 0.1250000000E+00],
[0.1250000000E+00, 0.6250000000E+00, 0.3750000000E+00],
[0.1250000000E+00, 0.8750000000E+00, 0.6250000000E+00],
[0.6250000000E+00, 0.3750000000E+00, 0.1250000000E+00],
[0.6250000000E+00, 0.6250000000E+00, 0.3750000000E+00],
[0.6250000000E+00, 0.8750000000E+00, 0.6250000000E+00],
[0.6250000000E+00, 0.1250000000E+00, 0.8750000000E+00],
[0.3750000000E+00, 0.3750000000E+00, 0.3750000000E+00],
[0.3750000000E+00, 0.6250000000E+00, 0.6250000000E+00],
[0.3750000000E+00, 0.8750000000E+00, 0.8750000000E+00],
[0.3750000000E+00, 0.1250000000E+00, 0.1250000000E+00],
[0.1250000000E+00, 0.3750000000E+00, 0.6250000000E+00],
[0.1250000000E+00, 0.6250000000E+00, 0.8750000000E+00],
[0.1250000000E+00, 0.8750000000E+00, 0.1250000000E+00],
[0.1250000000E+00, 0.1250000000E+00, 0.3750000000E+00],
[0.8750000000E+00, 0.3750000000E+00, 0.8750000000E+00],
[0.8750000000E+00, 0.6250000000E+00, 0.1250000000E+00],
[0.8750000000E+00, 0.8750000000E+00, 0.3750000000E+00],
[0.8750000000E+00, 0.1250000000E+00, 0.6250000000E+00],
[0.0000000000E+00, 0.0000000000E+00, 0.0000000000E+00],
[0.0000000000E+00, 0.2500000000E+00, 0.2500000000E+00],
[0.0000000000E+00, 0.5000000000E+00, 0.5000000000E+00],
[0.0000000000E+00, 0.7500000000E+00, 0.7500000000E+00],
[0.7500000000E+00, 0.0000000000E+00, 0.2500000000E+00],
[0.7500000000E+00, 0.2500000000E+00, 0.5000000000E+00],
[0.7500000000E+00, 0.5000000000E+00, 0.7500000000E+00],
[0.7500000000E+00, 0.7500000000E+00, 0.0000000000E+00],
[0.5000000000E+00, 0.0000000000E+00, 0.5000000000E+00],
[0.5000000000E+00, 0.2500000000E+00, 0.7500000000E+00],
[0.5000000000E+00, 0.5000000000E+00, 0.0000000000E+00],
[0.5000000000E+00, 0.7500000000E+00, 0.2500000000E+00],
[0.2500000000E+00, 0.0000000000E+00, 0.7500000000E+00],
[0.2500000000E+00, 0.2500000000E+00, 0.0000000000E+00],
[0.2500000000E+00, 0.5000000000E+00, 0.2500000000E+00],
[0.2500000000E+00, 0.7500000000E+00, 0.5000000000E+00],
[0.7500000000E+00, 0.2500000000E+00, 0.0000000000E+00],
[0.7500000000E+00, 0.5000000000E+00, 0.2500000000E+00],
[0.7500000000E+00, 0.7500000000E+00, 0.5000000000E+00],
[0.7500000000E+00, 0.0000000000E+00, 0.7500000000E+00],
[0.5000000000E+00, 0.2500000000E+00, 0.2500000000E+00],
[0.5000000000E+00, 0.5000000000E+00, 0.5000000000E+00],
[0.5000000000E+00, 0.7500000000E+00, 0.7500000000E+00],
[0.5000000000E+00, 0.0000000000E+00, 0.0000000000E+00],
[0.2500000000E+00, 0.2500000000E+00, 0.5000000000E+00],
[0.2500000000E+00, 0.5000000000E+00, 0.7500000000E+00],
[0.2500000000E+00, 0.7500000000E+00, 0.0000000000E+00],
[0.2500000000E+00, 0.0000000000E+00, 0.2500000000E+00],
[0.0000000000E+00, 0.2500000000E+00, 0.7500000000E+00],
[0.0000000000E+00, 0.5000000000E+00, 0.0000000000E+00],
[0.0000000000E+00, 0.7500000000E+00, 0.2500000000E+00],
[0.0000000000E+00, 0.0000000000E+00, 0.5000000000E+00]], dtype=float))
sic.set_cell(np.array([
[0.8764470533E+01, 0.0000000000E+00, 0.0000000000E+00],
[0.0000000000E+00, 0.8764470533E+01, 0.0000000000E+00],
[0.0000000000E+00, 0.0000000000E+00, 0.8764470533E+01]], dtype=float))
cu = Atoms('Cu')
cu.set_positions(np.array([[0.0, 0.0, 0.0]], dtype=float))
cu.set_cell(np.array([[0.0, 1.8, 1.8], [1.8, 0.0, 1.8], [1.8, 1.8, 0.0]],
dtype=float))
atoms = [si, sic, cu]
return atoms
def get_properties_byatoms(atoms, nprops, atomic):
'''Generates dummy properties for regression testing.
Args:
atoms (ASE atoms list): list of ASE Atoms objects
nprops (int): number of atomic or system-wide properties
atomic (bool): true, if atomic properties are desired
Returns:
        props (list or 2darray): atomic or system-wide properties
'''
# fix random seed for reproduction purposes
np.random.seed(42)
if atomic:
props = []
for atom in atoms:
natom = len(atom)
props.append(np.random.random((natom, nprops)))
else:
props = np.random.random((len(atoms), nprops))
return props
def get_properties_byfeatures(features, nprops, atomic):
'''Generates dummy properties for regression testing.
Args:
features (list): list of external atomic input features
nprops (int): number of atomic or system-wide properties
atomic (bool): true, if atomic properties are desired
    Returns:
        props (list or 2darray): atomic or system-wide properties
    '''
# fix random seed for reproduction purposes
np.random.seed(42)
if atomic:
props = []
for feature in features:
natom = np.shape(feature)[1]
props.append(np.random.random((natom, nprops)))
else:
props = np.random.random((len(features), nprops))
return props
def get_atomicweights_byatoms(atoms):
    '''Generates dummy atomic gradient weights for regression testing.
Args:
atoms (ASE atoms list): list of ASE Atoms objects
Returns:
weights (list): atomic gradient weighting
'''
# fix random seed for reproduction purposes
np.random.seed(42)
weights = []
for atom in atoms:
natom = len(atom)
weights.append(np.asfarray(np.random.randint(1, 100, natom, dtype=int)))
return weights
def get_batomicweights_byatoms(atoms):
    '''Generates dummy boolean atomic gradient weights for regression testing.
Args:
atoms (ASE atoms list): list of ASE Atoms objects
Returns:
weights (list): atomic gradient weighting
'''
# fix random seed for reproduction purposes
np.random.seed(42)
sample = [True, False]
weights = []
for atom in atoms:
natom = len(atom)
weights.append(np.random.choice(sample, size=natom))
return weights
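# Putting the generators together (illustrative, not part of an actual test case):
# a mixed set of geometries with matching dummy atomic targets and gradient weights.
#
#     structures = get_mixed_geometries()
#     targets = get_properties_byatoms(structures, nprops=1, atomic=True)
#     weights = get_atomicweights_byatoms(structures)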
|
<gh_stars>1-10
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from lcquad_test import Orchestrator
from learning.classifier.svmclassifier import SVMClassifier
from parser.lc_quad import LC_QaudParser
from parser.lc_quad_linked import LC_Qaud_Linked
from parser.qald import Qald
if __name__ == "__main__":
parser = LC_QaudParser()
classifier1 = SVMClassifier('./output/question_type_classifier/svm.model')
classifier2 = SVMClassifier('./output/double_relation_classifier/svm.model')
query_builder = Orchestrator(None, classifier1, classifier2, parser, None, auto_train=True)
print("train_question_classifier")
scores = query_builder.train_question_classifier(file_path="./data/LC-QUAD/data.json", test_size=0.8)
print(scores)
y_pred = query_builder.question_classifier.predict(query_builder.X_test)
print(accuracy_score(query_builder.y_test, y_pred))
print(classification_report(query_builder.y_test, y_pred, digits=3))
ds = LC_Qaud_Linked(path="./data/LC-QUAD/linked_test.json")
ds.load()
ds.parse()
lcquad = []
lc_y = []
for qapair in ds.qapairs:
lcquad.append(qapair.question.text)
if "COUNT(" in qapair.sparql.query:
lc_y.append(2)
elif "ASK" in qapair.sparql.query:
lc_y.append(1)
else:
lc_y.append(0)
lc_y = np.array(lc_y)
print('LIST: ', sum(lc_y == 0))
print('ASK: ', sum(lc_y == 1))
print('COUNT: ', sum(lc_y == 2))
np.savetxt('lcquad_question_type.csv', lc_y, delimiter=',')
lc_pred = query_builder.question_classifier.predict(lcquad)
print('LC-QUAD question_classifier')
print(accuracy_score(lc_y, lc_pred))
print(classification_report(lc_y, lc_pred, digits=4))
    classes = ['List', 'Boolean', 'Count']  # label order matches the encoding above: 0 = list, 1 = ASK, 2 = COUNT
cm = confusion_matrix(lc_y, lc_pred)
print('Before Normalization')
print(cm)
print('Accuracy by class: ')
c_acc = cm.diagonal() / cm.sum(axis=1)
print(c_acc)
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('After Normalization')
print(cm)
fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=classes, yticklabels=classes,
ylabel='True label',
xlabel='Predicted label')
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = '.2f'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
plt.savefig('confusion_matrix_lcquad.png')
q_ds = Qald(Qald.qald_7)
q_ds.load()
q_ds.parse()
qald = []
q_y = []
for qapair in q_ds.qapairs:
qald.append(qapair.question.text)
if "COUNT(" in qapair.sparql.query:
q_y.append(2)
elif "ASK" in qapair.sparql.query:
q_y.append(1)
x = ascii(qapair.sparql.query.replace('\n', ' ').replace('\t', ' '))
print(x)
else:
q_y.append(0)
q_y = np.array(q_y)
print('LIST: ', sum(q_y == 0))
print('ASK: ', sum(q_y == 1))
print('COUNT: ', sum(q_y == 2))
np.savetxt('qald_question_type.csv', q_y, delimiter=',')
q_pred = query_builder.question_classifier.predict(qald)
print('QALD question_classifier')
print(accuracy_score(q_y, q_pred))
print(classification_report(q_y, q_pred, digits=4))
    classes = ['List', 'Boolean', 'Count']  # label order matches the encoding above: 0 = list, 1 = ASK, 2 = COUNT
cm = confusion_matrix(q_y, q_pred)
print('Before Normalization')
print(cm)
print('Accuracy by class: ')
c_acc = cm.diagonal() / cm.sum(axis=1)
print(c_acc)
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('After Normalization')
print(cm)
fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=classes, yticklabels=classes,
ylabel='True label',
xlabel='Predicted label')
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = '.2f'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
plt.savefig('confusion_matrix_qald.png')
ds = LC_Qaud_Linked(path="./data/LC-QUAD/linked_answer.json")
ds.load()
ds.parse()
lcquad = []
lc_y = []
for qapair in ds.qapairs:
lcquad.append(qapair.question.text)
if "COUNT(" in qapair.sparql.query:
lc_y.append(2)
elif "ASK" in qapair.sparql.query:
lc_y.append(1)
else:
lc_y.append(0)
lc_y = np.array(lc_y)
print('LIST: ', sum(lc_y == 0))
print('ASK: ', sum(lc_y == 1))
print('COUNT: ', sum(lc_y == 2))
np.savetxt('lcquad_question_type_all.csv', lc_y, delimiter=',')
lc_pred = query_builder.question_classifier.predict(lcquad)
print('LC-QUAD question_classifier')
print(accuracy_score(lc_y, lc_pred))
print(classification_report(lc_y, lc_pred, digits=4))
    classes = ['List', 'Boolean', 'Count']  # label order matches the encoding above: 0 = list, 1 = ASK, 2 = COUNT
cm = confusion_matrix(lc_y, lc_pred)
print('Before Normalization')
print(cm)
print('Accuracy by class: ')
c_acc = cm.diagonal() / cm.sum(axis=1)
print(c_acc)
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('After Normalization')
print(cm)
fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=classes, yticklabels=classes,
ylabel='True label',
xlabel='Predicted label')
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = '.2f'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
plt.savefig('confusion_matrix_lcquad_all.png')
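# The confusion-matrix plotting above is repeated verbatim for all three datasets; a
# helper along these lines (a sketch, not wired into the script) would remove the
# duplication. The function name and signature are introduced here for illustration.
def plot_confusion_matrix(y_true, y_pred, class_names, out_path):
    """Plot a row-normalized confusion matrix and save it to out_path."""
    cm = confusion_matrix(y_true, y_pred)
    cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    ax.figure.colorbar(im, ax=ax)
    ax.set(xticks=np.arange(cm.shape[1]), yticks=np.arange(cm.shape[0]),
           xticklabels=class_names, yticklabels=class_names,
           ylabel='True label', xlabel='Predicted label')
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], '.2f'), ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    plt.savefig(out_path)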
|
"""
File name: evaluate_models_performance.py
Author: <NAME>
Date created: 21.05.2018
This is the main script to evaluate models performance. It reads the implementation
and path options from the config.yml file. It loads the training and test sets,
loads the specified trained model(s) and calculates the training and test performance
scores using the specified performance measure(s). It repeats the same process for
the specified number of splits. Finally, it saves the calculated performance scores
as csv files.
"""
import os
import keras
import numpy as np
import pandas as pd
import yaml
from utils.dataset import *
from utils.models import *
########################################################################################
#### ENVIRONMENT AND SESSION SET UP ####################################################
########################################################################################
# set the environment variable
os.environ["KERAS_BACKEND"] = "tensorflow"
# Silence INFO logs
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"
# Prevent usage of GPUs
os.environ["CUDA_VISIBLE_DEVICES"] = ""
########################################################################################
###### ASSIGN CONFIGURATION VARIABLES ##################################################
########################################################################################
# You need to add constructor in order to be able to use the join command in the yaml
# file
def join(loader, node):
seq = loader.construct_sequence(node)
return "".join(str(i) for i in seq)
yaml.add_constructor("!join", join)
# Read the config file
cfg = yaml.load(open("config.yml", "r"), Loader=yaml.Loader)
# Assign variables to use
dataset_name = cfg["dataset name"]
dataset_path = cfg["data path"]
splits_path = cfg["splits path"]
number_of_splits = cfg["number of splits"]
impute_data = cfg["impute data"]
models_to_use = cfg["models to use"]
subsampling_types = cfg["subsampling to use"]
fixed_parameters = cfg["fixed hyperparameters"]
performance_scores = cfg["final performance measures"]
models_folder = cfg["models folder path"]
scores_folder = cfg["scores folder path"]
########################################################################################
###### GET TRAINING AND TEST DATA ######################################################
########################################################################################
# Load dataset
data = ClinicalDataset(name=dataset_name, path=dataset_path)
# Load the training-test splits as a class instance variable using the
# assign_train_test_sets function with only the path to the splits file
data.assign_train_test_splits(path=splits_path)
# Preprocess data
if impute_data:
data.impute(number_of_splits=number_of_splits, imputation_type="mean/mode")
data.normalize(number_of_splits=number_of_splits)
print("Number of patients in dataset: " + str(len(data.X)))
#########################################################################################
###### EVALUATE MODELS AND SAVE PERFORMANCE SCORES #####################################
#########################################################################################
# Check if the scores folder path to save already exists. If not, create folder.
if not os.path.exists(scores_folder):
os.makedirs(scores_folder)
# Iterate over subsampling types
for subs in subsampling_types:
data.subsample_training_sets(
number_of_splits=number_of_splits, subsampling_type=subs
)
# Iterate over models.
for mdl in models_to_use:
file_suffix = ".h5" if mdl == "MLP" else ".pkl"
# Iterate over performance scores.
for scr in performance_scores:
path_to_scores = (
f"{scores_folder}/{mdl}_{scr}_scores_{subs}_subsampling.csv"
)
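            # e.g. "<scores folder>/MLP_roc_auc_scores_random_subsampling.csv"
            # (the actual model, score and subsampling names come from config.yml)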
            # Check if the scores file already exists. If it does, skip to the next
            # performance score.
if not os.path.isfile(path_to_scores):
# Create temporary variable to store the calculated scores from each split.
tmp_scores = np.zeros((number_of_splits, 2))
# Iterate over splits.
for i in range(number_of_splits):
# Get main path of saved models.
path_to_model = f"{models_folder}/{mdl}_model_{subs}_subsampling_split_{i+1}{file_suffix}"
# Create model instance for the current split and load the model of
# the current split.
model = eval(mdl)(
name=mdl, dataset=data.splits[i], fixed_params=fixed_parameters
)
model.load_model(path=path_to_model)
# Calculate the performance score on the current training and test
# data.
score_tr, score_te = model.evaluate_performance(score_name=scr)
# Dump training and test scores of the current split to the
# temporary variable.
tmp_scores[i, 0] = score_tr
tmp_scores[i, 1] = score_te
# Clear keras session to prevent session from slowing down due to
# loaded models.
keras.backend.clear_session()
# Store scores to dataframe and save.
df_scores = pd.DataFrame(tmp_scores, columns=["training", "test"])
df_scores.to_csv(path_to_scores)
print(f"Saved {scr} scores for {mdl} model with {subs} subsampling.")
|
# Data extracted using: https://ij.imjoy.io/
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy.signal import argrelextrema
from scipy.constants import pi as π
import uncertainties as unc
from uncertainties import ufloat
distanceSS,grayValueSS = np.loadtxt(r"2021.11.18 Diffraction - Computing/single-slit-profile.csv",delimiter=",",skiprows=1,unpack=True,max_rows=1276)
distanceDS,grayValueDS = np.loadtxt(r"2021.11.18 Diffraction - Computing/double-slit-profile-1.csv",delimiter=",",skiprows=1,unpack=True,max_rows=1276)
font = {'fontname':'CMU Serif'} # Assign font parameters
fontAxesTicks = {'size':7}
def functionSS(x,I0,a,λ,f): # Functions found in lab manuals (equation 1.1, 1.10)
return I0*(((np.sin((π*a*x)/(λ*f)))/((π*a*x)/(λ*f)))**2) # SS = Single Slit, DS = Double Slit
def functionDS(x,I0,a,λ,f,d):
    return 4*I0*(((np.sin((π*a*x)/(λ*f)))/((π*a*x)/(λ*f)))**2*((np.cos((π*d*x)/(λ*f)))**2))
def functionSSCorrected(x,I0,a,λ,f,φ,k): # Perform manual corrections to curve_fit line of best fit for Single Slit and Double Slit
return k*I0*(((np.sin((π*a*(x-φ))/(λ*f)))/((π*a*(x-φ))/(λ*f)))**2) # "φ" denotes phase shift, "k" denotes vertical scale factor, "ω" denotes horizontal scale factor
def functionDSCorrected(x,I0,a,λ,f,d,φ,k,ω): # Phase Shift Formula: k(ωx - φ)
    return 4*I0*k*(((np.sin((π*a*(ω*x-φ))/(λ*f)))/((π*a*(ω*x-φ))/(λ*f)))**2*((np.cos((π*d*(ω*x-φ))/(λ*f)))**2))
#def slitSeparationSS:
#def slitWidthDS:
## SINGLE SLIT EXPERIMENT
distanceSS = 0.0254*distanceSS - (np.mean(0.0254*distanceSS)) # Convert to distance from central maxima, (m)
grayValueSS = grayValueSS/np.amax(grayValueSS)
curvefitSS,cov_curvefitSS = curve_fit(functionSS, distanceSS, grayValueSS) # curve_fit finds line of best fit
plt.xlabel("Distance from Central Maximum Peak (x) / m", **font) # Label axes, add titles and error bars
plt.ylabel("Pixel Intensity / Relative", **font)
plt.xticks(**font, **fontAxesTicks)
plt.yticks(**font, **fontAxesTicks)
plt.title("Single Slit, Profile Plot", **font)
plt.errorbar(distanceSS, grayValueSS, yerr=0.001,xerr=0.0008,ls='',mew=1.5,ms=3,capsize=3) # Plots uncertainties in points
plt.plot(distanceSS, functionSSCorrected(distanceSS, *curvefitSS, -0.0025,1.03),'r') # (φ,k)
plt.show() # Show the graphs
pointsY,localminX,increments = [],[],np.linspace(-0.168672,0.168672,num=10000) # Numerical method to find minimum of curve_fit
for i in range(len(increments)): # Loop for values (accurate to 4.d.p)
pointsY.append(functionSSCorrected(increments[i],curvefitSS[0],curvefitSS[1],curvefitSS[2],curvefitSS[3],-0.0025,1.03)) # (φ,k)
pointsY = np.array(pointsY) # Convert to a Numpy array
localminY = argrelextrema(pointsY, np.less) # Find local minima using argrelextrema
for j in range(len(localminY)): # Loop around for number of local minima found
localminX.append(increments[localminY[j]]) # Add the x values of these minima to localminX
localminX = np.reshape(localminX,(4,1))
minimaOrderSS = [-2,-1,1,2] # Order of minima in graph
polyfitSS,cov_polyfitSS = np.polyfit(minimaOrderSS,localminX,1,cov=True) # Polyfit a linear line of best fit
plt.plot(minimaOrderSS, localminX, 'x') # Plot the points onto the linear plot
plt.plot(minimaOrderSS, (polyfitSS[0]*minimaOrderSS+polyfitSS[1])) # Plot the line of best fit
plt.xlabel("Order of Minima (n)", **font) # Label axes, add titles, errorbars
plt.ylabel("Displacement from Central Maximum Peak (x) / m", **font)
plt.xticks(**font, **fontAxesTicks)
plt.yticks(**font, **fontAxesTicks)
#plt.errorbar(distanceSS, grayValueSS, yerr=0.001,xerr=0.0008,ls='',mew=1.5,ms=3,capsize=3) # Plots uncertainties in linear plot points
plt.title("Single Slit, Minima Plot", **font)
plt.show() # Show the graphs
## DOUBLE-SLIT EXPERIMENT
mean = np.mean(0.0254*distanceDS)
distanceDS = 0.0254*distanceDS - (np.mean(0.0254*distanceDS)) - 0.00254
grayValueDS = 10.49*(grayValueDS/(np.amax(grayValueDS)))
#grayValueDS = grayValueDS/np.amax(grayValueDS)
curvefitDS,cov_curvefitDS = curve_fit(functionDS,distanceDS,grayValueDS,maxfev=1000000)
plt.xlabel("Distance (x) / m", **font)
plt.ylabel("Pixel Intensity / Relative", **font)
plt.xticks(**font, **fontAxesTicks)
plt.yticks(**font, **fontAxesTicks, color='white')
plt.title("Double Slit, Profile Plot", **font)
plt.errorbar(distanceDS, grayValueDS, yerr=0.001,xerr=0.0008,ls='',mew=1.5,ms=3,capsize=3) # Plots uncertainties in points
plt.plot(distanceDS, functionDSCorrected(distanceDS, *curvefitDS,-0.004,1.2,1.03),'r',color='red',ms=2,mew=3)
plt.show()
pointsY,localminX,increments = [],[],np.linspace(-0.171212,0.171212,num=10000)
for i in range(len(increments)): # Loop for values (accurate to 4.d.p)
pointsY.append(functionDSCorrected(increments[i],curvefitDS[0],curvefitDS[1],curvefitDS[2],curvefitDS[3],curvefitDS[4],-0.004,1.2,0.97)) # (φ,k,ω)
pointsY = np.array(pointsY) # Convert to a Numpy array
localminY = argrelextrema(pointsY, np.less) # Find local minima using argrelextrema
for j in range(len(localminY)): # Loop around for number of local minima found
localminX.append(increments[localminY[j]]) # Add the x values of these minima to localminX
localminX = np.reshape(localminX,(16,1))
minimaOrderDS = [-8,-7,-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,7,8] # Order of minima in graph
plt.plot(minimaOrderDS, localminX, 'x') # Plot the points onto the linear plot
polyfitDS,cov_polyfitDS = np.polyfit(minimaOrderDS, localminX, 1, cov=True) # Plot the line of best fit
plt.plot(minimaOrderDS, (polyfitDS[0]*minimaOrderDS+polyfitDS[1]))
plt.xlabel("Order of mimima (n)", **font)
plt.ylabel("Displacement from Central Maximum Peak (x) / m", **font)
plt.xticks(**font, **fontAxesTicks)
plt.yticks(**font, **fontAxesTicks)
#plt.errorbar(distanceDS, grayValueDS, yerr=0.001,xerr=0.0008,ls='',mew=1.5,ms=3,capsize=3) # Plots uncertainties in linear plot points
plt.title("Double Slit, Minima Plot", **font)
plt.show()
## DATA ANALYSIS: SINGLE SLIT
print('\nSINGLE SLIT DIFFRACTION:')
print('Gradient: %.3e' %(polyfitSS[0]*1.1)) # Displays the gradient of linear plot, with manual corrections (i.e. k) taken into account
print('The slit width (a) / m: %.3e' %(float(670e-9*0.15/(polyfitSS[0])))) # Calculates the slit width a = λf/gradient, in m
## DATA ANALYSIS: DOUBLE SLIT
print('\nDOUBLE SLIT DIFFRACTION:')
print('Gradient: %.3e' %(polyfitDS[0]*1.2/0.97)) # Displays the gradient of linear plot, with manual corrections (i.e. k,ω) taken into account
print('The slit separation (d) / m: %.3e' %(float(670e-9*0.15/(polyfitDS[0]))))
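# Why a (or d) = λf/gradient: under the small-angle approximation the single-slit minima
# sit at x_n = nλf/a and the double-slit minima at x_n = (n + 1/2)λf/d, so in both cases
# the slope of displacement against minima order is λf divided by the slit width a or
# the slit separation d, and the dimension follows as λf/gradient.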
## UNCERTAINTY PROPAGATION
#σcurvefitSS_I0,σcurvefitSS_a,σcurvefitSS_λ,σcurvefitSS_f = np.sqrt(float(cov_curvefitSS[0][0])),np.sqrt(float(cov_curvefitSS[1][1])),np.sqrt(float(cov_curvefitSS[2][2])),np.sqrt(float(cov_curvefitSS[3][3]))
#σpolyfitSS_X,σpolyfitSS_Y = np.sqrt(float(cov_polyfitSS[0][0])),np.sqrt(float(cov_polyfitSS[1][1]))
#σcurvefitDS_I0,σcurvefitDS_a,σcurvefitDS_λ,σcurvefitDS_f,σcurvefitDS_d = np.sqrt(float(cov_curvefitDS[0][0])),np.sqrt(float(cov_curvefitDS[1][1])),np.sqrt(float(cov_curvefitDS[2][2])),np.sqrt(float(cov_curvefitDS[3][3])),np.sqrt(float(cov_curvefitDS[4][4]))
#σpolyfitDS_X,σpolyfitDS_Y = np.sqrt(float(cov_polyfitDS[0][0])),np.sqrt(float(cov_polyfitDS[1][1]))
uf_λ = ufloat(670e-9,1e-9) # N.B: σ from absolute error (xᵢ-x) of 1e-9 m is also 1e-9 m
uf_f = ufloat(500e-3,0.001) # λ = 670±1 nm, f = 500±1 mm (matching the ufloat values above)
## Yaar's value = 0.15
uf_optimal_I0SS = ufloat(curvefitSS[0],np.sqrt(cov_curvefitSS[0,0]))
uf_optimal_aSS = ufloat(curvefitSS[1],np.sqrt(cov_curvefitSS[1,1]))
uf_optimal_λSS = ufloat(curvefitSS[2],np.sqrt(cov_curvefitSS[2,2]))
uf_optimal_fSS = ufloat(curvefitSS[3],np.sqrt(cov_curvefitSS[3,3]))
uf_gradientSS = ufloat(polyfitSS[0],np.sqrt(cov_polyfitSS[0,0]))
uf_yInterceptSS = ufloat(polyfitSS[1],np.sqrt(cov_polyfitSS[1,1]))
uf_optimal_I0DS = ufloat(curvefitDS[0],np.sqrt(cov_curvefitDS[0,0]))
uf_optimal_aDS = ufloat(curvefitDS[1],np.sqrt(cov_curvefitDS[1,1]))
uf_optimal_λDS = ufloat(curvefitDS[2],np.sqrt(cov_curvefitDS[2,2]))
uf_optimal_fDS = ufloat(curvefitDS[3],np.sqrt(cov_curvefitDS[3,3]))
uf_optimal_dDS = ufloat(curvefitDS[4],np.sqrt(cov_curvefitDS[4,4]))
uf_gradientDS = ufloat(polyfitDS[0],np.sqrt(cov_polyfitDS[0,0]))
uf_yInterceptDS = ufloat(polyfitDS[1],np.sqrt(cov_polyfitDS[1,1]))
uf_aSS = (uf_λ*uf_f)/uf_gradientSS
uf_dDS = (uf_λ*uf_f)/uf_gradientDS
print("\n Single Slit Diffraction:")
print("gradient: ",uf_gradientSS)
print("y-intercept: ",uf_yInterceptSS)
print("a = (",uf_aSS,") m")
print("\n Double Slit Diffraction:")
print("gradient: ",uf_gradientDS)
print("y-intercept: ",uf_yInterceptDS)
print("d = (",uf_dDS,") m")
unc_curvefitSS = uf_optimal_I0SS*(((np.sin((π*uf_optimal_aSS)/(uf_optimal_λSS*uf_optimal_fSS)))/((π*uf_optimal_aSS)/(uf_optimal_λSS*uf_optimal_fSS)))**2)
unc_curvefitDS = 4*uf_optimal_I0DS*(((np.sin((π*uf_optimal_aDS)/(uf_optimal_λDS*uf_optimal_fDS)))/((π*uf_optimal_aDS)/(uf_optimal_λDS*uf_optimal_fDS)))**2*((np.cos((π*uf_optimal_dDS)/(uf_optimal_λDS*uf_optimal_fDS)))**2))
## COVARIANCE MATRICES
''' print(curvefitSS)
print(polyfitSS)
print(curvefitDS)
print(polyfitDS)
print(cov_curvefitSS)
print(cov_polyfitSS)
print(cov_curvefitDS)
print(cov_polyfitDS) ''' |
<filename>transtory/shanghaimetro/trainstats.py
import os
import time
from sqlalchemy import func
from .configs import logger
from .publicdata import ShmPublicDataApp, get_public_data_app
from .dbdefs import Route, Departure, Arrival
from .dbdefs import Train, TrainType, Line
from .dbops import ShmDbOps, get_shm_db_ops
from .dbops import ShmSysConfigs, get_configs
from .dbops import DateTimeHelper, get_datetime_helper
class ShmTrainStats(object):
def __init__(self):
self.configs = get_configs()
self.save_folder = self.configs.stats_folder
self.dbops: ShmDbOps = get_shm_db_ops()
self.session = self.dbops.session
self.data_app: ShmPublicDataApp = get_public_data_app()
self.train_fields = ['seq', 'train', 'status', 'model', 'count', 'manufacturer']
self.train_type_fields = ['seq', 'type', 'taken', 'miss', 'total', 'ratio']
def _get_stats_full_path(self, fname):
return os.path.sep.join([self.save_folder, fname])
@staticmethod
def _write_lists_to_csv(fout, val_list):
"""Goal of the function is to handle the None values properly
"""
for val in val_list:
if val is None:
fout.write("||,")
elif isinstance(val, int):
fout.write("{:d},".format(val))
elif isinstance(val, str):
fout.write("|{:s}|,".format(val))
else:
raise Exception('Unsupported data type in csv writer.')
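    # For a row like [3, None, 'AC04'] the helper above writes "3,||,|AC04|," -
    # integers are written bare, None becomes an empty |...| pair, and strings are
    # wrapped in vertical bars (presumably so separators inside names cannot break
    # the CSV columns).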
def _def_validate_train_list_query(self):
"""We need to exclude converted train sets (train status 2) from validation
"""
columns = ["line", "sn", "type"]
query = self.session.query(Line.name, Train.sn, TrainType.name).filter(Train.status != 2)
query = query.join(Line.trains).join(Train.train_type).order_by(Train.sn)
return columns, query
def validate_train_type(self):
validate_pass = True
logger.info('Begin validating train types.')
start_time = time.perf_counter()
columns, query = self._def_validate_train_list_query()
for train in query.all():
train_sn, type_from_db = train[1], train[2]
# line, seq = self.data_app.get_line_and_seq_from_train_sn(train_sn)
type_from_app = self.data_app.get_type_of_train(train_sn)
            logger.info('Checking train {:s} (database type: {:s})'.format(train_sn, type_from_db))
if type_from_app != train[2]:
validate_pass = False
logger.warning('Train type from shanghai metro public data app and database do NOT match!')
logger.warning(' for train {:s}: app {:s}, database {:s}'.format(train_sn, type_from_app,
type_from_db))
if validate_pass:
logger.info("All trains have matching type with public data app match.")
else:
logger.warning("Validation of train type failed.")
logger.info("Finished validating train types (time used is {:f}s)".format(time.perf_counter() - start_time))
def _yield_train_list_entries(self):
query = self.session.query(func.count(Route.id), Train, TrainType).join(Route.train).join(Train.train_type)
query = query.group_by(Train.id).order_by(Train.sn)
for count, train, train_type in query.all():
results = list()
results.append(train.sn)
results.append(train.status)
results.append(train_type.name)
results.append(count)
results.append(train_type.maker)
yield results
def save_train_list_csv(self):
        logger.info('Begin saving the train list.')
start_time = time.perf_counter()
with open(self._get_stats_full_path('trains.csv'), 'w', encoding='utf8') as fout:
fout.write('\ufeff')
[fout.write('{:s},'.format(x)) for x in self.train_fields]
fout.write('\n')
for idx, result in enumerate(self._yield_train_list_entries()):
fout.write('{:d},'.format(idx + 1))
self._write_lists_to_csv(fout, result)
fout.write('\n')
logger.info("Finished saving all routes (time used is {:f}s)".format(time.perf_counter() - start_time))
def _yield_train_type_list_entries(self):
all_train_types = self.data_app.get_train_type_list()
query = self.session.query(Train.train_type_id, func.count('*').label("count")).group_by(Train.train_type_id)
stmt = query.subquery()
query = self.session.query(TrainType, stmt.c.count).outerjoin(stmt, TrainType.id == stmt.c.train_type_id)
query = query.order_by(TrainType.name)
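        # Roughly equivalent SQL (table names are illustrative): the outer join keeps
        # train types with no recorded rides, which then show up with count = 0:
        #   SELECT train_type.*, cnt.count
        #   FROM train_type LEFT OUTER JOIN
        #        (SELECT train_type_id, COUNT(*) AS count FROM train GROUP BY train_type_id) AS cnt
        #   ON train_type.id = cnt.train_type_id
        #   ORDER BY train_type.name;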
all_type_total, all_type_taken = 0, 0
for train_type, count in query.all():
name = train_type.name
count = 0 if count is None else count
total = int(all_train_types[name])
results = list()
results.append(name)
results.append(count)
results.append(total - count)
results.append(total)
results.append('{:d}%'.format(int(count*100.0/total)))
all_type_total += total
all_type_taken += count
yield results
results = list()
results.append('Sum')
results.append(all_type_taken)
results.append(all_type_total - all_type_taken)
results.append(all_type_total)
results.append('{:d}%'.format(int(all_type_taken*100.0/all_type_total)))
yield results
def save_train_type_list_csv(self):
        logger.info('Begin saving the train type list.')
start_time = time.perf_counter()
with open(self._get_stats_full_path("train_type.csv"), "w", encoding="utf8") as fout:
fout.write('\ufeff')
[fout.write('{:s},'.format(x)) for x in self.train_type_fields]
fout.write('\n')
for idx, result in enumerate(self._yield_train_type_list_entries()):
fout.write('{:d},'.format(idx + 1))
self._write_lists_to_csv(fout, result)
fout.write('\n')
        logger.info('Finished saving the train type list (time used is {:f}s)'.format(time.perf_counter() - start_time))
def _yield_line_list_entries(self):
query = self.session.query(func.count(Route.id), Train, TrainType).join(Route.train).join(Train.train_type)
query = query.group_by(Train.id).order_by(Train.sn)
for count, train, train_type in query.all():
results = list()
results.append(train.sn)
results.append(train_type.name)
results.append(count)
results.append(train_type.maker)
yield results
def save_line_list_csv(self):
        logger.info('Begin saving the line list.')
start_time = time.perf_counter()
with open(self._get_stats_full_path('trains.csv'), 'w', encoding='utf8') as fout:
fout.write('\ufeff')
[fout.write('|{:s}|,'.format(x)) for x in self.train_fields]
fout.write('\n')
for idx, result in enumerate(self._yield_train_list_entries()):
fout.write('{:d},'.format(idx + 1))
self._write_lists_to_csv(fout, result)
fout.write('\n')
logger.info("Finished saving all routes (time used is {:f}s)".format(time.perf_counter() - start_time))
def generate_unmet_train_str(self):
train_set = set()
for train_tp in self.session.query(Train.sn).all():
train = train_tp[0]
train_set.add(train)
train_df = self.data_app.get_train_df()
line_list = self.data_app.get_line_list()
output_str = ''
for line in line_list:
output_str += 'Line {:s}: '.format(line)
train_of_line = train_df[train_df['line'] == line]
for _, sr_train in train_of_line.iterrows():
train_sn = sr_train['train']
if ('-' not in train_sn) and (train_sn not in train_set):
_, train_seq = self.data_app.get_line_and_seq_from_train_sn(sr_train['train'])
output_str += '{:d}, '.format(train_seq)
output_str += '\n'
return output_str
def save_all_stats(self):
self.validate_train_type()
self.save_train_list_csv()
self.save_train_type_list_csv()
# self.save_line_list_csv()
print(self.generate_unmet_train_str())
|
<reponame>entn-at/clari_wavenet_vocoder
# coding: utf-8
from __future__ import with_statement, print_function, absolute_import
import math
import librosa
import numpy as np
from hparams import hparams
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.autograd import Variable
from wavenet_vocoder.modules import Embedding, Conv1d1x1, ResidualConv1dGLU, ConvTranspose2d
from train import build_model
from wavenet_vocoder import receptive_field_size
from wavenet_vocoder.wavenet import _expand_global_features, WaveNet
from wavenet_vocoder.mixture import sample_from_discretized_mix_logistic
from wavenet_vocoder.upsample import UpSampleConv
class ClariWaveNet(nn.Module):
def __init__(self, out_channels=2, layers=20, stacks=2,
residual_channels=64,
iaf_layer_sizes=[10, 10, 10, 10, 10, 10],
gate_channels=64,
kernel_size=3, dropout=1 - 0.95,
cin_channels=-1, gin_channels=-1, n_speakers=None,
weight_normalization=True,
upsample_conditional_features=False,
upsample_scales=None,
skip_out_channels=64,
freq_axis_kernel_size=3,
scalar_input=False,
use_speaker_embedding=True,
use_skip=True,
iaf_shift=False
):
super(ClariWaveNet, self).__init__()
self.scalar_input = scalar_input
self.residual_channels = residual_channels
self.out_channels = out_channels
self.cin_channels = cin_channels
self.iaf_layers_size = iaf_layer_sizes
self.last_layers = []
self.use_skip = use_skip
self.iaf_shift = iaf_shift
assert layers % stacks == 0
layers_per_stack = layers // stacks
self.first_layers = nn.ModuleList()
self.iaf_layers = nn.ModuleList()
self.last_layers = nn.ModuleList()
for i in range(len(iaf_layer_sizes)):
if scalar_input:
self.first_layers.append(
Conv1d1x1(1, self.residual_channels))
else:
self.first_layers.append(Conv1d1x1(self.out_channels, self.residual_channels))
for iaf_layer_size in iaf_layer_sizes:
iaf_layer = nn.ModuleList()
for layer_index in range(iaf_layer_size):
dilation = 2 ** (layer_index % layers_per_stack)
conv = ResidualConv1dGLU(
residual_channels,
gate_channels,
skip_out_channels=skip_out_channels,
kernel_size=kernel_size,
bias=True,
dilation=dilation,
dropout=dropout,
cin_channels=cin_channels,
gin_channels=gin_channels,
weight_normalization=weight_normalization
)
iaf_layer.append(conv)
self.iaf_layers.append(iaf_layer)
self.last_layers.append(nn.ModuleList([
nn.ReLU(),
Conv1d1x1(skip_out_channels, residual_channels,
weight_normalization=weight_normalization) if self.use_skip else
Conv1d1x1(residual_channels, residual_channels, weight_normalization=weight_normalization),
nn.ReLU(),
Conv1d1x1(residual_channels, out_channels, weight_normalization=weight_normalization)
]))
if gin_channels > 0 and use_speaker_embedding:
assert n_speakers is not None
self.embed_speakers = Embedding(
n_speakers, gin_channels, padding_idx=None, std=0.1)
else:
self.embed_speakers = None
# Upsample conv net
if upsample_conditional_features:
self.upsample_conv = UpSampleConv()
else:
self.upsample_conv = None
self.receptive_field = receptive_field_size(layers, stacks, kernel_size)
def load_teacher_upsample_conv(self, teacher):
upsample_state_dict = teacher.upsample_conv.state_dict()
self.upsample_conv.load_state_dict(upsample_state_dict)
for param in self.upsample_conv.parameters():
param.requires_grad = False
self.upsample_conv.eval()
def has_speaker_embedding(self):
return self.embed_speakers is not None
def local_conditioning_enabled(self):
return self.cin_channels > 0
def forward(self, z, c=None, g=None, softmax=False, use_cuda=True, use_scale=False):
if c is not None and self.upsample_conv is not None:
# B x 1 x C x T
c = c.unsqueeze(1)
# B x C x T
c = self.upsample_conv(c)
c = c.squeeze(1)
assert c.size(-1) == z.size(-1)
B, _, T = z.size()
iaf_layers_len = len(self.iaf_layers_size)
if g is not None:
if self.embed_speakers is not None:
# (B x 1) -> (B x 1 x gin_channels)
g = self.embed_speakers(g.view(B, -1))
# (B x gin_channels x 1)
g = g.transpose(1, 2)
assert g.dim() == 3
# Expand global conditioning features to all time steps
g_bct = _expand_global_features(B, T, g, bct=True)
if self.iaf_shift:
z = z[:, :, len(self.iaf_layers_size):]
mu_tot = torch.zeros(z.size(), requires_grad=True)
scale_tot = torch.ones(z.size(), requires_grad=True)
if use_cuda:
mu_tot, scale_tot = mu_tot.cuda(), scale_tot.cuda()
layer = 0
original_c = c
length = z.size(-1)
z_list = []
for first_conv, iaf_layer, last_layer in zip(self.first_layers, self.iaf_layers, self.last_layers):
if self.iaf_shift:
c = original_c[:, :, layer:layer + length]
skips = None
new_z = first_conv(z)
for f in iaf_layer:
if isinstance(f, ResidualConv1dGLU):
new_z, h = f(new_z, c, g_bct)
if skips is None:
skips = h
else:
skips += h
skips *= math.sqrt(0.5)
if self.use_skip:
new_z = skips
for f in last_layer:
new_z = f(new_z)
if use_scale:
mu_s_f, scale_s_f = new_z[:, :1, :], new_z[:, 1:, :]
else:
mu_s_f, scale_s_f = new_z[:, :1, :], torch.exp(torch.clamp(new_z[:, 1:, :], min=-7)) # log_scale
# mu_s_f = torch.clamp(mu_s_f, -1, 1 - 2.0 / hparams.quantize_channels)
mu_tot = mu_s_f + mu_tot * scale_s_f
scale_tot = scale_tot * scale_s_f
z = z * scale_s_f + mu_s_f
z_list.append(z)
layer += 1
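        # Each stage applies an affine transform z <- scale_s_f * z + mu_s_f, so the stages
        # compose into a single affine map whose parameters are accumulated above:
        # scale_tot = s_1 * ... * s_n, and mu_tot is updated as mu_tot <- m_k + s_k * mu_tot at stage k.
        # Worked example with two stages (m_1, s_1) = (1, 2) and (m_2, s_2) = (3, 0.5):
        # z -> 0.5 * (2*z + 1) + 3 = z + 3.5, and indeed scale_tot = 2 * 0.5 = 1 and
        # mu_tot = 3 + 0.5 * 1 = 3.5.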
return z_list, z, mu_tot, scale_tot
|
# from https://github.com/ronghuaiyang/arcface-pytorch/blob/master/models/metrics.py
# adacos: https://github.com/4uiiurz1/pytorch-adacos/blob/master/metrics.py
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd
from torch.nn import Parameter
import math
class ArcMarginProduct(nn.Module):
r"""Implement of large margin arc distance: :
Args:
in_features: size of each input sample
out_features: size of each output sample
s: norm of input feature
m: margin
cos(theta + m)
"""
def __init__(self, in_features, out_features, s=30.0, m=0.50, easy_margin=False, ls_eps=0.0):
super(ArcMarginProduct, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.s = s
self.m = m
self.ls_eps = ls_eps # label smoothing
self.W = Parameter(torch.FloatTensor(out_features, in_features))
self.reset_parameters()
self.easy_margin = easy_margin
self.cos_m = math.cos(m)
self.sin_m = math.sin(m)
self.th = math.cos(math.pi - m)
self.mm = math.sin(math.pi - m) * m
def reset_parameters(self):
stdv = 1. / math.sqrt(self.W.size(1))
self.W.data.uniform_(-stdv, stdv)
def forward(self, input, label):
# --------------------------- cos(theta) & phi(theta) ---------------------------
cosine = F.linear(F.normalize(input), F.normalize(self.W))
if label is None:
return cosine
sine = torch.sqrt(1.0 - torch.pow(cosine, 2))
phi = cosine * self.cos_m - sine * self.sin_m
if self.easy_margin:
phi = torch.where(cosine.float() > 0, phi, cosine.float())
else:
phi = torch.where(cosine.float() > self.th, phi, cosine.float() - self.mm)
# --------------------------- convert label to one-hot ---------------------------
# one_hot = torch.zeros(cosine.size(), requires_grad=True, device='cuda')
one_hot = torch.zeros(cosine.size(), device=label.device)
one_hot.scatter_(1, label.view(-1, 1).long(), 1)
if self.ls_eps > 0:
one_hot = (1 - self.ls_eps) * one_hot + self.ls_eps / self.out_features
# ------------- torch.where: out_i = x_i if condition_i else y_i -------------
output = (one_hot * phi) + ((1.0 - one_hot) * cosine)
output *= self.s
return output
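# A minimal usage sketch (illustrative only; the batch size, feature dimension and
# class count below are hypothetical, not taken from this file):
#
#   margin_head = ArcMarginProduct(in_features=512, out_features=1000, s=30.0, m=0.50)
#   embeddings = torch.randn(8, 512)            # e.g., output of a backbone encoder
#   labels = torch.randint(0, 1000, (8,))       # ground-truth class ids
#   logits = margin_head(embeddings, labels)    # (8, 1000), already scaled by s
#   loss = nn.CrossEntropyLoss()(logits, labels)
#
# Passing label=None returns the raw cosine similarities, which is useful at
# inference time when only embedding-to-class similarity is needed.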
class ArcMarginProduct2(nn.Module):
def __init__(self, in_features, out_features, s=30.0, m=0.50, easy_margin=False, ls_eps=0.0):
super(ArcMarginProduct2, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.s = s
self.m = m
self.ls_eps = ls_eps # label smoothing
self.W = Parameter(torch.FloatTensor(out_features, in_features))
nn.init.xavier_uniform_(self.W)
self.easy_margin = easy_margin
self.cos_m = math.cos(m)
self.sin_m = math.sin(m)
self.th = math.cos(math.pi - m)
self.mm = math.sin(math.pi - m) * m
def forward(self, input, label):
# --------------------------- cos(theta) & phi(theta) ---------------------------
cosine = F.linear(F.normalize(input), F.normalize(self.W))
if label is None:
return cosine
sine = torch.sqrt(1.0 - torch.pow(cosine, 2))
phi = cosine * self.cos_m - sine * self.sin_m
if self.easy_margin:
phi = torch.where(cosine.float() > 0, phi, cosine.float())
else:
phi = torch.where(cosine.float() > self.th, phi, cosine.float() - self.mm)
# --------------------------- convert label to one-hot ---------------------------
# one_hot = torch.zeros(cosine.size(), requires_grad=True, device='cuda')
one_hot = torch.zeros(cosine.size(), device=label.device)
one_hot.scatter_(1, label.view(-1, 1).long(), 1)
if self.ls_eps > 0:
one_hot = (1 - self.ls_eps) * one_hot + self.ls_eps / self.out_features
# ------------- torch.where: out_i = x_i if condition_i else y_i -------------
output = (one_hot * phi) + ((1.0 - one_hot) * cosine)
output *= self.s
return output
|
# -*- coding: utf-8 -*-
"""
Helpers to plot the lightcurve of a TESS subject, given a
LightCurveCollection
"""
# so that TransitTimeSpec can be referenced in type annotation in the class itself
# see: https://stackoverflow.com/a/49872353
from __future__ import annotations
import inspect
import warnings
from pathlib import Path
import re
from types import SimpleNamespace
from memoization import cached
import xmltodict
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter, AutoMinorLocator
import matplotlib.animation as animation
import numpy as np
import pandas as pd
from astropy.io import fits
from astropy import units as u
from astropy.table import Table
from astroquery.exceptions import NoResultsWarning
from astroquery.mast import Observations
import IPython
from IPython.display import display, HTML, Audio
from ipywidgets import interactive, interactive_output, fixed
import ipywidgets as widgets
from lightkurve import LightCurveCollection, LightkurveWarning
from lightkurve.utils import TessQualityFlags
from lightkurve_ext import of_sectors
import lightkurve_ext as lke
def parse_dvs_filename(filename):
# e.g.: tess2020267090513-s0030-s0030-0000000142087638-01-00394_dvs.pdf
match = re.match(r"^tess\d+-(s\d+-s\d+)-(\d+)-(\d+)-.+_dvs[.]pdf", filename)
if not match:
return {}
sector_range, tic_id_padded, tce_num_padded = (
match.group(1),
match.group(2),
match.group(3),
)
tic_id = re.sub(r"^0+", "", tic_id_padded)
tce_num = re.sub(r"^0+", "", tce_num_padded)
# tce_id_short is sufficient to identify a TCE for a given TIC, and is less visually busy
tce_id_short = f"{sector_range}:TCE{tce_num}"
# tce_id is the format used on ExoMAST, e.g., TIC142087638S0030S0030TCE1
tce_id = f"""TIC{tic_id}{re.sub("-", "", sector_range.upper())}TCE{tce_num}"""
return dict(
tce_id=tce_id,
tce_id_short=tce_id_short,
sector_range=sector_range,
tic_id=tic_id,
tce_num=tce_num,
)
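# Worked example, using the sample filename from the comment above:
#
#   parse_dvs_filename("tess2020267090513-s0030-s0030-0000000142087638-01-00394_dvs.pdf")
#   # -> dict(tce_id="TIC142087638S0030S0030TCE1",
#   #         tce_id_short="s0030-s0030:TCE1",
#   #         sector_range="s0030-s0030",
#   #         tic_id="142087638",
#   #         tce_num="1")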
def parse_dvr_filename(filename):
match = re.match(r"^tess\d+-(s\d+-s\d+)-(\d+)-.+_dvr[.](pdf|xml)", filename)
if not match:
return {}
sector_range, tic_id_padded, file_type = (
match.group(1),
match.group(2),
match.group(3),
)
tic_id = re.sub(r"^0+", "", tic_id_padded)
return dict(sector_range=sector_range, tic_id=tic_id, file_type=file_type)
@cached
def get_dv_products_of_tic(tic_id, productSubGroupDescription, download_dir=None):
# Based on:
# - https://outerspace.stsci.edu/display/TESS/7.0+-+Tips+and+Tricks+to+Getting+TESS+Data+At+MAST
# https://github.com/spacetelescope/notebooks/blob/master/notebooks/MAST/TESS/beginner_astroquery_dv/beginner_astroquery_dv.ipynb
# Note: for TESS, tic_id (the number without the TIC prefix) is what makes an exact match work;
# Kepler / K2 ids will need some additional processing for an exact match to work.
exact_target_name = tic_id
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=NoResultsWarning, message=".*No products to download.*")
obs_wanted = Observations.query_criteria(
target_name=exact_target_name,
dataproduct_type="timeseries",
obs_collection="TESS",
)
data_products = Observations.get_product_list(obs_wanted)
return Observations.filter_products(data_products, productSubGroupDescription=productSubGroupDescription)
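# A minimal usage sketch (the TIC id below is the sample one from the filename
# example above; any valid TIC number works):
#
#   products = get_dv_products_of_tic("142087638", ["DVS", "DVR"])
#   # `products` is a filtered product table of DV summary / full report entries,
#   # suitable for Observations.download_products().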
@cached
def parse_dvr_xml(file_path):
def as_list(data):
"""Wrap an item as a list, if it's not one.
Useful for handling dicts from XML, where an element may appear once or multiple times."""
if type(data) is list:
return data
else:
return [data]
def param_value(model_params_dict, param_name):
param_dict = model_params_dict.get(param_name)
if param_dict is None:
return None
val_str = param_dict.get("@value")
if val_str is None:
return None
return float(val_str)
# the body
with open(file_path, "r") as f:
dvr_xml_str = f.read()
parsed = xmltodict.parse(dvr_xml_str)
planets_dict = {}
e_pr_list = as_list(parsed["dv:dvTargetResults"]["dv:planetResults"])
for e_pr in e_pr_list:
e_afit = e_pr["dv:allTransitsFit"]
planet_num = e_afit["@planetNumber"]
params_dict = {} # a temporary structure to access params internally
for mp in e_afit["dv:modelParameters"]["dv:modelParameter"]:
params_dict[mp["@name"]] = mp
# TODO: add other DV fitting parameters, odd/even test, centroid, etc.
# use the underlying xml attribute names, even though it breaks the convention
a_planet_dict = dict(
planetNumber=planet_num,
transitEpochBtjd=param_value(params_dict, "transitEpochBtjd"),
planetRadiusEarthRadii=param_value(params_dict, "planetRadiusEarthRadii"),
transitDurationHours=param_value(params_dict, "transitDurationHours"),
orbitalPeriodDays=param_value(params_dict, "orbitalPeriodDays"),
transitDepthPpm=param_value(params_dict, "transitDepthPpm"),
minImpactParameter=param_value(params_dict, "minImpactParameter"),
)
planets_dict[planet_num] = a_planet_dict
return planets_dict
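# The returned dict is keyed by planet number (a string taken from the XML
# "@planetNumber" attribute). A sketch of the shape, with hypothetical values:
#
#   {"1": dict(planetNumber="1", transitEpochBtjd=2100.1234,
#              planetRadiusEarthRadii=2.5, transitDurationHours=3.2,
#              orbitalPeriodDays=5.4321, transitDepthPpm=1500.0,
#              minImpactParameter=0.3)}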
def get_tce_infos_of_tic(tic_id, download_dir=None):
def filter_by_dataURI_suffix(products, suffix):
# Helper to filter products into summary, full report, full report xml using suffix.
# It replaces the logic to filter by "description" column, as description is sometimes unreliable
# E.g., for the TCE for TIC 43843023 sector 5, the dvr xml has incorrect description
# so that the entry is treated as a dvr pdf
return products[np.char.endswith(products["dataURI"], suffix)]
products_wanted = get_dv_products_of_tic(tic_id, ["DVS", "DVR"], download_dir=download_dir)
res = []
# basic info
for p in filter_by_dataURI_suffix(products_wanted, "_dvs.pdf"):
tce_info = parse_dvs_filename(p["productFilename"])
entry = dict(
obsID=p["obsID"],
tic_id=tce_info.get("tic_id"),
sector_range=tce_info.get("sector_range"),
tce_num=tce_info.get("tce_num"),
tce_id=tce_info.get("tce_id"),
tce_id_short=tce_info.get("tce_id_short"),
dvs_dataURI=p["dataURI"],
)
res.append(entry)
# DVR pdf link
for p in filter_by_dataURI_suffix(products_wanted, "_dvr.pdf"):
# find TCEs for the same observation (sometimes there are multiple TCEs for the same observation)
for entry in [e for e in res if e["obsID"] == p["obsID"]]:
entry["dvr_dataURI"] = p["dataURI"]
products_dvr_xml = filter_by_dataURI_suffix(products_wanted, "_dvr.xml")
manifest = Observations.download_products(products_dvr_xml, download_dir=download_dir)
if manifest is None:
return res
for m in manifest:
dvr_xml_local_path = m["Local Path"]
dvr_info = parse_dvr_filename(Path(dvr_xml_local_path).name)
for entry in [e for e in res if e["tic_id"] == dvr_info["tic_id"] and e["sector_range"] == dvr_info["sector_range"]]:
entry["dvr_xml_local_path"] = dvr_xml_local_path
planets_dict = parse_dvr_xml(dvr_xml_local_path)
for a_planet_dict in planets_dict.values():
for entry in [
e
for e in res
if e["tic_id"] == dvr_info["tic_id"]
and e["sector_range"] == dvr_info["sector_range"]
and e["tce_num"] == a_planet_dict["planetNumber"]
]:
entry["planet"] = a_planet_dict
return res
def get_tic_meta_in_html(lc, a_subject_id=None, download_dir=None):
# This function does not do the actual display,
# so that the caller can call it in the background
# and display it wherever it's needed
def link(link_text, url):
return f"""<a href="{url}" target="_blank">{link_text}</a>"""
def prop(prop_name, prop_value):
return f""" <tr><td>{prop_name}</td><td>{prop_value}</td></tr>\n"""
def row(*args):
return "<tr>" + "".join(f"<td>{v}</td>" for v in args) + "</tr>"
# main logic
m = lc.meta
tic_id = str(m.get("TICID"))
def safe_m_get(key, default_val):
# in some meta, the key exists but the value is None
# this helper handles it
res = m.get(key, default_val)
return res if res is not None else default_val
html = f"""
<h3>TIC {tic_id}</h3>
"""
html += " " + link("ExoFOP", f"https://exofop.ipac.caltech.edu/tess/target.php?id={tic_id}")
html += "\n | "
html += link(
"PHT Talk",
f"https://www.zooniverse.org/projects/nora-dot-eisner/planet-hunters-tess/talk/search?query={tic_id}",
)
if a_subject_id is not None:
# note, a TIC can have multiple subjects, here is just one of them.
html += "\n , a subject: "
html += link(
a_subject_id,
f"https://www.zooniverse.org/projects/nora-dot-eisner/planet-hunters-tess/talk/subjects/{a_subject_id}",
)
# show the sector number (here we assume a_subject_id does correspond to the sector)
# including the sector is useful so that users can easily locate the TCE matching the sector.
html += f' (sector {safe_m_get("SECTOR", "")})'
html += "<br>\n"
html += "<table>\n"
html += prop("R<sub>S</sub> (in R<sub>☉</sub>)", f'{safe_m_get("RADIUS", 0):.3f}')
html += prop("Magnitude (TESS)", f'{safe_m_get("TESSMAG", 0):.2f}')
html += prop("T_eff (in K)", safe_m_get("TEFF", 0))
html += "</table>\n"
# TODO: For TCE, query MAST download / parse results (the _dvr.xml), to show
# - basic planet parameters and orbital info
# - red flags in vetting report
# see: https://archive.stsci.edu/missions-and-data/tess/data-products
tce_info_list = get_tce_infos_of_tic(tic_id, download_dir=download_dir)
if len(tce_info_list) < 1:
return html
header = [
("TCE", ""),
("Reports", ""),
("R<sub>p</sub>", "R<sub>j</sub>"),
("Epoch", "BTJD"),
("Duration", "hr"),
("Period", "day"),
("Depth", "%"),
("Impact P.", "<i>b</i>"),
("Codes", ""),
]
html += """<br>TCEs: <table>
<thead>"""
html += "<tr>"
html += " ".join([f"<th>{h[0]}</th>" for h in header])
html += "</tr>\n"
html += "<tr>"
html += " ".join([f"<th>{h[1]}</th>" for h in header])
html += "</tr>\n"
html += """
</thead>
<tbody>
"""
R_EARTH_TO_R_JUPITER = 6378.1 / 71492
for info in tce_info_list:
exomast_url = f'https://exo.mast.stsci.edu/exomast_planet.html?planet={info.get("tce_id")}'
dvs_url = f'https://exo.mast.stsci.edu/api/v0.1/Download/file?uri={info.get("dvs_dataURI")}'
dvr_url = f'https://exo.mast.stsci.edu/api/v0.1/Download/file?uri={info.get("dvr_dataURI")}'
p_i = info.get("planet", {})
html += row(
link(info.get("tce_id_short"), exomast_url),
f"""{link("dvs", dvs_url)}, {link("full", dvr_url)}""",
f'{p_i.get("planetRadiusEarthRadii", 0) * R_EARTH_TO_R_JUPITER:.3f}',
f'{p_i.get("transitEpochBtjd", 0):.4f}',
f'{p_i.get("transitDurationHours", 0):.4f}',
f'{p_i.get("orbitalPeriodDays", 0):.6f}',
f'{p_i.get("transitDepthPpm", 0) / 10000:.4f}',
f'{p_i.get("minImpactParameter", 0):.2f}',
# code fragments so that users can easily use a TCE as an entry in transit_specs
f"""\
<input type="text" style="margin-left: 3ch; font-size: 90%; color: #666; width: 10ch;"
value='epoch={p_i.get("transitEpochBtjd", 0):.4f}, duration_hr={p_i.get("transitDurationHours", 0):.4f}, \
period={p_i.get("orbitalPeriodDays", 0):.6f}, label="{info.get("tce_id_short")}",'>""",
)
html += "\n"
html += "</tbody></table>\n"
# TODO: check if there is a TOI?!
return html
def beep():
"""Emits a beep sound. It works only in IPython / Jupyter environment only"""
# a beep to remind the users that the data has been downloaded
# css tweak to hide beep
display(
HTML(
"""<script>
function tweakCSS() {
if (document.getElementById("hide-beep-css")) {
return;
}
document.head.insertAdjacentHTML('beforeend', `<style id="hide-beep-css" type="text/css">
#beep { /* hide the audio control for the beep, generated from tplt.beep() */
width: 1px;
height: 1px;
}
</style>`);
}
tweakCSS();
</script>
"""
)
)
# the actual beep
beep_url = "https://upload.wikimedia.org/wikipedia/commons/f/fb/NEC_PC-9801VX_ITF_beep_sound.ogg"
if int(re.sub(r"[.].+", "", IPython.__version__)) < 7:
# compatibility with older IPython (e.g., google colab)
audio = Audio(url=beep_url, autoplay=True, embed=True)
else:
audio = Audio(url=beep_url, autoplay=True, embed=True, element_id="beep")
display(audio)
def _normalize_to_percent_quiet(lc):
# Some products are already in normalized flux, e.g., around 1; we still want to normalize them to percentage
# for consistency
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=LightkurveWarning, message=".*in relative units.*")
return lc.normalize(unit="percent")
# Plot the flux changes (not flux themselves) to get a sense of the rate of changes, not too helpful yet.
def plot_lcf_flux_delta(lcf, ax, xmin=None, xmax=None, moving_avg_window="30min"):
# possible input arguments
lc = _normalize_to_percent_quiet(lcf)
# Basic scatter of the observation
# ax = lc.scatter(ax=ax)
# convert to dataframe to add moving average
df = lc.to_pandas()
df["time_ts"] = [pd.Timestamp(x, unit="D") for x in df.index]
# the timestamp above is good for relative time.
# if we want the timestamp to reflect the actual time, we need to convert the BTJD in time to a timestamp, e.g.
# pd.Timestamp(astropy.time.Time(x + 2457000, format='jd', scale='tdb').datetime.timestamp(), unit='s')
df["flux_mavg"] = df.rolling(moving_avg_window, on="time_ts")["flux"].mean()
# ax.plot(lc.time.value, df['flux_mavg'], c='black', label=f"Moving average ({moving_avg_window})")
df["flux_delta"] = df.rolling(moving_avg_window, on="time_ts")["flux_mavg"].apply(
lambda vary: vary[-1] - vary[0], raw=True
)
ax.plot(
lc.time.value,
df["flux_delta"],
c="blue",
label=f"Flux delta ({moving_avg_window})",
)
ax.set_xlim(xmin, xmax)
return ax
def lcf_fig():
return plt.figure(figsize=(15, 6))
def flux_near(lc, time):
if time is None or lc is None:
return None
else:
idx = (np.abs(lc.time - time)).argmin()
return lc.flux[idx]
def flux_mavg_near(df, time):
if time is None or df is None:
return None
else:
idx = (np.abs(df.index.values - time)).argmin()
# must use df.iloc[idx]['flux_mavg'], rather than df['flux_mavg'][idx]
# because dataframe from lightkurve is indexed by time (rather than regular 0-based index)
# df.iloc[] ensures we can still access the value by 0-based index
return df.iloc[idx]["flux_mavg"]
def _to_unitless(n):
if hasattr(n, "value"):
return n.value
else:
return n
def as_4decimal(float_num):
if float_num is None:
return None
elif isinstance(float_num, tuple) or isinstance(float_num, list):
return [float("{0:.4f}".format(_to_unitless(n))) for n in float_num]
else:
return float("{0:.4f}".format(_to_unitless(float_num)))
def add_flux_moving_average(lc, moving_avg_window):
df = lc.to_pandas()
begin_t = df.index[0]
df["time_ts"] = [pd.Timestamp(t - begin_t, unit="D") for t in df.index]
# the timestamp above is good for relative time.
# 1. we subtract the start time when creating the timestamp because for some products, e.g., CDIPS, the time value itself
# is so large that creating pd.Timestamp with it causes an overflow error
# 2. if we want the timestamp to reflect the actual time, we need to convert the BTJD in time to a timestamp, e.g.
# pd.Timestamp(astropy.time.Time(x + 2457000, format='jd', scale='tdb').datetime.timestamp(), unit='s')
df["flux_mavg"] = df.rolling(moving_avg_window, on="time_ts")["flux"].mean()
return df
def add_relative_time(lc, lcf):
t_start = lcf.meta.get("TSTART")
if t_start is None:
return False
lc["time_rel"] = lc.time - t_start
return True
def mask_gap(x, y, min_x_diff):
"""
Helper for plotting graphs with gaps in the data, so that a straight line won't be drawn to fill the gap.
Returns a masked y that can be passed to pyplot.plot() so that the gap shows.
"""
# handle the case that x is an astropy Time object, rather than a simple float array
x = _to_unitless(x)
x_diff = np.diff(x, prepend=-min_x_diff)
return np.ma.masked_where(x_diff > min_x_diff, y)
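# A minimal usage sketch (hypothetical values): the point following a gap larger
# than 0.5 days is masked, so pyplot breaks the line there instead of bridging it.
#
#   x = [0.0, 0.1, 0.2, 5.0, 5.1]
#   y = [1.0, 1.1, 0.9, 1.2, 1.0]
#   ax.plot(x, mask_gap(x, y, 0.5))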
def normalize_percent(lc):
"""
Syntactic sugar for a lambda that normalizes to percentage.
Useful when calling ``lc.fold()``, ``tpf.interact()``, etc.
"""
return lc.normalize(unit="percent")
def _to_lc_with_flux(lc, flux_col):
"""Return a Lightcurve object with the named column as the flux column"""
# analogous lkv1's way: lc = getattr(lcf, flux_col)
res = lc.copy()
res["flux"] = lc[flux_col.lower()] # e.g., PDCSAP_FLUX (how we do in lkv1) will be lowerecased
return res
_cache_plot_n_annotate_lcf = dict(lcf=None, flux_col=None, lc=None)
def plot_n_annotate_lcf(
lcf,
ax,
flux_col="flux",
xmin=None,
xmax=None,
t0=None,
t_start=None,
t_end=None,
moving_avg_window="30min",
t0mark_ymax=0.3,
set_title=True,
show_r_obj_estimate=True,
title_fontsize=18,
lc_tweak_fn=None,
ax_tweak_fn=None,
):
if lcf is None:
print("Warning: lcf is None. Plot skipped")
return
# cache lc to speed up plots repeatedly over the same lcf
global _cache_plot_n_annotate_lcf
if lcf is _cache_plot_n_annotate_lcf["lcf"] and flux_col == _cache_plot_n_annotate_lcf["flux_col"]:
lc = _cache_plot_n_annotate_lcf["lc"]
else:
lc = _normalize_to_percent_quiet(_to_lc_with_flux(lcf, flux_col))
_cache_plot_n_annotate_lcf["lcf"] = lcf
_cache_plot_n_annotate_lcf["flux_col"] = flux_col
_cache_plot_n_annotate_lcf["lc"] = lc
if lc_tweak_fn is not None:
lc = lc_tweak_fn(lc)
if xmin is None and t_start is not None:
xmin = t_start - 0.5
if xmax is None and t_end is not None:
xmax = t_end + 0.5
# implement xmin / xmax by limiting the LC itself, rather than using ax.set_xlim after the plot
# - the Y-scale will then be automatically scaled to the specified time range, rather than over the entire lightcurve
# - make plotting faster (fewer data points)
if xmin is not None:
lc = lc[lc.time.value >= xmin]
if xmax is not None:
lc = lc[lc.time.value <= xmax]
lcfh = lcf.meta
# Basic scatter of the observation
if "long" == lke.estimate_cadence_type(lc):
# long cadence has sparser data, use a larger "x" to represent them
# "x" is also useful to distinguish it from moving average,
# which will likely overlap with the points given the sparse data
ax = lc.scatter(ax=ax, s=36, marker="x")
else:
ax = lc.scatter(ax=ax)
if len(lc) < 1:
print(
(
"Warning: specified (xmin, xmax) is out of the range of the lightcurve "
f"{lc.label} sector {lcfh['SECTOR']}. Nothing to plot"
)
)
return ax
# convert to dataframe to add moving average
if moving_avg_window is not None:
df = add_flux_moving_average(lc, moving_avg_window)
# mask_gap: if there is a gap larger than 2 hours,
# show the gap rather than trying to fill the gap with a straight line.
ax.plot(
lc.time.value,
mask_gap(lc.time, df["flux_mavg"], 2 / 24),
c="black",
label=f"Moving average ({moving_avg_window})",
)
else:
df = add_flux_moving_average(lc, "10min") # still needed for some subsequent calc, but don't plot it
# annotate the graph
if t_start is not None:
ax.axvline(t_start)
if t_end is not None:
ax.axvline(t_end)
if t0 is not None:
t_lc_start = lcf.meta.get("TSTART", None)
t0_rel_text = ""
if t_lc_start is not None:
t0_rel = t0 - t_lc_start
t0_rel_text = f" ({as_4decimal(t0_rel)})"
ax.axvline(
t0,
ymin=0,
ymax=t0mark_ymax,
color="black",
linewidth=3,
linestyle="--",
label=f"t0 ~= {t0}{t0_rel_text}",
)
if set_title:
title_text = f"{lc.label}, sector {lcfh['SECTOR']}"
if lc.author is not None and lc.author != "SPOC":
title_text += f", by {lc.author}"
if t0 is not None:
transit_duration_msg = ""
if t_start is not None and t_end is not None:
transit_duration_msg = f"\ntransit duration ~= {as_4decimal(24 * (t_end - t_start))}h"
flux_t0 = flux_mavg_near(df, t0)
if flux_t0 is not None:
flux_begin = max(flux_mavg_near(df, t_start), flux_mavg_near(df, t_end))
flux_dip = flux_begin - flux_t0
r_obj_msg = ""
r_obj = lke.estimate_object_radius_in_r_jupiter(lc, flux_dip / 100) # convert flux_dip in percent to fractions
if show_r_obj_estimate and r_obj is not None:
r_obj_msg = f", R_p ~= {r_obj:0.2f} R_j"
title_text += (
f" \nflux@$t_0$ ~= {as_4decimal(flux_t0)}%, "
f"dip ~= {as_4decimal(flux_dip)}%{r_obj_msg}{transit_duration_msg}"
)
ax.set_title(title_text, {"fontsize": title_fontsize})
ax.legend()
ax.xaxis.label.set_size(18)
ax.yaxis.label.set_size(18)
# to avoid occasional formatting in scientific notation
ax.xaxis.set_major_formatter(FormatStrFormatter("%.2f"))
ax.yaxis.set_major_formatter(FormatStrFormatter("%.2f"))
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.tick_params(axis="x", which="minor", length=4)
ax.yaxis.set_minor_locator(AutoMinorLocator())
ax.tick_params(axis="y", which="minor", length=4)
if ax_tweak_fn is not None:
ax_tweak_fn(ax)
return ax
def plot_transit(lcf, ax, t0, duration, surround_time, **kwargs):
return plot_n_annotate_lcf(
lcf,
ax=ax,
t0=t0 if duration > 0 else None,
t_start=t0 - duration / 2 if duration > 0 else None,
t_end=t0 + duration / 2 if duration > 0 else None,
xmin=t0 - (duration + surround_time) / 2,
xmax=t0 + (duration + surround_time) / 2,
**kwargs,
)
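# A minimal sketch (hypothetical transit parameters): zoom in on a single transit
# with 1.5 days of padding around it.
#
#   ax = plot_transit(lcf, lcf_fig().gca(), t0=2100.1234, duration=3.2 / 24, surround_time=1.5)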
def plot_transits(lcf_coll, transit_specs, ax_fn=lambda: lcf_fig().gca(), **kwargs):
"""Helper to plot transits zoomed-in."""
axs = []
for spec in transit_specs:
for lcf in of_sectors(lcf_coll, spec["sector"]): # in case we have multiple lcf per sector
# process the supplied spec and apply defaults
t0 = spec["epoch"]
duration = spec["duration_hr"] / 24
period = spec["period"]
steps_to_show = spec["steps_to_show"]
surround_time = spec.get("surround_time", 1.5) # a hardcoded last resort default
# TODO: warn if period is 0, but steps to show is not [0]
for i in steps_to_show:
cur_t0 = t0 + period * i
ax = plot_n_annotate_lcf(
lcf,
ax=ax_fn(),
t0=cur_t0,
t_start=cur_t0 - duration / 2,
t_end=cur_t0 + duration / 2,
xmin=cur_t0 - (duration + surround_time) / 2,
xmax=cur_t0 + (duration + surround_time) / 2,
**kwargs,
)
axs.append(ax)
return axs
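# A minimal, hypothetical transit_specs entry for plot_transits(); the field names
# match what the loop above reads from each spec:
#
#   transit_specs = [
#       dict(sector=30, epoch=2100.1234, duration_hr=3.2, period=5.4321,
#            steps_to_show=[0, 1], surround_time=1.5),
#   ]
#   axs = plot_transits(lcf_coll, transit_specs)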
def print_data_range(lcf_coll):
"""Print the data range for the given LightCurveCollection
For each LightCurveFile:
* sector start/stop time
* first / last observation time
* camera used
"""
html = '<pre style="line-height: 1.1;">\n'
html += "<summary>Sectors: " + str(list(map(lambda lc: lc.meta.get("SECTOR"), lcf_coll))) + f" ({len(lcf_coll)})" + "\n"
html += "Observation period range / data range:" + "\n"
html += "<details>"
for lc in lcf_coll:
html += f" Sector {lc.meta.get('SECTOR')}: {lc.meta.get('TSTART')} - {lc.meta.get('TSTOP')}" + "\n"
html += f" (cam {lc.meta.get('CAMERA')}) {lc.time.min()} - {lc.time.max()}" + "\n"
html += "</details></summary></pre>"
display(HTML(html))
# Do the actual plots
def plot_all(
lcf_coll,
flux_col="flux",
moving_avg_window=None,
lc_tweak_fn=None,
ax_fn=None,
use_relative_time=False,
mark_quality_issues=True,
mark_momentum_dumps=True,
set_title=True,
ax_tweak_fn=None,
):
"""Plot the given LightCurveFile collection, one graph for each LightCurve
Returns
-------
axs : the list of plots in `matplotlib.Axes`
"""
# choice 1: use the built-in plot method
# ax_all = plt.figure(figsize=(30, 15)).gca()
# lcf_coll.PDCSAP_FLUX.plot(ax=ax_all) # Or lcf_coll.SAP_FLUX.plot()
# choice 2: stitch lightcurves of the collection together, and then use more flexible methods, e.g., scatter
# Note: pass lambda x: x to stitch() so that the code won't normalize the flux value sector by sector
# lc_all = lcf_coll.PDCSAP_FLUX.stitch(lambda x: x)
# lc_all.scatter(ax=ax_all, normalize=True)
# choice 3: plot the lightcurve sector by sector: each sector has its own color
# for i in range(0, len(lcf_coll)):
# lcf_coll[i].PDCSAP_FLUX.scatter(ax=ax_all)
# ax_all.set_title((f"TIC {lcf_coll[0].PDCSAP_FLUX.label}, "
# f"sectors {list(map(lambda lcf: lcf.meta.get('SECTOR'), lcf_coll))}"))
# return ax_all
# choice 4: plot the lightcurve sector by sector: each sector in its own graph
axs = []
for i in range(0, len(lcf_coll)):
if ax_fn is None:
ax = lcf_fig().gca()
else:
ax = ax_fn()
lcf = lcf_coll[i]
lc = _to_lc_with_flux(lcf, flux_col)
lc = _normalize_to_percent_quiet(lc)
if lc_tweak_fn is not None:
lc = lc_tweak_fn(lc)
# temporarily change time to a relative one if specified
if use_relative_time:
rel_time_added = add_relative_time(lc, lcf)
if rel_time_added:
lc["time_orig"] = lc.time
lc.time = lc.time_rel
else:
# the file has no observation start time, so we cannot add it
use_relative_time = False
# tweak label to include sector if any
sector = lcf_coll[i].meta.get("SECTOR", None)
label_long = lc.label
if sector is not None:
lc.label += f", s.{sector}"
label_long += f", sector {sector}"
if lc.author is not None and lc.author != "SPOC":
label_long += f", by {lc.author}"
if "long" == lke.estimate_cadence_type(lc):
# long cadence has sparser data, use a larger "x" to represent them
# "x" is also useful to distinguish it from moving average,
# which will likely overlap with the points given the sparse data
ax = lc.scatter(ax=ax, s=16, marker="x")
else:
ax = lc.scatter(ax=ax)
# convert to dataframe to add moving average
if moving_avg_window is not None:
df = add_flux_moving_average(lc, moving_avg_window)
# mask_gap: if there is a gap larger than 2 hours,
# show the gap rather than trying to fill the gap with a straight line.
ax.plot(
lc.time.value,
mask_gap(lc.time, df["flux_mavg"], 2 / 24),
c="black",
label=f"Moving average ({moving_avg_window})",
)
title_extras = ""
if lc_tweak_fn is not None:
title_extras = "\nLC tweaked, e.g., outliers removed"
if set_title:
ax.set_title(f"{label_long} {title_extras}", {"fontsize": 36})
if use_relative_time:
ax.xaxis.set_label_text("Time - relative")
# restore original time after plot is done
lc.time = lc.time_orig
else:
t_start = lc.meta.get("TSTART")
if t_start is not None:
ax.xaxis.set_label_text(ax.xaxis.label.get_text() + f", TSTART={t_start:0.2f}")
# to avoid occasional formatting in scientific notation
ax.yaxis.set_major_formatter(FormatStrFormatter("%.2f"))
# minor tick, 1 day interval in practice
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.tick_params(axis="x", which="minor", length=4)
# ax.xaxis.grid(True, which='minor') # too noisy to be there by default
ax.xaxis.label.set_size(fontsize=18)
ax.yaxis.label.set_size(fontsize=18)
if ax_tweak_fn is not None:
ax_tweak_fn(ax)
# mark quality issue is applied after ax_tweak_fn, in case users use ax_tweak_fn and change the graph's ylim
if mark_quality_issues:
# the time where flux might have potential issues, using the suggested starting quality flag mask
time = lc.time if not use_relative_time else lc.time_rel
time_w_quality_issues = time[lke.create_quality_issues_mask(lc)]
if len(time_w_quality_issues) > 0:
# add marks as vertical lines at bottom 10% of the plot
# Note: ax.vlines's ymin/ymax refers to the data. To specify them relative to y-axis
# I have to 1) use transform, and
# 2) tell the plot not to auto-scale Y-axis
# (if auto-scaling is done, it will treat the line's coordinates as data)
# Somehow it doesn't work all the time: it could crop the y axis such that
# only the vlines are visible
# ax.set_autoscaley_on(False)
# ax.vlines(time_w_quality_issues, ymin=0, ymax=0.1, transform=ax.get_xaxis_transform()
# , color='red', linewidth=1, linestyle='--', label="potential quality issue")
# back to the visually less appealing one (the vline doesn't start from the bottom)
ybottom, ytop = ax.get_ylim()
ax.vlines(
time_w_quality_issues.value,
ymin=ybottom,
ymax=ybottom + 0.1 * (ytop - ybottom),
color="red",
linewidth=1,
linestyle="--",
label="potential quality issue",
)
if mark_momentum_dumps:
# Note: momentum_dump signals are by default masked out in LightCurve objects.
# To access times marked as such, I need to access the raw LightCurveFile directly.
with fits.open(lcf.filename) as hdu:
if "TIME" in hdu[1].columns.names:
time = hdu[1].data["TIME"]
if use_relative_time:
t_start = lcf.meta.get("TSTART")
time = time - t_start
mom_dumps_mask = np.bitwise_and(hdu[1].data["QUALITY"], TessQualityFlags.Desat) >= 1
time_mom_dumps = time[mom_dumps_mask]
if len(time_mom_dumps) > 0:
ybottom, ytop = ax.get_ylim()
ax.vlines(
time_mom_dumps,
ymin=ybottom,
ymax=ybottom + 0.15 * (ytop - ybottom),
color="red",
linewidth=1,
linestyle="-.",
label="Momentum dumps",
)
else:
# case the file has no TIME column, typically non SPOC-produced ones, e.g., CDIPS,
# the logic of finding momentum dump would not apply to such files anyway.
pass
ax.legend()
axs.append(ax)
return axs
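# A typical call (the keyword values here are just one reasonable choice):
#
#   axs = plot_all(lcf_coll, moving_avg_window="30min", mark_quality_issues=True)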
_lcf_4_plot_interactive = None
def _update_plot_lcf_interactive(figsize, flux_col, xrange, moving_avg_window, ymin, ymax, widget_out2):
# use a global to accept lcf
global _lcf_4_plot_interactive
lcf = _lcf_4_plot_interactive
ax = plt.figure(figsize=figsize).gca()
plot_n_annotate_lcf(
lcf,
ax,
flux_col=flux_col,
xmin=xrange[0],
xmax=xrange[1],
moving_avg_window=moving_avg_window,
)
codes_text = f"ax.set_xlim({xrange[0]}, {xrange[1]})"
ymin_to_use = ymin if ymin >= 0 else None
ymax_to_use = ymax if ymax >= 0 else None
if (ymin_to_use is not None) or (ymax_to_use is not None):
ax.set_ylim(ymin_to_use, ymax_to_use)
codes_text += f"\n\nax.set_ylim({ymin_to_use}, {ymax_to_use})"
widget_out2.clear_output()
with widget_out2:
print(codes_text)
return None
def plot_lcf_interactive(lcf, figsize=(15, 8), flux_col="flux"):
desc_style = {"description_width": "25ch"}
slider_style = {"description_width": "25ch"}
slider_layout = {"width": "100ch"}
t_start = lcf.meta.get("TSTART")
t_stop = lcf.meta.get("TSTOP")
# Add a second output for textual
widget_out2 = widgets.Output()
# pass lcf with a global rather than the slow fixed(lcf) with lkv2
#
# import warnings
# with warnings.catch_warnings():
# # lkv2 workaround: to suppress astropy table warning, stating that the semantics of == will be changed in the future.
# warnings.filterwarnings("ignore", category=FutureWarning)
# fixed_lcf = fixed(lcf)
global _lcf_4_plot_interactive
_lcf_4_plot_interactive = lcf
w = interactive(
_update_plot_lcf_interactive,
figsize=fixed(figsize),
# lcf = fixed_lcf,
flux_col=fixed(flux_col),
xrange=widgets.FloatRangeSlider(
min=t_start,
max=t_stop,
step=0.1,
value=(t_start, t_stop),
description="Time",
continuous_update=False,
readout_format=".1f",
layout=slider_layout,
style=slider_style,
),
moving_avg_window=widgets.Dropdown(
options=[
("None", None),
("10 min", "20min"),
("20 min", "20min"),
("30 min", "30min"),
("1 hour", "1h"),
("2 hours", "2h"),
("4 hours", "4h"),
],
value="30min",
description="Moving average window",
style=desc_style,
),
ymin=widgets.FloatText(value=-1, description="Flux min, -1 for default", style=desc_style),
ymax=widgets.FloatText(value=-1, description="Flux max, -1 for default", style=desc_style),
widget_out2=fixed(widget_out2),
)
w.layout.border = "1px solid lightgray"
w.layout.padding = "1em 0px"
widget_out2.layout.padding = "1em"
w.children = w.children + (widget_out2,)
display(w)
return w
_lcf_4_plot_transit_interactive = None
def _update_plot_transit_interactive(
figsize,
flux_col,
t0,
duration_hr,
period,
step,
surround_time,
moving_avg_window,
t0mark_ymax,
ymin,
ymax,
widget_out2,
):
# a clumsy way to pass lcf, without using fixed(lcf), which is very slow in lkv2
global _cache_plot_n_annotate_lcf
lcf = _lcf_4_plot_transit_interactive
ax = plt.figure(figsize=figsize).gca()
codes_text = "# Snippets to generate the plot"
moving_avg_window_for_codes = "None" if moving_avg_window is None else f"'{moving_avg_window}'"
if t0 < 0:
plot_n_annotate_lcf(lcf, ax, flux_col=flux_col, moving_avg_window=moving_avg_window)
codes_text += f"\nplot_n_annotate_lcf(lcf, ax, moving_avg_window={moving_avg_window_for_codes})"
else:
t0_to_use = t0 + step * period
plot_transit(
lcf,
ax,
t0_to_use,
duration_hr / 24,
surround_time,
flux_col=flux_col,
moving_avg_window=moving_avg_window,
t0mark_ymax=t0mark_ymax,
)
codes_text += f"""
# transit parameters - t0: BTJD {t0}, duration: {duration_hr} hours, period: {period} days
plot_transit(lcf, ax, {t0_to_use}, {duration_hr} / 24, {surround_time}, \
moving_avg_window={moving_avg_window_for_codes}, t0mark_ymax={t0mark_ymax})
# transit_specs for calling plot_transits()
transit_specs = TransitTimeSpecList(
dict(epoch={t0}, duration_hr={duration_hr}, period={period}, label="dip",
sector={lcf.meta.get('SECTOR')}, steps_to_show=[{step}],
),
defaults=dict(surround_time={surround_time})
)
"""
ymin_to_use = ymin if ymin >= 0 else None
ymax_to_use = ymax if ymax >= 0 else None
if (ymin_to_use is not None) or (ymax_to_use is not None):
ax.set_ylim(ymin_to_use, ymax_to_use)
codes_text += f"""
# Zoom in on flux
ax.set_ylim({ymin_to_use}, {ymax_to_use})
"""
widget_out2.clear_output()
with widget_out2:
print(codes_text)
return None
def plot_transit_interactive(lcf, figsize=(15, 8), flux_col="flux"):
desc_style = {"description_width": "25ch"}
# Add a second output for textual
widget_out2 = widgets.Output()
t0 = widgets.FloatText(
value=-1,
step=0.01,
description=r"$t_{epoch}$, -1 for unspecified",
style=desc_style,
)
duration_hr = widgets.FloatText(value=1, step=0.01, description="duration (hours)", style=desc_style)
period = widgets.FloatText(value=999, step=0.01, description="period (days)", style=desc_style)
step = widgets.IntText(value=0, description=r"cycle (0 for transit at $t_{epoch}$)", style=desc_style)
surround_time = widgets.FloatText(value=7, step=0.5, description="padding (days)", style=desc_style)
moving_avg_window = widgets.Dropdown(
options=[
("None", None),
("10 min", "10min"),
("20 min", "20min"),
("30 min", "30min"),
("1 hour", "1h"),
("2 hours", "2h"),
("4 hours", "4h"),
],
value="20min",
description="moving average window",
style=desc_style,
)
ymin = widgets.FloatText(value=-1, step=0.1, description="flux min, -1 for default", style=desc_style)
ymax = widgets.FloatText(value=-1, step=0.1, description="flux max, -1 for default", style=desc_style)
t0mark_ymax = widgets.BoundedFloatText(
value=0.05,
step=0.05,
min=0.0,
max=1.0,
description=r"$t_{epoch}$ mark height",
style=desc_style,
)
VB = widgets.VBox
HB = widgets.HBox
ui = VB(
[
HB([t0, duration_hr, period]),
HB([step, surround_time, moving_avg_window]),
HB([ymin, ymax, t0mark_ymax]),
]
)
# pass lcf via a global, as fixed(lcf) is very slow with lkv2
#
# import warnings
# with warnings.catch_warnings():
# # lkv2 workaround: to suppress astropy table warning, stating that the semantics of == will be changed in the future.
# warnings.filterwarnings("ignore", category=FutureWarning)
# fixed_lcf = fixed(lcf)
global _lcf_4_plot_transit_interactive
_lcf_4_plot_transit_interactive = lcf
w = interactive_output(
_update_plot_transit_interactive,
dict(
figsize=fixed(figsize),
# lcf=fixed_lcf,
flux_col=fixed(flux_col),
t0=t0,
duration_hr=duration_hr,
period=period,
step=step,
surround_time=surround_time,
moving_avg_window=moving_avg_window,
t0mark_ymax=t0mark_ymax,
ymin=ymin,
ymax=ymax,
widget_out2=fixed(widget_out2),
),
)
w.layout.border = "1px solid lightgray"
w.layout.padding = "1em 0px"
widget_out2.layout.padding = "1em"
display(ui, w, widget_out2)
return w
def plot_flux_sap_flux_comparison(lc, sap_col="sap_flux", ax=None, offset=None, **kwargs):
"""Plot flux (typically PDCSAP_FLUX) and sap_flux together,
to spot any anomaly in processed lightcurve."""
lc_sap = lc.copy()
lc_sap["flux"] = lc[sap_col]
if sap_col + "_err" in lc.colnames:
lc_sap["flux_err"] = lc[sap_col + "_err"]
else: # some products, e.g., QLP, do not provide flux_err
# Hit a bug - ValueError: TessLightCurve object is invalid - expected 'time' as the first columns but found 'time'
# lc_sap.remove_column('flux_err')
# zero out the column as a workaround
lc_sap["flux_err"] = np.zeros_like(lc_sap["flux_err"])
if offset is None:
# auto offset: move lc_sap curve so that
# - its median is at about the 10th percentile of the main flux
# - move farther down by a factor of the 40% amplitude of the main flux, so that it is mostly below the main flux
# without much gap, but not overlapping too much either.
offset = (
np.nanpercentile(lc.flux.value, 10)
- np.nanmedian(lc_sap.flux.value)
- (np.nanmedian(lc.flux.value) - np.nanpercentile(lc.flux.value, 10)) * 3
)
lc_sap.label += f" {sap_col} + {offset:.0f}"
ax = LightCurveCollection([lc, lc_sap]).plot(ax=ax, offset=offset, **kwargs)
ax.set_title(f"{lc.label}, sector {lc.sector} - flux vs {sap_col}")
return ax
class TransitTimeSpec(dict):
def __init__(
self,
epoch: float = None,
period: float = None,
duration_hr: float = None,
sector: int = None,
steps_to_show: list = None,
surround_time: float = None,
label: str = None,
defaults: TransitTimeSpec = None,
):
# core parameters
self["epoch"] = epoch
self["period"] = period
self["duration_hr"] = duration_hr
# used for plotting
self["sector"] = sector
self["steps_to_show"] = steps_to_show
self["surround_time"] = surround_time
self["label"] = label
if defaults is None:
defaults = {}
self._defaults = defaults # put it as a custom attribute
def __getitem__(self, key):
res = super().get(key)
if res is None:
res = self._defaults.get(key)
return res
def get(self, key, default=None):
res = self.__getitem__(key)
if res is None:
res = default
return res
class TransitTimeSpecList(list):
def __init__(self, *tt_spec_dict_list, defaults={}):
self._defaults = TransitTimeSpec(**defaults)
for tt_spec_dict in tt_spec_dict_list:
self.append(TransitTimeSpec(**tt_spec_dict, defaults=self._defaults))
def _spec_property_values(self, property_name):
return np.array([tt[property_name] for tt in self])
#
# The following properties return the specific transit parameters
# in an array. Together they can be used to create a mask
# for the transits using ``LightCurve.create_transit_mask()``
#
@property
def epoch(self):
return self._spec_property_values("epoch")
@property
def period(self):
return self._spec_property_values("period")
@property
def duration_hr(self):
return self._spec_property_values("duration_hr")
@property
def duration(self):
return self.duration_hr / 24
@property
def label(self):
return self._spec_property_values("label")
def to_table(self, columns=("label", "epoch", "duration_hr", "period")):
"""Convert the specs to an ``astropy.Table``"""
data = [getattr(self, col) for col in columns]
return Table(data, names=columns)
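# Sketch of how the array-valued properties above can feed
# LightCurve.create_transit_mask() (the numbers are hypothetical):
#
#   tt_specs = TransitTimeSpecList(
#       dict(epoch=2100.1234, duration_hr=3.2, period=5.4321, label="dip"),
#   )
#   mask = lc.create_transit_mask(
#       period=tt_specs.period, transit_time=tt_specs.epoch, duration=tt_specs.duration
#   )
#   lc[~mask].scatter()  # e.g., plot only the out-of-transit points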
def mark_transit_times(
lc, tt_specs, axvline_kwargs_specs=None, skip_no_transit_plot=False, lc_plot_func_name="scatter", ax=None
):
"""Plot the given LC, and mark the transit times based on `tt_specs`."""
tt_list = [lke.get_transit_times_in_lc(lc, a_spec["epoch"], a_spec["period"]) for a_spec in tt_specs]
# skip if no transit found
# (tt_list is a list of lists, so it needs to be flattened for counting)
if skip_no_transit_plot and len(np.array(tt_list, dtype=object).flatten()) < 1:
print(f"{lc._repr_simple_()} is skipped - no matching transits.")
return None, None
# base plot
#
if ax is None:
# ax = plt.figure(figsize=(30, 10)).gca()
ax = plt.figure(figsize=(15, 5)).gca()
ax = getattr(lc, lc_plot_func_name)(ax=ax, color="black", label=f"{lc.label} s.{getattr(lc, 'sector', 'N/A')}")
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.tick_params(axis="x", which="minor", length=4)
ax.yaxis.set_minor_locator(AutoMinorLocator())
ax.tick_params(axis="y", which="minor", length=4)
# pre-process axvline_kwargs
#
if axvline_kwargs_specs is None:
axvline_kwargs_specs = [dict(label="dip", linestyle="--", color="red")]
# use the label in tt_specs if not specified in axvline_kwargs
for (a_spec, an_axvline_kwargs, idx_0_based) in zip(tt_specs, axvline_kwargs_specs, range(len(axvline_kwargs_specs))):
if an_axvline_kwargs.get("label") is None:
an_axvline_kwargs["label"] = a_spec.get("label", f"dip {idx_0_based + 1}")
# Mark transit times on the base plot
#
# a hack: mark the first line for each tt set, then set legend
# so that each tt set will have 1 legend
# if we simply set legend at the end, each dip will have its own legend!
for (transit_times, axvline_kwargs) in zip(tt_list, axvline_kwargs_specs):
if len(transit_times) > 0 and axvline_kwargs is not None:
ax.axvline(transit_times[0], 0, 0.1, **axvline_kwargs)
ax.legend()
for (transit_times, axvline_kwargs) in zip(tt_list, axvline_kwargs_specs):
if axvline_kwargs is not None:
for tt in transit_times:
ax.axvline(tt, 0, 0.1, **axvline_kwargs)
return ax, tt_list
def scatter_centroids(
lcf,
fig=None,
highlight_time_range=None,
time_range=None,
c="blue",
c_highlight="red",
):
"""
Scatter centroids, and highlight the specific time range
"""
if fig is None:
fig = plt.figure(figsize=(12, 12))
lc = _normalize_to_percent_quiet(lcf)
sector = lcf.meta.get("SECTOR")
if time_range is not None:
lc = lc.truncate(time_range[0], time_range[1])
fig.gca().yaxis.set_major_formatter(FormatStrFormatter("%.3f")) # avoid scientific notations
fig.gca().scatter(lc.centroid_col.value, lc.centroid_row.value, c=c, label=f"TIC {lc.targetid}")
if highlight_time_range is not None:
lc_highlight = lc.truncate(highlight_time_range[0], highlight_time_range[1])
if len(lc_highlight) < 1:
print("WARNING: scatter_centroids() no observations in highlight_time_range")
fig.gca().scatter(
lc_highlight.centroid_col.value,
lc_highlight.centroid_row.value,
c=c_highlight,
label="highlights",
)
title = f"TIC {lc.targetid} Centroids, sector {sector}"
if time_range is not None:
title += f"\n{as_4decimal(time_range)}"
if highlight_time_range is not None:
title += f"\nHighlights:{as_4decimal(highlight_time_range)}"
fig.gca().set_title(title)
fig.legend()
return fig
def _update_anim(n, ax, lc, label, num_centroids_to_show, use_relative_time, c):
ax.cla()
# fix the x/y scale to ensure it doesn't change over the animation
c_col, c_row = _to_unitless(lc.centroid_col), _to_unitless(lc.centroid_row)
ax.set_xlim(np.nanmin(c_col), np.nanmax(c_col))
ax.set_ylim(np.nanmin(c_row), np.nanmax(c_row))
# avoid scientific notation for y-axis
# x-axis might need scientific notation so that the labels won't get too cramped with long decimals
ax.yaxis.set_major_formatter(FormatStrFormatter("%.3f"))
if num_centroids_to_show is None:
col = lc.centroid_col[:n]
row = lc.centroid_row[:n]
time_label = f"{as_4decimal(lc.time[n])}"
if use_relative_time:
time_label = time_label + f" ({as_4decimal(lc.time_rel[n])})"
else:
n_start = max(0, n - num_centroids_to_show)
col = lc.centroid_col[n_start:n]
row = lc.centroid_row[n_start:n]
time_label = f"{as_4decimal(lc.time[n_start])} - {as_4decimal(lc.time[n])}"
if use_relative_time:
time_label = time_label + f" ({as_4decimal(lc.time_rel[n_start])} - {as_4decimal(lc.time_rel[n])})"
ax.set_title(f"TIC {lc.targetid} Centroids, {label}\nday: {time_label}")
ax.scatter(col, row, c=c)
def animate_centroids(
lcf,
fig=None,
frames=None,
num_obs_per_frame=240,
interval=250,
use_relative_time=False,
time_range=None,
accumulative=True,
c=None,
display=True,
):
"""
Animate centroids to visualize changes over time.
"""
lc = lcf
label = f"sector {lcf.meta.get('SECTOR')}"
# Zoom to a particular time range if specified
if time_range is not None:
# use pandas to zoom to a particular time_range
df = _normalize_to_percent_quiet(lc).to_pandas(columns=["time", "flux", "centroid_row", "centroid_col"])
df = df[(df.time >= time_range[0]) & (df.time <= time_range[1])]
if len(df) < 1:
raise Exception(f"Zoomed lightcurve has no observation. time_range={time_range}")
lc_z = SimpleNamespace() # zoomed-in lightcurve-like object for the purpose of animation
setattr(lc_z, "time", df.time.values)
setattr(lc_z, "flux", df.flux.values)
setattr(lc_z, "centroid_row", df.centroid_row.values)
setattr(lc_z, "centroid_col", df.centroid_col.values)
setattr(lc_z, "targetid", lc.targetid)
lc = lc_z
if fig is None:
fig = plt.figure(figsize=(12, 12))
if frames is None:
num_obs = len(lc.centroid_row)
num_frames = int(num_obs / num_obs_per_frame) # default 240 is about every 8 hours, given 2-minute intervals
ary_n = np.linspace(1, num_obs, num=num_frames, endpoint=False)
ary_n[0] = np.ceil(ary_n[1] / 2)
ary_n = list(map(lambda n: int(n), ary_n))
else:
ary_n = frames
num_obs_per_frame = frames[-1] - frames[-2] # assume the spacing of input is linear
num_centroids_to_show = num_obs_per_frame
if accumulative:
num_centroids_to_show = None
# print(f'Steps: {ary_n}')
if use_relative_time:
rel_time_added = add_relative_time(lc, lcf)
if not rel_time_added:
use_relative_time = False
anim = animation.FuncAnimation(
fig,
_update_anim,
frames=ary_n,
fargs=(fig.gca(), lc, label, num_centroids_to_show, use_relative_time, c),
interval=interval,
blit=False,
)
if display:
# for inline display in jupyter
try:
from IPython.display import HTML
from IPython.display import display as iDisplay
return iDisplay(HTML(anim.to_jshtml(default_mode="once")))
except ImportError:
print("WARNING: animate_centroids() - inline display not possible Not in IPython environment.")
return anim
def markTimes(ax, times, **kwargs):
"""Helper to mark specifics time as vertical lines on a plot"""
axvline_kwargs = kwargs.copy()
# apply defaults
axvline_kwargs.setdefault("c", "gray")
axvline_kwargs.setdefault("linewidth", 1)
axvline_kwargs.setdefault("linestyle", "--")
for t in times:
ax.axvline(t, **axvline_kwargs)
def fold_and_plot_odd_even(lc, period, epoch_time, figsize=(12, 6), title_extra=""):
lc_folded = lc.fold(period=period, epoch_time=epoch_time, epoch_phase=0)
ax = plt.figure(figsize=figsize).gca()
lc_f_odd = lc_folded[lc_folded.odd_mask]
lc_f_odd.scatter(ax=ax, c="r", label="odd", marker=".", s=4)
lc_f_even = lc_folded[lc_folded.even_mask]
lc_f_even.scatter(ax=ax, c="b", label="even", marker="x", s=4)
pct01_odd = np.nanpercentile(lc_f_odd.flux, 0.1)
pct01_even = np.nanpercentile(lc_f_even.flux, 0.1)
ax.axhline(
pct01_odd * 100,
c="r",
linestyle="--",
label=f"odd 0.1 pctile {pct01_odd:0.4f}",
)
ax.axhline(
pct01_even * 100,
c="b",
linestyle="dotted",
label=f"even 0.1 pctile {pct01_even:0.4f}",
)
ax.legend()
plt.title(f"{lc.label} folded {title_extra}")
print("odd 0.1 percentile: ", pct01_odd)
print("even 0.1 percentile: ", pct01_even)
return ax, lc_folded
def fold_2x_periods_and_plot(lc, period, epoch_time, figsize=(12, 6), title_extra=""):
lc_folded = lc.fold(period=period * 2, epoch_time=epoch_time, epoch_phase=period / 2)
ax = plt.figure(figsize=figsize).gca()
lc_folded.scatter(ax=ax)
ax.legend()
plt.title(f"{lc.label} folded at 2X periods {title_extra}")
return ax, lc_folded
#
# TargetPixelFile helpers
#
def show_tpf_orientation(tpf):
""" "Helper to visualize the TPF's orientation in the sky. Requires IPython.
Long arm is north, short arm with arrow is east.
"""
coord_bottom_left = tpf.wcs.pixel_to_world(0, 0)
coord_upper_right = tpf.wcs.pixel_to_world(tpf.shape[2] - 1, tpf.shape[1] - 1)
coord_upper_left = tpf.wcs.pixel_to_world(0, tpf.shape[2] - 1)
deg_from_north = coord_bottom_left.position_angle(coord_upper_left).to(u.deg).value
display(
HTML(
f"""<div style="position: relative; margin-left: 16px;height: 64px;">
<div title="Long arm: North; Short arm with arrow: East"
style="float: left; max-width: 64px;font-size: 32px;margin: 16px;\
transform: rotate({-deg_from_north}deg);transform-origin: left; cursor:pointer;">↳</div>
<div style="font-family: monospace;">Upper right offset from bottom left - <br>
RA: {(coord_upper_right.ra - coord_bottom_left.ra).to(u.arcmin):0.6},
Dec: {(coord_upper_right.dec - coord_bottom_left.dec).to(u.arcmin):0.6}
</div>
</div>"""
)
)
def interact_sky(tpf, notebook_url="localhost:8888", aperture_mask="empty", magnitude_limit=18):
"""tpf.interact_sky wrapper to handle different lightkurve versions."""
if "aperture_mask" in inspect.getfullargspec(tpf.interact_sky).args:
# case using a pre-release lightkurve that supports aperture_mask
return tpf.interact_sky(notebook_url=notebook_url, aperture_mask=aperture_mask, magnitude_limit=magnitude_limit)
else:
# case using a released lightkurve that does not yet support aperture_mask
return tpf.interact_sky(notebook_url=notebook_url, magnitude_limit=magnitude_limit)
def show_nearby_tic_summary_form():
"""Display a form that create a 1-line summary of a nearby TIC from the first three rows of the selected TIC table"""
display(
HTML(
r"""
First 3 rows of the TIC info table:<br>
<textarea id="inStarInfo" style="width: 40ch; height: 6em;" placeholder="TIC \t12345678\nTESS Mag \t10.123\nSeparation ...">
</textarea><br>
<button id="ctlStarInfo">Create Nearby TIC summary</button>
<input id="outStarInfo" style="width: 40ch;" value="" readonly>
<script>
function convertToMultiLinePlaceholder(elem) { // create multiline placeholder
elem.placeholder = elem.placeholder.replace(/\\n/g, '\n');
elem.placeholder = elem.placeholder.replace(/\\t/g, '\t');
}
convertToMultiLinePlaceholder(document.querySelector('#inStarInfo'));
function createNearbyTicSummary(text) {
const toCells = (line) => {
return line.split("\t");
}
const lines = text.split(/[\r\n]+/);
const ticId = toCells(lines[0])[1].replace(/^\s*(\d+).*$/, '$1');
const tessMag = toCells(lines[1])[1];
const separation = toCells(lines[2])[1];
return `TIC ${ticId} (TESS magnitude ${tessMag}, ${separation} arcsec away)`;
}
document.querySelector('#ctlStarInfo').onclick = (evt) => {
const summary = createNearbyTicSummary(document.querySelector('#inStarInfo').value);
document.querySelector('#outStarInfo').value = summary;
};
</script>
"""
)
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ade:
# Asynchronous Differential Evolution.
#
# Copyright (C) 2018-19 by <NAME>,
# http://edsuom.com/ade
#
# See edsuom.com for API documentation as well as information about
# Ed's background and other projects, software and otherwise.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
A L{History} class for maintaining a history of L{Individual}
objects.
"""
import random
from cStringIO import StringIO
import numpy as np
from numpy.polynomial import polynomial as poly
from twisted.internet import defer, task, reactor
from asynqueue import ProcessQueue
from yampex.plot import Plotter
from util import *
def seq2str(X, dtype=None):
"""
Converts the supplied sequence I{X} to a string, returned. If the
sequence is not already a Numpy array, supply an efficient
I{dtype} for the array version that will be created for it.
"""
if dtype: X = np.array(X, dtype=dtype)
fh = StringIO()
np.save(fh, X)
X = fh.getvalue()
fh.close()
return X
def str2array(state, name):
"""
Converts the string with key I{name} in I{state} into a Numpy
array, which gets returned.
If no such string is present, an empty array is constructed and
returned.
"""
text = state.get(name, None)
if text is None:
return np.array([])
fh = StringIO(text)
X = np.load(fh)
fh.close()
return X
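# Round-trip sketch: seq2str() serializes a sequence into a string via np.save,
# and str2array() recovers it from a state dict (the key names here are arbitrary):
#
#   state = {'X': seq2str([1.0, 2.0, 3.0], dtype=float)}
#   X = str2array(state, 'X')   # -> array([1., 2., 3.])
#   Y = str2array(state, 'Y')   # missing key -> empty array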
class Analysis(object):
"""
I let you analyze the parameter values of a L{Population}.
Construct an instance of me with a sequence of parameter I{names},
a 2-D Numpy array I{X} of values (in columns) for the SSEs (first
column) and then each of those parameters (remaining columns), and
a sequence I{K} of row indices. Or, to analyze values of all
parameters, supply an empty list instead.
Each index in I{K} points to a row of I{X} with one SSE and the
parameter values for that SSE, with the indices of I{K} sorted in
ascending order of the SSE they point to.
@ivar names: A sequence of the names of all the parameters.
"""
fmms = (
(0.05, 'o', 3.0),
(0.10, 'o', 2.0),
(0.20, '.', 1.5),
(0.50, '.', 1.0),
(0.70, '.', 0.5),
(1.01, '.', 0.0),
)
fileSpec = None
defaultWidth = 12.0 # 1200 pixels default, for PNG plots
def __init__(self, names, X, K, Kp=set(), Kn=set(), baseFilePath=None):
self.names = names
self.X = X
self.K = K
self.Kp = Kp
self.Kn = Kn
if baseFilePath: self.filePath(baseFilePath)
def corr(self, k1, k2):
"""
Returns the correlation coefficient between parameter values of
column I{k1} and column I{k2} in my I{X} array.
B{TODO}: Make SSE-weighted (lower=more weight).
"""
kk = [k1, k2]
return np.corrcoef(np.transpose(self.X[self.K][:,kk]))[0,1]
def correlator(self):
"""
Iterates over combinations of parameters, from most correlated to
least. Each iteration yields column indices of the parameter
pair and their correlation coefficient.
Only combinations where the first column index is lower than
the second are yielded. This avoids duplication by limiting
the iteration to the upper right triangle in a 2-D combination
matrix where the first index is for rows and the second is for
columns.
"""
kkR = []
Nc = self.X.shape[1]
for k1 in range(1, Nc):
for k2 in range(1, Nc):
if k2 > k1:
kkR.append([k1, k2, self.corr(k1, k2)])
kkR.sort(key=lambda row: abs(row[2]), reverse=True)
for k1, k2, R in kkR:
yield k1, k2, R
def Kf12(self, f1, f2):
"""
Returns a 1-D Numpy array of row indices to my I{X} array whose
SSEs are from fractional value I{f1} to I{f2} between minimum
and maximum SSE.
"""
def fSSE(f):
mmSSE = SSE[-1] - SSE[0]
return SSE[0] + f*mmSSE
# 1-D array of SSEs, sorted
SSE = self.X[self.K,0]
I = np.logical_and(SSE >= fSSE(f1), SSE < fSSE(f2))
return np.array(self.K)[np.flatnonzero(I)]
def Kp12(self, p1, p2):
"""
Returns a 1-D Numpy array of row indices to my I{X} array whose
SSEs are from fractional portion I{p1} to I{p2} between
minimum and maximum SSE.
The fractional portion is how far along the indices you are,
not how far along the values you are. If the SSEs increased
linearly, they would be the same.
"""
N = len(self.K)
return self.K[slice(int(np.floor(p1*N)), int(np.floor(p2*N))+1)]
def args2names(self, args):
"""
Converts args to a list of parameter names:
- With no args, returns my full list of parameter I{names}.
- With one or more strings, returns a list of the matching
names.
- With integer arguments, creates a slice and returns that
slice of the entries of my parameter I{names} list.
"""
if not args:
return sorted(self.names)
for arg in args:
if not isinstance(arg, int): return args
return sorted(self.names)[slice(*args)]
def name2k(self, name):
"""
Returns the column index in my I{X} array for the values of
the specified parameter I{name}. The reverse of L{k2name}.
"""
return self.names.index(name) + 1
def k2name(self, k):
"""
Returns the parameter I{name} for the specified column index
I{k} in my I{X} array. The reverse of L{name2k}.
"""
return self.names[k-1]
def value_vs_SSE(self, names, **kw):
"""
Returns a 1-D Numpy array of the SSEs of my individuals and
matching 1-D Numpy arrays for each of the parameter
values in I{names}.
@keyword inPop: Set C{True} to only include individuals in the
population.
@keyword notInPop: Set C{True} to only include individuals who
were once but no longer are in the population.
@keyword neverInPop: Set C{True} to only include individuals
who were never in the population.
@keyword maxRatio: Set this to specify a maximum ratio between
an included individual's SSE and the best individual's
SSE.
"""
def subset(kCol):
return self.X[K,kCol] if len(K) else K
names = self.args2names(names)
maxRatio = kw.get('maxRatio', 1000)
SSE_best = self.X[self.K[0],0]
if kw.get('inPop', False):
K = [k for k in self.K if k in self.Kp]
elif kw.get('notInPop', False):
K = [k for k in self.K if k not in self.Kp and k not in self.Kn]
elif kw.get('neverInPop', False):
K = [k for k in self.K if k in self.Kn]
else: K = self.K
KK = np.flatnonzero(self.X[K,0]/SSE_best <= maxRatio)
K = np.array(K)[KK]
result = [subset(0)]
for name in names:
result.append(subset(self.name2k(name)))
return result
def lineFit(self, k1, k2, K):
"""
Returns the slope and y-intercept of a line that has a best fit to
the SSE-weighted data in column vectors of my I{X} array at
I{k1} (x) and I{k2} (y), with elements in I{K} selected.
For the best (lowest SSE) pair, the weight is 1.0. If the
worst SSE is many times larger, the worst pair's weight is
approximately M{2*SSE_best/SSE_worst}.
"""
SSE = self.X[K,0]
SSE_min = SSE.min()
W = 2*SSE_min / (SSE + SSE_min)
b, m = poly.polyfit(self.X[K,k1], self.X[K,k2], 1, w=W)
return m, b
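# Worked example (illustrative): with SSEs of [1.0, 10.0] the weights are
# W = 2*1.0/(SSE + 1.0) = [1.0, 0.1818...], so the best pair dominates the
# fit while the worst pair still contributes a little.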
def pick_N(self, kList):
"""
Returns a sensible number of subplots to show in the next figure,
given a supplied list I{kList} of column indices of remaining
parameters to show.
Favors 2x2 and 3x3 plots. Single-subplot figures have too much
empty space and are visually confusing.
"""
N_left = len(kList)
if N_left in (8, 10):
return 4
if N_left > 8:
return 9
return N_left
def _widthHeight(self, dims):
"""
Given a string I{dims} with W or WxH as an integer number of
pixels (not inches) for the width or width x height, returns
width and height in inches.
"""
def inches(x):
return float(x) / Plotter.DPI
height = None
if dims:
if 'x' in dims:
width, height = [inches(x) for x in dims.split('x')]
else: width = inches(dims)
else: width = self.defaultWidth
return width, height
def filePath(self, baseFilePath=None, dims=None):
"""
Obtains a unique filePath for a PNG file, or, with I{baseFilePath}
set, sets my I{fileSpec} to a 6-list so that plotting is done
to a PNG file at I{filePath}.
The 6-list is: C{[directory, basename, extension, count,
width, height]}
A numerical suffix gets appended to the base name (but not the
extension) for all files after the first one generated,
ensuring that each generated figure/PNG file is
unique. Without the keyword I{baseFilePath} set, the unique
file path is returned from a previous setting, along with the
desired width and height in inches.
@keyword baseFilePath: Specify this to set my
I{fileSpec}. C{None} is returned.
@keyword dims: Set to a string with W or WxH as an integer
number of pixels (not inches) for the width or width x
height of the PNG file(s).
"""
width, height = self._widthHeight(dims)
if baseFilePath is None:
if self.fileSpec is None:
return
directory, baseName, ext, count, width, height = self.fileSpec
count += 1
self.fileSpec[3] = count
fileName = sub(
"{}-{:d}.{}", baseName, count, ext) if count > 1 else sub(
"{}.{}", baseName, ext)
return os.path.join(directory, fileName), width, height
directory, fileName = os.path.split(baseFilePath)
baseName, ext = os.path.splitext(fileName)
self.fileSpec = [directory, baseName, ext.lstrip('.'), 0, width, height]
def makePlotter(self, *args, **kw):
"""
Returns a L{Plotter} object, constructed with the supplied args
and/or keywords, with some global options set.
If I have a I{fileSpec} set, I will write the plot to a PNG
file instead of showing a plot window. There will be a
uniquifying numerical suffix appended to the file's base name.
@keyword dims: Set to a string with W or WxH as an integer
number of pixels (not inches) for the width or width x
height of the PNG file(s). Or C{None} for default (screen
size) dimensions.
"""
stuff = self.filePath()
width = kw.pop('width', None)
height = kw.pop('height', None)
dims = kw.pop('dims', None)
if stuff:
filePath, width, height = stuff
kw['filePath'] = filePath
N, Nc, Nr = Plotter.parseArgs(*args, **kw)[2:]
if height is None:
height = width * min([0.7, Nr/Nc])
elif dims:
width, height = self._widthHeight(dims)
if width: kw['width'] = width
if height: kw['height'] = height
pt = Plotter(*args, **kw)
pt.use_grid()
return pt
def plot(self, names, **kw):
"""
Plots values versus SSE for each parameter in I{names}. Accepts
keywords used for L{value_vs_SSE} (only I{inPop} is honored in
this method), plus I{noShow}, I{semilog}, I{dims}, and I{sp}.
If there are two integer values in I{names}, they are used to
select a range of my I{names} sequence. (Seldom used.)
@keyword noShow: Set C{True} to return the C{Plotter} object
from the last Matplotlib C{Figure} plotted instead of
calling C{showAll} on it, thus allowing you to do so at
your convenience.
@keyword semilog: Set C{True} to plot parameter values on a
logarithmic scale.
@keyword sp: Set to an instance of C{yampex.Plotter} in
subplot context and I will render each subplot using it,
with automatic subplot advancement for each
parameter. It's up to you to make sure the C{Plotter}
object got set up with subplots in the desired
arrangement, and to call it in context before calling this
method. See the docstring for C{yampex.Plotter.__call__}
for details.
"""
def setup(pt_sp):
pt_sp.add_line("")
pt_sp.use_minorTicks('y')
pt_sp.add_marker('o', 2.0); pt_sp.add_color('red')
pt_sp.set_xlabel("SSE")
if semilog:
pt_sp.plot_semilogy()
if not inPop:
pt_sp.add_marker('o', 2.0); pt_sp.add_color('blue')
pt_sp.add_marker('.', 1.5); pt_sp.add_color('#303030')
def doSubplots(sp, kList):
"""
Using Plotter-in-subplot-context object I{sp}, plots the subplots
for the parameter indices in I{kList}.
"""
for k in kList:
name = names[k]
sp.set_title(name)
ax = sp(XYp[0], XYp[k+1])
if not inPop:
ax.plot(XYn[0], XYn[k+1])
ax.plot(XYr[0], XYr[k+1])
noShow = kw.pop('noShow', False)
semilog = kw.pop('semilog', False)
dims = kw.pop('dims', None)
sp = kw.pop('sp', None)
names = self.args2names(names)
inPop = kw.get('inPop', False)
kw['inPop'] = True
XYp = self.value_vs_SSE(names, **kw)
if not inPop:
kw['inPop'] = False
kw['notInPop'] = True
XYn = self.value_vs_SSE(names, **kw)
kw['notInPop'] = False
kw['neverInPop'] = True
XYr = self.value_vs_SSE(names, **kw)
# kList is a range of indices to the XYp, XYn, and XYr lists
# of 1-D Numpy arrays
N = len(XYp) - 1
kList = range(N)
if sp is not None:
setup(sp)
doSubplots(sp, kList)
return
while kList:
N = self.pick_N(kList)
kkList = kList[:N]; kList = kList[N:]
Nc = 1 if N == 1 else 3 if N > 6 else 2
pt = self.makePlotter(N, Nc=Nc, dims=dims)
setup(pt)
with pt as sp:
doSubplots(sp, kkList)
if noShow: return pt
pt.showAll()
def prettyLine(self, m, b):
return sub("Y={:+.6g}*X {} {:.6g}", m, "-" if b < 0 else "+", abs(b))
def plotXY(self, p1, p2, sp=None, useFraction=False):
"""
Plots the values of parameter I{p1} versus the values of parameter
I{p2}, with a rough indication of the SSEs involved.
If I{p1} or I{p2} is an integer, the parameter values for that
column of my I{X} array are used instead.
Also plots a best-fit line determined by I{lineFit} with the
pairs having the best 50% of the SSEs.
Returns a 4-tuple with the x- and y-axis labels and the slope
and y-intercept of the best-fit line.
"""
def plot(sp):
xName = self.k2name(k1)
yName = self.k2name(k2)
sp.set_xlabel(xName)
sp.set_ylabel(yName)
K = self.Kp12(0, 0.5)
m, b = self.lineFit(k1, k2, K)
sp.add_annotation(0, self.prettyLine(m, b))
X = self.X[K,k1]
X = np.array([X.min(), X.max()])
ax = sp(X, m*X+b, '-r')
f1 = 0.0
kw = {'color': "blue", 'linestyle': ""}
for f2, mk, ms in self.fmms:
if ms:
K = self.Kf12(f1, f2) if useFraction else self.Kp12(f1, f2)
kw['marker'] = mk
kw['markersize'] = ms
X, Y = [self.X[K,x] for x in (k1, k2)]
ax.plot(X, Y, **kw)
f1 = f2
return xName, yName, m, b
k1 = p1 if isinstance(p1, int) else self.name2k(p1)
k2 = p2 if isinstance(p2, int) else self.name2k(p2)
if sp is None:
pt = self.makePlotter(1)
with pt as sp:
result = plot(sp)
pt.show()
return result
return plot(sp)
def plotCorrelated(
self, name=None, N=4, noShow=False, verbose=False, dims=None):
"""
Plots values of I{N} pairs of parameters with the highest
correlation. The higher the SSE for a given combination of
values, the less prominent the point will be in the plot.
You can specify one parameter that must be included. Then the
correlations checked are with everything else.
Seeing a very high correlation in one of these plots is an
indication that you should somehow consolidate the correlated
parameters or at least make them explicitly dependent on each
other at the outset, so DE doesn't waste effort searching all
the deserted fitness landscape outside the narrow ellipse of
their correlated values.
@keyword noShow: Set C{True} to return the C{Plotter} object
from the last Matplotlib C{Figure} plotted instead of
calling C{showAll} on it, thus allowing you to do so at
your convenience.
"""
Np = self.X.shape[1] - 1
Ncombos = Np*(Np-1)//2
if Ncombos == 0: return
if Ncombos < N: N = Ncombos
pt = self.makePlotter(N, dims=dims)
with pt as sp:
count = 0
for stuff in self.correlator():
k1, k2, R = stuff
if name and name != self.k2name(k1): continue
corr = sub("R={:+.3f}", R)
sp.add_textBox("SE" if R > 0 else "NE", corr)
xName, yName, m, b = self.plotXY(k1, k2, sp)
if verbose:
firstPart = sub("{}:{} ({})", xName, yName, corr)
print(sub(
"{:>30s} {}", firstPart, self.prettyLine(m, b)))
count += 1
if count == N: break
if noShow: return pt
pt.show()
class ClosestPairFinder(object):
"""
I determine which two rows of a 2-D Numpy array I{X} that I
maintain, having I{Nr} rows and I{Nc} columns, are most similar.
The array's first column contains the SSEs of individuals and the
remaining columns contain the parameter values that resulted in
each SSE. The array values are normalized such that the average
of each of the columns is 1.0.
@ivar Nc: The number of columns, SSE + parameter values.
@ivar X: My Numpy 2-D array having up to I{Nr} active rows and
exactly I{Nc} columns of SSE+values combinations.
@ivar S: A Numpy 1-D array having a scaling factor for the
sum-of-squared-differences calculated by L{__call__}. The
scaling factor is the reciprocal of the variance of all active
rows in I{X}, or C{None} if the variance needs to be
(re)computed.
@ivar K: A set of indices to the active rows in I{X}.
@cvar Np_max: The maximum number of row pairs to examine for
differences in L{__call__}.
@cvar Kn_penalty: The multiplicative penalty to impose on the
computed difference to favor pairs where at least one member
has been a population member.
"""
Np_max = 10000
Kn_penalty = 2.0
# Property placeholder
_q = None
def __init__(self, Nr, Nc):
"""
C{ClosestPairFinder(Nr, Nc)}
"""
self.Nr = Nr
self.Nc = Nc
self.X = np.empty((Nr, Nc))
self.clear()
@property
def q(self):
"""
Property: An instance of L{ProcessQueue} with a single worker
dedicated to dealing with my I{history} object.
Whenever a new queue instance is constructed, a system event
trigger is added to shut it down before the reactor shuts
down.
"""
def shutdown():
return q.shutdown().addCallback(
lambda _: setattr(self, '_q', None))
if self._q is None:
q = ProcessQueue(1, returnFailure=True)
triggerID = reactor.addSystemEventTrigger(
'before', 'shutdown', shutdown)
self._q = [q, triggerID]
return self._q[0]
def clear(self):
"""
Sets my I{K} and I{Kn} to empty sets and I{S} to C{None},
returning me to a virginal state.
"""
self.K = set()
self.Kn = set()
self.S = None
def setRow(self, k, Z, neverInPop=False):
"""
Call with the row index to my I{X} array and a 1-D array I{Z} with
the SSE+values that are to be stored in that row.
Nulls out my I{S} scaling array to force re-computation of the
column-wise variances when L{__call__} runs next, because the
new row entry will change them.
Never call this with an C{inf} or C{NaN} anywhere in I{Z}. An
exception will be raised if you try.
@keyword neverInPop: Set C{True} to indicate that this
SSE+value was never in the population and thus should be
more likely to be bumped in favor of a newcomer during size
limiting.
"""
if not np.all(np.isfinite(Z)):
raise ValueError("Non-finite value in Z")
self.X[k,:] = Z
self.K.add(k)
if neverInPop: self.Kn.add(k)
self.S = None
def clearRow(self, k):
"""
Call with the row index to my I{X} array to have me disregard the
SSE+values stored in that row. If the index is in my I{Kn} set,
discards it from there.
Nulls out my I{S} scaling array to force re-computation of the
column-wise variances when L{__call__} runs next, because
disregarding the row entry will change them.
"""
if k in self.K:
self.K.remove(k)
self.Kn.discard(k)
self.S = None
def pairs_sampled(self, N):
"""
Returns a 2-D Numpy array of I{N} pairs of separate row indices to
my I{X} array, randomly sampled from my set I{K} with
replacement.
The second value in each row of the returned array must be
greater than the first value. (There may be duplicate rows,
however.) Sampling of I{K} continues until there are enough
suitable rows.
"""
Nr = len(self.K)
mult = 2.1*Nr/(Nr-1)
Ns = int(mult*N)
K = np.random.choice(list(self.K), (Ns, 2))
K = K[np.flatnonzero(K[:,1] > K[:,0]),:][:N]
Ns = K.shape[0]
if Ns < N:
K = np.row_stack([K, self.pairs_sampled(N-Ns)])
return K
def pairs_all(self):
"""
Returns a 2-D Numpy array of all pairs of separate row indices to
my I{X} array where the second value in each pair is greater
than the first value.
The returned array will have M{N*(N-1)/2} rows and two
columns, where I{N} is the length of my I{K} set of row indices.
"""
K1, K2 = np.meshgrid(list(self.K), list(self.K), indexing='ij')
K12 = np.column_stack([K1.flatten(), K2.flatten()])
return K12[np.flatnonzero(K12[:,1] > K12[:,0])]
def calcerator(self, K, Nr, Np):
"""
Iterates over computationally intensive chunks of processing.
B{TODO}: Figure out how to run in a separate process
(currently stuck on defer.cancel not having a qualname and
thus not being picklable), or put code into a traditional
non-Twisted form and use a ThreadQueue.
"""
if K is None:
if Nr*(Nr-1)/2 < Np:
K1 = self.pairs_all()
else: K1 = self.pairs_sampled(Np)
yield
else: K1 = K
if self.S is None:
XK = self.X[list(self.K),:]
self.S = 1.0 / (np.var(XK, axis=0) + 1E-20)
yield
# Calculate difference
X = self.X[K1[:,0]]
yield
X -= self.X[K1[:,1]]
yield
D = np.square(X)
yield
D *= self.S
yield
D = np.sum(D, axis=1)
yield
# Divide difference by mean SSE to favor lower-SSE history
SSEs = [self.X[K2,0] for K2 in [K1[:,k] for k in (0, 1)]]
D /= np.mean(np.column_stack(SSEs), axis=1)
yield
# Divide difference by a computed amount when the first
# item was never in the population, to keep a substantial
# fraction of the non-population history reserved for
# those who once were in the population
penalize = [1 if k1 in self.Kn else 0 for k1, k2 in K1]
# The penalty increases dramatically if the history comes
# to have more never-population records than those that
# have been in the population
N_neverpop = len(self.Kn)
if N_neverpop:
Kn_penalty = 1 + np.exp(12*(N_neverpop/len(D) - 0.4))
D /= np.choose(penalize, [1, Kn_penalty])
yield
if K is None:
kr = K1[np.argmin(D),0]
self.result(kr)
else: self.result(D)
def done(self, null):
return self.result()
def calculate(self, K, Nr, Np):
self.result = Bag()
d = task.cooperate(self.calcerator(K, Nr, Np)).whenDone()
d.addCallback(self.done)
return d
def __call__(self, Np=None, K=None):
"""
Returns a C{Deferred} that fires with the row index to my I{X}
array of the SSE+values combination that is most expendable
(closest to another one, and not currently in the population).
If I have just a single SSE+value combination, the Deferred
fires with that combination's row index in I{X}. If there are
no legit combinations, it fires with C{None}.
If the maximum number of pairs I{Np} to examine (default
I{Np_max}) is greater than M{N*(N-1)/2}, where I{N} is the
length of my I{K} set of row indices, L{pairs_all} is called
to examine all suitable pairs.
Otherwise, L{pairs_sampled} is called instead and examination
is limited to a random sample of I{Np} suitable pairs. With
the default I{Np_max} of 10000, this occurs at C{N>141}. With
I{Np_max} of 1000, it occurs with C{N>45}. Since the I{N_max}
of L{History} has a default of 1500, L{pairs_sampled} is
what's going to be used in all practical situations.
The similarity is determined from the sum of squared
differences between two rows, divided by the column-wise
variance of all (active) rows.
@keyword Np: Set to the maximum number of pairs to
examine. Default is I{Np_max}.
@keyword K: For unit testing only: Supply a 2-D Numpy array of
pairs of row indices, and the C{Deferred} will fire with
just the sum-of-squares difference between each pair.
"""
Nr = len(self.K)
if Nr == 1:
return defer.succeed(list(self.K)[0])
if Np is None: Np = self.Np_max
#return self.q.call(self.calculate, K, Nr, Np)
return self.calculate(K, Nr, Np)
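# Illustrative usage sketch: with a single active row the Deferred fires
# immediately with that row's index; otherwise it fires with the index of
# the most expendable (most redundant) row.
#
#   cpf = ClosestPairFinder(1500, 3)
#   cpf.setRow(0, np.array([1.0, 2.0, 3.0]))
#   d = cpf()   # fires with 0, the only active row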
class History(object):
"""
I maintain a roster of the parameter values and SSEs of
I{Individual} objects that a L{Population} has had and possibly
replaced.
@keyword N_max: The most records I can have in my roster. When the
roster is full, adding a non-duplicative I{Individual} will
bump the highest-SSE one currently in the roster to make
room. The default of 1500 seems like a sensible compromise
between reasonably compact C{.dat} file size and informative
plots.
@ivar names: A sequence of my individuals' parameter names,
supplied as the sole constructor argument.
@ivar X: A 2-D Numpy array of SSEs (first column) and parameter
values (remaining columns), one individual per row.
@ivar K: A list of row indices to I{X}, kept in ascending order of
the SSEs recorded in those rows.
@ivar Kp: A set of the values (not indices) of I{K} that are for
individuals currently in the population.
@ivar Kn: A set of the values (not indices) of I{K} that are for
individuals who never were in the population.
@ivar kr: A dict containing row indices, keyed by the hashes of
I{Individual} instances.
"""
N_max = 1500
def __init__(self, names, N_max=None):
"""
C{History(names, N_max=None)}
"""
self.names = names
if N_max: self.N_max = N_max
self.N_total = 0
self.X = np.zeros((self.N_max, len(names)+1), dtype='f4')
self.K = []; self.Kp = set(); self.Kn = set()
self.kr = {}
self._initialize()
def __getstate__(self):
"""
For storage-efficient pickling.
"""
return {
'names': self.names,
'N_max': self.N_max,
'N_total': self.N_total,
'X': seq2str(self.X),
'K': seq2str(self.K, 'u2'),
'Kp': seq2str(list(self.Kp), 'u2'),
'Kn': seq2str(list(self.Kn), 'u2'),
'kr': self.kr,
}
def __setstate__(self, state):
"""
For unpickling.
"""
self.names = state['names']
self.N_max = state['N_max']
self.N_total = state['N_total']
self.X = str2array(state, 'X')
self.K = list(str2array(state, 'K'))
self.Kp = set(str2array(state, 'Kp'))
self.Kn = set(str2array(state, 'Kn'))
self.kr = state['kr']
self._initialize()
def _initialize(self):
self.a = Analysis(self.names, self.X, self.K, self.Kp, self.Kn)
self.cpf = ClosestPairFinder(self.N_max, len(self.names)+1)
for kr in self.K:
if kr in self.Kp: continue
self.cpf.setRow(kr, self.X[kr,:], neverInPop=(kr in self.Kn))
self.dLock = defer.DeferredLock()
def shutdown(self):
return self.dLock.acquire().addCallback(lambda _: self.dLock.release())
def __len__(self):
"""
My length is the number of records in my roster.
B{Note}: Immediate result, not locked! Mostly for unit testing.
"""
return len(self.K)
def __getitem__(self, k):
"""
Access the SSE and parameter values corresponding to index I{k} of
my I{K} list.
B{Note}: Immediate result, not locked! Mostly for unit testing.
"""
kr = self.K[k]
return self.X[kr,:]
def __iter__(self):
"""
I iterate over 1-D Numpy arrays of parameter values in ascending
order of the SSEs they resulted in.
B{Note}: Immediate result, not locked! Mostly for unit testing.
"""
for kr in self.K:
yield self.X[kr,1:]
def clear(self):
"""
Call to have me return to a virginal state with no SSE+values
combinations recorded or considered for removal, an empty
population, and an I{N_total} of zero.
Returns a C{Deferred} that fires when the lock has been
acquired and everything is cleared.
"""
def gotLock():
del self.K[:]
self.Kp.clear()
self.Kn.clear()
self.cpf.clear()
self.kr.clear()
self.N_total = 0
return self.dLock.run(gotLock)
def value_vs_SSE(self, *args, **kw):
"""
Obtains a 1-D Numpy array of the SSEs of my individuals and
matching 1-D Numpy arrays for each of the parameter
values in I{names}.
Waits to acquire the lock and then calls
L{Analysis.value_vs_SSE} on my instance I{a}, returning a
C{Deferred} that fires with the eventual result.
"""
def gotLock():
return self.a.value_vs_SSE(*args, **kw)
return self.dLock.run(gotLock)
def kkr(self, SSE, N):
"""
Returns (1) the index I{k} of my I{K} list where the row index of
the new record should appear in my I{X} array, and (2)
that row index I{kr}.
First, index I{k} is obtained, by seeing where the I{K}
list points to a record with an SSE closest but above the
new one. Then each row index in the I{K} list is examined
to see if the previous row of my I{X} array is
unallocated. If so, that is the row index for the new
record. Otherwise, if the next row of my I{X} array is
unallocated, that is used instead. If both adjacent rows
of I{X} are already allocated, the next row index in the
I{K} list is examined.
If there are no row indices in I{K} that point to a row of
I{X} with an unallocated adjacent row, the row index is
determined to be the current length of I{K}.
B{Note}: With the original for-loop Python code, the search for
an unallocated row was very CPU intensive once a big history
had accumulated::
# Pick a row index for the new record
for kr in self.K:
if kr > 0 and kr-1 not in self.K:
return k, kr-1
if kr < N-1 and kr+1 not in self.K:
return k, kr+1
return k, N
The reason is that the list was being searched for an item
with every iteration, twice!
The optimized version does the same thing much more
efficiently, by creating a local (array) copy of I{K} and
sorting it in place. Then only adjacent elements need to be
inspected with each iteration. (It may be just as fast with a
local sorted list instead of a Numpy array.)
"""
K = np.array(self.K)
# Find the index in K of the row index for the closest
# recorded SSE above i.SSE
k = np.searchsorted(self.X[K,0], SSE)
#--- Pick a row index for the new record ------------------------------
# Sort local array version of K in place
K.sort()
for kk, kr in enumerate(K[1:-1]):
if kr == 0: continue
if kr == N-1: break
if K[kk] != kr-1:
return k, kr-1
if K[kk+2] != kr+1:
return k, kr+1
if K[0] > 0:
return k, K[0]-1
return k, min([N, K[-1]+1])
@defer.inlineCallbacks
def add(self, i, neverInPop=False):
"""
Adds the SSE and parameter values of the supplied individual I{i}
to my roster, unless it has an SSE of C{inf}, in which case it
is ignored.
If the roster is already full, bumps the record deemed most
expendable before adding a record for I{i}. That determination
is made by a call to my L{ClosestPairFinder} instance I{cpf}.
Returns a C{Deferred} that fires with the row index of the new
record when it has been written, or C{None} if no record was
written.
@keyword neverInPop: Set C{True} to have the individual added
without ever having been part of the population.
"""
def writeRecord(k, kr):
"""
Writes a 1-D Numpy array with SSE+values to row I{kr} of my I{X}
array, and inserts the row index I{kr} into my I{K} list
at position I{k}.
"""
SV = np.array([i.SSE] + list(i.values))
self.X[kr,:] = SV
self.K.insert(k, kr)
self.N_total += 1 # Add to the lifetime total count
if neverInPop:
# This row has been added without ever being a
# population member, so cpf considers it along with
# the other non-population entries
self.cpf.setRow(kr, self.X[kr,:], neverInPop=True)
self.Kn.add(kr)
else:
self.Kp.add(kr)
# This row is starting out as a population member, so
# have cpf initially disregard it
self.cpf.clearRow(kr)
# Add to the individual-row map so it can be removed
# from the population later
self.kr[hash(i)] = kr
SSE = i.SSE
if np.isfinite(SSE):
yield self.dLock.acquire()
N = len(self.K)
if N == 0:
# First addition
k, kr = 0, 0
elif N < self.N_max:
# Roster not yet full, no need to search for somebody
# to bump first
k, kr = self.kkr(SSE, N)
else:
# Roster is full, we will need to bump somebody (those
# in the current population are protected and exempt)
# before adding
kr = yield self.cpf()
if kr is not None: self.purge(kr)
k, kr = self.kkr(SSE, N)
writeRecord(k, kr)
self.dLock.release()
else: kr = None
defer.returnValue(kr)
def purge(self, kr):
"""
Purges my history of the record at row index I{kr}.
Removes the row index from my I{K} list and has my I{cpf}
instance of L{ClosestPairFinder} disregard the row, because
it's now gone.
B{Note}: Does not remove the index from the values of my I{kr}
dict, as that is a time-consuming process and the caller can
likely just clear the whole thing anyhow.
"""
if kr not in self.K:
raise IndexError(sub("No row index {} in my K list!", kr))
self.K.remove(kr)
self.cpf.clearRow(kr)
self.Kp.discard(kr) # May already have been discarded with .pop
self.Kn.discard(kr)
def notInPop(self, x):
"""
Call this with an integer row index or an I{Individual} instance
that was added via L{add} to remove its row of my I{X} array
from being considered part of the current population.
"""
def gotLock():
if isinstance(x, int):
kr = x
else:
# Must be an Individual
key = hash(x)
if key not in self.kr: return
kr = self.kr.pop(key)
self.Kp.discard(kr)
# This row is no longer a population member and is thus
# expendable, so have cpf start considering it
self.cpf.setRow(kr, self.X[kr,:])
return self.dLock.run(gotLock)
def purgePop(self):
"""
Purges the history of all members of the current
population. (Presumably, they will get added back again after
re-evaluation.)
"""
def gotLock():
while self.Kp:
kr = self.Kp.pop()
self.purge(kr)
self.kr.clear()
return self.dLock.run(gotLock)
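# --- Illustrative usage (not part of the library) ---------------------------
# A minimal sketch of how a History instance might be driven from Twisted
# inlineCallbacks code; the "individual" object and the parameter names are
# assumptions for illustration only, not the package's documented API.
#
#   h = History(['vP', 'rT'])
#   kr = yield h.add(individual)       # record an evaluated individual
#   yield h.notInPop(individual)       # later, drop it from the population
#   h.a.plot(['vP', 'rT'])             # scatter parameter values vs. SSE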
|
import numpy as np
def iou(coord_a, coord_b):
x1_a, y1_a, x2_a, y2_a = coord_a
x1_b, y1_b, x2_b, y2_b = coord_b
x_overlap = max(0, min(x2_a, x2_b) - max(x1_a, x1_b))
y_overlap = max(0, min(y2_a, y2_b) - max(y1_a, y1_b))
intersection = x_overlap * y_overlap
union = (x2_a - x1_a) * (y2_a - y1_a) + (x2_b - x1_b) * (y2_b - y1_b)\
- intersection
return intersection * 1.0 / union
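# Worked example (illustrative): boxes (0, 0, 2, 2) and (1, 1, 3, 3) overlap
# in a 1x1 square, so intersection = 1, union = 4 + 4 - 1 = 7 and
# iou(...) = 1/7 ~= 0.143.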
def lr_schedule(lr, lr_factor, epoch_now, epoch_lr):
"""
Learning rate schedule with respect to epoch
lr: float, initial learning rate
lr_factor: float, decreasing factor every epoch_lr
epoch_now: int, the current epoch
epoch_lr: int, decreasing every epoch_lr
return: lr, float, scheduled learning rate.
"""
return lr * np.power(lr_factor, epoch_now // epoch_lr)
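# Worked example (illustrative): with lr=0.01, lr_factor=0.1 and epoch_lr=10,
# lr_schedule(0.01, 0.1, epoch_now=25, epoch_lr=10) returns
# 0.01 * 0.1 ** (25 // 10) = 0.01 * 0.01 = 1e-4.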
def img_acc(out, coords, cfg, iou_thred=1e-5):
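"""
Top-1 detection accuracy for a batch of anchor-based network outputs.
out: 5-D array [batch, n_anchors, n_anchors, ...] whose trailing elements
reshape to (n_scales, n_ratios, 5); the 5 values are (objectness score,
dx, dy, log-dw, log-dh). For each image the single most confident anchor
is decoded (using the regression offsets when cfg['use_regress'] is set)
and counted as a hit when its IoU with any ground-truth box in coords
reaches iou_thred. Rows of coords equal to (0, 0, 0, 0) act as padding.
cfg must provide 'scales', 'ratios', 'stride' and 'use_regress'.
"""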
batch_size, n_anchors, n_anchors, _, _ = out.shape
n_scales = len(cfg['scales'])
n_ratios = len(cfg['ratios'])
out = out.reshape(batch_size, n_anchors, n_anchors, n_scales, n_ratios, 5)
offset = (cfg['stride'] - 1.0) / 2
x_anchors = np.arange(
offset, offset + cfg['stride'] * (n_anchors - 1) + 1, cfg['stride']
)
y_anchors = np.array(x_anchors)
idcs_max = np.argmax(out[..., 0].reshape(batch_size, -1), axis=1)
idcs_x, idcs_y, idcs_s, idcs_r = np.unravel_index(
idcs_max, (n_anchors, n_anchors, n_scales, n_ratios))
acc = 0.0
for i in range(batch_size):
x_anchor = x_anchors[idcs_x[i]]
y_anchor = y_anchors[idcs_y[i]]
scale = cfg['scales'][idcs_s[i]]
ratio = cfg['ratios'][idcs_r[i]]
w_anchor = np.round(scale / np.sqrt(ratio))
h_anchor = np.round(scale * np.sqrt(ratio))
if cfg['use_regress']:
out_i = out[i, idcs_x[i], idcs_y[i], idcs_s[i], idcs_r[i]]
x_bbox = out_i[1] * w_anchor + x_anchor
y_bbox = out_i[2] * h_anchor + y_anchor
w_bbox = np.exp(out_i[3]) * w_anchor
h_bbox = np.exp(out_i[4]) * h_anchor
else:
x_bbox = x_anchor
y_bbox = y_anchor
w_bbox = w_anchor
h_bbox = h_anchor
x1_bbox, y1_bbox = x_bbox - w_bbox / 2.0, y_bbox - h_bbox / 2.0
x2_bbox, y2_bbox = x_bbox + w_bbox / 2.0, y_bbox + h_bbox / 2.0
coord_bbox = (x1_bbox, y1_bbox, x2_bbox, y2_bbox)
for coord in coords[i]:
if tuple(coord) == (0, 0, 0, 0):
break
if iou(coord_bbox, coord) >= iou_thred:
acc += 1.0
break
return acc / batch_size
def froc(froc_data, n_imgs, n_gt_boxes, iou_thred=0.5,
fps_img=[0.5, 1, 2, 4, 8, 16]):
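"""
Free-response ROC (FROC) score.
froc_data: array of shape [M, N, 3] with one entry per proposal:
(confidence score, IoU overlap with its matched ground-truth box,
ground-truth box id), for M images and N proposals per image.
Proposals are ranked by score; sensitivity (unique hits / n_gt_boxes) is
sampled each time the false-positive count crosses one of the fps_img
per-image thresholds, and FROC is the mean of those sensitivities.
"""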
M, N, _ = froc_data.shape
scores = froc_data[:, :, 0].flatten()
idcs_sorted = scores.argsort()[::-1]
idcs_img, idcs_prop = np.unravel_index(idcs_sorted, (M, N))
tp = 0
fp = 0
tps = []
fps = (np.sort(fps_img) * n_imgs).tolist()
gt_boxes_hitted = set()
# for each proposal sorted by their scores
for i in range(len(idcs_sorted)):
idx_img = idcs_img[i]
idx_prop = idcs_prop[i]
overlap = froc_data[idx_img, idx_prop, 1]
gt_box_id = froc_data[idx_img, idx_prop, 2]
# not hit
if overlap < iou_thred:
fp += 1
if fp < fps[0]:
continue
tps.append(tp)
fps.pop(0)
if len(fps) == 0:
break
# hit
else:
# new hit
if (idx_img, gt_box_id) not in gt_boxes_hitted:
tp += 1
gt_boxes_hitted.add((idx_img, gt_box_id))
sens = np.array(tps) / n_gt_boxes
FROC = sens.mean()
return (FROC, sens)
|
#
# Copyright <NAME> 2013
#
"""
Code to deal with MEME and MEME file formats.
"""
import os
import biopsy
from Bio import SeqIO
from itertools import chain
from collections import defaultdict
def name_matcher(name):
"Create a function that matches strings in lower case"
name = name.lower()
def matcher(other):
return -1 != other.lower().find(name)
return matcher
def match_factor(matcher, factor):
"Does the factor's name or synonyms match the matcher function"
if matcher(factor.name):
return True
else:
for synonym in factor.synonyms:
if matcher(synonym):
return True
def find_matrices(name):
matcher = name_matcher(name)
for matrix in biopsy.transfac.Matrix.all():
for facc in matrix.factors:
factor = facc.link.entry
if factor.gene is not None and 'MOUSE' == factor.gene.entry.species:
if match_factor(matcher, factor):
yield matrix, factor
break
def logo(dist, tag, dir):
"Generate a logo with the given tag in the given directory."
import weblogolib as W
import corebio.seq as S
data = W.LogoData.from_counts(S.unambiguous_dna_alphabet, dist)
options = W.LogoOptions(
logo_title=tag,
color_scheme=W.colorscheme.nucleotide,
show_xaxis=False,
show_yaxis=True,
show_fineprint=False,
)
format = W.LogoFormat(data, options)
filename = 'logo-%s' % tag
#W.eps_formatter(data, format, open(os.path.join(dir, '%s.eps' % filename), 'w'))
W.png_formatter(data, format, open(os.path.join(dir, '%s.png' % filename), 'w'))
#W.pdf_formatter(data, format, open(os.path.join(dir, '%s.pdf' % filename), 'w'))
def dist_for_pssm(pssm):
"@return: The PSSM's frequencies."
import numpy as N
return N.array(
[
[pssm.dists[i].get_freq(b) for b in xrange(4)]
for i in xrange(len(pssm.dists))
]
)
def look_for_matrices(names):
for name in names:
print name
for matrix, factor in find_matrices(name):
print matrix.acc, matrix.name, factor.acc, factor.name
logo(dist_for_pssm(biopsy.get_pssm(str(matrix.acc))), '%s-%s' % (name, matrix.acc), 'logos')
def write_minimal_meme_matrix(out, acc):
"""
The minimal MEME format for a motif looks something like::
MOTIF crp
letter-probability matrix: alength= 4 w= 19 nsites= 17 E= 4.1e-009
0.000000 0.176471 0.000000 0.823529
0.000000 0.058824 0.647059 0.294118
0.000000 0.058824 0.000000 0.941176
0.176471 0.000000 0.764706 0.058824
0.823529 0.058824 0.000000 0.117647
0.294118 0.176471 0.176471 0.352941
0.294118 0.352941 0.235294 0.117647
0.117647 0.235294 0.352941 0.294118
0.529412 0.000000 0.176471 0.294118
0.058824 0.235294 0.588235 0.117647
0.176471 0.235294 0.294118 0.294118
0.000000 0.058824 0.117647 0.823529
0.058824 0.882353 0.000000 0.058824
0.764706 0.000000 0.176471 0.058824
0.058824 0.882353 0.000000 0.058824
0.823529 0.058824 0.058824 0.058824
0.176471 0.411765 0.058824 0.352941
0.411765 0.000000 0.000000 0.588235
0.352941 0.058824 0.000000 0.588235
"""
pssm_info = biopsy.get_pssm(acc)
print >>out, (
"MOTIF %s %s\n"
"letter-probability matrix: alength= 4 w= %d nsites= %d E= %e\n"
"%s\n"
) % (
biopsy.get_pssm_name(acc), acc,
len(pssm_info.dists), pssm_info.sites, 0.,
"\n".join(
' '.join(("%.6f" % dist.get_freq(b)) for b in xrange(4))
for dist in pssm_info.dists
)
)
def write_minimal_meme(out, accs, bg_freqs=None):
"""
The header looks something like::
MEME version 4
ALPHABET= ACGT
strands: + -
Background letter frequencies
A 0.303 C 0.183 G 0.209 T 0.306
"""
if bg_freqs is None:
bg_freqs = [.25, .25, .25, .25]
print >>out, (
"MEME version 4\n\n"
"ALPHABET= ACGT\n\n"
"strands: + -\n\n"
"Background letter frequencies\n"
"A %.3f C %.3f G %.3f T %.3f \n\n"
) % (
bg_freqs[0], bg_freqs[1], bg_freqs[2], bg_freqs[3],
)
for acc in accs:
write_minimal_meme_matrix(out, acc)
print >>out, ""
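# Illustrative example: writing a minimal MEME file for a couple of TRANSFAC
# matrix accessions (the accession strings below are placeholders).
#
#   accs = ['M00001', 'M00002']
#   with open('matrices.meme', 'w') as out:
#       write_minimal_meme(out, accs)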
|
from datetime import datetime
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.executors.pool import ProcessPoolExecutor
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from cryton.etc import config
from cryton.lib.util import logger
SCHED_MAX_THREADS = 20
SCHED_MAX_PROCESSES = 5
JOB_MAX_INSTANCES = 3
class SchedulerService:
def __init__(self):
db_url = f"postgresql://{config.DB_USERNAME}:{config.DB_PASSWORD}@{config.DB_HOST}/{config.DB_NAME}"
jobstores = {
'default': SQLAlchemyJobStore(url=db_url)
}
executors = {
'default': {'type': 'threadpool', 'max_workers': SCHED_MAX_THREADS},
'processpool': ProcessPoolExecutor(max_workers=SCHED_MAX_PROCESSES)
}
job_defaults = {
'coalesce': False,
'max_instances': JOB_MAX_INSTANCES
}
self.scheduler = BackgroundScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults,
timezone=pytz.timezone(config.TIME_ZONE))
self.scheduler.start()
self.state = 'RUNNING'
def __del__(self):
logger.logger.debug("SCHEDULER DELETED", scheduler=self.scheduler.state)
def exposed_add_job(self, execute_function: str, function_args: list, start_time: datetime) -> str:
"""
:param execute_function: Function/method to be scheduled
:param function_args: Function arguments
:param start_time: Function start time
:return: Scheduled job ID
"""
logger.logger.debug("Scheduling job in scheduler service", execute_function=execute_function)
job_scheduled = self.scheduler.add_job(
execute_function, 'date', misfire_grace_time=config.MISFIRE_GRACE_TIME, run_date=str(start_time),
args=function_args, max_instances=100
)
return job_scheduled.id
def exposed_add_repeating_job(self, execute_function: str, seconds: int) -> str:
"""
:param execute_function: Function/method to be scheduled
:param seconds: Function interval in seconds
:return: Scheduled job ID
"""
logger.logger.debug("Scheduling repeating job in scheduler service", execute_function=execute_function)
job_scheduled = self.scheduler.add_job(
execute_function, 'interval', seconds=seconds
)
return job_scheduled.id
def exposed_reschedule_job(self, job_id: str):
logger.logger.debug("Rescheduling job in scheduler service", job_id=job_id)
return self.scheduler.reschedule_job(job_id)
def exposed_pause_job(self, job_id: str):
logger.logger.debug("Pausing job in scheduler service", job_id=job_id)
return self.scheduler.pause_job(job_id)
def exposed_resume_job(self, job_id: str):
logger.logger.debug("Resuming job in scheduler service", job_id=job_id)
return self.scheduler.resume_job(job_id)
def exposed_remove_job(self, job_id: str):
logger.logger.debug("Removing job in scheduler service", job_id=job_id)
return self.scheduler.remove_job(job_id)
def exposed_get_job(self, job_id: str):
logger.logger.debug("Getting job in scheduler service", job_id=job_id)
return self.scheduler.get_job(job_id)
def exposed_get_jobs(self):
logger.logger.debug("Getting multiple jobs in scheduler service")
return self.scheduler.get_jobs()
def exposed_pause_scheduler(self):
logger.logger.debug("Pausing scheduler service")
return self.scheduler.pause()
def exposed_resume_scheduler(self):
logger.logger.debug("Resuming scheduler service")
return self.scheduler.resume()
def health_check(self):
return 0
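# Illustrative usage sketch: APScheduler accepts textual references of the
# form "package.module:callable" for jobs, so a one-off job might be added
# like this (the dotted path and arguments are placeholders):
#
#   service = SchedulerService()
#   job_id = service.exposed_add_job(
#       'my_package.jobs:run_step', [step_id], datetime(2021, 1, 1, 12, 0))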
|
_codes = (
( 200, 'Thunderstorm', 'Thunderstorm with light rain', '11d' ),
( 201, 'Thunderstorm', 'Thunderstorm with rain', '11d' ),
( 202, 'Thunderstorm', 'Thunderstorm with heavy rain', '11d' ),
( 210, 'Thunderstorm', 'Light thunderstorm', '11d' ),
( 211, 'Thunderstorm', 'Thunderstorm', '11d' ),
( 212, 'Thunderstorm', 'Heavy thunderstorm', '11d' ),
( 221, 'Thunderstorm', 'Ragged thunderstorm', '11d' ),
( 230, 'Thunderstorm', 'Thunderstorm with light drizzle', '11d' ),
( 231, 'Thunderstorm', 'Thunderstorm with drizzle', '11d' ),
( 232, 'Thunderstorm', 'Thunderstorm with heavy drizzle', '11d' ),
( 233, 'Thunderstorm', 'Thunderstorm with hail', None ),
( 300, 'Drizzle', 'Light intensity drizzle', '09d' ),
( 301, 'Drizzle', 'Drizzle', '09d' ),
( 302, 'Drizzle', 'Heavy intensity drizzle', '09d' ),
( 310, 'Drizzle', 'Light intensity drizzle rain', '09d' ),
( 311, 'Drizzle', 'Drizzle rain', '09d' ),
( 312, 'Drizzle', 'Heavy intensity drizzle rain', '09d' ),
( 313, 'Drizzle', 'Shower rain and drizzle', '09d' ),
( 314, 'Drizzle', 'Heavy shower rain and drizzle', '09d' ),
( 321, 'Drizzle', 'Shower drizzle', '09d' ),
( 500, 'Rain', 'Light rain', '10d' ),
( 501, 'Rain', 'Moderate rain', '10d' ),
( 502, 'Rain', 'Heavy intensity rain', '10d' ),
( 503, 'Rain', 'Very heavy rain', '10d' ),
( 504, 'Rain', 'Extreme rain', '10d' ),
( 511, 'Rain', 'Freezing rain', '13d' ),
( 520, 'Rain', 'Light intensity shower rain', '09d' ),
( 521, 'Rain', 'Shower rain', '09d' ),
( 522, 'Rain', 'Heavy intensity shower rain', '09d' ),
( 531, 'Rain', 'Ragged shower rain', '09d' ),
( 600, 'Snow', 'Light snow', '13d' ),
( 601, 'Snow', 'Snow', '13d' ),
( 602, 'Snow', 'Heavy snow', '13d' ),
( 610, 'Snow', 'Mix snow/rain', None ),
( 611, 'Snow', 'Sleet', '13d' ),
( 612, 'Snow', 'Light shower sleet', '13d' ),
# 612 Heavy sleet: Weather API icons s05d (day), s05n (night)
( 613, 'Snow', 'Shower sleet', '13d' ),
( 615, 'Snow', 'Light rain and snow', '13d' ),
( 616, 'Snow', 'Rain and snow', '13d' ),
( 620, 'Snow', 'Light shower snow', '13d' ),
( 621, 'Snow', 'Shower snow', '13d' ),
( 622, 'Snow', 'Heavy shower snow', '13d' ),
# 623 Flurries: Weather API icons s06d (day), s06n (night)
# 700 Mist: Weather API icons a01d (day), a01n (night)
( 701, 'Mist', 'Mist', '50d' ),
( 711, 'Smoke', 'Smoke', '50d' ),
( 721, 'Haze', 'Haze', '50d' ),
( 731, 'Dust', 'Sand/dust whirl', '50d' ),
( 741, 'Fog', 'Fog', '50d' ),
# 751 Freezing fog: Weather API icons a06d (day), a06n (night)
( 751, 'Sand', 'Sand', '50d' ),
( 761, 'Dust', 'Dust', '50d' ),
( 762, 'Ash', 'Volcanic ash', '50d' ),
( 771, 'Squall', 'Squalls', '50d' ),
( 781, 'Tornado', 'Tornado', '50d' ),
( 800, 'Clear', 'Clear sky', '01d 01n' ),
( 801, 'Clouds', 'Few clouds: 11-25%', '02d 02n' ),
( 802, 'Clouds', 'Scattered clouds: 25-50%', '03d 03n' ),
( 803, 'Clouds', 'Broken clouds: 51-84%', '04d 04n' ),
( 804, 'Clouds', 'Overcast clouds: 85-100%', '04d 04n' ),
# 900 Unknown precipitation: Weather API icons u00d (day), u00n (night)
)
"""
Weather icon-codes from
* darksky.net
* weatherbit.io
* openweathermap.org
"""
"""
Weather icon characters from
* openweathermap.org
* weatherbit.io
"""
"""
Darksky icons
clear-day, clear-night, rain, snow, sleet, wind, fog, cloudy, partly-cloudy-day, or partly-cloudy-night
"""
|
#
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
import gzip
from pathlib import Path
from typing import Iterator, Optional, Sequence, Set
from wikidata_history_analyzer.datamodel.wikidata_rdf_revision import (
WikidataRdfRevision,
WikidataRdfTriple,
)
from wikidata_history_analyzer.datamodel.wikidata_revision import WikidataRevision
class WikidataIncrementalRdfRevision(WikidataRevision):
triple_deletions: Sequence[WikidataRdfTriple]
triple_additions: Sequence[WikidataRdfTriple]
@classmethod
def from_rdf_revisions(
cls, revisions: Iterator[WikidataRdfRevision]
) -> Iterator[WikidataIncrementalRdfRevision]:
state: Set[WikidataRdfTriple] = set()
last_page_id = -1
for revision in revisions:
if last_page_id != revision.page_id:
last_page_id = revision.page_id
state = set()
triples_set = set(revision.triples)
triple_deletions = state - triples_set
triple_additions = triples_set - state
# TODO: double check if we can replace this with `state = triples_set`. Main
# concern would be, that auto-generated IDs of blank triples would not line
# up. If it can't be replaced, document why.
state -= triple_deletions
state |= triple_additions
yield WikidataIncrementalRdfRevision(
prefixed_title=revision.prefixed_title,
namespace=revision.namespace,
page_id=revision.page_id,
redirect=revision.redirect,
revision_id=revision.revision_id,
parent_revision_id=revision.parent_revision_id,
timestamp=revision.timestamp,
contributor=revision.contributor,
contributor_id=revision.contributor_id,
is_minor=revision.is_minor,
comment=revision.comment,
content_model=revision.content_model,
format=revision.format,
sha1=revision.sha1,
triple_deletions=sorted(triple_deletions),
triple_additions=sorted(triple_additions),
)
@classmethod
def iter_path(cls, dir_: Path, page_id: int) -> Path:
return dir_ / (str(page_id) + ".jsonl.gz")
@classmethod
def save_iter_to_file(
cls,
revisions: Iterator[WikidataIncrementalRdfRevision],
file_or_dir: Path,
page_id: Optional[int] = None,
) -> None:
path = cls.iter_path(file_or_dir, page_id) if page_id else file_or_dir
path.parent.mkdir(parents=True, exist_ok=True)
with gzip.open(path, "wt", encoding="UTF-8") as fout:
for revision in revisions:
assert revision.page_id == page_id
fout.write(revision.json() + "\n")
@classmethod
def load_iter_from_file(
cls,
file_or_dir: Path,
page_id: Optional[int] = None,
) -> Iterator[WikidataIncrementalRdfRevision]:
path = cls.iter_path(file_or_dir, page_id) if page_id else file_or_dir
with gzip.open(path, "rt", encoding="UTF-8") as fin:
for line in fin:
yield cls.parse_raw(line)
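# Illustrative usage sketch: converting a page's full RDF revisions into
# incremental ones and storing them as "<out_dir>/<page_id>.jsonl.gz"; the
# rdf_revisions iterator and out_dir below are placeholders.
#
#   incremental = WikidataIncrementalRdfRevision.from_rdf_revisions(rdf_revisions)
#   WikidataIncrementalRdfRevision.save_iter_to_file(incremental, out_dir, page_id=42)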
|
# coding=utf-8
__author__ = "<NAME>"
# Taken and adapted from:
# https://github.com/khammernik/sigmanet/blob/master/reconstruction/common/mytorch/models/sn.py
import numpy as np
import torch
def matrix_invert(xx, xy, yx, yy):
det = xx * yy - xy * yx
return yy.div(det), -xy.div(det), -yx.div(det), xx.div(det)
class ComplexInstanceNorm(torch.nn.Module):
"""Motivated by 'Deep Complex Networks' (https://arxiv.org/pdf/1705.09792.pdf)"""
def __init__(self):
super(ComplexInstanceNorm, self).__init__()
self.mean = 0
self.cov_xx_half = 1 / np.sqrt(2)
self.cov_xy_half = 0
self.cov_yx_half = 0
self.cov_yy_half = 1 / np.sqrt(2)
def complex_instance_norm(self, x, eps=1e-5):
"""Operates on images x of size [nBatch, nSmaps, nFE, nPE, 2]"""
x_combined = torch.sum(x, dim=1, keepdim=True)
mean = x_combined.mean(dim=(1, 2, 3), keepdim=True)
x_m = x - mean
self.mean = mean
self.complex_pseudocovariance(x_m)
def complex_pseudocovariance(self, data):
"""Data variable hast to be already mean-free! Operates on images x of size [nBatch, nSmaps, nFE, nPE, 2]"""
if data.size(-1) != 2:
raise AssertionError
shape = data.shape
# compute number of elements
N = shape[2] * shape[3]
# separate real/imaginary channel
re, im = torch.unbind(data, dim=-1)
# dimensions are now the length of the original shape - 1 (because channels are separated)
dim = list(range(1, len(shape) - 1))
# compute covariance entries. cxy = cyx
cxx = (re * re).sum(dim=dim, keepdim=True) / (N - 1)
cyy = (im * im).sum(dim=dim, keepdim=True) / (N - 1)
cxy = (re * im).sum(dim=dim, keepdim=True) / (N - 1)
# Eigenvalue decomposition C = V*S*inv(V)
# compute eigenvalues
s1 = (cxx + cyy) / 2 - torch.sqrt((cxx + cyy) ** 2 / 4 - cxx * cyy + cxy**2)
s2 = (cxx + cyy) / 2 + torch.sqrt((cxx + cyy) ** 2 / 4 - cxx * cyy + cxy**2)
# compute eigenvectors
v1x = s1 - cyy
v1y = cxy
v2x = s2 - cyy
v2y = cxy
# normalize eigenvectors
norm1 = torch.sqrt(torch.sum(v1x * v1x + v1y * v1y, dim=dim, keepdim=True))
norm2 = torch.sqrt(torch.sum(v2x * v2x + v2y * v2y, dim=dim, keepdim=True))
v1x = v1x.div(norm1)
v1y = v1y.div(norm1)
v2x = v2x.div(norm2)
v2y = v2y.div(norm2)
# now we need the sqrt of the covariance matrix.
# C^{-0.5} = V * sqrt(S) * inv(V)
det = v1x * v2y - v2x * v1y
s1 = torch.sqrt(s1).div(det)
s2 = torch.sqrt(s2).div(det)
self.cov_xx_half = v1x * v2y * s1 - v1y * v2x * s2
self.cov_yy_half = v1x * v2y * s2 - v1y * v2x * s1
self.cov_xy_half = v1x * v2x * (s2 - s1)
self.cov_yx_half = v1y * v2y * (s1 - s2)
def forward(self, input):
return self.normalize(input)
def set_normalization(self, input):
mean = torch.tensor([torch.mean(input).item()]).to(input)
self.complex_pseudocovariance(input - mean)
self.mean = mean.unsqueeze(1).unsqueeze(1).unsqueeze(1)
self.cov_xx_half = self.cov_xx_half.view(-1, 1, 1, 1)
self.cov_xy_half = self.cov_xy_half.view(-1, 1, 1, 1)
self.cov_yx_half = self.cov_yx_half.view(-1, 1, 1, 1)
self.cov_yy_half = self.cov_yy_half.view(-1, 1, 1, 1)
def normalize(self, x):
x_m = x - self.mean
re, im = torch.unbind(x_m, dim=-1)
cov_xx_half_inv, cov_xy_half_inv, cov_yx_half_inv, cov_yy_half_inv = matrix_invert(
self.cov_xx_half, self.cov_xy_half, self.cov_yx_half, self.cov_yy_half
)
x_norm_re = cov_xx_half_inv * re + cov_xy_half_inv * im
x_norm_im = cov_yx_half_inv * re + cov_yy_half_inv * im
img = torch.stack([x_norm_re, x_norm_im], dim=-1)
img = img.clamp(-6, 6)
return img
def unnormalize(self, x):
re, im = torch.unbind(x, dim=-1)
x_unnorm_re = self.cov_xx_half * re + self.cov_xy_half * im
x_unnorm_im = self.cov_yx_half * re + self.cov_yy_half * im
return torch.stack([x_unnorm_re, x_unnorm_im], dim=-1) + self.mean
class ComplexNormWrapper(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.model = model
self.complex_instance_norm = ComplexInstanceNorm()
def forward(self, input):
# compute complex instance norm on sample of size [nBatch, nSmaps, nFE, nPE, 2]
self.complex_instance_norm.set_normalization(input)
output = self.complex_instance_norm.normalize(input)
# re-shape data from [nBatch, nSmaps, nFE, nPE, 2] to [nBatch*nSmaps, 2, nFE, nPE]
shp = output.shape
output = output.view(shp[0] * shp[1], *shp[2:]).permute(0, 3, 1, 2)
# apply denoising
output = self.model(output)
# re-shape data from [nBatch*nSmaps, 2, nFE, nPE]
# to [nBatch, nSmaps, nFE, nPE, 2]
output = output.permute(0, 2, 3, 1).view(*shp)
# unnormalize
output = self.complex_instance_norm.unnormalize(output)
return output
class SensitivityNetwork(torch.nn.Module):
"""Sensitivity network with data term based on forward and adjoint containing the sensitivity maps"""
def __init__(
self,
num_iter,
model,
datalayer,
shared_params=True,
save_space=False,
reset_cache=False,
):
super().__init__()
self.shared_params = shared_params
self.num_iter = 1 if self.shared_params else num_iter
self.num_iter_total = num_iter
self.is_trainable = [True] * num_iter
# setup the modules
self.gradR = torch.nn.ModuleList([ComplexNormWrapper(model) for _ in range(self.num_iter)])
self.gradD = torch.nn.ModuleList([datalayer for _ in range(self.num_iter)])
self.save_space = save_space
if self.save_space:
self.forward = self.forward_save_space
self.reset_cache = reset_cache
def forward(self, x, y, smaps, mask):
x_all = [x]
x_half_all = []
if self.shared_params:
num_iter = self.num_iter_total
else:
num_iter = min(np.where(self.is_trainable)[0][-1] + 1, self.num_iter)
for i in range(num_iter):
x_thalf = x - self.gradR[i % self.num_iter](x)
x = self.gradD[i % self.num_iter](x_thalf, y, smaps, mask)
x_all.append(x)
x_half_all.append(x_thalf)
return x_all[-1]
def forward_save_space(self, x, y, smaps, mask):
if self.shared_params:
num_iter = self.num_iter_total
else:
num_iter = min(np.where(self.is_trainable)[0][-1] + 1, self.num_iter)
for i in range(num_iter):
x_thalf = x - self.gradR[i % self.num_iter](x)
x = self.gradD[i % self.num_iter](x_thalf, y, smaps, mask)
# clearing the CUDA caches here avoids running out of memory
# at test time in some cases
if self.reset_cache:
torch.cuda.empty_cache()
torch.backends.cuda.cufft_plan_cache.clear()
return x
def freeze(self, i):
"""freeze parameter of cascade i"""
for param in self.gradR[i].parameters():
param.requires_grad = False
self.is_trainable[i] = False
def unfreeze(self, i):
"""freeze parameter of cascade i"""
for param in self.gradR[i].parameters():
param.requires_grad = True
self.is_trainable[i] = True
def freeze_all(self):
"""freeze parameter of cascade i"""
for i in range(self.num_iter):
self.freeze(i)
def unfreeze_all(self):
"""freeze parameter of cascade i"""
for i in range(self.num_iter):
self.unfreeze(i)
def copy_params(self, src_i, trg_j):
"""copy i-th cascade net parameters to j-th cascade net parameters"""
src_params = self.gradR[src_i].parameters()
trg_params = self.gradR[trg_j].parameters()
for trg_param, src_param in zip(trg_params, src_params):
trg_param.data.copy_(src_param.data)
def stage_training_init(self):
self.freeze_all()
self.unfreeze(0)
print(self.is_trainable)
def stage_training_transition_i(self, copy=False):
if self.shared_params:
return
# if all unlocked, don't do anything
if not np.all(self.is_trainable):
for i in range(self.num_iter):
# if last cascade is reached, unlock all
if i == self.num_iter - 1:
self.unfreeze_all()
break
# freeze current i, unlock next. copy parameter if specified
if self.is_trainable[i]:
self.freeze(i)
self.unfreeze(i + 1)
if copy:
self.copy_params(i, i + 1)
break
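# Illustrative usage sketch: unrolling eight cascades of a denoising CNN and
# a data-consistency layer; `cnn` and `data_layer` stand in for whatever
# regularizer and data term the surrounding project provides.
#
#   net = SensitivityNetwork(num_iter=8, model=cnn, datalayer=data_layer,
#                            shared_params=False)
#   x_rec = net(x_init, y_kspace, smaps, mask)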
|
import gc
import time
import logging
import aiohttp
import asyncio
import socket
import pytest
from aiohttp.test_utils import TestServer
import aioamqp
import aioamqp.channel
import aioamqp.protocol
import aiohttp.web
import asyncpg
from docker.client import DockerClient
from docker.utils import kwargs_from_env
from async_generator import yield_, async_generator
from aioapp.app import Application
# disable error logging so it does not clutter the output
# logging.basicConfig(level=logging.CRITICAL)
logging.basicConfig(
format='%(asctime)-15s %(message)s %(filename)s %(lineno)s %(funcName)s')
aioamqp.channel.logger.level = logging.CRITICAL
aioamqp.protocol.logger.level = logging.CRITICAL
@pytest.fixture(scope='session')
def event_loop():
asyncio.set_event_loop_policy(asyncio.DefaultEventLoopPolicy())
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
gc.collect()
loop.close()
@pytest.fixture(scope='session')
def loop(event_loop):
return event_loop
def get_free_port():
sock = socket.socket()
try:
sock.bind(('', 0))
return sock.getsockname()[1]
finally:
sock.close()
@pytest.fixture(scope='session')
async def postgres(loop):
tag = 'latest'
image = 'postgres'
host = '127.0.0.1'
timeout = 60
unused_tcp_port = get_free_port()
client = DockerClient(version='auto', **kwargs_from_env())
client.images.pull(image, tag=tag)
print('Starting %s:%s on %s:%s' % (image, tag, host, unused_tcp_port))
cont = client.containers.run('%s:%s' % (image, tag), detach=True,
ports={'5432/tcp': ('0.0.0.0',
unused_tcp_port)})
try:
start_time = time.time()
conn = None
while conn is None:
if start_time + timeout < time.time():
raise Exception("Initialization timeout, failed to "
"initialize postgresql container")
try:
conn = await asyncpg.connect(
'postgresql://postgres@%s:%s/postgres'
'' % (host, unused_tcp_port),
loop=loop)
except Exception as e:
time.sleep(.1)
await conn.close()
yield (host, unused_tcp_port)
finally:
cont.kill()
cont.remove()
@pytest.fixture(scope='session')
async def rabbit(loop, rabbit_override_addr):
if rabbit_override_addr:
yield rabbit_override_addr.split(':')
return
tag = '3.7.1'
image = 'rabbitmq:{}'.format(tag)
host = '0.0.0.0'
timeout = 60
unused_tcp_port = get_free_port()
client = DockerClient(version='auto', **kwargs_from_env())
print('Starting rabbitmq %s on %s:%s' % (image, host, unused_tcp_port))
cont = client.containers.run(image, detach=True,
ports={'5672/tcp': ('0.0.0.0',
unused_tcp_port)})
try:
start_time = time.time()
conn = transport = None
while conn is None:
if start_time + timeout < time.time():
raise Exception("Initialization timeout, failed t o "
"initialize rabbitmq container")
try:
transport, conn = await aioamqp.connect(host, unused_tcp_port,
loop=loop)
except Exception:
time.sleep(.1)
await conn.close()
transport.close()
yield (host, unused_tcp_port)
finally:
cont.kill()
cont.remove()
@pytest.fixture
@async_generator
async def client(loop):
async with aiohttp.ClientSession(loop=loop) as client:
await yield_(client)
@pytest.fixture(scope='session')
def tracer_server(loop):
"""Factory to create a TestServer instance, given an app.
test_server(app, **kwargs)
"""
servers = []
async def go(**kwargs):
def tracer_handle(request):
return aiohttp.web.Response(text='', status=201)
app = aiohttp.web.Application()
app.router.add_post('/api/v2/spans', tracer_handle)
server = TestServer(app, port=None)
await server.start_server(loop=loop, **kwargs)
servers.append(server)
return server
yield go
async def finalize():
while servers:
await servers.pop().close()
loop.run_until_complete(finalize())
@pytest.fixture
async def app(tracer_server, loop):
tracer_host = '127.0.0.1'
tracer_port = (await tracer_server()).port
tracer_addr = 'http://%s:%s/' % (tracer_host, tracer_port)
app = Application(loop=loop)
app.setup_logging(tracer_driver='zipkin', tracer_addr=tracer_addr,
tracer_name='test')
yield app
await app.run_shutdown()
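# Illustrative example of a test that pulls in these fixtures; the test body
# and names below are placeholders.
#
#   async def test_postgres_is_reachable(app, postgres):
#       host, port = postgres
#       conn = await asyncpg.connect(
#           'postgresql://postgres@%s:%s/postgres' % (host, port))
#       await conn.close()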
|
import functools
import subprocess
from typing import Tuple
try:
# This fails when the code is executed directly and not as a part of python package installation,
# I definitely need a better way to handle this.
from adbe.output_helper import print_error, print_error_and_exit, print_verbose
except ImportError:
# This works when the code is executed directly.
from output_helper import print_error, print_error_and_exit, print_verbose
_adb_prefix = 'adb'
_IGNORED_LINES = [
'WARNING: linker: libdvm.so has text relocations. This is wasting memory and is a security risk. Please fix.'
]
# Below version 24, if an adb shell command fails, then it still has an incorrect exit code of 0.
_MIN_VERSION_ABOVE_WHICH_ADB_SHELL_RETURNS_CORRECT_EXIT_CODE = 24
def get_adb_prefix():
return _adb_prefix
def set_adb_prefix(adb_prefix):
# pylint: disable=global-statement
global _adb_prefix
_adb_prefix = adb_prefix
def get_adb_shell_property(property_name, device_serial=None):
_, stdout, _ = execute_adb_shell_command2('getprop %s' % property_name, device_serial=device_serial)
return stdout
def execute_adb_shell_command2(adb_cmd, piped_into_cmd=None, ignore_stderr=False, device_serial=None):
return execute_adb_command2('shell %s' % adb_cmd, piped_into_cmd=piped_into_cmd,
ignore_stderr=ignore_stderr, device_serial=device_serial)
def execute_adb_command2(adb_cmd, piped_into_cmd=None, ignore_stderr=False, device_serial=None) -> Tuple[int, str, str]:
"""
:param adb_cmd: command to run inside the adb shell (so, don't prefix it with "adb")
:param piped_into_cmd: command to pipe the output of this command into
:param ignore_stderr: if true, errors in stderr stream will be ignored while piping commands
:param device_serial: device serial to send this command to (in case of multiple devices)
:return: (return_code, stdout, stderr)
"""
adb_prefix = _adb_prefix
if device_serial:
adb_prefix = '%s -s %s' % (adb_prefix, device_serial)
final_cmd = ('%s %s' % (adb_prefix, adb_cmd))
if piped_into_cmd:
final_cmd = '%s | %s' % (final_cmd, piped_into_cmd)
print_verbose("Executing \"%s\"" % final_cmd)
ps1 = subprocess.Popen(final_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_data, stderr_data = ps1.communicate()
return_code = ps1.returncode
try:
stdout_data = stdout_data.decode('utf-8')
except UnicodeDecodeError:
print_error('Unable to decode data as UTF-8, defaulting to printing the binary data')
stderr_data = stderr_data.decode('utf-8')
_check_for_adb_not_found_error(stderr_data)
_check_for_more_than_one_device_error(stderr_data)
_check_for_device_not_found_error(stderr_data)
if not ignore_stderr and stderr_data:
print_error(stderr_data)
if not stdout_data:
return return_code, None, stderr_data
# stdout_data is not None
if isinstance(stdout_data, bytes):
print_verbose("Result is \"%s\"" % stdout_data)
return return_code, stdout_data, stderr_data
# str for Python 3, this used to be unicode type for python 2
elif isinstance(stdout_data, str):
output = ''
first_line = True
for line in stdout_data.split('\n'):
line = line.strip()
if not line:
continue
if line in _IGNORED_LINES:
continue
if first_line:
output += line
first_line = False
else:
output += '\n' + line
print_verbose("Result is \"%s\"" % output)
return return_code, output, stderr_data
else:
print_error_and_exit('stdout_data is weird type: %s' % type(stdout_data))
def execute_adb_shell_command(adb_cmd, piped_into_cmd=None, ignore_stderr=False, device_serial=None):
_, stdout, _ = execute_adb_command2(
'shell %s' % adb_cmd, piped_into_cmd, ignore_stderr, device_serial=device_serial)
return stdout
def execute_file_related_adb_shell_command(adb_shell_cmd, file_path, piped_into_cmd=None, ignore_stderr=False,
device_serial=None):
file_not_found_message = 'No such file or directory'
is_a_directory_message = 'Is a directory' # Error when someone tries to delete a dir with "-r"
adb_cmds_prefix = []
run_as_package = get_package(file_path)
if run_as_package:
adb_cmds_prefix.append('shell run-as %s' % run_as_package)
if root_required_to_access_file(file_path):
adb_cmds_prefix.append('shell su root')
# As a backup, still try with a plain-old access, if run-as is not possible and root is not available.
adb_cmds_prefix.append('shell')
stdout = None
attempt_count = 1
for adb_cmd_prefix in adb_cmds_prefix:
print_verbose('Attempt %d/%d: "%s"' % (attempt_count, len(adb_cmds_prefix), adb_cmd_prefix))
attempt_count += 1
adb_cmd = '%s %s' % (adb_cmd_prefix, adb_shell_cmd)
return_code, stdout, stderr = execute_adb_command2(adb_cmd, piped_into_cmd, ignore_stderr,
device_serial=device_serial)
if stderr.find(file_not_found_message) >= 0:
print_error('File not found: %s' % file_path)
return stderr
if stderr.find(is_a_directory_message) >= 0:
print_error('%s is a directory' % file_path)
return stderr
api_version = get_device_android_api_version()
if api_version >= _MIN_VERSION_ABOVE_WHICH_ADB_SHELL_RETURNS_CORRECT_EXIT_CODE and return_code == 0:
return stdout
return stdout
# Gets the package name given a file path.
# Eg. if the file is in /data/data/com.foo/.../file1 then package is com.foo
# Or if the file is in /data/user/0/com.foo/.../file1 then package is com.foo
def get_package(file_path):
if not file_path:
return None
if file_path.startswith('/data/data/'):
items = file_path.split('/')
if len(items) >= 4:
run_as_package = items[3]
return run_as_package
# Handles the new multi-user mode
if file_path.startswith('/data/user/'):
items = file_path.split('/')
if len(items) >= 5:
run_as_package = items[4]
return run_as_package
return None
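# A minimal sketch (the paths below are made-up examples, not from a real device)
# of what get_package() is expected to return:
#   get_package('/data/data/com.foo/files/f1')    -> 'com.foo'
#   get_package('/data/user/0/com.foo/files/f1')  -> 'com.foo'
#   get_package('/sdcard/Download/f1')            -> None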
# adb shell getprop ro.build.version.sdk
@functools.lru_cache(maxsize=10)
def get_device_android_api_version(device_serial=None):
version_string = get_adb_shell_property('ro.build.version.sdk', device_serial=device_serial)
if version_string is None:
print_error_and_exit('Unable to get Android device version, is it still connected?')
return int(version_string)
def root_required_to_access_file(remote_file_path):
if not remote_file_path:
return False
elif remote_file_path.startswith('/data/local/tmp'):
return False
elif remote_file_path.startswith('/sdcard'):
return False
return True
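# Sketch: '/data/local/tmp/foo' and '/sdcard/foo' are treated as world-accessible and
# return False; any other non-empty path, e.g. '/data/data/com.foo/f1', returns True.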
def _check_for_adb_not_found_error(stderr_data):
if not stderr_data:
return
stderr_data = stderr_data.strip()
if stderr_data.endswith('%s: command not found' % _adb_prefix):
message = 'ADB (Android debug bridge) command not found.\n'
message += 'Install ADB via https://developer.android.com/studio/releases/platform-tools.html'
print_error_and_exit(message)
def _check_for_more_than_one_device_error(stderr_data):
if not stderr_data:
return
for line in stderr_data.split('\n'):
line = line.strip()
if line:
print_verbose(line)
if line.find('error: more than one') != -1:
message = ''
message += 'More than one device/emulator are connected.\n'
message += 'Please select a device by providing the serial ID (-s parameter).\n'
message += 'You can list all connected devices/emulators via \"devices\" subcommand.'
print_error_and_exit(message)
def _check_for_device_not_found_error(stderr_data):
if not stderr_data:
return
for line in stderr_data.split('\n'):
line = line.strip()
if line:
print_verbose(line)
if line.find('error: device') > -1 and line.find('not found') > -1:
print_error_and_exit(line)
def toggle_screen():
return execute_adb_shell_command2("input keyevent KEYCODE_POWER")
def set_device_id(device_id):
"""
Make :param device_id: as main device to use
Primary use-case: scripting
Command line equivalent: "-s :param device_id:"
"""
old_adb_prefix = get_adb_prefix()
    if '-s' in old_adb_prefix:
        old_device = old_adb_prefix.split('-s ')[1]
        if ' ' in old_device:
            # Case: device ID is not the last argument
            old_device = old_adb_prefix.split('-s')[1].split(' ')[0]
        print_verbose('Switching from %s to %s' % (old_device, device_id))
        # str.replace returns a new string; use its result, otherwise the swap is silently dropped.
        set_adb_prefix(old_adb_prefix.replace(old_device, device_id))
        return
    print_verbose('Setting device ID to %s' % device_id)
    set_adb_prefix("%s -s %s" % (old_adb_prefix, device_id))
|
<filename>server/auvsi_suas/views/odlcs.py<gh_stars>0
"""Odlcs view."""
from PIL import Image
import io
import json
import logging
import os
import os.path
import re
from auvsi_suas.models.gps_position import GpsPosition
from auvsi_suas.models.mission_config import MissionConfig
from auvsi_suas.models.odlc import Odlc
from auvsi_suas.proto import interop_admin_api_pb2
from auvsi_suas.proto import interop_api_pb2
from auvsi_suas.views.decorators import require_login
from auvsi_suas.views.decorators import require_superuser
from auvsi_suas.views.json import ProtoJsonEncoder
from django.contrib.auth.models import User
from django.core.files.images import ImageFile
from django.http import HttpResponse
from django.http import HttpResponseBadRequest
from django.http import HttpResponseForbidden
from django.http import HttpResponseNotFound
from django.utils.decorators import method_decorator
from django.views.generic import View
from google.protobuf import json_format
from sendfile import sendfile
logger = logging.getLogger(__name__)
ALPHANUMERIC_RE = re.compile(r"^[A-Z0-9]$")
ODLC_MAX = 20 # Limit in the rules.
ODLC_BUFFER = 2 # Buffer for swaps.
ODLC_UPLOAD_LIMIT = (ODLC_MAX + ODLC_BUFFER) * 2 # Account for auto/not.
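# With the values above this works out to (20 + 2) * 2 = 44 ODLCs allowed per user per mission.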
def odlc_to_proto(odlc):
"""Converts an ODLC into protobuf format."""
odlc_proto = interop_api_pb2.Odlc()
odlc_proto.id = odlc.pk
odlc_proto.mission = odlc.mission.pk
odlc_proto.type = odlc.odlc_type
if odlc.location is not None:
odlc_proto.latitude = odlc.location.latitude
odlc_proto.longitude = odlc.location.longitude
if odlc.orientation is not None:
odlc_proto.orientation = odlc.orientation
if odlc.shape is not None:
odlc_proto.shape = odlc.shape
if odlc.alphanumeric:
odlc_proto.alphanumeric = odlc.alphanumeric
if odlc.shape_color is not None:
odlc_proto.shape_color = odlc.shape_color
if odlc.alphanumeric_color is not None:
odlc_proto.alphanumeric_color = odlc.alphanumeric_color
if odlc.description:
odlc_proto.description = odlc.description
odlc_proto.autonomous = odlc.autonomous
return odlc_proto
def validate_odlc_proto(odlc_proto):
"""Validates ODLC proto, raising ValueError if invalid."""
if not odlc_proto.HasField('mission'):
raise ValueError('ODLC mission is required.')
try:
MissionConfig.objects.get(pk=odlc_proto.mission)
except MissionConfig.DoesNotExist:
raise ValueError('Mission for ODLC does not exist.')
if not odlc_proto.HasField('type'):
raise ValueError('ODLC type is required.')
if odlc_proto.HasField('latitude') != odlc_proto.HasField('longitude'):
raise ValueError('Must specify both latitude and longitude.')
if odlc_proto.HasField('latitude') and (odlc_proto.latitude < -90 or
odlc_proto.latitude > 90):
raise ValueError('Invalid latitude "%f", must be -90 <= lat <= 90' %
odlc_proto.latitude)
if odlc_proto.HasField('longitude') and (odlc_proto.longitude < -180 or
odlc_proto.longitude > 180):
raise ValueError('Invalid longitude "%s", must be -180 <= lat <= 180' %
odlc_proto.longitude)
if (odlc_proto.HasField('alphanumeric') and
ALPHANUMERIC_RE.fullmatch(odlc_proto.alphanumeric) is None):
raise ValueError('Alphanumeric is invalid.')
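# Worked example (sketch): a proto with only latitude set, with latitude=100.0, or with
# alphanumeric='ab' (lowercase / multi-character) raises ValueError above, while a proto
# carrying just a type and the id of an existing mission passes validation.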
def update_odlc_from_proto(odlc, odlc_proto):
"""Sets fields of the ODLC from the proto format."""
odlc.mission_id = odlc_proto.mission
odlc.odlc_type = odlc_proto.type
if odlc_proto.HasField('latitude') and odlc_proto.HasField('longitude'):
if odlc.location is None:
l = GpsPosition(
latitude=odlc_proto.latitude, longitude=odlc_proto.longitude)
l.save()
odlc.location = l
else:
odlc.location.latitude = odlc_proto.latitude
odlc.location.longitude = odlc_proto.longitude
odlc.location.save()
else:
# Don't delete underlying GPS position in case it's shared by admin.
# Just unreference it.
odlc.location = None
if odlc_proto.HasField('orientation'):
odlc.orientation = odlc_proto.orientation
else:
odlc.orientation = None
if odlc_proto.HasField('shape'):
odlc.shape = odlc_proto.shape
else:
odlc.shape = None
if odlc_proto.HasField('alphanumeric'):
odlc.alphanumeric = odlc_proto.alphanumeric
else:
odlc.alphanumeric = ''
if odlc_proto.HasField('shape_color'):
odlc.shape_color = odlc_proto.shape_color
else:
odlc.shape_color = None
if odlc_proto.HasField('alphanumeric_color'):
odlc.alphanumeric_color = odlc_proto.alphanumeric_color
else:
odlc.alphanumeric_color = None
if odlc_proto.HasField('description'):
odlc.description = odlc_proto.description
else:
odlc.description = ''
if odlc_proto.HasField('autonomous'):
odlc.autonomous = odlc_proto.autonomous
else:
odlc.autonomous = False
class Odlcs(View):
"""POST new odlc."""
@method_decorator(require_login)
def dispatch(self, *args, **kwargs):
return super(Odlcs, self).dispatch(*args, **kwargs)
def get(self, request):
# Restrict ODLCs to those for user, and optionally a mission.
odlcs = Odlc.objects.filter(user=request.user)
if 'mission' in request.GET:
            try:
                mission_id = int(request.GET['mission'])
            except ValueError:
                return HttpResponseBadRequest('Provided invalid mission ID.')
odlcs = odlcs.filter(mission=mission_id)
# Limit serving to 100 odlcs to prevent slowdown and isolation problems.
odlcs = odlcs.all()[:100]
odlc_protos = [odlc_to_proto(o) for o in odlcs]
return HttpResponse(
json.dumps(odlc_protos, cls=ProtoJsonEncoder),
content_type="application/json")
def post(self, request):
odlc_proto = interop_api_pb2.Odlc()
try:
json_format.Parse(request.body, odlc_proto)
except Exception as e:
return HttpResponseBadRequest(
'Failed to parse request. Error: %s' % str(e))
# Validate ODLC proto fields.
try:
validate_odlc_proto(odlc_proto)
except ValueError as e:
return HttpResponseBadRequest(str(e))
# Cannot set ODLC ID on a post.
if odlc_proto.HasField('id'):
return HttpResponseBadRequest(
'Cannot specify ID for POST request.')
# Check that there aren't too many ODLCs uploaded already.
odlc_count = Odlc.objects.filter(user=request.user).filter(
mission=odlc_proto.mission).count()
if odlc_count >= ODLC_UPLOAD_LIMIT:
return HttpResponseBadRequest(
'Reached upload limit for ODLCs for mission.')
# Build the ODLC object from the request.
odlc = Odlc()
odlc.user = request.user
update_odlc_from_proto(odlc, odlc_proto)
odlc.save()
return HttpResponse(
json_format.MessageToJson(odlc_to_proto(odlc)),
content_type="application/json")
def find_odlc(request, pk):
"""Lookup requested Odlc model.
Only the request's user's odlcs will be returned.
Args:
request: Request object
pk: Odlc primary key
Raises:
Odlc.DoesNotExist: pk not found
ValueError: Odlc not owned by this user.
"""
odlc = Odlc.objects.get(pk=pk)
# We only let users get their own odlcs, unless a superuser.
if odlc.user == request.user or request.user.is_superuser:
return odlc
else:
raise ValueError("Accessing odlc %d not allowed" % pk)
class OdlcsId(View):
"""Get or update a specific odlc."""
@method_decorator(require_login)
def dispatch(self, *args, **kwargs):
return super(OdlcsId, self).dispatch(*args, **kwargs)
def get(self, request, pk):
try:
odlc = find_odlc(request, int(pk))
except Odlc.DoesNotExist:
return HttpResponseNotFound('Odlc %s not found' % pk)
except ValueError as e:
return HttpResponseForbidden(str(e))
return HttpResponse(
json_format.MessageToJson(odlc_to_proto(odlc)),
content_type="application/json")
def put(self, request, pk):
try:
odlc = find_odlc(request, int(pk))
except Odlc.DoesNotExist:
return HttpResponseNotFound('Odlc %s not found' % pk)
except ValueError as e:
return HttpResponseForbidden(str(e))
odlc_proto = interop_api_pb2.Odlc()
try:
json_format.Parse(request.body, odlc_proto)
except Exception as e:
return HttpResponseBadRequest(
'Failed to parse request. Error: %s' % str(e))
# Validate ODLC proto fields.
try:
validate_odlc_proto(odlc_proto)
except ValueError as e:
return HttpResponseBadRequest(str(e))
# ID provided in proto must match object.
if odlc_proto.HasField('id') and odlc_proto.id != odlc.pk:
return HttpResponseBadRequest('ID in request does not match URL.')
# Update the ODLC object from the request.
update_odlc_from_proto(odlc, odlc_proto)
odlc.update_last_modified()
odlc.save()
return HttpResponse(
json_format.MessageToJson(odlc_to_proto(odlc)),
content_type="application/json")
def delete(self, request, pk):
try:
odlc = find_odlc(request, int(pk))
except Odlc.DoesNotExist:
return HttpResponseNotFound('Odlc %s not found' % pk)
except ValueError as e:
return HttpResponseForbidden(str(e))
# Remember the thumbnail path so we can delete it from disk.
thumbnail = odlc.thumbnail.path if odlc.thumbnail else None
odlc.delete()
if thumbnail:
try:
os.remove(thumbnail)
except OSError as e:
logger.warning("Unable to delete thumbnail: %s", e)
return HttpResponse("Odlc deleted.")
class OdlcsIdImage(View):
"""Get or add/update odlc image."""
@method_decorator(require_login)
def dispatch(self, *args, **kwargs):
return super(OdlcsIdImage, self).dispatch(*args, **kwargs)
def get(self, request, pk):
try:
odlc = find_odlc(request, int(pk))
except Odlc.DoesNotExist:
return HttpResponseNotFound('Odlc %s not found' % pk)
except ValueError as e:
return HttpResponseForbidden(str(e))
if not odlc.thumbnail or not odlc.thumbnail.name:
return HttpResponseNotFound('Odlc %s has no image' % pk)
# Tell sendfile to serve the thumbnail.
return sendfile(request, odlc.thumbnail.path)
def post(self, request, pk):
try:
odlc = find_odlc(request, int(pk))
except Odlc.DoesNotExist:
return HttpResponseNotFound('Odlc %s not found' % pk)
except ValueError as e:
return HttpResponseForbidden(str(e))
# Request body is the file
f = io.BytesIO(request.body)
# Verify that this is a valid image
try:
i = Image.open(f)
i.verify()
except IOError as e:
return HttpResponseBadRequest(str(e))
if i.format not in ['JPEG', 'PNG']:
return HttpResponseBadRequest(
'Invalid image format %s, only JPEG and PNG allowed' %
(i.format))
# Clear thumbnail review state.
if odlc.thumbnail_approved is not None:
odlc.thumbnail_approved = None
# Save the thumbnail, note old path.
old_path = odlc.thumbnail.path if odlc.thumbnail else None
odlc.thumbnail.save('%d.%s' % (odlc.pk, i.format), ImageFile(f))
# ODLC has been modified.
odlc.update_last_modified()
odlc.save()
# Check whether old thumbnail should be deleted. Ignore errors.
if old_path and odlc.thumbnail.path != old_path:
try:
os.remove(old_path)
except OSError as e:
logger.warning("Unable to delete old thumbnail: %s", e)
return HttpResponse("Image uploaded.")
def put(self, request, pk):
"""We simply make PUT do the same as POST."""
return self.post(request, pk)
def delete(self, request, pk):
try:
odlc = find_odlc(request, int(pk))
except Odlc.DoesNotExist:
return HttpResponseNotFound('Odlc %s not found' % pk)
except ValueError as e:
return HttpResponseForbidden(str(e))
if not odlc.thumbnail or not odlc.thumbnail.path:
return HttpResponseNotFound('Odlc %s has no image' % pk)
# Clear thumbnail review state.
if odlc.thumbnail_approved is not None:
odlc.thumbnail_approved = None
odlc.save()
path = odlc.thumbnail.path
# Remove the thumbnail from the odlc.
# Note that this does not delete it from disk!
odlc.thumbnail.delete()
try:
os.remove(path)
except OSError as e:
logger.warning("Unable to delete thumbnail: %s", e)
return HttpResponse("Image deleted.")
def odlc_to_review_proto(odlc):
"""Converts an ODLC into a review proto."""
review_proto = interop_admin_api_pb2.OdlcReview()
review_proto.odlc.CopyFrom(odlc_to_proto(odlc))
review_proto.last_modified_timestamp = odlc.last_modified_time.isoformat()
if odlc.thumbnail_approved is not None:
review_proto.thumbnail_approved = odlc.thumbnail_approved
if odlc.description_approved is not None:
review_proto.description_approved = odlc.description_approved
return review_proto
def update_odlc_from_review_proto(odlc, review_proto):
"""Sets fields of the ODLC from the review."""
if review_proto.HasField('thumbnail_approved'):
odlc.thumbnail_approved = review_proto.thumbnail_approved
else:
odlc.thumbnail_approved = False
if review_proto.HasField('description_approved'):
odlc.description_approved = review_proto.description_approved
else:
odlc.description_approved = False
class OdlcsAdminReview(View):
"""Get or update review status for odlcs."""
@method_decorator(require_superuser)
def dispatch(self, *args, **kwargs):
return super(OdlcsAdminReview, self).dispatch(*args, **kwargs)
def get(self, request):
"""Gets all of the odlcs ready for review."""
# Get all odlcs which have a thumbnail to review.
odlcs = [t for t in Odlc.objects.all() if t.thumbnail]
# Sort odlcs by last edit time.
odlcs.sort(key=lambda t: t.last_modified_time)
# Convert to review protos.
odlc_review_protos = [odlc_to_review_proto(odlc) for odlc in odlcs]
return HttpResponse(
json.dumps(odlc_review_protos, cls=ProtoJsonEncoder),
content_type="application/json")
def put(self, request, pk):
"""Updates the review status of a odlc."""
review_proto = interop_admin_api_pb2.OdlcReview()
try:
json_format.Parse(request.body, review_proto)
except Exception:
return HttpResponseBadRequest('Failed to parse review proto.')
try:
odlc = find_odlc(request, int(pk))
except Odlc.DoesNotExist:
return HttpResponseNotFound('Odlc %s not found' % pk)
except ValueError as e:
return HttpResponseForbidden(str(e))
update_odlc_from_review_proto(odlc, review_proto)
odlc.save()
return HttpResponse(
json_format.MessageToJson(odlc_to_review_proto(odlc)),
content_type="application/json")
|
<reponame>lovyan03/esp-idf<gh_stars>1000+
"""
Command line tool to assign tests to CI test jobs.
"""
import argparse
import errno
import json
import os
import re
import yaml
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader as Loader # type: ignore
import gitlab_api
from tiny_test_fw.Utility import CIAssignTest
try:
from idf_py_actions.constants import PREVIEW_TARGETS, SUPPORTED_TARGETS
except ImportError:
SUPPORTED_TARGETS = []
PREVIEW_TARGETS = []
IDF_PATH_FROM_ENV = os.getenv('IDF_PATH')
class IDFCaseGroup(CIAssignTest.Group):
LOCAL_BUILD_DIR = None
BUILD_JOB_NAMES = None
@classmethod
def get_artifact_index_file(cls):
assert cls.LOCAL_BUILD_DIR
if IDF_PATH_FROM_ENV:
artifact_index_file = os.path.join(IDF_PATH_FROM_ENV, cls.LOCAL_BUILD_DIR, 'artifact_index.json')
else:
artifact_index_file = 'artifact_index.json'
return artifact_index_file
class IDFAssignTest(CIAssignTest.AssignTest):
def __init__(self, test_case_path, ci_config_file, case_group=IDFCaseGroup):
super(IDFAssignTest, self).__init__(test_case_path, ci_config_file, case_group)
def format_build_log_path(self, parallel_num):
return '{}/list_job_{}.json'.format(self.case_group.LOCAL_BUILD_DIR, parallel_num)
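    # Sketch: with self.case_group.LOCAL_BUILD_DIR == 'build_examples' (the ExampleGroup
    # value) and parallel_num = 3 this returns 'build_examples/list_job_3.json'.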
def create_artifact_index_file(self, project_id=None, pipeline_id=None):
if project_id is None:
project_id = os.getenv('CI_PROJECT_ID')
if pipeline_id is None:
pipeline_id = os.getenv('CI_PIPELINE_ID')
gitlab_inst = gitlab_api.Gitlab(project_id)
artifact_index_list = []
for build_job_name in self.case_group.BUILD_JOB_NAMES:
job_info_list = gitlab_inst.find_job_id(build_job_name, pipeline_id=pipeline_id)
for job_info in job_info_list:
parallel_num = job_info['parallel_num'] or 1 # Could be None if "parallel_num" not defined for the job
raw_data = gitlab_inst.download_artifact(job_info['id'],
[self.format_build_log_path(parallel_num)])[0]
build_info_list = [json.loads(line) for line in raw_data.decode().splitlines()]
for build_info in build_info_list:
build_info['ci_job_id'] = job_info['id']
artifact_index_list.append(build_info)
artifact_index_file = self.case_group.get_artifact_index_file()
try:
os.makedirs(os.path.dirname(artifact_index_file))
except OSError as e:
if e.errno != errno.EEXIST:
raise e
with open(artifact_index_file, 'w') as f:
json.dump(artifact_index_list, f)
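    # Sketch: the resulting artifact_index.json is a JSON list where each entry is one
    # build-info dict parsed from a downloaded list_job_<n>.json line, augmented with the
    # 'ci_job_id' of the build job that produced it.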
class ExampleGroup(IDFCaseGroup):
SORT_KEYS = CI_JOB_MATCH_KEYS = ['env_tag', 'target']
LOCAL_BUILD_DIR = 'build_examples' # type: ignore
EXAMPLE_TARGETS = SUPPORTED_TARGETS + PREVIEW_TARGETS
BUILD_JOB_NAMES = ['build_examples_cmake_{}'.format(target) for target in EXAMPLE_TARGETS] # type: ignore
class TestAppsGroup(ExampleGroup):
LOCAL_BUILD_DIR = 'build_test_apps'
TEST_APP_TARGETS = SUPPORTED_TARGETS + PREVIEW_TARGETS
BUILD_JOB_NAMES = ['build_test_apps_{}'.format(target) for target in TEST_APP_TARGETS] # type: ignore
class ComponentUTGroup(TestAppsGroup):
LOCAL_BUILD_DIR = 'build_component_ut'
UNIT_TEST_TARGETS = SUPPORTED_TARGETS + PREVIEW_TARGETS
BUILD_JOB_NAMES = ['build_component_ut_{}'.format(target) for target in UNIT_TEST_TARGETS] # type: ignore
class UnitTestGroup(IDFCaseGroup):
SORT_KEYS = ['test environment', 'tags', 'chip_target']
CI_JOB_MATCH_KEYS = ['test environment']
LOCAL_BUILD_DIR = 'tools/unit-test-app/builds' # type: ignore
UNIT_TEST_TARGETS = SUPPORTED_TARGETS + PREVIEW_TARGETS
BUILD_JOB_NAMES = ['build_esp_idf_tests_cmake_{}'.format(target) for target in UNIT_TEST_TARGETS] # type: ignore
MAX_CASE = 50
ATTR_CONVERT_TABLE = {
'execution_time': 'execution time'
}
DUT_CLS_NAME = {
'esp32': 'ESP32DUT',
'esp32s2': 'ESP32S2DUT',
'esp32s3': 'ESP32S3DUT',
'esp32c3': 'ESP32C3DUT',
'esp8266': 'ESP8266DUT',
}
def __init__(self, case):
super(UnitTestGroup, self).__init__(case)
for tag in self._get_case_attr(case, 'tags'):
self.ci_job_match_keys.add(tag)
@staticmethod
def _get_case_attr(case, attr):
if attr in UnitTestGroup.ATTR_CONVERT_TABLE:
attr = UnitTestGroup.ATTR_CONVERT_TABLE[attr]
return case[attr]
def add_extra_case(self, case):
""" If current group contains all tags required by case, then add succeed """
added = False
if self.accept_new_case():
for key in self.filters:
if self._get_case_attr(case, key) != self.filters[key]:
if key == 'tags':
if set(self._get_case_attr(case, key)).issubset(set(self.filters[key])):
continue
break
else:
self.case_list.append(case)
added = True
return added
def _create_extra_data(self, test_cases, test_function):
"""
For unit test case, we need to copy some attributes of test cases into config file.
So unit test function knows how to run the case.
"""
case_data = []
for case in test_cases:
one_case_data = {
'config': self._get_case_attr(case, 'config'),
'name': self._get_case_attr(case, 'summary'),
'reset': self._get_case_attr(case, 'reset'),
'timeout': self._get_case_attr(case, 'timeout'),
}
if test_function in ['run_multiple_devices_cases', 'run_multiple_stage_cases']:
try:
one_case_data['child case num'] = self._get_case_attr(case, 'child case num')
except KeyError as e:
                    print('multiple devices/stages cases must contain at least two test functions')
print('case name: {}'.format(one_case_data['name']))
raise e
case_data.append(one_case_data)
return case_data
def _divide_case_by_test_function(self):
"""
divide cases of current test group by test function they need to use
:return: dict of list of cases for each test functions
"""
case_by_test_function = {
'run_multiple_devices_cases': [],
'run_multiple_stage_cases': [],
'run_unit_test_cases': [],
}
for case in self.case_list:
if case['multi_device'] == 'Yes':
case_by_test_function['run_multiple_devices_cases'].append(case)
elif case['multi_stage'] == 'Yes':
case_by_test_function['run_multiple_stage_cases'].append(case)
else:
case_by_test_function['run_unit_test_cases'].append(case)
return case_by_test_function
def output(self):
"""
output data for job configs
:return: {"Filter": case filter, "CaseConfig": list of case configs for cases in this group}
"""
target = self._get_case_attr(self.case_list[0], 'chip_target')
if target:
overwrite = {
'dut': {
'package': 'ttfw_idf',
'class': self.DUT_CLS_NAME[target],
}
}
else:
overwrite = dict()
case_by_test_function = self._divide_case_by_test_function()
output_data = {
# we don't need filter for test function, as UT uses a few test functions for all cases
'CaseConfig': [
{
'name': test_function,
'extra_data': self._create_extra_data(test_cases, test_function),
'overwrite': overwrite,
} for test_function, test_cases in case_by_test_function.items() if test_cases
],
}
return output_data
class ExampleAssignTest(IDFAssignTest):
CI_TEST_JOB_PATTERN = re.compile(r'^example_test_.+')
def __init__(self, test_case_path, ci_config_file):
super(ExampleAssignTest, self).__init__(test_case_path, ci_config_file, case_group=ExampleGroup)
class TestAppsAssignTest(IDFAssignTest):
CI_TEST_JOB_PATTERN = re.compile(r'^test_app_test_.+')
def __init__(self, test_case_path, ci_config_file):
super(TestAppsAssignTest, self).__init__(test_case_path, ci_config_file, case_group=TestAppsGroup)
class ComponentUTAssignTest(IDFAssignTest):
CI_TEST_JOB_PATTERN = re.compile(r'^component_ut_test_.+')
def __init__(self, test_case_path, ci_config_file):
super(ComponentUTAssignTest, self).__init__(test_case_path, ci_config_file, case_group=ComponentUTGroup)
class UnitTestAssignTest(IDFAssignTest):
CI_TEST_JOB_PATTERN = re.compile(r'^UT_.+')
def __init__(self, test_case_path, ci_config_file):
super(UnitTestAssignTest, self).__init__(test_case_path, ci_config_file, case_group=UnitTestGroup)
def search_cases(self, case_filter=None):
"""
For unit test case, we don't search for test functions.
The unit test cases is stored in a yaml file which is created in job build-idf-test.
"""
def find_by_suffix(suffix, path):
res = []
for root, _, files in os.walk(path):
for file in files:
if file.endswith(suffix):
res.append(os.path.join(root, file))
return res
def get_test_cases_from_yml(yml_file):
try:
with open(yml_file) as fr:
raw_data = yaml.load(fr, Loader=Loader)
test_cases = raw_data['test cases']
except (IOError, KeyError):
return []
else:
return test_cases
test_cases = []
for path in self.test_case_paths:
if os.path.isdir(path):
for yml_file in find_by_suffix('.yml', path):
test_cases.extend(get_test_cases_from_yml(yml_file))
elif os.path.isfile(path) and path.endswith('.yml'):
test_cases.extend(get_test_cases_from_yml(path))
else:
                print('Test case path is invalid. Should only happen when using @bot to skip unit tests.')
# filter keys are lower case. Do map lower case keys with original keys.
try:
key_mapping = {x.lower(): x for x in test_cases[0].keys()}
except IndexError:
key_mapping = dict()
if case_filter:
for key in case_filter:
filtered_cases = []
for case in test_cases:
try:
mapped_key = key_mapping[key]
# bot converts string to lower case
if isinstance(case[mapped_key], str):
_value = case[mapped_key].lower()
else:
_value = case[mapped_key]
if _value in case_filter[key]:
filtered_cases.append(case)
except KeyError:
                        # case doesn't have this key; regard it as passing the filter
filtered_cases.append(case)
test_cases = filtered_cases
        # sort cases by config and test function
        # in later stages, cases with similar attributes are more likely to be assigned to the same job,
        # which reduces the number of DUT flash operations
test_cases.sort(key=lambda x: x['config'] + x['multi_stage'] + x['multi_device'])
return test_cases
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('case_group', choices=['example_test', 'custom_test', 'unit_test', 'component_ut'])
parser.add_argument('test_case_paths', nargs='+', help='test case folder or file')
parser.add_argument('-c', '--config', help='gitlab ci config file')
parser.add_argument('-o', '--output', help='output path of config files')
parser.add_argument('--pipeline_id', '-p', type=int, default=None, help='pipeline_id')
parser.add_argument('--test-case-file-pattern', help='file name pattern used to find Python test case files')
args = parser.parse_args()
SUPPORTED_TARGETS.extend(PREVIEW_TARGETS)
test_case_paths = [os.path.join(IDF_PATH_FROM_ENV, path) if not os.path.isabs(path) else path for path in args.test_case_paths] # type: ignore
args_list = [test_case_paths, args.config]
if args.case_group == 'example_test':
assigner = ExampleAssignTest(*args_list)
elif args.case_group == 'custom_test':
assigner = TestAppsAssignTest(*args_list)
elif args.case_group == 'unit_test':
assigner = UnitTestAssignTest(*args_list)
elif args.case_group == 'component_ut':
assigner = ComponentUTAssignTest(*args_list)
else:
raise SystemExit(1) # which is impossible
if args.test_case_file_pattern:
assigner.CI_TEST_JOB_PATTERN = re.compile(r'{}'.format(args.test_case_file_pattern))
assigner.assign_cases()
assigner.output_configs(args.output)
assigner.create_artifact_index_file()
|
#!/usr/bin/env python
"""
Toytree viewer.
Created Jan 2021
Copyright (C) <NAME>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import sys, os, io
import numpy as np
import string
try:
from PySide2 import QtCore
from PySide2.QtWidgets import *
from PySide2.QtGui import *
from PySide2.QtCore import QObject, Signal, Slot
except ImportError:
from PyQt5 import QtCore
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import pyqtSignal as Signal, pyqtSlot as Slot
import toytree, toyplot
class TreeViewer(QMainWindow):
"""Phylogeny viewer with toytree"""
def __init__(self):
QMainWindow.__init__(self)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.setWindowTitle("Toytree-viewer")
self.setGeometry(QtCore.QRect(200, 200, 800, 600))
self.setMinimumHeight(150)
self.main = QWidget(self)
self.main.setFocus()
self.setCentralWidget(self.main)
self.add_widgets()
self.create_menu(self)
self.tree = None
self.width = 400
self.height = 500
self.colors = {}
self.default_style = {
"layout":'r',
"edge_type": 'p',
"edge_style": {
"stroke": 'black',
"stroke-width": 2,
},
"tip_labels": True,
"tip_labels_align": True,
"tip_labels_colors": 'black',
"tip_labels_style": {
"font-size": "14px"
},
"node_labels": False,
"node_sizes": 10,
"node_colors": toytree.colors[2],
"node_markers":"c",
"use_edge_lengths":True,
}
self.style = self.default_style.copy()
#self.test_tree(10)
return
def test_tree(self, n=None):
"""Load a test tree"""
        if n is None:
n, ok = QInputDialog().getInt(self, "Test tree",
"Nodes:", 10)
if not ok:
return
self.set_tree(self.random_tree(n=n))
self.height = 200+self.tree.ntips*10
self.update()
return
def random_tree(self, n=12):
"""Make a random tree"""
tre = toytree.rtree.coaltree(n)
## assign random edge lengths and supports to each node
for node in tre.treenode.traverse():
node.dist = np.random.exponential(1)
node.support = int(np.random.uniform(50, 100))
return tre
def save_data(self):
"""Save layers"""
data = tools.get_attributes(self)
data['tree'] = self.tree
return data
def load_data(self, data):
"""Load saved layers"""
try:
self.set_tree(data['tree'])
tools.set_attributes(self, data)
except:
pass
self.update()
return
def create_menu(self, parent):
"""Menu bar"""
self.menubar = self.menuBar()
self.file_menu = QMenu('File', parent)
self.file_menu.addAction('Import Tree', self.load_tree)
self.file_menu.addAction('Load Test Tree', self.test_tree)
self.file_menu.addAction('Export Image', self.export_image)
self.menubar.addMenu(self.file_menu)
self.tree_menu = QMenu('Tree', parent)
self.tree_menu.addAction('Show Unrooted', self.unroot_tree)
self.tree_menu.addAction('Reset Format', self.reset_style)
self.menubar.addMenu(self.tree_menu)
return
def add_widgets(self):
"""Add widgets"""
vbox = QVBoxLayout(self.main)
self.splitter = QSplitter()
vbox.addWidget(self.splitter)
self.splitter.setOrientation(QtCore.Qt.Vertical)
self.splitter.setSizes([300,100])
self.splitter.setStretchFactor(1,0)
#layout.addWidget(self.main)
from PySide2.QtWebEngineWidgets import QWebEngineView
self.browser = QWebEngineView()
self.browser.setMinimumSize(200,200)
self.splitter.addWidget(self.browser)
toolswidget = QWidget()
self.splitter.addWidget(toolswidget)
l = QVBoxLayout(toolswidget)
self.zoomslider = w = QSlider(QtCore.Qt.Horizontal)
w.setSingleStep(5)
w.setMinimum(5)
w.setMaximum(50)
w.setValue(10)
l.addWidget(w)
w.valueChanged.connect(self.zoom)
btn = QPushButton('Set Format')
l.addWidget(btn)
btn.clicked.connect(self.tree_style_options)
t = self.tipitems = QTreeWidget()
t.setHeaderItem(QTreeWidgetItem(["name","visible"]))
t.setColumnWidth(0, 200)
t.setSelectionMode(QAbstractItemView.ExtendedSelection)
t.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
t.customContextMenuRequested.connect(self.show_tree_menu)
l.addWidget(t)
return
def show_tree_menu(self, pos):
"""Show right cick tree menu"""
item = self.tipitems.itemAt( pos )
menu = QMenu(self.tipitems)
colorAction = menu.addAction("Set Color")
rootAction = menu.addAction("Root On")
dropAction = menu.addAction("Drop Tips")
action = menu.exec_(self.tipitems.mapToGlobal(pos))
if action == rootAction:
self.root_tree()
elif action == colorAction:
self.set_color()
elif action == dropAction:
self.drop_tips()
def load_tree(self, filename):
options = QFileDialog.Options()
filter = "newick files (*.newick);;All files (*.*)"
filename, _ = QFileDialog.getOpenFileName(self,"Open tree file",
"",filter=filter,selectedFilter =filter, options=options)
if not filename:
return
self.set_tree(toytree.tree(filename))
return
def set_tree(self, tree):
"""Set a new tree"""
self.tree = tree
self.colors = {}
self.style['tip_labels_colors'] = 'black'
self.tipitems.clear()
for t in self.tree.get_tip_labels():
item = QTreeWidgetItem(self.tipitems)
item.setCheckState(1, QtCore.Qt.Checked)
item.setText(0, t)
return
def update(self):
"""Update the plot"""
        if self.tree is None:
return
#set colors
colorlist = [self.colors[tip] if tip in self.colors else "black" for tip in self.tree.get_tip_labels()]
self.style['tip_labels_colors'] = colorlist
canvas,axes,mark = self.tree.draw(
width=self.width,
height=self.height,
scalebar=True, **self.style)
toyplot.html.render(canvas, "temp.html")
with open('temp.html', 'r') as f:
html = f.read()
self.browser.setHtml(html)
self.canvas = canvas
return
def root_tree(self):
item = self.tipitems.selectedItems()[0]
row = self.tipitems.selectedIndexes()[0].row()
name = item.text(0)
self.tree = self.tree.root(name).ladderize()
self.update()
return
def unroot_tree(self):
self.tree = self.tree.unroot().ladderize()
self.update()
return
def export_image(self):
"""Save tree as image"""
options = QFileDialog.Options()
filter = "png files (*.png);;pdf files (*.pdf);;All files (*.*)"
filename, _ = QFileDialog.getSaveFileName(self,"Save Project",
"",filter=filter,selectedFilter =filter, options=options)
if not filename:
return
ext = os.path.splitext(filename)
print (ext)
from toyplot import png
png.render(self.canvas, filename, width=(4, "inches"))
return
def zoom(self):
zoom = self.zoomslider.value()/10
self.browser.setZoomFactor(zoom)
def tree_style_options(self):
fonts = ['%spx' %i for i in range (6,28)]
markers = ['o','s','d','^','>']
nlabels = ['','idx','support']
tip_labels_style = self.style['tip_labels_style']
opts = {
'layout': {'type':'combobox','default':self.style['layout'],'items':['r','d','c']},
'edge_type': {'type':'combobox','default':self.style['edge_type'],'items':['p','c']},
'tip_labels':{'type':'checkbox','default':self.style['tip_labels'] },
'tip_labels_align':{'type':'checkbox','default':self.style['tip_labels_align'] },
'node_labels':{'type':'combobox','default':self.style['node_labels'],'items': nlabels},
'node_sizes':{'type':'spinbox','default':self.style['node_sizes'],'range':(2,20),'interval':1},
'node_markers': {'type':'combobox','default':self.style['node_markers'],'items':markers},
'font_size':{'type':'combobox','default':tip_labels_style['font-size'],'items':fonts},
'width':{'type':'entry','default':self.width},
'height':{'type':'entry','default':self.height,},
}
dlg = MultipleInputDialog(self, opts, title='Tree Style', width=300)
dlg.exec_()
if not dlg.accepted:
return False
kwds = dlg.values
self.set_style(kwds)
self.update()
return
def set_style(self, kwds):
omit=['width','height','font_size']
for k in kwds:
if k not in omit:
self.style[k] = kwds[k]
if kwds['node_labels'] == '':
self.style['node_labels'] = False
self.style['tip_labels_style']['font-size'] = kwds['font_size']
self.width = kwds['width']
self.height = kwds['height']
self.tree = self.tree.ladderize()
return
def reset_style(self):
self.style = self.default_style
self.colors = {}
print (self.style)
self.update()
def set_color(self, kind='text'):
items = self.tipitems.selectedItems()
names = [i.text(0) for i in items]
qcolor = QColorDialog.getColor()
for item in items:
item.setBackground(0 , qcolor)
for name in names:
if kind == 'text':
self.colors[name] = qcolor.name()
elif kind == 'node':
self.node_colors[name] = qcolor.name()
self.update()
return
def drop_tips(self):
items = self.tipitems.selectedItems()
names = [i.text(0) for i in items]
#for name in names:
self.tree = self.tree.drop_tips(names=names).ladderize()
self.update()
return
class MultipleInputDialog(QDialog):
"""Qdialog with multiple inputs"""
def __init__(self, parent, options=None, title='Input', width=400, height=200):
super(MultipleInputDialog, self).__init__(parent)
self.values = None
self.accepted = False
self.setMinimumSize(width, height)
self.setWindowTitle(title)
dialog, self.widgets = dialogFromOptions(self, options)
vbox = QVBoxLayout(self)
vbox.addWidget(dialog)
buttonbox = QDialogButtonBox(self)
buttonbox.setStandardButtons(QDialogButtonBox.Cancel|QDialogButtonBox.Ok)
buttonbox.button(QDialogButtonBox.Ok).clicked.connect(self.accept)
buttonbox.button(QDialogButtonBox.Cancel).clicked.connect(self.close)
vbox.addWidget(buttonbox)
self.show()
        return
def accept(self):
self.values = getWidgetValues(self.widgets)
self.accepted = True
self.close()
return
def dialogFromOptions(parent, opts, sections=None,
sticky='news', wrap=2, section_wrap=2):
"""Get Qt widgets dialog from a dictionary of options"""
sizepolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
sizepolicy.setHorizontalStretch(1)
sizepolicy.setVerticalStretch(0)
style = '''
QLabel {
font-size: 12px;
}
QWidget {
max-width: 130px;
min-width: 30px;
font-size: 14px;
}
QPlainTextEdit {
max-height: 80px;
}
'''
    if sections is None:
sections = {'options': opts.keys()}
widgets = {}
dialog = QWidget(parent)
dialog.setSizePolicy(sizepolicy)
l = QGridLayout(dialog)
l.setSpacing(2)
l.setAlignment(QtCore.Qt.AlignLeft)
scol=1
srow=1
for s in sections:
row=1
col=1
f = QGroupBox()
f.setSizePolicy(sizepolicy)
f.setTitle(s)
#f.resize(50,100)
#f.sizeHint()
l.addWidget(f,srow,scol)
gl = QGridLayout(f)
gl.setAlignment(QtCore.Qt.AlignTop)
srow+=1
#gl.setSpacing(10)
for o in sections[s]:
label = o
val = None
opt = opts[o]
if 'label' in opt:
label = opt['label']
val = opt['default']
t = opt['type']
lbl = QLabel(label)
gl.addWidget(lbl,row,col)
lbl.setStyleSheet(style)
if t == 'combobox':
w = QComboBox()
w.addItems(opt['items'])
#w.view().setMinListWidth(100)
try:
w.setCurrentIndex(opt['items'].index(str(opt['default'])))
except:
w.setCurrentIndex(0)
elif t == 'entry':
w = QLineEdit()
w.setText(str(val))
elif t == 'textarea':
w = QPlainTextEdit()
#w.setSizePolicy(sizepolicy)
w.insertPlainText(str(val))
elif t == 'slider':
w = QSlider(QtCore.Qt.Horizontal)
s,e = opt['range']
w.setTickInterval(opt['interval'])
w.setSingleStep(opt['interval'])
w.setMinimum(s)
w.setMaximum(e)
w.setTickPosition(QSlider.TicksBelow)
w.setValue(val)
elif t == 'spinbox':
if type(val) is float:
w = QDoubleSpinBox()
else:
w = QSpinBox()
w.setValue(val)
if 'range' in opt:
min,max=opt['range']
w.setRange(min,max)
w.setMinimum(min)
if 'interval' in opt:
w.setSingleStep(opt['interval'])
elif t == 'checkbox':
w = QCheckBox()
w.setChecked(val)
elif t == 'font':
w = QFontComboBox()
w.resize(w.sizeHint())
w.setCurrentIndex(1)
col+=1
gl.addWidget(w,row,col)
w.setStyleSheet(style)
widgets[o] = w
#print (o, row, col)
if col>=wrap:
col=1
row+=1
else:
col+=2
if scol >= section_wrap:
scol=1
else:
scol+=1
return dialog, widgets
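# Sketch of the `opts` structure dialogFromOptions() expects (the values are illustrative,
# mirroring tree_style_options above):
#   opts = {'layout': {'type': 'combobox', 'default': 'r', 'items': ['r', 'd', 'c']},
#           'width':  {'type': 'entry', 'default': 400}}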
def getWidgetValues(widgets):
"""Get values back from a set of widgets"""
kwds = {}
for i in widgets:
val = None
if i in widgets:
w = widgets[i]
if type(w) is QLineEdit:
try:
val = float(w.text())
except:
val = w.text()
elif type(w) is QPlainTextEdit:
val = w.toPlainText()
elif type(w) is QComboBox or type(w) is QFontComboBox:
val = w.currentText()
elif type(w) is QCheckBox:
val = w.isChecked()
elif type(w) is QSlider:
val = w.value()
elif type(w) in [QSpinBox,QDoubleSpinBox]:
val = w.value()
            if val is not None:
                kwds[i] = val
    return kwds
def setWidgetValues(widgets, values):
"""Set values for a set of widgets from a dict"""
kwds = {}
for i in values:
val = values[i]
if i in widgets:
#print (i, val, type(val))
w = widgets[i]
if type(w) is QLineEdit:
w.setText(str(val))
elif type(w) is QPlainTextEdit:
w.insertPlainText(str(val))
elif type(w) is QComboBox or type(w) is QFontComboBox:
w.setCurrentIndex(1)
elif type(w) is QCheckBox:
w.setChecked(val)
elif type(w) is QSlider:
w.setValue(val)
elif type(w) is QSpinBox:
w.setValue(val)
return
def main():
"Run the application"
QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_ShareOpenGLContexts)
app = QApplication(sys.argv)
aw = TreeViewer()
aw.show()
app.exec_()
if __name__ == '__main__':
main()
|
"""Utilities for training the dependency parser.
You do not need to read/understand this code
"""
import time
import os
import logging
from collections import Counter
from general_utils import get_minibatches
from q2_parser_transitions import minibatch_parse
import numpy as np
P_PREFIX = '<p>:'
L_PREFIX = '<l>:'
UNK = '<UNK>'
NULL = '<NULL>'
ROOT = '<ROOT>'
class Config(object):
language = 'english'
with_punct = True
unlabeled = True
lowercase = True
use_pos = True
use_dep = True
use_dep = use_dep and (not unlabeled)
data_path = './data'
train_file = 'train.conll'
dev_file = 'dev.conll'
test_file = 'test.conll'
embedding_file = './data/en-cw.txt'
class Parser(object):
"""Contains everything needed for transition-based dependency parsing except for the model"""
def __init__(self, dataset):
root_labels = list([l for ex in dataset
for (h, l) in zip(ex['head'], ex['label']) if h == 0])
counter = Counter(root_labels)
if len(counter) > 1:
logging.info('Warning: more than one root label')
logging.info(counter)
self.root_label = counter.most_common()[0][0]
deprel = [self.root_label] + list(set([w for ex in dataset
for w in ex['label']
if w != self.root_label]))
tok2id = {L_PREFIX + l: i for (i, l) in enumerate(deprel)}
tok2id[L_PREFIX + NULL] = self.L_NULL = len(tok2id)
config = Config()
self.unlabeled = config.unlabeled
self.with_punct = config.with_punct
self.use_pos = config.use_pos
self.use_dep = config.use_dep
self.language = config.language
if self.unlabeled:
trans = ['L', 'R', 'S']
self.n_deprel = 1
else:
trans = ['L-' + l for l in deprel] + ['R-' + l for l in deprel] + ['S']
self.n_deprel = len(deprel)
self.n_trans = len(trans)
self.tran2id = {t: i for (i, t) in enumerate(trans)}
self.id2tran = {i: t for (i, t) in enumerate(trans)}
# logging.info('Build dictionary for part-of-speech tags.')
tok2id.update(build_dict([P_PREFIX + w for ex in dataset for w in ex['pos']],
offset=len(tok2id)))
tok2id[P_PREFIX + UNK] = self.P_UNK = len(tok2id)
tok2id[P_PREFIX + NULL] = self.P_NULL = len(tok2id)
tok2id[P_PREFIX + ROOT] = self.P_ROOT = len(tok2id)
# logging.info('Build dictionary for words.')
tok2id.update(build_dict([w for ex in dataset for w in ex['word']],
offset=len(tok2id)))
tok2id[UNK] = self.UNK = len(tok2id)
tok2id[NULL] = self.NULL = len(tok2id)
tok2id[ROOT] = self.ROOT = len(tok2id)
self.tok2id = tok2id
self.id2tok = {v: k for (k, v) in tok2id.items()}
self.n_features = 18 + (18 if config.use_pos else 0) + (12 if config.use_dep else 0)
self.n_tokens = len(tok2id)
def vectorize(self, examples):
vec_examples = []
for ex in examples:
word = [self.ROOT] + [self.tok2id[w] if w in self.tok2id
else self.UNK for w in ex['word']]
pos = [self.P_ROOT] + [self.tok2id[P_PREFIX + w] if P_PREFIX + w in self.tok2id
else self.P_UNK for w in ex['pos']]
head = [-1] + ex['head']
label = [-1] + [self.tok2id[L_PREFIX + w] if L_PREFIX + w in self.tok2id
else -1 for w in ex['label']]
vec_examples.append({'word': word, 'pos': pos,
'head': head, 'label': label})
return vec_examples
def extract_features(self, stack, buf, arcs, ex):
if stack[0] == "ROOT":
stack[0] = 0
def get_lc(k):
return sorted([arc[1] for arc in arcs if arc[0] == k and arc[1] < k])
def get_rc(k):
return sorted([arc[1] for arc in arcs if arc[0] == k and arc[1] > k],
reverse=True)
p_features = []
l_features = []
features = [self.NULL] * (3 - len(stack)) + [ex['word'][x] for x in stack[-3:]]
features += [ex['word'][x] for x in buf[:3]] + [self.NULL] * (3 - len(buf))
if self.use_pos:
p_features = [self.P_NULL] * (3 - len(stack)) + [ex['pos'][x] for x in stack[-3:]]
p_features += [ex['pos'][x] for x in buf[:3]] + [self.P_NULL] * (3 - len(buf))
for i in xrange(2):
if i < len(stack):
k = stack[-i-1]
lc = get_lc(k)
rc = get_rc(k)
llc = get_lc(lc[0]) if len(lc) > 0 else []
rrc = get_rc(rc[0]) if len(rc) > 0 else []
features.append(ex['word'][lc[0]] if len(lc) > 0 else self.NULL)
features.append(ex['word'][rc[0]] if len(rc) > 0 else self.NULL)
features.append(ex['word'][lc[1]] if len(lc) > 1 else self.NULL)
features.append(ex['word'][rc[1]] if len(rc) > 1 else self.NULL)
features.append(ex['word'][llc[0]] if len(llc) > 0 else self.NULL)
features.append(ex['word'][rrc[0]] if len(rrc) > 0 else self.NULL)
if self.use_pos:
p_features.append(ex['pos'][lc[0]] if len(lc) > 0 else self.P_NULL)
p_features.append(ex['pos'][rc[0]] if len(rc) > 0 else self.P_NULL)
p_features.append(ex['pos'][lc[1]] if len(lc) > 1 else self.P_NULL)
p_features.append(ex['pos'][rc[1]] if len(rc) > 1 else self.P_NULL)
p_features.append(ex['pos'][llc[0]] if len(llc) > 0 else self.P_NULL)
p_features.append(ex['pos'][rrc[0]] if len(rrc) > 0 else self.P_NULL)
if self.use_dep:
l_features.append(ex['label'][lc[0]] if len(lc) > 0 else self.L_NULL)
l_features.append(ex['label'][rc[0]] if len(rc) > 0 else self.L_NULL)
l_features.append(ex['label'][lc[1]] if len(lc) > 1 else self.L_NULL)
l_features.append(ex['label'][rc[1]] if len(rc) > 1 else self.L_NULL)
l_features.append(ex['label'][llc[0]] if len(llc) > 0 else self.L_NULL)
l_features.append(ex['label'][rrc[0]] if len(rrc) > 0 else self.L_NULL)
else:
features += [self.NULL] * 6
if self.use_pos:
p_features += [self.P_NULL] * 6
if self.use_dep:
l_features += [self.L_NULL] * 6
features += p_features + l_features
assert len(features) == self.n_features
return features
def get_oracle(self, stack, buf, ex):
if len(stack) < 2:
return self.n_trans - 1
i0 = stack[-1]
i1 = stack[-2]
h0 = ex['head'][i0]
h1 = ex['head'][i1]
l0 = ex['label'][i0]
l1 = ex['label'][i1]
if self.unlabeled:
if (i1 > 0) and (h1 == i0):
return 0
elif (i1 >= 0) and (h0 == i1) and \
(not any([x for x in buf if ex['head'][x] == i0])):
return 1
else:
return None if len(buf) == 0 else 2
else:
if (i1 > 0) and (h1 == i0):
return l1 if (l1 >= 0) and (l1 < self.n_deprel) else None
elif (i1 >= 0) and (h0 == i1) and \
(not any([x for x in buf if ex['head'][x] == i0])):
return l0 + self.n_deprel if (l0 >= 0) and (l0 < self.n_deprel) else None
else:
return None if len(buf) == 0 else self.n_trans - 1
def create_instances(self, examples):
all_instances = []
succ = 0
for id, ex in enumerate(examples):
n_words = len(ex['word']) - 1
# arcs = {(h, t, label)}
stack = [0]
buf = [i + 1 for i in xrange(n_words)]
arcs = []
instances = []
for i in xrange(n_words * 2):
gold_t = self.get_oracle(stack, buf, ex)
if gold_t is None:
break
legal_labels = self.legal_labels(stack, buf)
assert legal_labels[gold_t] == 1
instances.append((self.extract_features(stack, buf, arcs, ex),
legal_labels, gold_t))
if gold_t == self.n_trans - 1:
stack.append(buf[0])
buf = buf[1:]
elif gold_t < self.n_deprel:
arcs.append((stack[-1], stack[-2], gold_t))
stack = stack[:-2] + [stack[-1]]
else:
arcs.append((stack[-2], stack[-1], gold_t - self.n_deprel))
stack = stack[:-1]
else:
succ += 1
all_instances += instances
return all_instances
def legal_labels(self, stack, buf):
labels = ([1] if len(stack) > 2 else [0]) * self.n_deprel
labels += ([1] if len(stack) >= 2 else [0]) * self.n_deprel
labels += [1] if len(buf) > 0 else [0]
return labels
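    # Sketch: in the unlabeled setting (n_deprel == 1) this is a length-3 legality mask
    # [LEFT-ARC ok, RIGHT-ARC ok, SHIFT ok], matching the transition order ['L', 'R', 'S']
    # built in __init__.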
def parse(self, dataset, eval_batch_size=5000):
sentences = []
sentence_id_to_idx = {}
for i, example in enumerate(dataset):
n_words = len(example['word']) - 1
sentence = [j + 1 for j in range(n_words)]
sentences.append(sentence)
sentence_id_to_idx[id(sentence)] = i
model = ModelWrapper(self, dataset, sentence_id_to_idx)
dependencies = minibatch_parse(sentences, model, eval_batch_size)
UAS = all_tokens = 0.0
for i, ex in enumerate(dataset):
head = [-1] * len(ex['word'])
for h, t, in dependencies[i]:
head[t] = h
for pred_h, gold_h, gold_l, pos in \
zip(head[1:], ex['head'][1:], ex['label'][1:], ex['pos'][1:]):
assert self.id2tok[pos].startswith(P_PREFIX)
pos_str = self.id2tok[pos][len(P_PREFIX):]
if (self.with_punct) or (not punct(self.language, pos_str)):
UAS += 1 if pred_h == gold_h else 0
all_tokens += 1
UAS /= all_tokens
return UAS, dependencies
class ModelWrapper(object):
def __init__(self, parser, dataset, sentence_id_to_idx):
self.parser = parser
self.dataset = dataset
self.sentence_id_to_idx = sentence_id_to_idx
def predict(self, partial_parses):
mb_x = [self.parser.extract_features(p.stack, p.buffer, p.dependencies,
self.dataset[self.sentence_id_to_idx[id(p.sentence)]])
for p in partial_parses]
mb_x = np.array(mb_x).astype('int32')
mb_l = [self.parser.legal_labels(p.stack, p.buffer) for p in partial_parses]
pred = self.parser.model.predict_on_batch(self.parser.session, mb_x)
pred = np.argmax(pred + 10000 * np.array(mb_l).astype('float32'), 1)
pred = ["S" if p == 2 else ("LA" if p == 0 else "RA") for p in pred]
return pred
def read_conll(in_file, lowercase=False, max_example=None):
examples = []
with open(in_file) as f:
word, pos, head, label = [], [], [], []
for line in f.readlines():
sp = line.strip().split('\t')
if len(sp) == 10:
if '-' not in sp[0]:
word.append(sp[1].lower() if lowercase else sp[1])
pos.append(sp[4])
head.append(int(sp[6]))
label.append(sp[7])
elif len(word) > 0:
examples.append({'word': word, 'pos': pos, 'head': head, 'label': label})
word, pos, head, label = [], [], [], []
if (max_example is not None) and (len(examples) == max_example):
break
if len(word) > 0:
examples.append({'word': word, 'pos': pos, 'head': head, 'label': label})
return examples
def build_dict(keys, n_max=None, offset=0):
count = Counter()
for key in keys:
count[key] += 1
ls = count.most_common() if n_max is None \
else count.most_common(n_max)
return {w[0]: index + offset for (index, w) in enumerate(ls)}
def punct(language, pos):
if language == 'english':
return pos in ["''", ",", ".", ":", "``", "-LRB-", "-RRB-"]
elif language == 'chinese':
return pos == 'PU'
elif language == 'french':
return pos == 'PUNC'
elif language == 'german':
return pos in ["$.", "$,", "$["]
elif language == 'spanish':
# http://nlp.stanford.edu/software/spanish-faq.shtml
return pos in ["f0", "faa", "fat", "fc", "fd", "fe", "fg", "fh",
"fia", "fit", "fp", "fpa", "fpt", "fs", "ft",
"fx", "fz"]
elif language == 'universal':
return pos == 'PUNCT'
else:
raise ValueError('language: %s is not supported.' % language)
def minibatches(data, batch_size):
x = np.array([d[0] for d in data])
y = np.array([d[2] for d in data])
one_hot = np.zeros((y.size, 3))
one_hot[np.arange(y.size), y] = 1
return get_minibatches([x, one_hot], batch_size)
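# Sketch of the shapes involved: each element of `data` is
# (features, legal_labels, gold_transition); minibatches() yields [x, y] pairs where
# x has shape (batch_size, n_features) and y is a one-hot array of shape (batch_size, 3),
# one column per transition type.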
def load_and_preprocess_data(reduced=True):
config = Config()
print "Loading data...",
start = time.time()
train_set = read_conll(os.path.join(config.data_path, config.train_file),
lowercase=config.lowercase)
dev_set = read_conll(os.path.join(config.data_path, config.dev_file),
lowercase=config.lowercase)
test_set = read_conll(os.path.join(config.data_path, config.test_file),
lowercase=config.lowercase)
if reduced:
train_set = train_set[:1000]
dev_set = dev_set[:500]
test_set = test_set[:500]
print "took {:.2f} seconds".format(time.time() - start)
print "Building parser...",
start = time.time()
parser = Parser(train_set)
print "took {:.2f} seconds".format(time.time() - start)
print "Loading pretrained embeddings...",
start = time.time()
word_vectors = {}
for line in open(config.embedding_file).readlines():
sp = line.strip().split()
word_vectors[sp[0]] = [float(x) for x in sp[1:]]
embeddings_matrix = np.asarray(np.random.normal(0, 0.9, (parser.n_tokens, 50)), dtype='float32')
for token in parser.tok2id:
i = parser.tok2id[token]
if token in word_vectors:
embeddings_matrix[i] = word_vectors[token]
elif token.lower() in word_vectors:
embeddings_matrix[i] = word_vectors[token.lower()]
print "took {:.2f} seconds".format(time.time() - start)
print "Vectorizing data...",
start = time.time()
train_set = parser.vectorize(train_set)
dev_set = parser.vectorize(dev_set)
test_set = parser.vectorize(test_set)
print "took {:.2f} seconds".format(time.time() - start)
print "Preprocessing training data...",
start = time.time()
train_examples = parser.create_instances(train_set)
print "took {:.2f} seconds".format(time.time() - start)
return parser, embeddings_matrix, train_examples, dev_set, test_set,
if __name__ == '__main__':
pass
|
"""
Tests that obiwan runs end to end and gives reasonable outputs for a variety of
cases. Travis CI runs this script.
"""
from __future__ import print_function
if __name__ == "__main__":
import matplotlib
matplotlib.use('Agg')
import unittest
import run_200x200_pixel_regions as tools
#from run_200x200_pixel_regions import run_kenobi_main, run_kenobi_main_cosmos
#from run_200x200_pixel_regions import test_flux_shape_measurements
#from run_200x200_pixel_regions import test_detected_simulated_and_real_sources
#from run_200x200_pixel_regions import test_draw_circles_around_sources_check_by_eye
class run_and_analyze(object):
def __init__(self, survey=None, dataset=None,
bands='grz', obj='elg',rowstart=0,
add_noise=False,all_blobs=False,
on_edge=False, early_coadds=False,
checkpoint=False, skip_ccd_cuts=False):
"""
Args:
survey: one of SURVEYS
dataset: one of DATASETS
z, grz: to run the z and/or grz testcases
all_blobs: to fit models to all blobs, not just the blobs containing sims
add_noise: to add Poisson noise to simulated galaxy profiles
on_edge: to add randoms at edge of region, not well within the boundaries
early_coadds: write coadds before model fitting and stop there
"""
assert(bands in ['z','grz'])
d= locals()
del d['self']
self.params= dict(d)
def run(self):
d= dict(self.params)
if d['checkpoint']:
# create checkpoint file
d.update(no_cleanup=True,stage='fitblobs')
R= tools.run_kenobi_main(**d)
R.run()
if d['checkpoint']:
# restart from checkpoint and finish
d.update(no_cleanup=False,stage=None)
R= tools.run_kenobi_main(**d)
R.run()
def analyze(self):
d= dict(self.params)
if not d['early_coadds']:
tools.test_flux_shape_measurements(**d)
tools.test_detected_simulated_and_real_sources(**d)
tools.test_draw_circles_around_sources_check_by_eye(**d)
class test_main(unittest.TestCase):
def test_decals(self):
Test= run_and_analyze(survey='decals',
all_blobs=False,on_edge=False,
early_coadds=False,
checkpoint=False,skip_ccd_cuts=False)
Test.params.update(dataset='dr5',bands='grz')
Test.run()
Test.analyze()
Test.params.update(bands='z')
Test.run()
Test.analyze()
Test.params.update(early_coadds=True)
Test.run()
Test.analyze()
Test.params.update(early_coadds=False,all_blobs=True)
Test.run()
Test.analyze()
Test.params.update(all_blobs=False,on_edge=True)
Test.run()
Test.analyze()
Test.params.update(on_edge=False,obj='star')
Test.run()
Test.analyze()
# Test.params.update(dataset='dr5',obj='elg',checkpoint=True)
# Test.run()
# Test.analyze()
Test.params.update(dataset='dr3',bands='grz',obj='elg')
Test.run()
Test.analyze()
Test.params.update(bands='z')
Test.run()
Test.analyze()
# Above must simply complete w/out error
self.assertTrue(True)
def test_bass_mzls(self):
Test= run_and_analyze(survey='bass_mzls',obj='elg',
skip_ccd_cuts=True,
all_blobs=False,on_edge=False,
early_coadds=False,
checkpoint=False)
Test.params.update(dataset='dr6',bands='grz')
Test.run()
Test.analyze()
# Above must simply complete w/out error
self.assertTrue(True)
def test_cosmos_subset_60(self):
# runs a 200x200 pixel region but on the full 0.5 GB images
t= tools.run_kenobi_main_cosmos(survey='decals')
t.run()
        print('WARNING: no analyze method exists for testcase_cosmos')
        print('adapt run_and_analyze().analyze()')
# Above must simply complete w/out error
self.assertTrue(True)
if __name__ == "__main__":
unittest.main()
|
import wpilib
from wpilib import XboxController
from wpilib import SpeedControllerGroup
from wpilib.command import Command
from wpilib.command import Subsystem
from wpilib.drive import DifferentialDrive
from wpilib.interfaces import GenericHID
from wpilib.interfaces import SpeedController
from wpilib.smartdashboard import SmartDashboard
import commandbased
from commands.performance import Performance
class Drive(wpilib.command.subsystem.Subsystem):
"""
Minimal drive with two PWM motor controllers.
"""
leftMotor: SpeedController = None
rightMotor: SpeedController = None
def __init__(self, id: int, leftMotor: SpeedController, rightMotor: SpeedController):
super().__init__("Drive")
self.leftMotor = leftMotor
self.rightMotor = rightMotor
def setPower(self, left, right):
self.leftMotor.set(left)
self.rightMotor.set(right)
class DrivePower(wpilib.command.Command):
""" Minimal command to control drive subsystem. """
def __init__(self, drive: Drive, leftPower: float, rightPower: float):
super().__init__("DrivePower " + str(drive.leftMotor.getChannel()) + "," + str(drive.rightMotor.getChannel()))
self.requires(drive)
self.leftPower: float = leftPower
self.rightPower: float = rightPower
self.drive: Drive = drive
def execute(self):
self.drive.setPower(self.leftPower, self.rightPower)
def isFinished(self):
return False
def interrupted(self):
self.drive.setPower(0, 0)
def end(self):
self.interrupted()
class DriveArcade(wpilib.command.Command):
""" Command to control drive using gamepad in arcade mode. """
diffDrive: DifferentialDrive
gamePad: GenericHID
nominal: float
def __init__(self, drive: Drive, leftMotors: SpeedControllerGroup, rightMotors: SpeedControllerGroup, gamePad: GenericHID):
super().__init__("DriveArcade")
self.requires(drive)
self.gamePad = gamePad
self.diffDrive = DifferentialDrive(leftMotors, rightMotors)
self.nominal = 3.0 / 8.0
self.deadband = 0.15 # XBox360 controller has a lot of slop
def tweak(self, val, scale):
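        # Shape raw stick input into a motor command: apply the deadband, scale
        # and compress to leave headroom for the nominal offset, square the
        # magnitude (keeping the sign) for finer low-speed control, then add
        # the nominal offset so small commands still move the robot.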
tweaked: float = DifferentialDrive.applyDeadband(val, self.deadband)
if tweaked == 0.0:
# User input value in deadband, just return 0.0
return tweaked
tweaked *= scale * (1 - self.nominal)
if tweaked > 0:
tweaked = (tweaked * tweaked) + self.nominal
else:
tweaked = (tweaked * -tweaked) - self.nominal
return tweaked
def initialize(self):
self.diffDrive.setSafetyEnabled(True)
def execute(self):
throttle: float = self.tweak(-self.gamePad.getY(GenericHID.Hand.kLeft), 1.0)
rotation: float = self.tweak(self.gamePad.getX(GenericHID.Hand.kRight), 0.5)
#print("Throttle", throttle, "Rotation", rotation)
self.diffDrive.arcadeDrive(throttle, rotation, False)
def isFinished(self):
return False
def interrupted(self):
self.diffDrive.stopMotor()
self.diffDrive.setSafetyEnabled(False)
def end(self):
self.interrupted()
class MyRobot(commandbased.CommandBasedRobot):
"""
Minimal implementation of a robot program using the command based framework.
"""
# Set to True to enable arcade drive subsystem and command using gamepad where
# PWM 0, 1 go with left side and PWM 2, 3 go with right side
enableDrive: bool = True
# Set to number of additional PWM subsystems to create (set to 0 for no extra subsystems,
# 8 is max if drive enabled, 10 if not)
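    # (these limits presumably assume the MXP expansion PWM channels 10-19 are usable, i.e. 20 PWM channels total)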
numSubsystems: int = 1
# Used to track performance
performance: Performance = None
def robotInit(self):
""" Initalizes all subsystems and user controls. """
if self.numSubsystems > 0:
# Create drive subsystems
pwmOfs: int = 0
if self.enableDrive:
leftMotors = SpeedControllerGroup(wpilib.VictorSP(0), wpilib.VictorSP(1))
rightMotors = SpeedControllerGroup(wpilib.VictorSP(2), wpilib.VictorSP(3))
gamePad: GenericHID = XboxController(0)
drive: Drive = Drive(99, leftMotors, rightMotors)
drive.setDefaultCommand(DriveArcade(drive, leftMotors, rightMotors, gamePad))
pwmOfs += 4
for i in range(0, self.numSubsystems):
pwm: int = pwmOfs + i * 2
leftMotor: SpeedController = wpilib.VictorSP(pwm)
rightMotor: SpeedController = wpilib.VictorSP(pwm + 1)
drive = Drive(i, leftMotor, rightMotor)
SmartDashboard.putData("Forward " + str(i), DrivePower(drive, 0.2, 0.2))
SmartDashboard.putData("Backward " + str(i), DrivePower(drive, -0.2, -0.2))
# Add command to dashboard to track time for one periodic pass
self.performance = Performance()
SmartDashboard.putData("Measure Performance", self.performance)
def loopFunc(self):
""" Override base implementation so we can peek at how long each iteration takes. """
super().loopFunc()
# Record how long it took to run iteration of loop
        if self.performance is not None:
self.performance.updateRunTime(self.watchdog.getTime())
if __name__ == "__main__":
wpilib.run(MyRobot)
|
<filename>inventory/accounts/admin.py
# -*- coding: utf-8 -*-
#
# inventory/accounts/admin.py
#
"""
Accounts admin.
"""
__docformat__ = "restructuredtext en"
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from django.contrib.auth.admin import UserAdmin as DjangoUserAdmin
from django.contrib.auth import get_user_model
from inventory.common.admin_mixins import UserAdminMixin
from inventory.projects.models import Membership
from .models import Question, Answer
from .forms import QuestionForm, AnswerForm
#
# Membership
#
class MembershipInline(admin.TabularInline):
fields = ('project', 'role',)
extra = 0
can_delete = True
model = Membership
#
# User
#
class UserAdmin(DjangoUserAdmin):
fieldsets = (
(None, {'fields': ('public_id', 'username', 'password',)}),
(_("Personal Info"), {'fields': ('picture', 'first_name', 'last_name',
'address_01', 'address_02', 'city',
'subdivision', 'postal_code',
'country', 'dob', 'email', 'language',
'timezone', 'project_default')}),
(_("Permissions"), {'classes': ('collapse',),
'fields': ('_role', 'is_active', 'is_staff',
'is_superuser', 'groups',
'user_permissions',)}),
(_("Status"), {'classes': ('collapse',),
'fields': ('send_email', 'need_password', 'last_login',
'date_joined',)}),
)
readonly_fields = ('public_id', 'last_login', 'date_joined',)
list_display = ('image_thumb_producer', 'public_id', 'username', 'email',
'first_name', 'last_name', 'projects_producer', '_role',
'is_staff', 'is_active', 'image_url_producer',)
list_editable = ('is_staff', 'is_active', '_role',)
search_fields = ('username', 'last_name', 'email', 'public_id',)
filter_horizontal = ('groups', 'user_permissions',)
inlines = (MembershipInline,)
## class Media:
## js = ('js/js.cookie-2.0.4.min.js',
## 'js/inheritance.js',
## 'js/regions.js',)
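# Register the active user model (custom or Django's default) with the customized admin.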
admin.site.register(get_user_model(), UserAdmin)
#
# Question
#
@admin.register(Question)
class QuestionAdmin(UserAdminMixin, admin.ModelAdmin):
fieldsets = (
(None, {'fields': ('public_id', 'question',)}),
(_('Status'), {'classes': ('collapse',),
'fields': ('active', 'creator', 'created', 'updater',
'updated',)}),
)
readonly_fields = ('public_id', 'creator', 'created', 'updater',
'updated',)
list_display = ('question', 'active', 'updater_producer', 'updated',)
list_editable = ('active',)
list_filter = ('active', 'updater__username',)
search_fields = ('question', 'public_id',)
form = QuestionForm
#
# Answer
#
@admin.register(Answer)
class AnswerAdmin(UserAdminMixin, admin.ModelAdmin):
fieldsets = (
(None, {'fields': ('public_id', 'user', 'question', 'answer',)}),
(_('Status'), {'classes': ('collapse',),
'fields': ('creator', 'created', 'updater',
'updated',)}),
)
readonly_fields = ('public_id', 'creator', 'created', 'updater',
'updated',)
list_display = ('question', 'user', 'updater_producer', 'updated',)
list_filter = ('user__username',)
search_fields = ('user__username', 'question__question', 'public_id',)
form = AnswerForm
|
<filename>codegen_workspace/detection2onnx.py<gh_stars>0
import torch
import sys
# import os
# sys.path.append(os.environ["HOME"]+"/vision/")
import torchvision # https://pytorch.org/vision/stable/models.html
from pathlib import Path
from torch.onnx import TrainingMode
import onnx
import argparse
import warnings
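# Each entry below maps a torchvision detection model name to a tuple of
# (model instance, dummy input shape (N, C, H, W), (num_classes, ground-truth boxes per image)).
# The trailing comments record ops that are known to break the torch->onnx export for that model.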
get_model={
# Object Detection, Instance Segmentation and Person Keypoint Detection
"fasterrcnn_resnet50_fpn": (torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False, ), (2, 3, 600, 1000), (91, 11)), # pytorch->onnx fails: randperm
"fasterrcnn_mobilenet_v3_large_fpn": (torchvision.models.detection.fasterrcnn_mobilenet_v3_large_fpn(pretrained=False, pretrained_backbone=False, ), (2, 3, 600, 1000), (91, 11)), # pytorch->onnx fails: randperm
"fasterrcnn_mobilenet_v3_large_320_fpn": (torchvision.models.detection.fasterrcnn_mobilenet_v3_large_320_fpn(pretrained=False, pretrained_backbone=False, ), (2, 3, 600, 1000), (91, 11)), # pytorch->onnx fails: randperm
"retinanet_resnet50_fpn": (torchvision.models.detection.retinanet_resnet50_fpn(pretrained=False, pretrained_backbone=False, ), (2, 3, 224, 224), (91, 11)), # pytorch->onnx fails: l1_loss
"ssd300_vgg16": (torchvision.models.detection.ssd300_vgg16(pretrained=False, pretrained_backbone=False, ), (4, 3, 300, 300), (91, 11)), # pytorch->onnx fails when training: smooth_l1_loss; fails when eval: resolve_conj
"ssdlite320_mobilenet_v3_large": (torchvision.models.detection.ssdlite320_mobilenet_v3_large(pretrained=False, pretrained_backbone=False, ), (24, 3, 320, 320), (91, 11)), # pytorch->onnx fails: randperm; fails when eval: resolve_conj
"maskrcnn_resnet50_fpn": (torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False, ), (2, 3, 224, 224), (91, 11)), # pytorch->onnx fails: randperm; fails when eval: resolve_conj
# in .local/lib/python3.7/site-packages/torchvision/models/detection/transform.py:
# resized_data[:, :, 0] --> resized_data[:, 0] due to no batch dimension
# "keypointrcnn_resnet50_fpn": (torchvision.models.detection.keypointrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False, trainable_backbone_layers=5), (2, 3, 224, 224), (2, 11)), # pytorch->onnx fails: randperm; fails when eval: resolve_conj
}
def infer_shapes(model, inputs, batch):
def build_shape_dict(name, tensor, is_input, batch):
print(name, "'s shape", tensor[0].shape)
if isinstance(tensor, (tuple, list)):
return [build_shape_dict(name, t, is_input, batch) for t in tensor]
else:
            # Assume the batch axis is the one whose size equals the batch size (this may not always hold)
if len(tensor.shape) > 0:
axes = {[axis for axis, numel in enumerate(tensor.shape) if numel == batch][0]: "batch"}
else:
axes = {}
print(f"Found {'input' if is_input else 'output'} {name} with shape: {axes}")
return axes
# Generate input names & axes
input_dynamic_axes = {k: build_shape_dict(k, v, True, batch) for k, v in inputs.items()}
print("input_dynamic_axes", input_dynamic_axes)
# Generate output names & axes
loss = model(**inputs)
outputs = {'loss': loss}
output_dynamic_axes = {k: build_shape_dict(k, v, False, batch) for k, v in outputs.items()}
print("output_dynamic_axes", output_dynamic_axes)
# Create the aggregated axes representation
dynamic_axes = dict(input_dynamic_axes, **output_dynamic_axes)
print("dynamic_axes:", dynamic_axes)
return dynamic_axes
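# Note: infer_shapes is currently unused (it is only referenced in the commented-out
# lines in __main__ below); a sketch of wiring it in would pass its result as the
# dynamic_axes argument of torch.onnx.export so the batch axis stays dynamic in the graph.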
class WrapperModel(torch.nn.Module):
def __init__(self, model):
super(WrapperModel, self).__init__()
self._model = model
self.loss = torch.nn.BCEWithLogitsLoss(reduction='sum')
def forward(self, images, targets):
out = self._model(images, targets)
if self.training:
total_loss = 0
for loss in out.values():
total_loss += loss
return total_loss
else:
total_loss = 0
if isinstance(out, dict):
for loss in out.values():
total_loss += loss
else:
for output in out:
total_loss += output.sum()
return total_loss
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--model_name", type=str, default=None, help="torchvision model name")
parser.add_argument("--batch_size", type=int, default=0, help="batch size")
args = parser.parse_args()
    if args.model_name is None:
model_names = get_model.keys()
else:
model_names = args.model_name.split(',')
for args.model_name in model_names:
torchvision_model, (batch_size, channels, height, width), (num_classes, ground_truth_box) = get_model[args.model_name]
if args.batch_size > 0:
batch_size = args.batch_size
dummy_images = torch.randn(batch_size, channels, height, width)
dummy_boxes = torch.zeros((batch_size, ground_truth_box, 4))
if height < width:
dummy_boxes[:,:,2:] = height
else:
dummy_boxes[:,:,2:] = width
dummy_labels = torch.randint(1, num_classes, (batch_size, ground_truth_box))
if args.model_name in ["maskrcnn_resnet50_fpn"]:
dummy_masks = torch.randint(0, 1, (batch_size, 1, height, width))
if args.model_name in ["keypointrcnn_resnet50_fpn"]:
num_keypoints=17
dummy_keypoints = torch.randn(batch_size, num_keypoints, 3, dtype = torch.float32) # 3: (x, y, visibility)
dummy_keypoints[:,:,-1:] = 1
dummy_images = list(image for image in dummy_images)
dummy_targets = []
for i in range(len(dummy_images)):
d = {}
d['boxes'] = dummy_boxes[i]
d['labels'] = dummy_labels[i]
if args.model_name in ["maskrcnn_resnet50_fpn"]:
d['masks'] = dummy_masks[i]
if args.model_name in ["keypointrcnn_resnet50_fpn"]:
d['keypoints'] = dummy_keypoints[i]
dummy_targets.append(d)
inputs = {}
inputs['images'] = dummy_images
inputs['targets'] = dummy_targets
input_args = (inputs['images'],
inputs['targets'])
ordered_input_names = ['images', 'targets']
output_names = ['loss', ]
        model = WrapperModel(torchvision_model)
model.train()
# model.eval()
# dynamic_axes=infer_shapes(model, inputs, batch_size)
torch.onnx.export(
model=model,
args=input_args,
f=Path(args.model_name+'.onnx').as_posix(),
input_names=ordered_input_names,
output_names=output_names,
# dynamic_axes=dynamic_axes,
do_constant_folding=False,
_retain_param_name=True,
enable_onnx_checker=True,
opset_version=12,
training=TrainingMode.PRESERVE
)
model = onnx.load(args.model_name+'.onnx')
model = onnx.shape_inference.infer_shapes(model)
onnx.checker.check_model(model)
onnx.save(model, args.model_name+'.onnx')
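# Example invocation (hypothetical environment; requires torch, torchvision and onnx):
#   python detection2onnx.py --model_name ssd300_vgg16 --batch_size 2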
|