text stringlengths 957 885k |
|---|
<filename>corehq/apps/accounting/enterprise.py
import re
from datetime import datetime, timedelta
from django.utils.translation import ugettext as _
from memoized import memoized
from couchforms.analytics import get_last_form_submission_received
from dimagi.utils.dates import DateSpan
from corehq.apps.accounting.exceptions import EnterpriseReportError
from corehq.apps.accounting.models import BillingAccount, Subscription
from corehq.apps.accounting.utils import get_default_domain_url
from corehq.apps.app_manager.dbaccessors import get_brief_apps_in_domain
from corehq.apps.domain.calculations import sms_in_last
from corehq.apps.domain.models import Domain
from corehq.apps.es import forms as form_es
from corehq.apps.reports.filters.users import \
ExpandedMobileWorkerFilter as EMWF
from corehq.apps.users.dbaccessors.all_commcare_users import (
get_all_user_rows,
get_mobile_user_count,
get_web_user_count,
)
from corehq.apps.users.models import CouchUser
from corehq.util.quickcache import quickcache
class EnterpriseReport(object):
    """Base class for enterprise console reports.

    Subclasses aggregate per-domain data across every domain that has an
    active subscription under a single BillingAccount.
    """
    # Report slugs used by create() to pick the concrete subclass.
    DOMAINS = 'domains'
    WEB_USERS = 'web_users'
    MOBILE_USERS = 'mobile_users'
    FORM_SUBMISSIONS = 'form_submissions'

    title = _('Enterprise Report')
    subtitle = ''

    def __init__(self, account_id, couch_user):
        super(EnterpriseReport, self).__init__()
        self.account = BillingAccount.objects.get(id=account_id)
        self.couch_user = couch_user
        self.slug = None

    @property
    def headers(self):
        # Trailing columns shared by all subclass reports.
        return [_('Project Space Name'), _('Project Name'), _('Project URL')]

    @property
    def filename(self):
        return "{} ({}) {}.csv".format(self.account.name, self.title, datetime.utcnow().strftime('%Y%m%d %H%M%S'))

    @classmethod
    def create(cls, slug, account_id, couch_user):
        """Factory: return the concrete report for ``slug``.

        Raises EnterpriseReportError if the slug is not recognized.
        """
        # Bug fix: ``report`` was previously left unbound when no slug
        # matched, so an unknown slug raised NameError instead of the
        # intended EnterpriseReportError.
        report = None
        if slug == cls.DOMAINS:
            report = EnterpriseDomainReport(account_id, couch_user)
        elif slug == cls.WEB_USERS:
            report = EnterpriseWebUserReport(account_id, couch_user)
        elif slug == cls.MOBILE_USERS:
            report = EnterpriseMobileWorkerReport(account_id, couch_user)
        elif slug == cls.FORM_SUBMISSIONS:
            report = EnterpriseFormReport(account_id, couch_user)
        if report:
            report.slug = slug
            return report
        raise EnterpriseReportError(_("Unrecognized report '{}'").format(slug))

    def format_date(self, date):
        # Empty string keeps the CSV cell blank when a date is missing.
        return date.strftime('%Y/%m/%d %H:%M:%S') if date else ''

    def domain_properties(self, domain_obj):
        # Values for the three shared trailing columns (see ``headers``).
        return [
            domain_obj.name,
            domain_obj.hr_name,
            get_default_domain_url(domain_obj.name),
        ]

    def rows_for_domain(self, domain_obj):
        raise NotImplementedError("Subclasses should override this")

    def total_for_domain(self, domain_obj):
        raise NotImplementedError("Subclasses should override this")

    @memoized
    def domains(self):
        # One domain per active subscription; set() dedupes domains that
        # have more than one subscription under this account.
        subscriptions = Subscription.visible_objects.filter(account_id=self.account.id, is_active=True)
        domain_names = set(s.subscriber.domain for s in subscriptions)
        return [Domain.get_by_name(name) for name in domain_names]

    @property
    def rows(self):
        rows = []
        for domain_obj in self.domains():
            rows += self.rows_for_domain(domain_obj)
        return rows

    @property
    def total(self):
        total = 0
        for domain_obj in self.domains():
            total += self.total_for_domain(domain_obj)
        return total
class EnterpriseDomainReport(EnterpriseReport):
    """One row per project space, with app/user/SMS/submission summary counts.

    The redundant ``__init__`` that only forwarded to super() was removed;
    the inherited constructor is identical.
    """
    title = _('Project Spaces')

    @property
    def headers(self):
        headers = super(EnterpriseDomainReport, self).headers
        return [_('Created On [UTC]'), _('# of Apps'), _('# of Mobile Users'), _('# of Web Users'),
                _('# of SMS (last 30 days)'), _('Last Form Submission [UTC]')] + headers

    def rows_for_domain(self, domain_obj):
        # Exactly one summary row per domain.
        return [[
            self.format_date(domain_obj.date_created),
            len(domain_obj.applications()),
            get_mobile_user_count(domain_obj.name, include_inactive=False),
            get_web_user_count(domain_obj.name, include_inactive=False),
            sms_in_last(domain_obj.name, 30),
            self.format_date(get_last_form_submission_received(domain_obj.name)),
        ] + self.domain_properties(domain_obj)]

    def total_for_domain(self, domain_obj):
        # Each domain contributes exactly one row to the report.
        return 1
class EnterpriseWebUserReport(EnterpriseReport):
    """One row per active web user in each project space.

    The redundant ``__init__`` that only forwarded to super() was removed;
    the inherited constructor is identical.
    """
    title = _('Web Users')

    @property
    def headers(self):
        headers = super(EnterpriseWebUserReport, self).headers
        return [_('Name'), _('Email Address'), _('Role'), _('Last Login [UTC]'),
                _('Last Access Date [UTC]')] + headers

    def rows_for_domain(self, domain_obj):
        rows = []
        for user in get_all_user_rows(domain_obj.name, include_web_users=True, include_mobile_users=False,
                                      include_inactive=False, include_docs=True):
            user = CouchUser.wrap_correctly(user['doc'])
            domain_membership = user.get_domain_membership(domain_obj.name)
            last_accessed_domain = None
            if domain_membership:
                last_accessed_domain = domain_membership.last_accessed
            # NOTE(review): unlike last_login, last_accessed_domain is appended
            # unformatted even though the header says "[UTC]" -- confirm
            # whether it should also go through format_date().
            rows.append(
                [
                    user.full_name,
                    user.username,
                    user.role_label(domain_obj.name),
                    self.format_date(user.last_login),
                    last_accessed_domain
                ]
                + self.domain_properties(domain_obj))
        return rows

    def total_for_domain(self, domain_obj):
        return get_web_user_count(domain_obj.name, include_inactive=False)
class EnterpriseMobileWorkerReport(EnterpriseReport):
    """One row per active mobile worker in each project space.

    The redundant ``__init__`` that only forwarded to super() was removed;
    the inherited constructor is identical.
    """
    title = _('Mobile Workers')

    @property
    def headers(self):
        headers = super(EnterpriseMobileWorkerReport, self).headers
        return [_('Username'), _('Name'), _('Created Date [UTC]'), _('Last Sync [UTC]'),
                _('Last Submission [UTC]'), _('CommCare Version')] + headers

    def rows_for_domain(self, domain_obj):
        rows = []
        for user in get_all_user_rows(domain_obj.name, include_web_users=False, include_mobile_users=True,
                                      include_inactive=False, include_docs=True):
            user = CouchUser.wrap_correctly(user['doc'])
            rows.append([
                # Drop everything from the first '@' to show the bare username.
                re.sub(r'@.*', '', user.username),
                user.full_name,
                self.format_date(user.created_on),
                self.format_date(user.reporting_metadata.last_sync_for_user.sync_date),
                self.format_date(user.reporting_metadata.last_submission_for_user.submission_date),
                user.reporting_metadata.last_submission_for_user.commcare_version or '',
            ] + self.domain_properties(domain_obj))
        return rows

    def total_for_domain(self, domain_obj):
        return get_mobile_user_count(domain_obj.name, include_inactive=False)
class EnterpriseFormReport(EnterpriseReport):
    """One row per mobile-worker form submission over the trailing window."""
    title = _('Mobile Form Submissions')

    def __init__(self, account_id, couch_user):
        super(EnterpriseFormReport, self).__init__(account_id, couch_user)
        self.window = 30  # days of history included in the report
        self.subtitle = _("past {} days").format(self.window)

    @property
    def headers(self):
        headers = super(EnterpriseFormReport, self).headers
        return [_('Form Name'), _('Submitted [UTC]'), _('App Name'), _('Mobile User')] + headers

    @quickcache(['self.account.id', 'domain_name'], timeout=60)
    def hits(self, domain_name):
        """Return raw ES hits for mobile-worker form submissions in the window."""
        time_filter = form_es.submitted
        # Bug fix: the start of the range previously used local time
        # (datetime.now()) while the end used UTC (datetime.utcnow()); use
        # UTC for both so the window is consistently self.window days.
        datespan = DateSpan(datetime.utcnow() - timedelta(days=self.window), datetime.utcnow())
        users_filter = form_es.user_id(EMWF.user_es_query(domain_name,
                                                          ['t__0'],  # All mobile workers
                                                          self.couch_user)
                                       .values_list('_id', flat=True))
        query = (form_es.FormES()
                 .domain(domain_name)
                 .filter(time_filter(gte=datespan.startdate,
                                     lt=datespan.enddate_adjusted))
                 .filter(users_filter))
        return query.run().hits

    def rows_for_domain(self, domain_obj):
        apps = get_brief_apps_in_domain(domain_obj.name)
        apps = {a.id: a.name for a in apps}
        rows = []
        for hit in self.hits(domain_obj.name):
            username = hit['form']['meta']['username']
            # received_on is an ISO-8601 string; keep only seconds precision.
            submitted = self.format_date(datetime.strptime(hit['received_on'][:19], '%Y-%m-%dT%H:%M:%S'))
            rows.append([
                hit['form']['@name'],
                submitted,
                apps.get(hit['app_id'], _('App not found')),
                username,
            ] + self.domain_properties(domain_obj))
        return rows

    def total_for_domain(self, domain_obj):
        return len(self.hits(domain_obj.name))
|
<filename>bluesky_widgets/qt/search_results.py<gh_stars>0
import collections
import logging
from qtpy import QtCore
from qtpy.QtCore import (
QAbstractTableModel,
# QItemSelection,
# QItemSelectionModel,
QTimer,
Qt,
)
from qtpy.QtWidgets import QAbstractItemView, QHeaderView, QTableView
from .threading import create_worker
logger = logging.getLogger(__name__)
LOADING_PLACEHOLDER = "..."
CHUNK_SIZE = 5 # max rows to fetch at once
LOADING_LATENCY = 100 # ms
def _load_data(get_data, indexes):
"Load a batch of data. This is run in a threadpool."
for index in indexes:
row, column = index.row(), index.column()
try:
item = get_data(row, column)
except Exception:
logger.exception("Error while loading search results")
continue
yield index, item
class _SearchResultsModel(QAbstractTableModel):
    """
    Qt model connecting our model to Qt's model--view machinery

    This is implementing two layers of "laziness" to ensure that the app
    remains responsive when large tables are loaded.

    1. Rows are added dynamically using Qt's canFetchMore / fetchMore
       machinery.
    2. Data (which Qt assumes is readily available in memory) is immediately
       filled with LOADING_PLACEHOLDER. Work is kicked off on a thread to later
       update this with the actual data.
    """

    def __init__(self, model, *args, **kwargs):
        self.model = model  # our internal model for the components subpackage
        super().__init__(*args, **kwargs)
        # State related to dynamically adding rows
        self._current_num_rows = 0
        self._catalog_length = len(self.model.catalog)
        # Cache for loaded data: maps Qt model index -> fetched item (or
        # LOADING_PLACEHOLDER while the fetch is pending).
        self._data = {}
        # Queue of indexes of data to be loaded
        self._work_queue = collections.deque()
        # Set of active workers
        self._active_workers = set()
        # Start a timer that will periodically load any data queued up to be loaded.
        self._data_loading_timer = QTimer(self)
        # We run this once to initialize it. The _process_work_queue schedules
        # it to be run again when it completes. This is better than a strictly
        # periodic timer because it ensures that requests do not pile up if
        # _process_work_queue takes longer than LOADING_LATENCY to complete.
        self._data_loading_timer.singleShot(LOADING_LATENCY, self._process_work_queue)
        # Changes to the model update the GUI.
        self.model.events.begin_reset.connect(self.on_begin_reset)
        self.model.events.end_reset.connect(self.on_end_reset)

    def _process_work_queue(self):
        # Drain all currently queued indexes into one background worker,
        # then reschedule itself after LOADING_LATENCY ms.
        if self._work_queue:
            worker = create_worker(
                _load_data, self.model.get_data, tuple(self._work_queue)
            )
            self._work_queue.clear()
            # Track this worker in case we need to ignore it and cancel due to
            # model reset.
            self._active_workers.add(worker)
            worker.finished.connect(lambda: self._active_workers.discard(worker))
            worker.yielded.connect(self.on_item_loaded)
            worker.start()
        # Else, no work to do.
        # Schedule the next processing.
        self._data_loading_timer.singleShot(LOADING_LATENCY, self._process_work_queue)

    def on_item_loaded(self, payload):
        # Update state and trigger Qt to run data() to update its internal model.
        index, item = payload
        self._data[index] = item
        self.dataChanged.emit(index, index, [])

    def on_begin_reset(self, event):
        # Clear all cached state and detach in-flight workers so stale
        # results from the previous catalog cannot land in the new one.
        self.beginResetModel()
        self._current_num_rows = 0
        self._catalog_length = len(self.model.catalog)
        for worker in self._active_workers:
            # Cease allowing this worker to mutate _data so that we do not get
            # any stale updates.
            worker.yielded.disconnect(self.on_item_loaded)
            # To avoid doing useless work, try to cancel the worker. We do not
            # rely on this request being effective.
            worker.quit()
        self._active_workers.clear()
        self._work_queue.clear()
        self._data.clear()

    def on_end_reset(self, event):
        self.endResetModel()

    def canFetchMore(self, parent=None):
        # NOTE(review): Qt always supplies `parent`; if this were ever called
        # with the default None, parent.isValid() would raise -- confirm the
        # default is intentional.
        if parent.isValid():
            return False
        return self._current_num_rows < self._catalog_length

    def fetchMore(self, parent=None):
        # Grow the visible table by up to CHUNK_SIZE rows at a time.
        if parent.isValid():
            return
        remainder = self._catalog_length - self._current_num_rows
        rows_to_add = min(remainder, CHUNK_SIZE)
        if rows_to_add <= 0:
            return
        self.beginInsertRows(
            parent, self._current_num_rows, self._current_num_rows + rows_to_add - 1
        )
        self._current_num_rows += rows_to_add
        self.endInsertRows()

    def rowCount(self, parent=None):
        return self._current_num_rows

    def columnCount(self, parent=None):
        return len(self.model.headings)

    def headerData(self, section, orientation, role=Qt.DisplayRole):
        if role != Qt.DisplayRole:
            return super().headerData(section, orientation, role)
        if orientation == Qt.Horizontal and section < self.columnCount():
            return str(self.model.headings[section])
        elif orientation == Qt.Vertical and section < self.rowCount():
            # Vertical headers show the 0-based row index.
            return section

    def data(self, index, role=QtCore.Qt.DisplayRole):
        if not index.isValid():  # does > 0 bounds check
            return QtCore.QVariant()
        if index.column() >= self.columnCount() or index.row() >= self.rowCount():
            return QtCore.QVariant()
        if role == QtCore.Qt.DisplayRole:
            if index in self._data:
                return self._data[index]
            else:
                # First request for this cell: show the placeholder and
                # queue the real fetch for the background worker.
                self._data[index] = LOADING_PLACEHOLDER
                self._work_queue.append(index)
                return LOADING_PLACEHOLDER
        else:
            return QtCore.QVariant()
class QtSearchResults(QTableView):
    """
    Table of search results

    Parameters
    ----------
    model: bluesky_widgets.components.search_results.SearchResults
    """

    def __init__(self, model, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.model = model
        # View configuration: read-only, row-wise selection, compact look.
        self.setEditTriggers(QAbstractItemView.NoEditTriggers)
        self.setSortingEnabled(False)
        self.setSelectionBehavior(QTableView.SelectRows)
        self.setShowGrid(False)
        self.verticalHeader().setVisible(False)
        self.horizontalHeader().setDefaultAlignment(Qt.AlignHCenter)
        self.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
        # self.setAlternatingRowColors(True)
        self._abstract_table_model = _SearchResultsModel(model)
        self.setModel(self._abstract_table_model)
        # Notify model of changes to selection and activation.
        self.selectionModel().selectionChanged.connect(self.on_selection_changed)
        self.clicked.connect(self.on_clicked)
        # Update the view to changes in the model.
        self.model.selected_rows.events.added.connect(self.on_row_added)
        self.model.selected_rows.events.removed.connect(self.on_row_removed)

    def on_selection_changed(self, selected, deselected):
        # Mirror the Qt selection state into model.selected_rows.
        # One would expect we could ask Qt directly for the rows, as opposed to
        # using set() here, but I cannot find such a method.
        for row in set(index.row() for index in deselected.indexes()):
            if row in self.model.selected_rows:
                self.model.selected_rows.remove(row)
        for row in set(index.row() for index in selected.indexes()):
            if row not in self.model.selected_rows:
                self.model.selected_rows.append(row)

    def on_clicked(self, index):
        # A click marks the clicked row as "active" in the component model.
        self.model.active_row = index.row()

    def on_row_added(self, event):
        """Sync changes to model to view.

        This is expected to be rare, is not yet publicly exposed.
        """
        # TODO -- Not sure what is broken here
        # index1 = self._abstract_table_model.index(event.item, 0)
        # index2 = self._abstract_table_model.index(event.item, self._abstract_table_model.columnCount())
        # selection = QItemSelection(index1, index2)
        # self.selectionModel().select(selection, QItemSelectionModel.Select)
        ...

    def on_row_removed(self, event):
        """Sync changes to model to view.

        This is expected to be rare, is not yet publicly exposed.
        """
        # TODO -- Not sure what is broken here
        # index1 = self._abstract_table_model.index(event.item, 0)
        # index2 = self._abstract_table_model.index(event.item, self._abstract_table_model.columnCount())
        # selection = QItemSelection(index1, index2)
        # self.selectionModel().select(selection, QItemSelectionModel.Deselect)
        ...

    def on_activated_by_model(self, event):
        # TODO
        ...
|
<filename>createProject.py
#!/usr/bin/python
# -*- coding: UTF-8 -*-
#import urllib.request
#from urllib import request,parse
# pip install ruamel.yaml
import sys
import os
import getopt
import time
import io
import git
from git import Repo
import ruamel.yaml
from ruamel.yaml.util import load_yaml_guess_indent
path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(path+'/framework')
from translate import translate_baidu
sys.path.append(path+'/template')
from templateManager import TemplateConfig
#log = master.log()
#currentCommit = repo.commit(currentBranch)
#compareCommit = repo.commit(compareBranch)
#diffed = repo.log(currentBranch,compareBranch)
#print(currentCommit+currentCommit)
#commits = list(repo.iter_commits(currentBranch))[:5]
#for commit in commits:
# print('author:%s email:%s' % (commit.author.name,commit.author.email))
def getGitBranchNameFromTaskName(taskName):
    """Translate the task name (via Baidu) and convert it to a CamelCase branch name."""
    return handleTranslateStr(translate_baidu(taskName))
# Capitalize the first letter of each word, then concatenate them
def handleTranslateStr(content):
    """Capitalize each space-separated word of `content` and join them into one CamelCase string."""
    words = content.split(' ')
    return ''.join(word.capitalize() for word in words)
def create():
    """Interactively create a feature branch and a matching test-config YAML.

    Python 2 script flow (uses raw_input/reload). Prompts the user (in
    Chinese) for a project, a PM, a task name and a list of test items, then:
      1. checks out feature/<yyyymmdd>_<CamelCaseTask> off master in the
         chosen repo under ~/,
      2. writes configs/<yyyymmdd>_<CamelCaseTask>_config.yaml from the
         template and appends its name to the configs/configs index file.
    """
    # Python 2-only: force the default encoding to utf-8 for the Chinese prompts.
    if sys.getdefaultencoding() != 'utf-8':
        reload(sys)
        sys.setdefaultencoding('utf-8')
    path = os.path.dirname(os.path.realpath(__file__))
    path = os.path.join(path,'config.yaml')
    # Preserve the YAML file's indentation style when re-saving later.
    yamlContent,ind,bsi = load_yaml_guess_indent(open(path))
    print ('\n')
    print ('\n')
    # 'LenzBusiness' 'LenzMember'
    print('-----------------------------------')
    print('工程列表:')
    count = 0
    for p in yamlContent['project_list']:
        count += 1
        print(str(count)+'.'+p['prefix'])
    print('-----------------------------------')
    # Menus are 1-based; subtract 1 when indexing below.
    repo_index = int(raw_input('请输入工程名称索引:'))
    print('-----------------------------------')
    repo_name = yamlContent['project_list'][repo_index - 1]['repo_name']
    prefix = yamlContent['project_list'][repo_index - 1]['prefix']
    pm_name = ''
    task_name = ''
    print ('\n')
    print('-----------------------------------')
    print('生成 feature/时间_任务名称')
    print('例子 feature/20190516_时间打点')
    print('-----------------------------------')
    print('-----------------------------------')
    print('pm列表:')
    count = 0
    for p in yamlContent['pm_list']:
        count += 1
        print(str(count)+'.'+p)
    pm_index = int(raw_input('请输入PM名字索引:'))
    pm_name = yamlContent['pm_list'][pm_index-1]
    print('-----------------------------------')
    print ('\n')
    print('-----------------------------------')
    # Keep prompting until a non-empty task name is entered.
    while task_name == '':
        task_name = raw_input('请输入任务名称(不要带空格):')
    print('-----------------------------------')
    taskName = getGitBranchNameFromTaskName(task_name)
    date_string = time.strftime("%Y%m%d",time.localtime())
    just_test_branch = date_string + '_' + taskName  # used as the file name
    test_branch = 'feature/' + date_string + '_' + taskName
    print ('\n')
    print ('\n')
    in_text = ''
    test_options = ''
    print('-----------------------------------')
    print('项目测试项:---------一行一个---------')
    print('相机优化 ')
    print('主任务列表优化 ')
    print('最后输入 q 回车 结束输入')
    print('-----------------------------------')
    print('请输入项目测试项:')
    count = 0
    # Collect numbered test items until the user enters the sentinel 'q'.
    while in_text != 'q':
        count += 1
        in_text = raw_input()
        if in_text != 'q':
            test_options += str(count) + '.' + in_text
            test_options += '\n'
    print('-----------------------------------')
    print ('\n')
    # git: cut a new branch off master, named feature/xxx by default
    repo = Repo('~/' + repo_name)
    master = repo.heads.master
    currentBranch = repo.head.reference
    if currentBranch != master:
        master.checkout()
    # NOTE(review): this local `git` shadows the module-level `import git`.
    git = repo.git
    git.checkout('master',b=test_branch)
    print('切分支成功:')
    print(test_branch)
    # Update the YAML config from the template
    config = TemplateConfig()
    config.readConfigFromTemplate()
    config.git_branch = test_branch
    config.git_project_name = repo_name
    config.test_options = test_options
    config.project_pm = pm_name
    config.project_name = prefix + ' ' + task_name
    yaml_name = just_test_branch+'_config.yaml'
    path = os.path.dirname(os.path.realpath(__file__))
    yamlPath = os.path.join(path,'configs/' + yaml_name)
    if not os.path.isfile(yamlPath):
        os.system("touch " + yamlPath)
    path = os.path.dirname(os.path.realpath(__file__))
    # Append the new config's file name to the index of configs.
    with io.open(path+'/configs/configs','a',encoding='utf-8') as f:
        f.write(yaml_name)
        f.write(u'\n')
    config.save(yamlPath)
    print('存储到本地配置成功:')
    print(test_options)
def main(argv):
    # argv is accepted (sliced from sys.argv below) but currently unused.
    create()


if __name__ == "__main__":
    main(sys.argv[1:])
# TODO: 1. read project/product names from a config file  2. auto-fetch the previous template for the matching project
|
<gh_stars>1-10
##############################################################################################
# Copyright 2017 The Johns Hopkins University Applied Physics Laboratory LLC
# All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
##############################################################################################
# label_funcs_with_no_xrefs.py
# This script checks each defined function in the address range entered for cross-references.
# If there are no cross-references to the function, the prefix "noXrefs_" is added to the
# function's name. It then iterates through all functions in the code range again to identify
# all functions who's only code references are functions that have no cross-references. This
# is to detected functions called only by other functions who have no code references.
# This script helps to detect "dead code" that is never called.
#
# Inputs: start_addr: Start address for segment to define as data
# end_addr: End address for segment to define as data
#
##############################################################################################
################### USER DEFINED VALUES ###################
# None needed.
###########################################################
def addPrefixToFunctionName(prefix, functionAddr):
    # Prepend `prefix` to the name of the function at functionAddr, unless
    # the name already carries the prefix.
    # Bug fix: the body previously read the global `curr_addr` instead of the
    # `functionAddr` parameter, silently ignoring the argument. (All existing
    # call sites pass curr_addr, so behavior is unchanged for them.)
    name = GetFunctionName(functionAddr)
    if (name and not name.startswith(prefix)):
        name = prefix + name
        print ("[label_funcs_with_no_xrefs.py] Function 0x%x Name: " % functionAddr) + name
        MakeName(functionAddr, name)
start_addr = AskAddr(MinEA(), "Please enter the starting address for the functions to be examined.")
end_addr = AskAddr(MaxEA(), "Please enter the ending address for the functions to be examined.")
if ((start_addr is not None and end_addr is not None) and (start_addr != BADADDR and end_addr != BADADDR) and start_addr < end_addr):
print "[label_funcs_with_no_xrefs.py] Running on addresses 0x%x to 0x%x" % (start_addr, end_addr)
# If start_addr is in a function, get the starting address of that function. Else, returns -1.
curr_addr = GetFunctionAttr(start_addr, FUNCATTR_START) # Get the function head for the "start" addr
if (curr_addr == BADADDR):
# start_addr is not currently in a function so select the beginning of the next function
curr_addr = NextFunction(start_addr)
# Using this to continually iterate through all functions until no new functions
# having no code reference paths are found.
new_noXrefs_found = False
while (curr_addr != BADADDR and curr_addr < end_addr):
if (not GetFunctionName(curr_addr).startwith("noXrefs_")):
xrefs = XrefsTo(curr_addr)
has_valid_xref = False;
for x in xrefs:
if (not GetFunctionName(x.frm).startswith("noXrefs_")):
# Function has a valid cross-reference and is not "dead code"
has_valid_xref = True;
break;
if (has_valid_xref == False):
# No valid xrefs were found to this function
new_noXrefs_found = True
addPrefixToFunctionName("noXrefs_", curr_addr)
curr_addr = NextFunction(curr_addr)
if ((curr_addr == BADADDR or curr_addr >= end_addr) and new_noXrefs_found):
print "[label_funcs_with_no_xrefs.py] Iterating through range again because new functions with no Xrefs found."
curr_addr = start_addr
new_noXrefs_found = False
print "[label_funcs_with_no_xrefs.py] FINISHED."
else:
print "[label_funcs_with_no_xrefs.py] QUITTING. Invalid address(es) entered."
|
from datetime import datetime, timezone
from django.core.management.base import BaseCommand
import logging
from multiprocessing import Process, Queue
from organisation.models import DepartmentUser, CostCentre, Location
from organisation.utils import ms_graph_users
def get_users(queue):
    """Worker: fetch Azure AD users via MS Graph and push them onto the queue.

    Runs in a child process so the caller can enforce a timeout. The initial
    queue.get() drains the placeholder the parent seeded the queue with, so
    the final queue.put() leaves the real result as the only queued item.
    """
    # Fix: the drained placeholder was previously assigned to azure_users and
    # then immediately overwritten -- a pointless assignment; discard it.
    queue.get()
    azure_users = ms_graph_users()
    queue.put(azure_users)
class Command(BaseCommand):
    help = 'Checks licensed user accounts from Azure AD and creates/updates linked DepartmentUser objects'

    def handle(self, *args, **options):
        """Sync Azure AD accounts (via MS Graph) into DepartmentUser records.

        Three cases per Azure account: link to an unlinked matching email,
        skip (with a warning) on a conflicting link, or create a new record
        for licensed accounts. Finally clears azure_guid values that no
        longer exist in Azure.
        """
        logger = logging.getLogger('organisation')
        logger.info('Querying Microsoft Graph API for Azure AD user accounts')
        # Call the MS Graph API in a separate process with a timeout.
        azure_users = None
        queue = Queue()
        queue.put(azure_users)  # seed the queue; the worker drains this placeholder
        process = Process(target=get_users, args=(queue,))
        process.start()
        process.join(timeout=80)  # Give this function a maximum time to finish (process will block for this duration, regardless).
        # NOTE(review): this get() runs before the exitcode check; if the
        # worker timed out without consuming the placeholder this returns
        # None, but if it consumed it and produced nothing this could block --
        # confirm the intended timeout behaviour.
        azure_users = queue.get()
        if not process.exitcode == 0:
            logger.error('Queued process did not complete in time')
            return
        if not azure_users:
            logger.error('Microsoft Graph API returned no data')
            return
        logger.info('Comparing Department Users to Azure AD user accounts')
        for az in azure_users:
            if az['mail'] and az['displayName']:  # Azure object has an email address and a display name; proceed.
                if not DepartmentUser.objects.filter(azure_guid=az['objectId']).exists():
                    # No existing DepartmentUser is linked to this Azure AD user.
                    # A department user with matching email may already exist in IT Assets with a different azure_guid.
                    # If so, return a warning and skip that user.
                    # We'll need to correct this issue manually.
                    if DepartmentUser.objects.filter(email=az['mail'], azure_guid__isnull=False).exists():
                        existing_user = DepartmentUser.objects.filter(email=az['mail']).first()
                        logger.warning(
                            'Skipped {}: email exists and already associated with Azure ObjectId {} (this ObjectId is {})'.format(az['mail'], existing_user.azure_guid, az['objectId'])
                        )
                        continue  # Skip to the next Azure user.
                    # A department user with matching email may already exist in IT Assets with no azure_guid.
                    # If so, associate the Azure AD objectId with that user.
                    if DepartmentUser.objects.filter(email=az['mail'], azure_guid__isnull=True).exists():
                        existing_user = DepartmentUser.objects.filter(email=az['mail']).first()
                        existing_user.azure_guid = az['objectId']
                        existing_user.azure_ad_data = az
                        existing_user.azure_ad_data_updated = datetime.now(timezone.utc)
                        existing_user.update_from_azure_ad_data()
                        logger.info('AZURE AD SYNC: linked existing user {} with Azure objectId {}'.format(az['mail'], az['objectId']))
                        continue  # Skip to the next Azure user.
                    # Only create a new DepartmentUser instance if the Azure AD account has >0 licences assigned to it.
                    if az['assignedLicenses']:
                        # Resolve optional FK values only when a matching record exists.
                        if az['companyName'] and CostCentre.objects.filter(code=az['companyName']).exists():
                            cost_centre = CostCentre.objects.get(code=az['companyName'])
                        else:
                            cost_centre = None
                        if az['officeLocation'] and Location.objects.filter(name=az['officeLocation']).exists():
                            location = Location.objects.get(name=az['officeLocation'])
                        else:
                            location = None
                        new_user = DepartmentUser.objects.create(
                            azure_guid=az['objectId'],
                            azure_ad_data=az,
                            azure_ad_data_updated=datetime.now(timezone.utc),
                            active=az['accountEnabled'],
                            email=az['mail'],
                            name=az['displayName'],
                            given_name=az['givenName'],
                            surname=az['surname'],
                            title=az['jobTitle'],
                            telephone=az['telephoneNumber'],
                            mobile_phone=az['mobilePhone'],
                            cost_centre=cost_centre,
                            location=location,
                            dir_sync_enabled=az['onPremisesSyncEnabled'],
                        )
                        logger.info(f'AZURE AD SYNC: created new department user {new_user}')
                else:
                    # An existing DepartmentUser is linked to this Azure AD user.
                    # Update the existing DepartmentUser object fields with values from Azure.
                    existing_user = DepartmentUser.objects.get(azure_guid=az['objectId'])
                    existing_user.azure_ad_data = az
                    existing_user.azure_ad_data_updated = datetime.now(timezone.utc)
                    existing_user.update_from_azure_ad_data()
        # Iterate through department users and clear any nonexistent Azure AD GUID values.
        azure_users = {i['objectId']: i for i in azure_users}
        # NOTE(review): '<EMAIL>' looks like a redacted placeholder for the
        # real email-domain suffix -- restore the intended value.
        for du in DepartmentUser.objects.filter(azure_guid__isnull=False, email__iendswith='<EMAIL>'):
            if du.azure_guid not in azure_users:
                logger.info("ONPREM AD SYNC: Azure AD GUID {} not found in MS Graph output; clearing it from {}".format(du.azure_guid, du))
                du.azure_guid = None
                du.azure_ad_data = {}
                du.azure_ad_data_updated = datetime.now(timezone.utc)
                du.assigned_licences = []
                du.dir_sync_enabled = None
                du.save()
        logger.info('Completed')
|
<filename>pyuac/main_decorator.py
#!/usr/bin/env python
# -*- coding: utf-8; mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vim: fileencoding=utf-8 tabstop=4 expandtab shiftwidth=4
"""
See main_requires_admin
"""
from __future__ import absolute_import
import os
import sys
from logging import getLogger
from decorator import decorator
from tee import StderrTee, StdoutTee
from pyuac import isUserAdmin, runAsAdmin
log = getLogger(__name__)
@decorator
def main_requires_admin(
    run_function,
    cmdLine=None,
    return_output=False,
    stdout_handle=None, stderr_handle=None,
    scan_for_error=('error', 'exception'),
    *args, **kwargs
):
    """
    A decorator for a Python script 'main' function (i.e., when the file is invoked from the
    command line as a script) that ensures the 'main' function is executed as an Admin.
    Implements a common usage pattern of this module, which allows for capturing the stdout and
    stderr output from the sub-process and a very basic scan for errors.

    There is strong assumption here that when the current Python script is re-executed in the
    admin context with the same command line args, it code logic will lead to this same decorated
    main function being executed again.

    You can NOT send data from the parent (non-admin) process to the child (Admin)
    process. Depending on how the parent process was invoked, the child process
    might spawn a Python console window that can be interacted with directly.

    Warning: this does not allow capture of the process return code for error detection.
    However, the scan_for_error option will look for the case-ins string "error" on the last line
    (only) of output, or 'exception', and raise a RuntimeError with the string if found.

    :param run_function: the function to run
    :param args: arguments tuple to pass to run_function when called (optional)
    :param kwargs: keyword arguments dict to pass to run_function when called (optional)
    :param cmdLine: override the command line arguments for the new Admin process.
        Defaults to the current command line (sys.argv)!
    :param return_output: return the output to the caller of this function instead
        of writing it to stdout_handle. Note: due to the nature of how this works with UAC,
        this does NOT return the actual "return value" of run_function - only its
        stdout and stderr output, collected as [stdout, stderr]. If you set this, the callers
        of your decorated function should be prepared for this output.
    :param stdout_handle: file handle to write the process stdout output, defaults to sys.stdout
    :param stderr_handle: file handle to write the process stderr output, defaults to sys.stderr
    :param scan_for_error: scan the LAST line only of stdout and stderr for the listed strings.
        Case is ignored. Set to None or False to disable this. If one of the listed strings
        is found, a RuntimeError is raised in the parent process.
    :return: None unless return_output is set.
        If return_output is True, the output of the decorated function is a list of
        captured output strings in (stdout, stderr) order.
    """
    if os.name != 'nt':
        # UAC only exists on Windows; elsewhere just run the function directly.
        log.debug("Invoked main_requires_admin on a non-Windows platform; doing nothing!")
        return run_function(*args, **kwargs)
    # Should we add another function parameter to run the in the "not-admin" case?
    # Generate secure temp path? - path has to be known in spawned process...
    # Fixed file names: both parent and the re-spawned admin child must agree on them.
    stdout_temp_fn = 'pyuac.stdout.tmp.txt'
    stderr_temp_fn = 'pyuac.stderr.tmp.txt'
    if stdout_handle is None:
        stdout_handle = sys.stdout
    if stderr_handle is None:
        stderr_handle = sys.stderr
    if isUserAdmin():
        # Admin branch (we are the elevated child): run the real main while
        # teeing stdout/stderr into the temp files the parent will read.
        with StdoutTee(stdout_temp_fn, mode="a", buff=1), \
                StderrTee(stderr_temp_fn, mode="a", buff=1):
            try:
                log.debug("Starting run_function as admin")
                rv = run_function(*args, **kwargs)
                log.debug("Finished run_function as admin. return val: %r", rv)
                return rv
            except Exception:
                log.error("Error running main function as admin", exc_info=True)
                raise
    else:
        # Non-admin branch (parent): re-launch ourselves elevated, wait, then
        # collect the output files the admin child produced.
        log.debug("I'm not admin, starting runAsAdmin")
        runAsAdmin(cmdLine=cmdLine, wait=True)
        log.debug("I'm not admin, runAsAdmin has finished. Collecting result.")
        rv = []
        for filename, handle in (
            (stdout_temp_fn, stdout_handle),
            (stderr_temp_fn, stderr_handle),
        ):
            # A missing temp file is silently skipped (the child produced no
            # output on that stream, or elevation failed before it ran).
            if os.path.exists(filename):
                with open(filename, "r") as log_fh:
                    console_output = log_fh.read()
                os.remove(filename)
                if os.path.exists(filename):
                    log.warning("Couldn't delete temporary log file %s", filename)
                if scan_for_error:
                    lines = str.splitlines(console_output.strip())
                    if lines:
                        last_line = lines[-1].strip()
                        for error_str in scan_for_error:
                            if last_line.lower().find(error_str) != -1:
                                log.info(
                                    "Identified an error line in Admin process log at %s - "
                                    "emitting RuntimeError in parent process.\n%s",
                                    filename, last_line)
                                raise RuntimeError(last_line)
                if return_output:
                    # return console_output
                    rv.append(console_output)
                # NOTE(review): output is also echoed to the handle even when
                # return_output is set, although the docstring says "instead
                # of writing" -- confirm the intended behaviour.
                handle.write(console_output)
                handle.flush()
        if return_output and rv:
            return rv
|
# pyGestalt Mechanics Module
"""A set of objects and methods for defining, analyzing, and utilizing mechanisms."""
#---- INCLUDES ----
import math
from pygestalt import errors, units, utilities, geometry
class transformer(object):
    """Base class for all types that transform from one state to another.

    Mechanisms transform from one domain of machine state to another. For example, converting between rotations of a leadscrew
    to translation of a lead nut. When going in the forward direction (i.e. from the actuator to the end effector thru a transmission),
    there is only one solution to end effector state for a given actuator state, assuming that the mechanism is holonomic. An example
    of a non-holonomic mechanism would be a wheeled vehicle. Therefore, the forward direction is always defined as from the actuator
    to the end effector.

    There are two types of envisioned transformers:
    elements -- one-dimensional transformers that transform from a single state dimension to another. Examples include gears,
                pulleys, leadscrews, etc. While most are linear transformers, they can also be non-linear as in cams.
    kinematics -- multi-dimensional transformers, usually situated at the end of the mechanical chain, that transform from one
                  multi-dimensional coordinate space to another. Examples include linear stages such as normal and CoreXY cartesian
                  robots, but also polar bots, robotic arms, five bar linkages, etc.
    """
    def __init__(self, forwardTransform, reverseTransform = None, inertia = 0.0):
        """Initializer for the transformer.

        forwardTransform -- a dimensional float (units.dFloat), transformation matrix (geometry.matrix), or a callable object,
                            that transforms from one domain into another in the forward (i.e. actuator -> end effector) direction.
        reverseTransform -- if provided, is used to perform the transformation in the reverse direction. Note that this is mandatory
                            if anything other than an invertable object such as a dFloat is provided as the forward transform.
        inertia -- the inertia of the transformer element, used for dynamic simulation.

        Raises errors.MechanismError if no reverse transform is provided and the forward transform cannot be inverted.
        """
        self.forwardTransform = forwardTransform
        if reverseTransform: #a reverse transform was provided, so use that.
            self.reverseTransform = reverseTransform
        else: #no reverse transform provided. Try to invert the forward transform.
            try:
                self.reverseTransform = forwardTransform**-1
            except Exception: #was a bare except:, which would also swallow KeyboardInterrupt and SystemExit
                raise errors.MechanismError("No reverse transform provided. Forward transform [" + str(forwardTransform) + "] is not invertable!")
        self.inertia = inertia
        self.dimensions = self.calculateDimensions() #cache (output, input) dimensionality once at construction

    def forward(self, inputState):
        """Transforms state in the forward direction.

        inputState -- the input-side state of the transformer.

        Returns the corresponding output-side state.

        Note that this function simply multiplies the forward transform by the input state. Any complexity must be handled
        by the __mul__ function of the transform.
        """
        outputState = self.forwardTransform * inputState
        return outputState

    def reverse(self, outputState):
        """Transforms state in the reverse direction.

        outputState -- the output-side state of the transformer.

        Returns the corresponding input-side state.

        Note that this function simply multiplies the reverse transform by the output state. Any complexity must be handled
        by the __mul__ function of the transform.
        """
        inputState = self.reverseTransform * outputState
        return inputState

    def calculateDimensions(self):
        """Determines and returns the input and output dimensions of the transformer.

        The dimensionality of the transformer is defined as the number of degrees of freedom it accepts as inputs and that it
        provides as outputs.

        returns dimensions as a tuple in the format (outputDimension, inputDimension), where:
            outputDimension -- the number of degrees of freedom of the transformer output
            inputDimension -- the number of degrees of freedom of the transformer input

        Note that the order is (output, input) to maintain compatibility with matrix sizes and indices as (rows, columns), where
        the number of columns corresponds to the inputs of the transformation matrix, and the number of rows the outputs.
        """
        if isinstance(self.forwardTransform, float):
            outputDimension, inputDimension = (1, 1) #transform is a (dimensional) floating point number, so input and output dimensions are 1
        else:
            try: #see if forwardTransform implements getSize()
                outputDimension, inputDimension = self.forwardTransform.getSize()
            except AttributeError: #no getSize is implemented
                outputDimension, inputDimension = (None, None) #placeholder since no size can be determined
        return (outputDimension, inputDimension)

    def getSize(self):
        """Returns the pre-calculated input and output dimensions of the transformer.

        returns dimensions as a tuple in the format (outputDimension, inputDimension), where:
            outputDimension -- the number of degrees of freedom of the transformer output
            inputDimension -- the number of degrees of freedom of the transformer input

        Note that this method is called getSize rather than getDimensions to keep it consistent with the geometry.array method. It is still
        slightly confusing because the size of an array might be e.g. 3x3 but its dimensionality is 2. But we think of the dimensionality of a
        transformer as 1D, 2D, 3D, etc...
        """
        return self.dimensions
class singleAxisElement(transformer):
    """A one-dimensional machine element that transforms state from one domain to another."""
    def __init__(self, transform, inputUnits, outputUnits, inertia = None):
        """Initializes a new single-axis element.

        transform -- either a scalar or a custom transformation type that converts in the forward and reverse directions.
                     If a scalar, will default to the provided units unless units are attached as a dFloat.
        inputUnits -- units of the input to the transformer in the forward direction.
        outputUnits -- units of the output from the transformer in the forward direction.
        inertia -- the inertia of the transformer. Can be used for dynamic performance simulations etc.

        Raises errors.UnitError if inputUnits or outputUnits are not of type units.unit.
        """
        if type(inputUnits) == units.unit and type(outputUnits) == units.unit: #check for valid units
            self.inputUnits = inputUnits
            self.outputUnits = outputUnits
        else:
            raise errors.UnitError("Input and output units must be of type units.unit")

        if type(transform) == units.dFloat: #provided transform comes with its own units
            # Fill in any units the caller omitted, then normalize. NOTE: the order of these
            # reassignments matters — input units are assumed in the denominator, output units
            # in the numerator of the transform ratio.
            if not units.hasUnits(transform, inputUnits, checkEquivalents = True): #dFloat without input units provided
                transform = transform / inputUnits #assume input units in denominator
            if not units.hasUnits(transform, outputUnits, checkEquivalents = True): #dFloat without output units provided
                transform = transform * outputUnits #assume output units in numerator
            transform = transform.convert(self.outputUnits/self.inputUnits) #convert units into units provided on initialization
        else: #transform is a scalar. Give it units.
            transform = self.outputUnits(transform)/self.inputUnits
            #for now we assume any input that isn't a dFloat is a scalar of some type. Later, this needs to be appended to include custom transforms.

        # NOTE(review): the transformer base class defaults inertia to 0.0, but None is forwarded
        # here when the caller omits it — confirm consumers of self.inertia tolerate None.
        super(singleAxisElement, self).__init__(forwardTransform = transform, reverseTransform = None, inertia = inertia)

    def forward(self, forwardState):
        """Tranforms from an input state of the tranformer to the corresponding output state.

        forwardState -- the forward-going input state of the transformer. MUST be provided as a units.dFloat type.

        Raises errors.MechanismError if forwardState is not a units.dFloat.

        Note that this function over-rides its base class transformer.forward() function.
        """
        if type(forwardState) == units.dFloat:
            convertedForwardState = units.convertToUnits(forwardState, self.inputUnits, strict = True) #convert to input units, don't allow reciprocals
            return self.forwardTransform*convertedForwardState
        else:
            utilities.notice(self, "Input to singleAxisElement transformer must be of type units.dFloat!")
            raise errors.MechanismError("Incorrect input type to singleAxisElement.forward()")

    def reverse(self, reverseState):
        """Tranforms in the reverse direction from an output state of the tranformer to the corresponding input state.

        reverseState -- the output state of the transformer. MUST be provided as a units.dFloat type.

        Raises errors.MechanismError if reverseState is not a units.dFloat.

        Note that this function over-rides its base class transformer.reverse() function.
        """
        if type(reverseState) == units.dFloat:
            convertedReverseState = units.convertToUnits(reverseState, self.outputUnits, strict = True) #convert to output units, don't allow reciprocals
            return self.reverseTransform*convertedReverseState
        else:
            utilities.notice(self, "Input to singleAxisElement transformer must be of type units.dFloat!")
            raise errors.MechanismError("Incorrect input type to singleAxisElement.reverse()")

    def transform(self, inputState):
        """Transforms from one state to another based on the provided input units.

        The direction is chosen automatically: if inputState's units are equivalent to the element's
        input units the forward transform is applied; if equivalent to the output units, the reverse.

        Raises errors.MechanismError on a unit/dimensionality mismatch or a non-dFloat input.

        This is something of a bonus function, as the recommended useage is to explicitly call forward() or reverse().
        """
        if type(inputState) == units.dFloat:
            forwardUnitEquivalency = units.getUnitEquivalency(inputState, self.inputUnits(1)) #1 if equivalent, -1 if reciprocals, 0 if not equivalent
            reverseUnitEquivalency = units.getUnitEquivalency(inputState, self.outputUnits(1))
            if forwardUnitEquivalency == 1: #inputState units match transform input units. Transform in the forward direction.
                convertedInputState = units.convertToUnits(inputState, self.inputUnits, strict = True) #convert to input units, don't allow reciprocals
                return self.forwardTransform*convertedInputState
            elif reverseUnitEquivalency == 1: #inputState units match transform output units. Transform in the reverse direction.
                convertedInputState = units.convertToUnits(inputState, self.outputUnits, strict = True) #convert to output units, don't allow reciprocals
                return self.reverseTransform*convertedInputState
            else:
                utilities.notice(self, "Input to singleAxisElement transformer cannot be transformed because of a dimensionality mismatch.")
                raise errors.MechanismError("Encountered dimensionality mismatch while attempting transform.")
        else:
            utilities.notice(self, "Input to singleAxisElement transformer must be of type units.dFloat!")
            raise errors.MechanismError("Incorrect input type to singleAxisElement.transform()")
#---- SINGLE AXIS ELEMENT TYPES ----
class leadscrew(singleAxisElement):
    """A mechanical element that converts rotation into translation by means of a helical screw."""
    def __init__(self, lead):
        """Creates a new leadscrew element.

        lead -- the linear distance traveled in one revolution of the screw.
                Interpreted as mm/rev unless other units are attached via a dFloat.
        """
        super(leadscrew, self).__init__(transform=lead, inputUnits=units.rev, outputUnits=units.mm)
class gear(singleAxisElement):
    """A mechanical element that transforms torque and angular velocity by means of meshing teeth."""
    def __init__(self, reductionRatio):
        """Creates a new gear set.

        reductionRatio -- the ratio of input-gear revolutions to output-gear revolutions.
                          Equivalently: output pitch diameter / input pitch diameter, or
                          output tooth count / input tooth count.

        Both the input and output units are revolutions.
        """
        #the forward transform is the inverse of the reduction: one input rev yields 1/reductionRatio output revs
        super(gear, self).__init__(transform=1.0 / reductionRatio, inputUnits=units.rev, outputUnits=units.rev)
class rotaryPulley(singleAxisElement):
    """A mechanical element that transforms torque and angular velocity by means of a belt connecting two pulleys."""
    def __init__(self, reductionRatio):
        """Creates a new rotary pulley set.

        reductionRatio -- the ratio of input-pulley revolutions to output-pulley revolutions.
                          Equivalently: output pulley diameter / input pulley diameter.

        Both the input and output units are revolutions.
        """
        #the forward transform is the inverse of the reduction, as with the gear element
        super(rotaryPulley, self).__init__(transform=1.0 / reductionRatio, inputUnits=units.rev, outputUnits=units.rev)
class timingBelt(singleAxisElement):
    """A mechanical element that transforms rotation into translation by means of a toothed pulley meshed with a timing belt."""
    def __init__(self, pulleyPitchDiameter):
        """Creates a new timing belt.

        pulleyPitchDiameter -- the pitch diameter of the timing pulley, in mm.
        """
        #one pulley revolution advances the belt by the pulley's pitch circumference
        super(timingBelt, self).__init__(transform=math.pi * pulleyPitchDiameter, inputUnits=units.rev, outputUnits=units.mm)
class rack(singleAxisElement):
    """A mechanical element that transforms rotation into translation by means of a gear pinion meshed with a flat gear rack."""
    def __init__(self, pinionPitchDiameter):
        """Creates a new rack and pinion.

        pinionPitchDiameter -- the pitch diameter of the pinion, in mm.
        """
        #one pinion revolution advances the rack by the pinion's pitch circumference
        super(rack, self).__init__(transform=math.pi * pinionPitchDiameter, inputUnits=units.rev, outputUnits=units.mm)
class stepper(singleAxisElement):
    """An electromechanical element that transforms electrical 'step' pulses into rotation."""
    def __init__(self, stepSize):
        """Creates a new stepper motor.

        stepSize -- the rotational angle moved by the motor on each step, in degrees.
        """
        super(stepper, self).__init__(transform=stepSize, inputUnits=units.step, outputUnits=units.deg)
class invert(transformer):
    """A single-axis utility element that inverts the sign of the signal passing thru it."""
    def __init__(self):
        """Creates a new inverter, whose forward (and therefore reverse) transform is -1.0."""
        super(invert, self).__init__(forwardTransform=-1.0)
#---- MULTI-AXIS KINEMATIC TRANSFORMERS ----
class matrixTransformer(transformer):
    """A matrix-based transformer.

    While the transformer class by default accepts matrices as forward and reverse transforms, the formatting expected
    on the input and output of matrices must be 2D, whereas for transformers it is expected to be 1D. This class
    performs the necessary pre-formatting of inputs and post-formatting of results.
    """
    def _applyMatrix(self, transformMatrix, state):
        """Applies a transform matrix to a list-formatted 1D state.

        The state is reshaped into a 2D column matrix, multiplied by the transform, and the
        result is flattened back into a 1D list.
        """
        columnMatrix = geometry.matrix(state).transpose()
        resultMatrix = transformMatrix * columnMatrix
        return list(resultMatrix.transpose())[0]

    def forward(self, forwardState):
        """Transform in the forward direction.

        forwardState -- a list-formatted single-row array containing the input state of the transformer.
        """
        return self._applyMatrix(self.forwardTransform, forwardState)

    def reverse(self, reverseState):
        """Transform in the reverse direction.

        reverseState -- a list-formatted single-row array containing the output state of the transformer.
        """
        return self._applyMatrix(self.reverseTransform, reverseState)
class corexy(matrixTransformer):
    """CoreXY or H-bot based kinematics.

    See www.corexy.com
    """
    def __init__(self):
        """Creates a new corexy transformer using the standard CoreXY mixing matrix."""
        mixingMatrix = geometry.matrix([[0.5, 0.5], [0.5, -0.5]])
        super(corexy, self).__init__(forwardTransform=mixingMatrix)
#---- UTILITY TRANSFORMERS ----
class router(transformer):
    """A transformer that routes from a set of inputs to a set of outputs"""
    def __init__(self, forwardRoutingMap):
        """Initializes the routing transformer.

        forwardRoutingMap -- an ordered list whose indices correspond to the input positions, and whose values correspond to output positions.
                             [output[0], output[1], ..., output[n]]

        For example, to route input[0] to output[1] and vice versa, the routing map would be [1,0]. Note that the length of the
        routingMap must exactly equal the number of inputs and outputs, and that all mappings must be specified.

        Raises errors.MechanismError if the routing map is not a complete permutation of the positions 0..n-1.
        """
        self.forwardRoutingMap = forwardRoutingMap
        # Validate the routing map (previously a TODO): it must contain each position exactly once,
        # otherwise the reverse map below would be silently corrupt or raise an IndexError.
        if sorted(forwardRoutingMap) != list(range(len(forwardRoutingMap))):
            raise errors.MechanismError("Invalid routing map " + str(forwardRoutingMap) + ": must contain each output position exactly once.")
        self.reverseRoutingMap = list(range(len(forwardRoutingMap))) #pre-size the reverse routing map
        for index, value in enumerate(self.forwardRoutingMap): #build reverse routing map by inverting the permutation
            self.reverseRoutingMap[value] = index
        self.dimensions = self.calculateDimensions()

    def forward(self, forwardState):
        """Applies the forward routing map to forwardState and returns the routed list."""
        return [forwardState[index] for index in self.forwardRoutingMap]

    def reverse(self, reverseState):
        """Applies the inverse routing map to reverseState and returns the routed list."""
        return [reverseState[index] for index in self.reverseRoutingMap]

    def calculateDimensions(self):
        """Calculates and returns the dimensions of the router.

        Input and output dimensions both equal the length of the routing map.
        """
        routingMapSize = len(self.forwardRoutingMap)
        return (routingMapSize, routingMapSize)
class offset(transformer):
    """A transformer that applies a constant offset.

    This is useful for implementing homing and zeroing.
    """
    def __init__(self, dof):
        """Initializes the offset.

        dof -- the number of degrees of freedom of the offset transformer.
        """
        self.dof = dof
        self.dimensions = self.calculateDimensions()
        self.offset = geometry.array([0.0] * self.dof) #all offsets start at zero

    def calculateDimensions(self):
        """Calculates and returns the dimensions of the offset.

        The input and output dimensions both equal the number of degrees of freedom.
        """
        return (self.dof, self.dof)

    def set(self, offsetArray):
        """Sets the offset to be internally applied by the transformer.

        offsetArray -- a list-formatted 1D array containing the offsets to apply. Note that the sign is in the forward
                       direction, i.e. for an offset of [3,4], output = input + offset.

        This method is useful for setting the absolute position of a transformer chain, as is done in homing.
        """
        if not self.validateOffset(offsetArray):
            raise errors.MechanismError("Unable to set offset.")
        self.offset = geometry.array(offsetArray)

    def adjust(self, adjustmentArray):
        """Applies an adjustment to the offset.

        adjustmentArray -- a list-formatted array containing the values by which to change the internal offset.

        This method is useful for changing the desired output state of a transformer chain by a certain amount, as is done in zeroing.
        """
        if not self.validateOffset(adjustmentArray):
            raise errors.MechanismError("Unable to adjust offset.")
        self.offset = self.offset + geometry.array(adjustmentArray)

    def forward(self, forwardState):
        """Transform in the forward direction: returns forwardState plus the stored offset, as a list."""
        return list(forwardState + self.offset)

    def reverse(self, reverseState):
        """Transform in the reverse direction: returns reverseState minus the stored offset, as a list."""
        return list(reverseState - self.offset)

    def validateOffset(self, offsetArray):
        """Validates that a provided offset array is compatible with the transformer.

        offsetArray -- the offset array to be validated.

        Returns True if validation passes, or False if not.
        """
        arrayShape = geometry.arraySize(offsetArray)
        if len(arrayShape) > 1: #must be a 1D array
            utilities.notice(self, "Provided offset array has a dimension of "+ str(len(arrayShape)) + ", and must be 1D!")
            return False
        if arrayShape[0] != self.dof: #length must match the transformer's degrees of freedom
            utilities.notice(self, "Provided offset has a size of " + str(arrayShape[0]) + " DOF, but the transformer has " + str(self.dof)+ " DOF.")
            return False
        return True
class passThru(transformer):
    """A transformer that acts as a direct pass-thru of the input to the output.

    This type of transformer can act as a place-holder in a stack, so that the stack has the correct dimensionality.
    """
    def __init__(self, lanes):
        """Initializes the pass-thru.

        lanes -- the number of dimensions the pass-thru will pass.
        """
        self.lanes = lanes
        self.dimensions = self.calculateDimensions()

    def forward(self, forwardState):
        """Identity transform in the forward direction: returns forwardState unchanged."""
        return forwardState

    def reverse(self, reverseState):
        """Identity transform in the reverse direction: returns reverseState unchanged."""
        return reverseState

    def calculateDimensions(self):
        """Returns (lanes, lanes): input and output dimensions both equal the number of lanes."""
        return (self.lanes, self.lanes)
#--- TRANSFORMER CONTAINERS ---
class chain(transformer):
    """A serial chain of transformer elements."""
    def __init__(self, *transformers):
        """Initializes a new transformer chain.

        *transformers -- a series of transformer elements, provided as positional arguments in the forward direction.
        """
        self.transformChain = transformers
        self.dimensions = self.calculateDimensions()

    def forward(self, forwardState):
        """Tranforms from an input state of the chain to the corresponding output state.

        forwardState -- the forward-going input state of the transformer chain.

        Each element's forward output feeds the next element's input, front to back.
        Overrides transformer.forward().
        """
        state = forwardState
        for element in self.transformChain:
            state = element.forward(state)
        return state

    def reverse(self, outputState):
        """Tranforms from an output state of the chain to the corresponding input state.

        outputState -- the reverse-going output state of the transformer chain.

        The chain is walked back to front, feeding each element's reverse output into the previous element.
        Overrides transformer.reverse().
        """
        state = outputState
        for element in self.transformChain[::-1]:
            state = element.reverse(state)
        return state

    def calculateDimensions(self):
        """Determines and returns the input and output dimensions of the transformer chain.

        The chain's output dimension is that of its last element, and its input dimension that of its first.
        Overrides transformer.calculateDimensions().

        returns dimensions as a tuple in the format (outputDimension, inputDimension).
        """
        lastElementOutput = self.transformChain[-1].getSize()[0]
        firstElementInput = self.transformChain[0].getSize()[1]
        return (lastElementOutput, firstElementInput)
class stack(transformer):
    """A parallel stack of transformers."""
    def __init__(self, *transformers):
        """Initializes a new transformer stack.

        *transformers -- a parallel set of stacked transformers, provided in sequence from the 0th to Nth dimension.
        """
        self.transformerStack = transformers
        self.dimensions = self.calculateDimensions()

    def forward(self, forwardState):
        """Tranforms from an input state of the tranformer stack to the corresponding output state.

        forwardState -- the forward-going input state of the transformer stack.

        Transformation is accomplished by expanding the input state into chunks sized for each transformer in the stack.

        Raises errors.MechanismError if the provided state's dimension doesn't match the stack's input dimension.
        Note that this function over-rides its base class transformer.forward() function.
        """
        if not isinstance(forwardState, list): #if the forwardState is not provided as a list-formatted array, wrap it.
            forwardState = [forwardState]
        outputState = [] #initialize output state as an empty list
        for transformerElement in self.transformerStack:
            inputDimension = transformerElement.getSize()[1]
            if len(forwardState) >= inputDimension: #make sure there's enough input dimensions remaining
                if inputDimension == 1: #single-axis, so feed with dFloat rather than list.
                    forwardSubState = forwardState[0] #feed first value of forwardState
                    forwardState = forwardState[1:] #forwardState gets first value stripped
                    outputSubState = transformerElement.forward(forwardSubState) #perform transform to get output segment state
                else: #multi-axis, feed with a list
                    forwardSubState = forwardState[0:inputDimension]
                    forwardState = forwardState[inputDimension:]
                    outputSubState = transformerElement.forward(forwardSubState)
                if not isinstance(outputSubState, list): #output state is not a list, so wrap
                    outputState += [outputSubState]
                else:
                    outputState += outputSubState
            else:
                utilities.notice(self, "Cannot perform transform because dimension of forward state is less than input dimension of transformer.")
                raise errors.MechanismError("Encountered dimensionality mismatch while attempting transform.")
        if len(forwardState) == 0: #all input was consumed, as expected
            if len(outputState) == 1:
                return outputState[0] #single element, so strip away list
            else:
                return outputState
        else: #uh oh! some input is left over
            utilities.notice(self, "Cannot perform transform because dimension of forward state is greater than input dimension of transformer.")
            raise errors.MechanismError("Encountered dimensionality mismatch while attempting transform.")

    def reverse(self, outputState):
        """Tranforms from an output state of the tranformer stack to the corresponding input state.

        outputState -- the reverse-going output state of the transformer stack.

        Transformation is accomplished by expanding the output state into chunks sized for each transformer in the stack.

        Raises errors.MechanismError if the provided state's dimension doesn't match the stack's output dimension.
        Note that this function over-rides its base class transformer.reverse() function.
        """
        if not isinstance(outputState, list): #if the outputState is not provided as a list-formatted array, wrap it.
            outputState = [outputState]
        inputState = [] #initialize input state as an empty list
        for transformerElement in self.transformerStack:
            outputDimension = transformerElement.getSize()[0]
            if len(outputState) >= outputDimension: #make sure there's enough output dimensions remaining
                if outputDimension == 1: #single-axis, so feed with dFloat rather than list.
                    outputSubState = outputState[0] #feed first value of outputState
                    outputState = outputState[1:] #outputState gets first value stripped
                    inputSubState = transformerElement.reverse(outputSubState) #perform transform to get input segment state
                else: #multi-axis, feed with a list
                    outputSubState = outputState[0:outputDimension]
                    outputState = outputState[outputDimension:]
                    inputSubState = transformerElement.reverse(outputSubState)
                if not isinstance(inputSubState, list): #input state is not a list, so wrap
                    inputState += [inputSubState]
                else:
                    inputState += inputSubState
            else:
                #error messages were copy-pasted from forward() and incorrectly referred to the forward state
                utilities.notice(self, "Cannot perform transform because dimension of output state is less than output dimension of transformer.")
                raise errors.MechanismError("Encountered dimensionality mismatch while attempting transform.")
        if len(outputState) == 0: #all output was consumed, as expected
            if len(inputState) == 1:
                return inputState[0] #single element, so strip away list
            else:
                return inputState
        else: #uh oh! some output is left over
            utilities.notice(self, "Cannot perform transform because dimension of output state is greater than output dimension of transformer.")
            raise errors.MechanismError("Encountered dimensionality mismatch while attempting transform.")

    def calculateDimensions(self):
        """Determines and returns the input and output dimensions of the transformer stack.

        The dimensionality of the transformer is defined as the number of degrees of freedom it accepts as inputs and that it
        provides as outputs. Note that this method overrides transformer.calculateDimensions. Dimensionality is calculated
        by summing the dimensions of the parallel items in the stack.

        returns dimensions as a tuple in the format (outputDimension, inputDimension), where:
            outputDimension -- the number of degrees of freedom of the transformer output
            inputDimension -- the number of degrees of freedom of the transformer input
        """
        inputDimension = 0
        outputDimension = 0
        for transformerElement in self.transformerStack:
            outputSize, inputSize = transformerElement.getSize()
            outputDimension += outputSize
            inputDimension += inputSize
        return (outputDimension, inputDimension)
class gang(transformer):
    """Reduces the outputs of multiple single-axis transformers to one dimension.

    This object will convert multiple inputs to a single output, and is useful for e.g. machines that rely on multiple
    linear actuators moving in synchrony to maintain parallelism. This type of arrangement can be found on many varieties
    of hobbyist-grade 3D printers and CNC machines.

    NOTE(review): this was originally written as `def gang(transformer):` — a function whose parameter shadowed the
    transformer base class. The docstring ("This object will convert...") and the sibling container types (chain, stack)
    indicate a transformer subclass was intended, so it is declared as a (not yet implemented) class here.
    """
    pass
|
# -*- coding: utf-8 -*-
import unittest
from mahjong.ai.shanten import Shanten
from utils.tests import TestMixin
class ShantenTestCase(unittest.TestCase, TestMixin):
    """Exercises the shanten calculator over regular hands, chiitoitsu, kokushi musou, and hands with open sets."""

    def _run_cases(self, cases):
        """Asserts shanten numbers for a sequence of (expected, hand-notation-kwargs) pairs.

        A single Shanten instance is reused across the cases, mirroring the original per-test usage.
        """
        shanten = Shanten()
        for expected, hand in cases:
            tiles = self._string_to_34_array(**hand)
            self.assertEqual(shanten.calculate_shanten(tiles), expected)

    def test_shanten_number(self):
        self._run_cases([
            (Shanten.AGARI_STATE, dict(sou='111234567', pin='11', man='567')),
            (0, dict(sou='111345677', pin='11', man='567')),
            (1, dict(sou='111345677', pin='15', man='567')),
            (2, dict(sou='11134567', pin='15', man='1578')),
            (3, dict(sou='113456', pin='1358', man='1358')),
            (4, dict(sou='1589', pin='13588', man='1358', honors='1')),
            (5, dict(sou='159', pin='13588', man='1358', honors='12')),
            (6, dict(sou='1589', pin='258', man='1358', honors='123')),
            (Shanten.AGARI_STATE, dict(sou='11123456788999')),
            (0, dict(sou='11122245679999')),
        ])

    def test_shanten_number_and_chitoitsu(self):
        self._run_cases([
            (Shanten.AGARI_STATE, dict(sou='114477', pin='114477', man='77')),
            (0, dict(sou='114477', pin='114477', man='76')),
            (1, dict(sou='114477', pin='114479', man='76')),
            (2, dict(sou='114477', pin='14479', man='76', honors='1')),
        ])

    def test_shanten_number_and_kokushi_musou(self):
        self._run_cases([
            (Shanten.AGARI_STATE, dict(sou='19', pin='19', man='19', honors='12345677')),
            (0, dict(sou='129', pin='19', man='19', honors='1234567')),
            (1, dict(sou='129', pin='129', man='19', honors='123456')),
            (2, dict(sou='129', pin='129', man='129', honors='12345')),
        ])

    def test_shanten_number_and_open_sets(self):
        shanten = Shanten()
        tiles = self._string_to_34_array(sou='44467778', pin='222567')
        #with no melds the hand is already complete
        self.assertEqual(shanten.calculate_shanten(tiles, open_sets_34=[]), Shanten.AGARI_STATE)
        melds = [self._string_to_open_34_set(sou='777')]
        self.assertEqual(shanten.calculate_shanten(tiles, open_sets_34=melds), 0)
        tiles = self._string_to_34_array(sou='23455567', pin='222', man='345')
        melds = [self._string_to_open_34_set(man='345'), self._string_to_open_34_set(sou='555')]
        self.assertEqual(shanten.calculate_shanten(tiles, open_sets_34=melds), 0)
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from importlib import import_module
import logging
import os
import pkgutil
from horizon.utils import file_discovery
from openstack_dashboard import theme_settings
def import_submodules(module):
    """Import every direct submodule of *module*.

    Returns a dict mapping each unqualified submodule name to the imported
    module object.  Submodules that fail to import are logged and omitted.
    """
    found = {}
    prefix = module.__name__ + '.'
    for _loader, qualname, _ispkg in pkgutil.iter_modules(module.__path__,
                                                          prefix):
        try:
            submodule = import_module(qualname)
        except ImportError as e:
            # FIXME: Make the errors non-fatal (do we want that?).
            logging.warning("Error importing %s", qualname)
            logging.exception(e)
        else:
            short_name = qualname.rsplit('.', 1)[1]
            found[short_name] = submodule
    return found
def import_dashboard_config(modules):
    """Collect and merge DASHBOARD/PANEL configuration from all submodules.

    Returns (key, config-dict) pairs sorted by the file name of the module
    that most recently contributed to each entry.
    """
    config = collections.defaultdict(dict)
    for module in modules:
        for submodule in import_submodules(module).values():
            if hasattr(submodule, 'DASHBOARD'):
                dashboard = submodule.DASHBOARD
                config[dashboard].update(submodule.__dict__)
            elif any(hasattr(submodule, attr)
                     for attr in ('PANEL', 'PANEL_GROUP', 'FEATURE')):
                # If enabled and local.enabled contain a same filename,
                # the file loaded later (i.e., local.enabled) will be used.
                key = submodule.__name__.rsplit('.', 1)[1]
                config[key] = submodule.__dict__
            else:
                logging.warning("Skipping %s because it doesn't have DASHBOARD"
                                ", PANEL, PANEL_GROUP, or FEATURE defined.",
                                submodule.__name__)
    return sorted(config.items(),
                  key=lambda item: item[1]['__name__'].rsplit('.', 1)[1])
def update_dashboards(modules, horizon_config, installed_apps):
    """Imports dashboard and panel configuration from modules and applies it.
    The submodules from specified modules are imported, and the configuration
    for the specific dashboards is merged, with the later modules overriding
    settings from the former. Then the configuration is applied to
    horizon_config and installed_apps, in alphabetical order of files from
    which the configurations were imported.
    For example, given this setup:
    | foo/__init__.py
    | foo/_10_baz.py
    | foo/_20_qux.py
    | bar/__init__.py
    | bar/_30_baz_.py
    and being called with ``modules=[foo, bar]``, we will first have the
    configuration from ``_10_baz`` and ``_30_baz`` merged, then the
    configurations will be applied in order ``qux``, ``baz`` (``baz`` is
    second, because the most recent file which contributed to it, ``_30_baz``,
    comes after ``_20_qux``).
    Panel specific configurations are stored in horizon_config. Dashboards
    from both plugin-based and openstack_dashboard must be registered before
    the panel configuration can be applied. Making changes to the panel is
    deferred until the horizon autodiscover is completed, configurations are
    applied in alphabetical order of files where it was imported.
    """
    config_dashboards = horizon_config.get('dashboards', [])
    if config_dashboards or horizon_config.get('default_dashboard'):
        logging.warning(
            '"dashboards" and "default_dashboard" in (local_)settings is '
            'DEPRECATED now and may be unsupported in some future release. '
            'The preferred way to specify the order of dashboards and the '
            'default dashboard is the pluggable dashboard mechanism (in %s).',
            ', '.join([os.path.abspath(module.__path__[0])
                       for module in modules])
        )
    # Accumulators for everything the enabled config files may contribute.
    enabled_dashboards = []
    disabled_dashboards = []
    exceptions = horizon_config.get('exceptions', {})
    apps = []
    angular_modules = []
    js_files = []
    js_spec_files = []
    scss_files = []
    panel_customization = []
    header_sections = []
    update_horizon_config = {}
    for key, config in import_dashboard_config(modules):
        if config.get('DISABLED', False):
            # Remember disabled dashboards so they can be filtered out of the
            # settings-specified order below.
            if config.get('DASHBOARD'):
                disabled_dashboards.append(config.get('DASHBOARD'))
            continue
        _apps = config.get('ADD_INSTALLED_APPS', [])
        apps.extend(_apps)
        _header_sections = config.get('ADD_HEADER_SECTIONS', [])
        header_sections.extend(_header_sections)
        if config.get('AUTO_DISCOVER_STATIC_FILES', False):
            for _app in _apps:
                module = import_module(_app)
                base_path = os.path.join(module.__path__[0], 'static/')
                file_discovery.populate_horizon_config(horizon_config,
                                                       base_path)
        add_exceptions = config.get('ADD_EXCEPTIONS', {}).items()
        for category, exc_list in add_exceptions:
            # Merge without duplicating already-registered exception classes.
            exceptions[category] = tuple(set(exceptions.get(category, ())
                                             + exc_list))
        angular_modules.extend(config.get('ADD_ANGULAR_MODULES', []))
        # avoid pulling in dashboard javascript dependencies multiple times
        existing = set(js_files)
        js_files.extend([f for f in config.get('ADD_JS_FILES', [])
                         if f not in existing])
        js_spec_files.extend(config.get('ADD_JS_SPEC_FILES', []))
        scss_files.extend(config.get('ADD_SCSS_FILES', []))
        update_horizon_config.update(
            config.get('UPDATE_HORIZON_CONFIG', {}))
        if config.get('DASHBOARD'):
            dashboard = key
            enabled_dashboards.append(dashboard)
            if config.get('DEFAULT', False):
                horizon_config['default_dashboard'] = dashboard
        elif config.get('PANEL') or config.get('PANEL_GROUP'):
            # The raw module __dict__ carries __builtins__; drop it before
            # storing the panel config.
            config.pop("__builtins__", None)
            panel_customization.append(config)
    # Preserve the dashboard order specified in settings
    dashboards = ([d for d in config_dashboards
                   if d not in disabled_dashboards] +
                  [d for d in enabled_dashboards
                   if d not in config_dashboards])
    horizon_config['panel_customization'] = panel_customization
    horizon_config['header_sections'] = header_sections
    horizon_config['dashboards'] = tuple(dashboards)
    horizon_config.setdefault('exceptions', {}).update(exceptions)
    horizon_config.update(update_horizon_config)
    horizon_config.setdefault('angular_modules', []).extend(angular_modules)
    horizon_config.setdefault('js_files', []).extend(js_files)
    horizon_config.setdefault('js_spec_files', []).extend(js_spec_files)
    horizon_config.setdefault('scss_files', []).extend(scss_files)
    # apps contains reference to applications declared in the enabled folder
    # basically a list of applications that are internal and external plugins
    # installed_apps contains reference to applications declared in settings
    # such as django.contribe.*, django_pyscss, compressor, horizon, etc...
    # for translation, we are only interested in the list of external plugins
    # so we save the reference to it before we append to installed_apps
    horizon_config.setdefault('plugins', []).extend(apps)
    installed_apps[0:0] = apps
# Order matters, list the xstatic module name and the entry point file(s) for
# that module (this is often defined as the "main" in bower.json, and
# as the xstatic module MAIN variable in the very few compliant xstatic
# modules). If the xstatic module does define a MAIN then set the files
# list to None.
# This list is to be used as the base list which is potentially added to in
# local_settings.py before being passed to get_xstatic_dirs()
BASE_XSTATIC_MODULES = [
    ('xstatic.pkg.jquery', ['jquery.js']),
    ('xstatic.pkg.jquery_migrate', ['jquery-migrate.js']),
    ('xstatic.pkg.angular', [
        'angular.js',
        'angular-cookies.js',
        'angular-sanitize.js',
        'angular-route.js'
    ]),
    ('xstatic.pkg.angular_bootstrap', ['angular-bootstrap.js']),
    # None => the xstatic package declares its own entry points via MAIN
    # (see the comment block above this list).
    ('xstatic.pkg.angular_gettext', None),
    ('xstatic.pkg.angular_lrdragndrop', None),
    ('xstatic.pkg.angular_smart_table', None),
    ('xstatic.pkg.angular_fileupload', ['ng-file-upload-all.js']),
    ('xstatic.pkg.d3', ['d3.js']),
    ('xstatic.pkg.jquery_quicksearch', ['jquery.quicksearch.js']),
    ('xstatic.pkg.jquery_tablesorter', ['jquery.tablesorter.js']),
    ('xstatic.pkg.jquery_ui', ['jquery-ui.js']),
    ('xstatic.pkg.bootstrap_scss', ['js/bootstrap.js']),
    ('xstatic.pkg.bootstrap_datepicker', ['bootstrap-datepicker.js']),
    ('xstatic.pkg.hogan', ['hogan.js']),
    ('xstatic.pkg.rickshaw', ['rickshaw.js']),
    ('xstatic.pkg.jsencrypt', None),
    ('xstatic.pkg.objectpath', ['ObjectPath.js']),
    ('xstatic.pkg.tv4', ['tv4.js']),
    ('xstatic.pkg.angular_schema_form', ['schema-form.js']),
    # @imported in scss files directly
    ('xstatic.pkg.font_awesome', []),
    ('xstatic.pkg.bootswatch', []),
    ('xstatic.pkg.roboto_fontface', []),
    ('xstatic.pkg.mdi', []),
    # testing only, not included in application
    ('xstatic.pkg.jasmine', []),
    ('xstatic.pkg.termjs', []),
]
def get_xstatic_dirs(XSTATIC_MODULES, HORIZON_CONFIG):
    """Discover static file configuration of the xstatic modules.

    For each entry in the XSTATIC_MODULES list we determine the entry
    point files (which may come from the xstatic MAIN var) and then
    determine where in the Django static tree the xstatic package's contents
    should be placed.

    The xstatic.pkg.jquery_ui package had its contents moved by packagers so
    it must be handled as a special case.
    """
    staticfiles_dirs = []
    HORIZON_CONFIG['xstatic_lib_files'] = []
    for module_name, files in XSTATIC_MODULES:
        module = import_module(module_name)
        if module_name == 'xstatic.pkg.jquery_ui':
            # Packagers moved jquery-ui's contents; the 1.10.x versions
            # already contain the 'ui' directory.
            if module.VERSION.startswith('1.10.'):
                files = ['ui/' + files[0]]
        staticfiles_dirs.append(
            ('horizon/lib/' + module.NAME, module.BASE_DIR)
        )
        # Prefer the entry points declared by the xstatic package itself.
        if hasattr(module, 'MAIN'):
            files = module.MAIN
            if not isinstance(files, list):
                files = [files]
        # Only JavaScript is linked here; css etc. is explicitly included
        # in style/themes as appropriate.
        js_entries = [name for name in files if name.endswith('.js')]
        # Record the files to link in the HTML.
        for entry in js_entries:
            HORIZON_CONFIG['xstatic_lib_files'].append(
                'horizon/lib/' + module.NAME + '/' + entry)
    return staticfiles_dirs
def find_static_files(
        HORIZON_CONFIG,
        AVAILABLE_THEMES,
        THEME_COLLECTION_DIR,
        ROOT_PATH):
    """Discover static files for horizon, openstack_dashboard and themes.

    Populates HORIZON_CONFIG in place with discovered js/spec/template file
    lists and with per-theme static file info, including which client-side
    HTML templates each theme overrides.
    """
    # Imported here rather than at module level — presumably to avoid import
    # cycles with the packages being discovered; confirm before moving.
    import horizon
    import openstack_dashboard
    os_dashboard_home_dir = openstack_dashboard.__path__[0]
    horizon_home_dir = horizon.__path__[0]
    # note the path must end in a '/' or the resultant file paths will have a
    # leading "/"
    file_discovery.populate_horizon_config(
        HORIZON_CONFIG,
        os.path.join(horizon_home_dir, 'static/')
    )
    # filter out non-angular javascript code and lib
    HORIZON_CONFIG['js_files'] = ([f for f in HORIZON_CONFIG['js_files']
                                   if not f.startswith('horizon/')])
    # note the path must end in a '/' or the resultant file paths will have a
    # leading "/"
    file_discovery.populate_horizon_config(
        HORIZON_CONFIG,
        os.path.join(os_dashboard_home_dir, 'static/'),
        sub_path='app/'
    )
    # Discover theme static resources, and in particular any
    # static HTML (client-side) that the theme overrides
    theme_static_files = {}
    theme_info = theme_settings.get_theme_static_dirs(
        AVAILABLE_THEMES,
        THEME_COLLECTION_DIR,
        ROOT_PATH)
    for url, path in theme_info:
        discovered_files = {}
        # discover static files provided by the theme
        file_discovery.populate_horizon_config(
            discovered_files,
            path
        )
        # Get the theme name from the theme url
        theme_name = url.split('/')[-1]
        # build a dictionary of this theme's static HTML templates.
        # For each overridden template, strip off the '/templates/' part of the
        # theme filename then use that name as the key, and the location in the
        # theme directory as the value. This allows the quick lookup of
        # theme path for any file overridden by a theme template
        template_overrides = {}
        for theme_file in discovered_files['external_templates']:
            # Example:
            # external_templates_dict[
            #     'framework/widgets/help-panel/help-panel.html'
            # ] = 'themes/material/templates/framework/widgets/\
            #     help-panel/help-panel.html'
            (templates_part, override_path) = theme_file.split('/templates/')
            # NOTE(review): the full theme_file (including its templates
            # prefix) is appended after the theme name — this assumes
            # theme_file begins with a path separator so the joined value
            # matches the example above; confirm against file_discovery.
            template_overrides[override_path] = 'themes/' + \
                theme_name + theme_file
        discovered_files['template_overrides'] = template_overrides
        # Save all of the discovered file info for this theme in our
        # 'theme_files' object using the theme name as the key
        theme_static_files[theme_name] = discovered_files
    # Add the theme file info to the horizon config for use by template tags
    HORIZON_CONFIG['theme_static_files'] = theme_static_files
|
<reponame>Tenderize/audius-protocol
from datetime import date, timedelta
import redis
from src.challenges.challenge_event_bus import ChallengeEvent, ChallengeEventBus
from src.challenges.track_upload_challenge import track_upload_challenge_manager
from src.models import Block, Track, User
from src.models.models import Challenge
from src.utils.config import shared_config
from src.utils.db_session import get_db
REDIS_URL = shared_config["redis"]["url"]
def test_track_upload_challenge(app):
    """Exercise the track-upload challenge lifecycle end to end.

    Checks that uploads dispatched before the challenge's starting block are
    ignored, that uploads afterwards advance the step count to completion
    (3 steps), and that the challenge stays complete even if the underlying
    tracks are later marked deleted.
    """
    redis_conn = redis.Redis.from_url(url=REDIS_URL)
    # create user
    with app.app_context():
        db = get_db()
    today = date.today()
    # Blocks at heights before (1) and at/after (30000000, 30000001) the
    # point where this challenge starts counting uploads.
    block1 = Block(blockhash="0x1", number=1)
    block2 = Block(blockhash="0x2", number=30000000)
    block3 = Block(blockhash="0x3", number=30000001)
    user = User(
        blockhash="0x1",
        blocknumber=1,
        txhash="xyz",
        user_id=1,
        handle="TestHandle",
        handle_lc="testhandle",
        is_current=True,
        created_at=today - timedelta(days=100),
        updated_at=today - timedelta(days=100),
    )
    # track1 predates the challenge; tracks 2-4 are uploaded afterwards.
    track1 = Track(
        blockhash="0x1",
        blocknumber=1,
        txhash="xyz",
        owner_id=1,
        track_id=1,
        route_id="1",
        track_segments=[],
        is_unlisted=False,
        is_current=True,
        is_delete=False,
        created_at=today - timedelta(days=100),
        updated_at=today - timedelta(days=100),
    )
    track2 = Track(
        blockhash="0x2",
        blocknumber=30000000,
        txhash="yzx",
        owner_id=1,
        track_id=2,
        route_id="2",
        track_segments=[],
        is_unlisted=False,
        is_current=True,
        is_delete=False,
        created_at=today - timedelta(days=1),
        updated_at=today - timedelta(days=1),
    )
    track3 = Track(
        blockhash="0x3",
        blocknumber=30000001,
        txhash="zxy",
        owner_id=1,
        track_id=3,
        route_id="3",
        track_segments=[],
        is_unlisted=False,
        is_current=True,
        is_delete=False,
        created_at=today,
        updated_at=today,
    )
    track4 = Track(
        blockhash="0x3",
        blocknumber=30000001,
        txhash="abc",
        owner_id=1,
        track_id=4,
        route_id="4",
        track_segments=[],
        is_unlisted=False,
        is_current=True,
        is_delete=False,
        created_at=today,
        updated_at=today,
    )
    with db.scoped_session() as session:
        bus = ChallengeEventBus(redis_conn)
        # Register events with the bus
        bus.register_listener(
            ChallengeEvent.track_upload, track_upload_challenge_manager
        )
        # set challenge as active for purposes of test
        session.query(Challenge).filter(Challenge.id == "track-upload").update(
            {"active": True}
        )
        session.add(block1)
        session.add(block2)
        session.add(block3)
        session.flush()
        session.add(user)
        session.add(track1)
        # Process dummy event at block number before this challenge is added
        bus.dispatch(ChallengeEvent.track_upload, 1, 1)
        bus.flush()
        bus.process_events(session)
        user_challenges = track_upload_challenge_manager.get_user_challenge_state(
            session, ["1"]
        )
        # We should not have registered a count for this event
        assert not user_challenges
        # Process dummy event at block number when challenge is added
        session.add(track2)
        bus.dispatch(ChallengeEvent.track_upload, 30000000, 1)
        bus.flush()
        bus.process_events(session)
        user_challenge = track_upload_challenge_manager.get_user_challenge_state(
            session, ["1"]
        )[0]
        # We should have completed a single step (one track upload)
        assert user_challenge.current_step_count == 1
        assert not user_challenge.is_complete
        # Process two more dummy events to reach the step count (i.e. 3) for completion
        session.add(track3)
        bus.dispatch(ChallengeEvent.track_upload, 30000001, 1)
        session.add(track4)
        bus.dispatch(ChallengeEvent.track_upload, 30000001, 1)
        bus.flush()
        bus.process_events(session)
        user_challenge = track_upload_challenge_manager.get_user_challenge_state(
            session, ["1"]
        )[0]
        # We should have completed the challenge
        assert user_challenge.current_step_count == 3
        assert user_challenge.is_complete
        # ensure that if we lose some data now that the thing is complete, we don't change the status of the challenge
        session.query(Track).filter(Track.owner_id == user.user_id).update(
            {"is_delete": True}
        )
        session.flush()
        bus.dispatch(ChallengeEvent.track_upload, 3, 1)
        bus.flush()
        bus.process_events(session)
        user_challenge = track_upload_challenge_manager.get_user_challenge_state(
            session, ["1"]
        )[0]
        # The challenge should still be completed
        assert user_challenge.current_step_count == 3
        assert user_challenge.is_complete
|
<gh_stars>1-10
""" Runs a communication test with a target node."""
# ---- IMPORTS ----
from pygestalt import nodes, config
import time #for clocking data exchange rate
import csv #for outputting data
# ---- SYNTHETIC MODE ----
# config.syntheticModeOn() #Un-comment this line to run in synthetic mode (i.e. test mode)
# ---- DEFINE TEST NODE ----
# Choose exactly one of the two node types; the commented alternative targets
# a networked Gestalt node instead of an Arduino-based one.
testNode = nodes.arduinoGestaltNode(name = "Comm. Test Node", filename = "arduinoNode_commTest.py") #filename must be provided for synthetic mode
# testNode = nodes.networkedGestaltNode(name = "Comm. Test Node", filename = "gestaltNode_commTest.py") #filename must be provided for synthetic mode
# ---- GLOBAL DEFINITIONS ----
basePacketLength = 6 #length of packet frame in bytes (non-payload overhead); added to the payload size when reporting total packet size
# ---- TEST FUNCTIONS ----
def exchange(outboundPayloadLength, inboundPayloadLength, numExchanges = 1, verbose = False):
    """Executes a series of bi-directional packet exchanges with the test node.
    outboundPayloadLength -- the length (in bytes) of the outbound payload
    inboundPayloadLength -- the length (in bytes) of the inbound payload
    numExchanges -- the number of back-and-forth exchanges with the physical node
    verbose -- if True, will print current test to the terminal
    Returns the average round-trip time (in seconds) required for the requested exchanges.
    """
    # NOTE: Python 2 module (print statements); timing covers all exchanges.
    startTime = time.time()
    if verbose:
        print "Running " + str(numExchanges) + " exchanges with " + str(outboundPayloadLength) + " outbound and " + str(inboundPayloadLength) + " inbound bytes."
    for exchangeIndex in range(numExchanges):
        testNode.exchangeTestPacket(outboundPayloadLength, inboundPayloadLength)
    elapsedTime = time.time() - startTime
    # average over all exchanges to smooth out per-packet timing jitter
    averageRoundTripTime = elapsedTime / float(numExchanges)
    if verbose:
        print " -> " + str(round(1.0/averageRoundTripTime, 1)) + " exchanges per second."
    return averageRoundTripTime
def symmetricPayloadSizeSweep(startSize, endSize, numExchanges, verbose = False):
    """Tests the packet exchange rate with a physical node across a range of payload sizes.
    startSize -- the number of payload bytes in the outbound and inbound packet at the start of the sweep
    endSize -- the number of payload bytes in the outbound and inbound packet at the end of the sweep
    numExchanges -- the number of test packet exchanges to conduct for each payload size.
    verbose -- if True, provides progress information
    returns [(payloadSize, exchangeRate), ...] for each payload size in the sweep.
    payloadSize -- the number of payload bytes in the outbound and inbound packets
    exchangeRate -- the number of round-trip exchanges that were executed per second.
    """
    results = []
    for payloadSize in range(startSize, endSize + 1):
        # identical outbound and inbound payload sizes ("symmetric")
        roundTripTime = exchange(payloadSize, payloadSize, numExchanges, verbose)
        results.append((payloadSize, round(1.0 / roundTripTime, 1)))
    return results
def printResult(sweepResult):
    """Prints the results to the terminal.
    sweepResult -- [(payloadSize, exchangeRate), ...] as returned by
    symmetricPayloadSizeSweep.
    """
    print " "
    print "PAYLOAD SWEEP RESULTS:"
    for payloadSize, exchangeRate in sweepResult:
        print "  " + str(payloadSize) + " PAYLOAD BYTES: " + str(exchangeRate) + " ROUND-TRIP PACKETS PER SEC."
def outputResult(sweepResult, filename = 'commTestResults.csv'):
    """Outputs the results to a CSV file.
    sweepResult -- the results of the sweep, in the format [(payloadSize, exchangeRate), ...]
    filename -- the name of the file to which the results should be saved.
    """
    # FIX: use a context manager so the file is closed even if a write
    # raises; the previous open()/close() pair leaked the handle on error.
    # 'wb' is the mode the csv module requires on Python 2 (this script
    # uses Python 2 print statements elsewhere).
    with open(filename, 'wb') as outputFile:
        csvWriter = csv.writer(outputFile)
        csvWriter.writerow(["Payload Size", "Total Packet Size", "Round-Trip Rate (exchanges/sec)"]) #write header
        for payloadSize, exchangeRate in sweepResult:
            # total packet size = payload plus the fixed frame overhead
            csvWriter.writerow([payloadSize, payloadSize + basePacketLength, exchangeRate])
# ---- LOAD NEW FIRMWARE ----
# testNode.loadProgram('firmware/gestaltNode_commTest/gestaltNode_commTest.hex')
# ---- RUN TEST ----
# Sweep symmetric payload sizes 0..200 bytes, 100 exchanges per size, then
# report to the terminal and write commTestResults.csv.
sweepResults = symmetricPayloadSizeSweep(0,200, 100, verbose = True)
printResult(sweepResults)
outputResult(sweepResults)
|
<reponame>froec/BQonRDM
import numpy as np
from core import geodesics
from core import utils
from bayesquad.bq_wrapper import BQWrapper
import time
import copy
import dill
class LandQuadrature():
    """Base class for LAND normalization-constant estimators.

    Subclasses implement estimate_norm_constant() for a specific
    integration scheme (Monte Carlo, Bayesian quadrature, ...).
    """

    def __init__(self, land):
        # The LAND mixture model this quadrature operates on, and its
        # integration settings (method, logging, sample counts, ...).
        self.land = land
        self.integration_params = land.integration_params

    def estimate_norm_constant(self, K):
        # Abstract hook; subclasses override it.  NOTE(review): subclasses
        # use the signature (k, consider_failed=False) — consider aligning
        # this base signature with them.
        pass
class MCQuadrature(LandQuadrature):
    """Monte Carlo estimator of the LAND normalization constant.

    Draws tangent vectors from N(0, Sigma), maps them onto the manifold via
    the exponential map, and averages the Riemannian volume element.
    """

    # This function estimates the normalization constant using Monte Carlo sampling on the tangent space
    def estimate_norm_constant(self, k, consider_failed=False):
        if self.integration_params['method'] != 'MC':
            print("oops, this shouldn't happen. Integration parameters are probably misspecified.")
        mu = self.land.means[k]
        Sigma = self.land.sigmas[k]
        S = self.land.model_params["S"]  # number of MC samples to collect
        start = time.time()
        D = Sigma.shape[0]
        Z_eucl = np.sqrt(((2 * np.pi) ** D) * np.linalg.det(Sigma))  # The Euclidean normalization constant
        # Initialize the matrices for the samples and the matrix A
        # (A A^T = Sigma, via the eigendecomposition; used to color white noise)
        L, U = np.linalg.eigh(Sigma)
        A = U @ np.diag(np.sqrt(L))
        V_samples = np.zeros((S, D))
        V_samples[:] = np.nan
        X_samples = np.zeros((S, D))
        X_samples[:] = np.nan
        s = 0
        # Rejection-style loop: keep sampling until S successful expmaps.
        while True:
            try:
                v = A @ np.random.randn(D, 1)  # D x 1, get white noise to sample from N(0, Sigma)
                curve, failed = geodesics.expmap(self.land.manifold, x=mu.reshape(-1, 1), v=v.reshape(-1, 1))
                if not failed:
                    X_samples[s, :] = curve(1)[0].flatten()
                    V_samples[s, :] = v.flatten()
                    s = s + 1
                else:
                    print('Expmap failed for v %s' % v)
            except Exception:
                # NOTE(review): if the draw itself raised before v was bound,
                # this print would NameError; in practice expmap is the
                # expected failure point.
                print('Expmap failed for v %s' % v)
            if s == S:  # We have collected all the samples we need
                break
        inds = np.isnan(X_samples[:, 0])  # The failed exponential maps
        X_samples = X_samples[~inds, :]  # Keep the non-failed ones
        V_samples = V_samples[~inds, :]
        volM_samples = self.land.manifold.measure(X_samples.T).flatten()  # Compute the volume element sqrt(det(M))
        norm_constant = np.mean(volM_samples) * Z_eucl  # Estimate the normalization constant
        end = time.time()
        time_mc = end - start
        print("Const: %s" % norm_constant)
        print("Runtime for MC: %s" % time_mc)
        # save some debug information
        if self.integration_params["logger"]:
            savedir = self.integration_params["savedir"]
            _means = self.land.means.copy()
            _sigmas = self.land.sigmas.copy()
            _consts = self.land.consts
            _means[k] = mu
            _sigmas[k] = Sigma
            _consts[k] = norm_constant
            res_dict = {'mu' : mu, 'Sigma' : Sigma, 'Z' : Z_eucl, 'X' : None, 'Y' : None, 'm_int' : norm_constant / Z_eucl, \
                        'x_preds' : None, 'y_preds' : None, 'pred_means' : None, 'pred_variances' : None,\
                        'lengthscale' : None, 'variance' : None,\
                        'V_samples' : V_samples, 'volM_samples' : volM_samples, 'X_samples' : X_samples,\
                        'runtime' : time_mc, 'logmaps' : self.land.logmaps[k], 'k' : k,\
                        'all_mus' : _means,\
                        'all_sigmas' : _sigmas,\
                        'all_logmaps' : self.land.logmaps,\
                        'all_consts' : _consts,\
                        'weights' : self.land.weights}
            utils.logger(res_dict, savedir)
        # Store results on the LAND object for component k.
        self.land.consts[k] = norm_constant
        self.land.V_samples[k] = V_samples
        self.land.volM_samples[:,k] = volM_samples.flatten()
        self.land.Z_eucl[k] = Z_eucl
        # we can compute the "true" manifold sigma = int v v^t exp(-0.5 <v, Gamma v>) dv
        # NOTE(review): bare except silently hides any failure here —
        # consider narrowing to the expected error type.
        try:
            self.land.manifold_sigmas[k] = 1/norm_constant * 1/V_samples.shape[0] * V_samples.T @ np.diag(volM_samples.flatten()) @ V_samples
            #print("true Sigma is:")
            #print(self.land.manifold_sigmas[k])
        except:
            pass
        if self.integration_params.get("save_lands"):
            # save the land object
            landcopy = self.land.getDumpableLand()
            utils.logger(landcopy, self.integration_params["savedir"] + "lands/", startswith="land_")
        # TODO: variance for MC estimator
        return norm_constant, 0
class BQuadrature(LandQuadrature):
    """Bayesian quadrature (WSABI) estimator of the LAND normalization constant.

    Reuses observations from the previous iteration when the component mean
    has not moved; otherwise seeds the GP with the component's logmaps.
    """

    # estimating the normalization constant using BQ
    def estimate_norm_constant(self, k, consider_failed=False):
        # Sanity check: this estimator is only meant for the 'BQ' setting.
        if self.integration_params['method'] != 'BQ':
            print("oops, this shouldn't happen. Integration parameters are probably misspecified.")
        mu = self.land.means[k]
        Sigma = self.land.sigmas[k]
        D = Sigma.shape[0]
        S = self.land.model_params['S']
        Z = np.sqrt(((2 * np.pi) ** D) * np.linalg.det(Sigma))  # The Euclidean normalization constant
        # these containers are needed for the gradients
        # but this requires that the BQ method uses MC inside to predict
        V_samples = np.zeros((S+1, D))
        V_samples[:] = np.nan
        volM_samples = np.zeros(S+1)
        volM_samples[:] = np.nan

        # the integrand
        # works directly on a tangent vector
        # needs to be integrated against a gaussian with covariance matrix Sigma
        def fTangent(v):
            curve, failed = geodesics.expmap(self.land.manifold, x=mu, v=v)  # Solve the IVP problem for expmap
            if failed:
                print("expmap failed for v: %s" % v)
                print("the failed result is %s" % curve(1)[0].reshape(-1,1))
            x = curve(1)[0].reshape(-1, 1)  # D x 1, the point on the manifold
            meas = self.land.manifold.measure(x)  # The Riemannian measure |M(x)|^1/2
            if np.isnan(np.array(meas)):
                print("result is NaN")
                print("v:")
                print(v)
            return meas

        # for multiple tangent vectors at once
        def fTangent_multiple(vs):
            return np.apply_along_axis(fTangent, 1, vs).reshape(-1,1,)

        # Returns the geodesic curve itself (used by the BQ wrapper).
        def ExpmapCurve(v):
            curve, failed = geodesics.expmap(self.land.manifold, x=mu, v=v)  # Solve the IVP problem for expmap
            if failed:
                print("expmap failed for v: %s" % v)
                print("the failed result is %s" % curve(1)[0].reshape(-1,1))
            return curve

        start = time.time()
        fun = fTangent_multiple
        # reuse information from the last iteration
        last_mu = self.land.transfer_dict[k].get("last_mu")
        last_X = self.land.transfer_dict[k].get("last_X")
        last_Y = self.land.transfer_dict[k].get("last_Y")
        last_kernel = self.land.transfer_dict[k].get("last_kernel")
        reusing = False  # indicates whether mu changed or not, we can reuse old observations if it didnt
        if self.integration_params.get("ever_reuse"):
            if last_mu is not None:
                print("can BQ reuse information?")
                print("last mu: %s " % last_mu)
                print("new mu: %s" % mu)
                if np.linalg.norm(last_mu - mu) < 1e-6:
                    print("reusing last " + str(last_X.shape[0]) + " observations.")
                    reusing = True
                    X = last_X
                    Y = last_Y
                else:
                    print("not reusing")
            else:
                print("last mu is None!")
        # NOTE(review): a try/except was disabled by turning `try:` into
        # `if True:` — the matching `else:` branch below is unreachable and
        # references an undefined name `e` (would raise NameError if reached).
        if True:  # try:
            if not reusing:
                # if we don't reuse, then mu changed
                # but in this case, we can use the logmaps!
                # to do so, we have to evaluate the measure at the data points
                # but only use non-failed logmaps
                # if consider_failed=True, we have to check land.failed to see which
                # logmaps succeeded. this is only for the initialization of the LAND
                # afterwards, failed logmaps are NaN
                if self.integration_params.get('use_logmaps') is None or self.integration_params.get('use_logmaps'):
                    logmaps = []
                    measures = []
                    if consider_failed:
                        #print("considering failed logmaps..")
                        for il, l in enumerate(self.land.logmaps[k]):
                            if self.land.inducing_points[il] == 1 and not self.land.failed[il,k]:
                                logmaps.append(l)
                                measures.append(self.land.data_measures[il])
                    else:
                        for il, l in enumerate(self.land.logmaps[k]):
                            if self.land.inducing_points[il] == 1 and not np.isnan(l).any():
                                logmaps.append(l)
                                measures.append(self.land.data_measures[il])
                    measures = np.array([measures]).reshape(-1,1)
                    logmaps = np.vstack(logmaps).reshape(-1,D)
                    X = logmaps
                    Y = measures
                else:
                    X = np.array([])
                    Y = np.array([])
                if self.integration_params.get('verbose'):
                    print("y init: %s" % str(Y.shape))
                #last_kernel = None
                # NOTE(review): always-true guard; this message prints
                # unconditionally here and may be misleading.
                if True:
                    print("last_mu is none!")
            logger = utils.logger if (self.integration_params.get('logger') is not None \
                                      and self.integration_params.get('logger') is not False) else None
            w = BQWrapper(D, mu=mu, Z=Z, k=last_kernel, n_grad=S, plot=False, logger=logger, land=self.land)
            n_samples = self.integration_params["reusing_samples"] if reusing else self.integration_params["new_samples"]
            n_batches = self.integration_params["reusing_batches"] if reusing else self.integration_params["new_batches"]
            gp_mean = self.land.manifold.asymptotic_measure() if self.integration_params.get("asymptotic_mean") else 0.
            m_int, V_samples, volM_samples, int_variance = w.wsabi_integrate(fun, X, Y, n_batches, n_samples, \
                                        variance=200., lengthscale=2.,\
                                        prior_mean=np.zeros(D), prior_cov=Sigma, \
                                        constant_mean = gp_mean,\
                                        grad=True, \
                                        integration_params=self.integration_params,\
                                        component_k=k,
                                        expmap=ExpmapCurve, f_manifold=self.land.manifold.measure)
            # update the last mu container
            last_mu = mu
            print("collected observations: %s" % (w.X).shape[0])
            last_X = w.X
            last_Y = w.Y
        else:  # except Exception as e:
            # NOTE(review): unreachable; if it ever ran, `e` is undefined and
            # `int_variance` would be unbound at the return below.
            print("error in BQ")
            print(e)
            m_int = 0.0
            w = None
        # multiply with euclidean normalization constant
        Const = Z * m_int
        end = time.time()
        time_bq = end - start
        if self.integration_params.get("verbose"):
            print("Const: %s" % Const)
            print('Runtime for BQ : ' + str(time_bq))
        self.land.transfer_dict[k] = {'last_mu' : last_mu.copy(), 'last_X' : last_X.copy(), 'last_Y' : last_Y.copy(),\
                                      'last_kernel' : (w.k if w is not None else None)}
        # we do not have any X samples to return, thus we return None
        # but they are not used anyway
        self.land.V_samples[k,:,:] = V_samples
        self.land.volM_samples[:,k] = volM_samples.flatten()
        self.land.consts[k,:] = Const
        self.land.Z_eucl[k] = Z
        # we can compute the "true" manifold sigma = int g(v) v v^t exp(-0.5 <v, Gamma v>) dv
        #self.land.manifold_sigmas[k] = 1/Const * 1/V_samples.shape[0] * V_samples.T @ np.diag(volM_samples.flatten()) @ V_samples
        self.land.manifold_sigmas[k] = 1/Const * 1/V_samples.shape[0] * np.einsum('nd,ne,n->de', V_samples, V_samples, volM_samples.flatten())
        # save the land object
        if self.integration_params.get("save_lands"):
            landcopy = self.land.getDumpableLand()
            utils.logger(landcopy, self.integration_params["savedir"] + "lands/", startswith="land_")
        # return both the constant (mean) and its variance estimator
        return Const, int_variance
|
from __future__ import annotations
from typing import Any, Dict, Optional
from .ast import TypeEnum, Type, TermEnum, Term, SemEnum, Sem
from .util import errorln
__all__ = ("reflect", "reify", "meaning", "nbe")
class Ctx:
    """Typing context: maps variable names to their semantic values."""

    def __init__(self):
        # Internal name -> value store.
        self._d: Dict[str, Any] = {}

    def lookup(self, tm: Term) -> Any:
        """Return the value bound to the variable term *tm*."""
        key = tm.v
        try:
            return self._d[key]
        except KeyError:
            errorln(tm.info, f"no such variable '{key}'")

    def add(self, k: str, v: Any) -> Ctx:
        """Bind *k* to *v* (error on redefinition) and return self for chaining."""
        if k in self._d:
            errorln(v.info, f"variable '{k}' already defined")
        self._d[k] = v
        return self
_VAR = 0
def _fresh_var() -> str:
"""Retrieve a fresh variable"""
global _VAR
var = str(_VAR)
_VAR += 1
return var
def reflect(ty: Type, tm: Term) -> Sem:
    """Reflects the term syntax to the semantics.

    Type-directed: at arrow type, build a semantic function that wraps an
    application node; at product type, reflect both projections; at unit
    type, embed the term syntactically (SemEnum.Syn).
    """
    if ty.e == TypeEnum.Arrow:
        arg_t, ret_t = ty.v
        return Sem(
            SemEnum.Lam,
            # Applying the reflected function reifies its argument back to
            # syntax and reflects the resulting application at the return type.
            lambda s: reflect(
                ret_t, Term(TermEnum.App, (tm, reify(arg_t, s)))
            ),
        )
    elif ty.e == TypeEnum.Prod:
        a, b = ty.v
        return Sem(
            SemEnum.Pair,
            (
                reflect(a, Term(TermEnum.Fst, tm)),
                reflect(b, Term(TermEnum.Snd, tm)),
            ),
        )
    elif ty.e == TypeEnum.Unit:
        return Sem(SemEnum.Syn, tm)
    # Types are assumed to be one of Arrow/Prod/Unit.
    return errorln(tm.info, "IMPOSSIBLE")
def reify(ty: Type, sem: Sem) -> Term:
    """Reifies the semantics as a syntactic term.

    Type-directed inverse of reflect(): a semantic function is applied to a
    reflected fresh variable to rebuild a lambda; pairs are reified
    component-wise; at unit type the embedded syntax is returned as-is.
    """
    if ty.e == TypeEnum.Arrow and sem.e == SemEnum.Lam:
        arg_t, ret_t = ty.v
        fn = sem.v
        # Fresh variable so the rebuilt lambda body cannot capture names.
        x = _fresh_var()
        return Term(
            TermEnum.Lam,
            (x, reify(ret_t, fn(reflect(arg_t, Term(TermEnum.Var, x))))),
        )
    elif ty.e == TypeEnum.Prod and sem.e == SemEnum.Pair:
        a_t, b_t = ty.v
        a, b = sem.v
        return Term(TermEnum.Pair, (reify(a_t, a), reify(b_t, b)))
    elif ty.e == TypeEnum.Unit and sem.e == SemEnum.Syn:
        return sem.v
    # Mismatch between the type and the shape of the semantic value.
    return errorln(ty.info, f"reify error: type '{ty}', semantic '{sem}')")
def meaning(ctx: Ctx, tm: Term) -> Sem:
    """Evaluation: compute the semantic value of *tm* in context *ctx*.

    Lambdas evaluate to SemEnum.Lam wrapping a Python closure; pairs
    evaluate both components eagerly, storing a 2-tuple of Sems.
    """
    if tm.e == TermEnum.Var:
        return ctx.lookup(tm)
    elif tm.e == TermEnum.Lam:
        arg, body = tm.v
        # The closure extends the context with the argument on application.
        return Sem(SemEnum.Lam, lambda s: meaning(ctx.add(arg, s), body))
    elif tm.e == TermEnum.App:
        s, t = tm.v
        fn = meaning(ctx, s)
        if fn.e != SemEnum.Lam:
            errorln(fn.info, f"eval error: {fn} is not a lambda function")
        return fn.v(meaning(ctx, t))
    elif tm.e == TermEnum.Pair:
        a, b = tm.v
        return Sem(SemEnum.Pair, (meaning(ctx, a), meaning(ctx, b)))
    elif tm.e == TermEnum.Fst:
        pair = meaning(ctx, tm.v)
        if pair.e != SemEnum.Pair:
            errorln(pair.info, f"eval error: {pair} is not a pair")
        a, _ = pair.v
        # BUG FIX: the component is already a fully evaluated Sem; the old
        # code wrapped it in Sem(SemEnum.Pair, a), producing a malformed
        # "pair" whose payload was not a 2-tuple — any later projection or
        # reification of it would misbehave.  Return the component directly.
        return a
    elif tm.e == TermEnum.Snd:
        pair = meaning(ctx, tm.v)
        if pair.e != SemEnum.Pair:
            errorln(pair.info, f"eval error: {pair} is not a pair")
        _, b = pair.v
        # Same fix as Fst: return the stored component, not a new wrapper.
        return b
    # Also fixed the "eval errorln" typos in the error messages above/below.
    return errorln(tm.info, f"eval error: ctx '{ctx}', term '{tm}'")
def nbe(ty: Type, tm: Term) -> Term:
    """Normalization by evaluation: evaluate *tm* in an empty context,
    then reify the resulting semantic value at type *ty*."""
    return reify(ty, meaning(Ctx(), tm))
|
from functools import lru_cache
import logging
from elecsim.plants.fuel.fuel_registry.fuel_registry import fuel_registry, plant_type_to_fuel
from elecsim.plants.plant_type.power_plant import PowerPlant
from elecsim.role.plants.costs.fuel_plant_cost_calculations import FuelPlantCostCalculations
logger = logging.getLogger(__name__)
""" fuel_plant.py: Child class of power plant which contains functions for a power plant which consumes fuel.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, <NAME>"
__license__ = "MIT"
__email__ = "<EMAIL>"
class FuelPlant(PowerPlant):
    """A power plant which consumes fuel to generate electricity.

    Extends ``PowerPlant`` with a conversion efficiency, a fuel object and a
    minimum-running level, and delegates all cost calculations to
    ``FuelPlantCostCalculations``.
    """

    def __init__(self, name, plant_type, capacity_mw, construction_year, average_load_factor, efficiency, pre_dev_period, construction_period, operating_period, pre_dev_spend_years, construction_spend_years, pre_dev_cost_per_mw, construction_cost_per_mw, infrastructure, fixed_o_and_m_per_mw, variable_o_and_m_per_mwh, insurance_cost_per_mw, connection_cost_per_mw):
        """
        Initialisation of plant_type power plant object.

        :param efficiency: Efficiency of power plant at converting plant_type energy into electrical energy.
        """
        super().__init__(name=name, plant_type=plant_type, capacity_mw=capacity_mw, construction_year=construction_year, average_load_factor=average_load_factor, pre_dev_period=pre_dev_period, construction_period=construction_period, operating_period=operating_period, pre_dev_spend_years=pre_dev_spend_years, construction_spend_years=construction_spend_years, pre_dev_cost_per_mw=pre_dev_cost_per_mw, construction_cost_per_mw=construction_cost_per_mw, infrastructure=infrastructure, fixed_o_and_m_per_mw=fixed_o_and_m_per_mw, variable_o_and_m_per_mwh=variable_o_and_m_per_mwh, insurance_cost_per_mw=insurance_cost_per_mw, connection_cost_per_mw=connection_cost_per_mw)
        self.efficiency = efficiency
        # Finds fuel type of power plant, e.g. a CCGT plant_type returns gas.
        fuel_string = plant_type_to_fuel(plant_type, self.construction_year)
        # Fuel object, containing information on fuel.
        self.fuel = fuel_registry(fuel_string)
        # Coal and nuclear plants get a non-zero minimum running level of
        # 5000; presumably a must-run/minimum stable generation figure —
        # TODO confirm units against the dispatch code that reads it.
        self.min_running = 5000 if plant_type in ['Coal', 'Nuclear'] else 0

    def _cost_calculations(self):
        """Return a FuelPlantCostCalculations mirroring this plant's parameters.

        Centralises the construction that was previously duplicated verbatim
        in calculate_lcoe and short_run_marginal_cost.
        """
        return FuelPlantCostCalculations(plant_type=self.plant_type, capacity_mw=self.capacity_mw, construction_year=self.construction_year, average_load_factor=self.average_load_factor, efficiency=self.efficiency, pre_dev_period=self.pre_dev_period, construction_period=self.construction_period, operating_period=self.operating_period, pre_dev_spend_years=self.pre_dev_spend_years, construction_spend_years=self.construction_spend_years, pre_dev_cost_per_mw=self.pre_dev_cost_per_mw, construction_cost_per_mw=self.construction_cost_per_mw, infrastructure=self.infrastructure, fixed_o_and_m_per_mw=self.fixed_o_and_m_per_mw, variable_o_and_m_per_mwh=self.variable_o_and_m_per_mwh, insurance_cost_per_mw=self.insurance_cost_per_mw, connection_cost_per_mw=self.connection_cost_per_mw)

    def calculate_lcoe(self, discount_rate):
        """Levelised cost of electricity for this plant at the given discount rate."""
        return self._cost_calculations().calculate_lcoe(discount_rate)

    def short_run_marginal_cost(self, model, genco, fuel_price=None, co2_price=None):
        """Short-run marginal cost of generation.

        :param fuel_price: optional fuel price passed through to the cost model.
        :param co2_price: optional carbon price passed through to the cost model.
        """
        return self._cost_calculations().calculate_short_run_marginal_cost(model, genco, fuel_price, co2_price)

    def __repr__(self):
        return 'FuelPlant({}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {})'.format(self.name, self.plant_type, self.capacity_mw, self.construction_year, self.average_load_factor, self.pre_dev_period, self.construction_period, self.operating_period, self.pre_dev_spend_years, self.construction_spend_years, self.pre_dev_cost_per_mw, self.construction_cost_per_mw, self.infrastructure, self.fixed_o_and_m_per_mw, self.variable_o_and_m_per_mwh, self.insurance_cost_per_mw, self.connection_cost_per_mw)
|
<reponame>chivandikwa/pulumi-aws
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['MemberArgs', 'Member']
@pulumi.input_type
class MemberArgs:
    """Input properties for constructing a macie2 ``Member`` resource.

    NOTE: this class is generated by the Pulumi Terraform Bridge (tfgen);
    do not edit by hand unless you are certain you know what you are doing.
    """
    def __init__(__self__, *,
                 account_id: pulumi.Input[str],
                 email: pulumi.Input[str],
                 invitation_disable_email_notification: Optional[pulumi.Input[str]] = None,
                 invitation_message: Optional[pulumi.Input[str]] = None,
                 invite: Optional[pulumi.Input[bool]] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a Member resource.
        :param pulumi.Input[str] account_id: The AWS account ID for the account.
        :param pulumi.Input[str] email: The email address for the account.
        :param pulumi.Input[str] invitation_disable_email_notification: Specifies whether to send an email notification to the root user of each account that the invitation will be sent to. This notification is in addition to an alert that the root user receives in AWS Personal Health Dashboard. To send an email notification to the root user of each account, set this value to `true`.
        :param pulumi.Input[str] invitation_message: A custom message to include in the invitation. Amazon Macie adds this message to the standard content that it sends for an invitation.
        :param pulumi.Input[bool] invite: Send an invitation to a member
        :param pulumi.Input[str] status: Specifies the status for the account. To enable Amazon Macie and start all Macie activities for the account, set this value to `ENABLED`. Valid values are `ENABLED` or `PAUSED`.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of key-value pairs that specifies the tags to associate with the account in Amazon Macie.
        """
        # Required properties are always set; optional ones only when
        # provided, so unset values stay absent from the resource inputs.
        pulumi.set(__self__, "account_id", account_id)
        pulumi.set(__self__, "email", email)
        if invitation_disable_email_notification is not None:
            pulumi.set(__self__, "invitation_disable_email_notification", invitation_disable_email_notification)
        if invitation_message is not None:
            pulumi.set(__self__, "invitation_message", invitation_message)
        if invite is not None:
            pulumi.set(__self__, "invite", invite)
        if status is not None:
            pulumi.set(__self__, "status", status)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter(name="accountId")
    def account_id(self) -> pulumi.Input[str]:
        """
        The AWS account ID for the account.
        """
        return pulumi.get(self, "account_id")
    @account_id.setter
    def account_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "account_id", value)
    @property
    @pulumi.getter
    def email(self) -> pulumi.Input[str]:
        """
        The email address for the account.
        """
        return pulumi.get(self, "email")
    @email.setter
    def email(self, value: pulumi.Input[str]):
        pulumi.set(self, "email", value)
    @property
    @pulumi.getter(name="invitationDisableEmailNotification")
    def invitation_disable_email_notification(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies whether to send an email notification to the root user of each account that the invitation will be sent to. This notification is in addition to an alert that the root user receives in AWS Personal Health Dashboard. To send an email notification to the root user of each account, set this value to `true`.
        """
        return pulumi.get(self, "invitation_disable_email_notification")
    @invitation_disable_email_notification.setter
    def invitation_disable_email_notification(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "invitation_disable_email_notification", value)
    @property
    @pulumi.getter(name="invitationMessage")
    def invitation_message(self) -> Optional[pulumi.Input[str]]:
        """
        A custom message to include in the invitation. Amazon Macie adds this message to the standard content that it sends for an invitation.
        """
        return pulumi.get(self, "invitation_message")
    @invitation_message.setter
    def invitation_message(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "invitation_message", value)
    @property
    @pulumi.getter
    def invite(self) -> Optional[pulumi.Input[bool]]:
        """
        Send an invitation to a member
        """
        return pulumi.get(self, "invite")
    @invite.setter
    def invite(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "invite", value)
    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the status for the account. To enable Amazon Macie and start all Macie activities for the account, set this value to `ENABLED`. Valid values are `ENABLED` or `PAUSED`.
        """
        return pulumi.get(self, "status")
    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of key-value pairs that specifies the tags to associate with the account in Amazon Macie.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class _MemberState:
    """State properties used to look up and filter existing ``Member`` resources.

    Unlike ``MemberArgs``, every property here is optional, and it also
    exposes provider-computed outputs (arn, invited_at, relationship_status,
    tags_all, updated_at).
    NOTE: generated by the Pulumi Terraform Bridge (tfgen); do not edit by hand.
    """
    def __init__(__self__, *,
                 account_id: Optional[pulumi.Input[str]] = None,
                 administrator_account_id: Optional[pulumi.Input[str]] = None,
                 arn: Optional[pulumi.Input[str]] = None,
                 email: Optional[pulumi.Input[str]] = None,
                 invitation_disable_email_notification: Optional[pulumi.Input[str]] = None,
                 invitation_message: Optional[pulumi.Input[str]] = None,
                 invite: Optional[pulumi.Input[bool]] = None,
                 invited_at: Optional[pulumi.Input[str]] = None,
                 master_account_id: Optional[pulumi.Input[str]] = None,
                 relationship_status: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 updated_at: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Member resources.
        :param pulumi.Input[str] account_id: The AWS account ID for the account.
        :param pulumi.Input[str] administrator_account_id: The AWS account ID for the administrator account.
        :param pulumi.Input[str] arn: The Amazon Resource Name (ARN) of the account.
        :param pulumi.Input[str] email: The email address for the account.
        :param pulumi.Input[str] invitation_disable_email_notification: Specifies whether to send an email notification to the root user of each account that the invitation will be sent to. This notification is in addition to an alert that the root user receives in AWS Personal Health Dashboard. To send an email notification to the root user of each account, set this value to `true`.
        :param pulumi.Input[str] invitation_message: A custom message to include in the invitation. Amazon Macie adds this message to the standard content that it sends for an invitation.
        :param pulumi.Input[bool] invite: Send an invitation to a member
        :param pulumi.Input[str] invited_at: The date and time, in UTC and extended RFC 3339 format, when an Amazon Macie membership invitation was last sent to the account. This value is null if a Macie invitation hasn't been sent to the account.
        :param pulumi.Input[str] relationship_status: The current status of the relationship between the account and the administrator account.
        :param pulumi.Input[str] status: Specifies the status for the account. To enable Amazon Macie and start all Macie activities for the account, set this value to `ENABLED`. Valid values are `ENABLED` or `PAUSED`.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of key-value pairs that specifies the tags to associate with the account in Amazon Macie.
        :param pulumi.Input[str] updated_at: The date and time, in UTC and extended RFC 3339 format, of the most recent change to the status of the relationship between the account and the administrator account.
        """
        # Only set properties that were actually provided, so absent state
        # fields remain unset rather than explicitly None.
        if account_id is not None:
            pulumi.set(__self__, "account_id", account_id)
        if administrator_account_id is not None:
            pulumi.set(__self__, "administrator_account_id", administrator_account_id)
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        if email is not None:
            pulumi.set(__self__, "email", email)
        if invitation_disable_email_notification is not None:
            pulumi.set(__self__, "invitation_disable_email_notification", invitation_disable_email_notification)
        if invitation_message is not None:
            pulumi.set(__self__, "invitation_message", invitation_message)
        if invite is not None:
            pulumi.set(__self__, "invite", invite)
        if invited_at is not None:
            pulumi.set(__self__, "invited_at", invited_at)
        if master_account_id is not None:
            pulumi.set(__self__, "master_account_id", master_account_id)
        if relationship_status is not None:
            pulumi.set(__self__, "relationship_status", relationship_status)
        if status is not None:
            pulumi.set(__self__, "status", status)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if tags_all is not None:
            pulumi.set(__self__, "tags_all", tags_all)
        if updated_at is not None:
            pulumi.set(__self__, "updated_at", updated_at)
    @property
    @pulumi.getter(name="accountId")
    def account_id(self) -> Optional[pulumi.Input[str]]:
        """
        The AWS account ID for the account.
        """
        return pulumi.get(self, "account_id")
    @account_id.setter
    def account_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "account_id", value)
    @property
    @pulumi.getter(name="administratorAccountId")
    def administrator_account_id(self) -> Optional[pulumi.Input[str]]:
        """
        The AWS account ID for the administrator account.
        """
        return pulumi.get(self, "administrator_account_id")
    @administrator_account_id.setter
    def administrator_account_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "administrator_account_id", value)
    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        """
        The Amazon Resource Name (ARN) of the account.
        """
        return pulumi.get(self, "arn")
    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)
    @property
    @pulumi.getter
    def email(self) -> Optional[pulumi.Input[str]]:
        """
        The email address for the account.
        """
        return pulumi.get(self, "email")
    @email.setter
    def email(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "email", value)
    @property
    @pulumi.getter(name="invitationDisableEmailNotification")
    def invitation_disable_email_notification(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies whether to send an email notification to the root user of each account that the invitation will be sent to. This notification is in addition to an alert that the root user receives in AWS Personal Health Dashboard. To send an email notification to the root user of each account, set this value to `true`.
        """
        return pulumi.get(self, "invitation_disable_email_notification")
    @invitation_disable_email_notification.setter
    def invitation_disable_email_notification(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "invitation_disable_email_notification", value)
    @property
    @pulumi.getter(name="invitationMessage")
    def invitation_message(self) -> Optional[pulumi.Input[str]]:
        """
        A custom message to include in the invitation. Amazon Macie adds this message to the standard content that it sends for an invitation.
        """
        return pulumi.get(self, "invitation_message")
    @invitation_message.setter
    def invitation_message(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "invitation_message", value)
    @property
    @pulumi.getter
    def invite(self) -> Optional[pulumi.Input[bool]]:
        """
        Send an invitation to a member
        """
        return pulumi.get(self, "invite")
    @invite.setter
    def invite(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "invite", value)
    @property
    @pulumi.getter(name="invitedAt")
    def invited_at(self) -> Optional[pulumi.Input[str]]:
        """
        The date and time, in UTC and extended RFC 3339 format, when an Amazon Macie membership invitation was last sent to the account. This value is null if a Macie invitation hasn't been sent to the account.
        """
        return pulumi.get(self, "invited_at")
    @invited_at.setter
    def invited_at(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "invited_at", value)
    @property
    @pulumi.getter(name="masterAccountId")
    def master_account_id(self) -> Optional[pulumi.Input[str]]:
        # NOTE(review): undocumented upstream; presumably the legacy name for
        # the administrator account ID — confirm against the AWS provider docs.
        return pulumi.get(self, "master_account_id")
    @master_account_id.setter
    def master_account_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "master_account_id", value)
    @property
    @pulumi.getter(name="relationshipStatus")
    def relationship_status(self) -> Optional[pulumi.Input[str]]:
        """
        The current status of the relationship between the account and the administrator account.
        """
        return pulumi.get(self, "relationship_status")
    @relationship_status.setter
    def relationship_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "relationship_status", value)
    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the status for the account. To enable Amazon Macie and start all Macie activities for the account, set this value to `ENABLED`. Valid values are `ENABLED` or `PAUSED`.
        """
        return pulumi.get(self, "status")
    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of key-value pairs that specifies the tags to associate with the account in Amazon Macie.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        # NOTE(review): undocumented upstream; presumably the provider-computed
        # union of resource and provider default_tags — confirm.
        return pulumi.get(self, "tags_all")
    @tags_all.setter
    def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags_all", value)
    @property
    @pulumi.getter(name="updatedAt")
    def updated_at(self) -> Optional[pulumi.Input[str]]:
        """
        The date and time, in UTC and extended RFC 3339 format, of the most recent change to the status of the relationship between the account and the administrator account.
        """
        return pulumi.get(self, "updated_at")
    @updated_at.setter
    def updated_at(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "updated_at", value)
class Member(pulumi.CustomResource):
    """An Amazon Macie member-account resource (``aws:macie2/member:Member``).

    NOTE: generated by the Pulumi Terraform Bridge (tfgen); do not edit by hand.
    """
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_id: Optional[pulumi.Input[str]] = None,
                 email: Optional[pulumi.Input[str]] = None,
                 invitation_disable_email_notification: Optional[pulumi.Input[str]] = None,
                 invitation_message: Optional[pulumi.Input[str]] = None,
                 invite: Optional[pulumi.Input[bool]] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        Provides a resource to manage an [Amazon Macie Member](https://docs.aws.amazon.com/macie/latest/APIReference/members-id.html).
        ## Example Usage
        ```python
        import pulumi
        import pulumi_aws as aws
        example_account = aws.macie2.Account("exampleAccount")
        example_member = aws.macie2.Member("exampleMember",
            account_id="AWS ACCOUNT ID",
            email="EMAIL",
            invite=True,
            invitation_message="Message of the invitation",
            invitation_disable_email_notification="true",
            opts=pulumi.ResourceOptions(depends_on=[example_account]))
        ```
        ## Import
        `aws_macie2_member` can be imported using the account ID of the member account, e.g.,
        ```sh
        $ pulumi import aws:macie2/member:Member example 123456789012
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] account_id: The AWS account ID for the account.
        :param pulumi.Input[str] email: The email address for the account.
        :param pulumi.Input[str] invitation_disable_email_notification: Specifies whether to send an email notification to the root user of each account that the invitation will be sent to. This notification is in addition to an alert that the root user receives in AWS Personal Health Dashboard. To send an email notification to the root user of each account, set this value to `true`.
        :param pulumi.Input[str] invitation_message: A custom message to include in the invitation. Amazon Macie adds this message to the standard content that it sends for an invitation.
        :param pulumi.Input[bool] invite: Send an invitation to a member
        :param pulumi.Input[str] status: Specifies the status for the account. To enable Amazon Macie and start all Macie activities for the account, set this value to `ENABLED`. Valid values are `ENABLED` or `PAUSED`.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of key-value pairs that specifies the tags to associate with the account in Amazon Macie.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: MemberArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a resource to manage an [Amazon Macie Member](https://docs.aws.amazon.com/macie/latest/APIReference/members-id.html).
        ## Example Usage
        ```python
        import pulumi
        import pulumi_aws as aws
        example_account = aws.macie2.Account("exampleAccount")
        example_member = aws.macie2.Member("exampleMember",
            account_id="AWS ACCOUNT ID",
            email="EMAIL",
            invite=True,
            invitation_message="Message of the invitation",
            invitation_disable_email_notification="true",
            opts=pulumi.ResourceOptions(depends_on=[example_account]))
        ```
        ## Import
        `aws_macie2_member` can be imported using the account ID of the member account, e.g.,
        ```sh
        $ pulumi import aws:macie2/member:Member example 123456789012
        ```
        :param str resource_name: The name of the resource.
        :param MemberArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: a bundled MemberArgs
        # object, or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(MemberArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       account_id: Optional[pulumi.Input[str]] = None,
                       email: Optional[pulumi.Input[str]] = None,
                       invitation_disable_email_notification: Optional[pulumi.Input[str]] = None,
                       invitation_message: Optional[pulumi.Input[str]] = None,
                       invite: Optional[pulumi.Input[bool]] = None,
                       status: Optional[pulumi.Input[str]] = None,
                       tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       __props__=None):
        # Shared implementation behind both __init__ overloads: validates
        # options, builds the input property bag and registers the resource.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource (no existing provider ID supplied).
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = MemberArgs.__new__(MemberArgs)
            # account_id and email are required unless the resource is being
            # looked up by URN.
            if account_id is None and not opts.urn:
                raise TypeError("Missing required property 'account_id'")
            __props__.__dict__["account_id"] = account_id
            if email is None and not opts.urn:
                raise TypeError("Missing required property 'email'")
            __props__.__dict__["email"] = email
            __props__.__dict__["invitation_disable_email_notification"] = invitation_disable_email_notification
            __props__.__dict__["invitation_message"] = invitation_message
            __props__.__dict__["invite"] = invite
            __props__.__dict__["status"] = status
            __props__.__dict__["tags"] = tags
            # Output-only properties are initialised to None; the provider
            # fills them in after the resource is created.
            __props__.__dict__["administrator_account_id"] = None
            __props__.__dict__["arn"] = None
            __props__.__dict__["invited_at"] = None
            __props__.__dict__["master_account_id"] = None
            __props__.__dict__["relationship_status"] = None
            __props__.__dict__["tags_all"] = None
            __props__.__dict__["updated_at"] = None
        super(Member, __self__).__init__(
            'aws:macie2/member:Member',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            account_id: Optional[pulumi.Input[str]] = None,
            administrator_account_id: Optional[pulumi.Input[str]] = None,
            arn: Optional[pulumi.Input[str]] = None,
            email: Optional[pulumi.Input[str]] = None,
            invitation_disable_email_notification: Optional[pulumi.Input[str]] = None,
            invitation_message: Optional[pulumi.Input[str]] = None,
            invite: Optional[pulumi.Input[bool]] = None,
            invited_at: Optional[pulumi.Input[str]] = None,
            master_account_id: Optional[pulumi.Input[str]] = None,
            relationship_status: Optional[pulumi.Input[str]] = None,
            status: Optional[pulumi.Input[str]] = None,
            tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            updated_at: Optional[pulumi.Input[str]] = None) -> 'Member':
        """
        Get an existing Member resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] account_id: The AWS account ID for the account.
        :param pulumi.Input[str] administrator_account_id: The AWS account ID for the administrator account.
        :param pulumi.Input[str] arn: The Amazon Resource Name (ARN) of the account.
        :param pulumi.Input[str] email: The email address for the account.
        :param pulumi.Input[str] invitation_disable_email_notification: Specifies whether to send an email notification to the root user of each account that the invitation will be sent to. This notification is in addition to an alert that the root user receives in AWS Personal Health Dashboard. To send an email notification to the root user of each account, set this value to `true`.
        :param pulumi.Input[str] invitation_message: A custom message to include in the invitation. Amazon Macie adds this message to the standard content that it sends for an invitation.
        :param pulumi.Input[bool] invite: Send an invitation to a member
        :param pulumi.Input[str] invited_at: The date and time, in UTC and extended RFC 3339 format, when an Amazon Macie membership invitation was last sent to the account. This value is null if a Macie invitation hasn't been sent to the account.
        :param pulumi.Input[str] relationship_status: The current status of the relationship between the account and the administrator account.
        :param pulumi.Input[str] status: Specifies the status for the account. To enable Amazon Macie and start all Macie activities for the account, set this value to `ENABLED`. Valid values are `ENABLED` or `PAUSED`.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of key-value pairs that specifies the tags to associate with the account in Amazon Macie.
        :param pulumi.Input[str] updated_at: The date and time, in UTC and extended RFC 3339 format, of the most recent change to the status of the relationship between the account and the administrator account.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _MemberState.__new__(_MemberState)
        __props__.__dict__["account_id"] = account_id
        __props__.__dict__["administrator_account_id"] = administrator_account_id
        __props__.__dict__["arn"] = arn
        __props__.__dict__["email"] = email
        __props__.__dict__["invitation_disable_email_notification"] = invitation_disable_email_notification
        __props__.__dict__["invitation_message"] = invitation_message
        __props__.__dict__["invite"] = invite
        __props__.__dict__["invited_at"] = invited_at
        __props__.__dict__["master_account_id"] = master_account_id
        __props__.__dict__["relationship_status"] = relationship_status
        __props__.__dict__["status"] = status
        __props__.__dict__["tags"] = tags
        __props__.__dict__["tags_all"] = tags_all
        __props__.__dict__["updated_at"] = updated_at
        return Member(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="accountId")
    def account_id(self) -> pulumi.Output[str]:
        """
        The AWS account ID for the account.
        """
        return pulumi.get(self, "account_id")
    @property
    @pulumi.getter(name="administratorAccountId")
    def administrator_account_id(self) -> pulumi.Output[str]:
        """
        The AWS account ID for the administrator account.
        """
        return pulumi.get(self, "administrator_account_id")
    @property
    @pulumi.getter
    def arn(self) -> pulumi.Output[str]:
        """
        The Amazon Resource Name (ARN) of the account.
        """
        return pulumi.get(self, "arn")
    @property
    @pulumi.getter
    def email(self) -> pulumi.Output[str]:
        """
        The email address for the account.
        """
        return pulumi.get(self, "email")
    @property
    @pulumi.getter(name="invitationDisableEmailNotification")
    def invitation_disable_email_notification(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies whether to send an email notification to the root user of each account that the invitation will be sent to. This notification is in addition to an alert that the root user receives in AWS Personal Health Dashboard. To send an email notification to the root user of each account, set this value to `true`.
        """
        return pulumi.get(self, "invitation_disable_email_notification")
    @property
    @pulumi.getter(name="invitationMessage")
    def invitation_message(self) -> pulumi.Output[Optional[str]]:
        """
        A custom message to include in the invitation. Amazon Macie adds this message to the standard content that it sends for an invitation.
        """
        return pulumi.get(self, "invitation_message")
    @property
    @pulumi.getter
    def invite(self) -> pulumi.Output[bool]:
        """
        Send an invitation to a member
        """
        return pulumi.get(self, "invite")
    @property
    @pulumi.getter(name="invitedAt")
    def invited_at(self) -> pulumi.Output[str]:
        """
        The date and time, in UTC and extended RFC 3339 format, when an Amazon Macie membership invitation was last sent to the account. This value is null if a Macie invitation hasn't been sent to the account.
        """
        return pulumi.get(self, "invited_at")
    @property
    @pulumi.getter(name="masterAccountId")
    def master_account_id(self) -> pulumi.Output[str]:
        # NOTE(review): undocumented upstream; presumably the legacy name for
        # the administrator account ID — confirm against the AWS provider docs.
        return pulumi.get(self, "master_account_id")
    @property
    @pulumi.getter(name="relationshipStatus")
    def relationship_status(self) -> pulumi.Output[str]:
        """
        The current status of the relationship between the account and the administrator account.
        """
        return pulumi.get(self, "relationship_status")
    @property
    @pulumi.getter
    def status(self) -> pulumi.Output[str]:
        """
        Specifies the status for the account. To enable Amazon Macie and start all Macie activities for the account, set this value to `ENABLED`. Valid values are `ENABLED` or `PAUSED`.
        """
        return pulumi.get(self, "status")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        A map of key-value pairs that specifies the tags to associate with the account in Amazon Macie.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
        # NOTE(review): undocumented upstream; presumably the provider-computed
        # union of resource and provider default_tags — confirm.
        return pulumi.get(self, "tags_all")
    @property
    @pulumi.getter(name="updatedAt")
    def updated_at(self) -> pulumi.Output[str]:
        """
        The date and time, in UTC and extended RFC 3339 format, of the most recent change to the status of the relationship between the account and the administrator account.
        """
        return pulumi.get(self, "updated_at")
|
###############################################################################
# Copyright (c) 2021 Habana Labs, Ltd. an Intel Company
#
# SPDX-License-Identifier: Apache-2.0
#
###############################################################################
"""Snapshot script for analysis/debug of models in Habana model_garden
This python script is called for collecting docker related info from the host:
docker inspect <container_ID>
docker stats --no-stream --no-trunc <container_ID>
docker ps <container_ID> to get the Habana docker image being run
It needs to be run as root or with sudo from a shell on the same machine on
which the user is running the docker training session.
"""
import os
import sys
import argparse
import subprocess
import tarfile
from helper_functions import get_canonical_path, create_output_dir
# Absolute, symlink-resolved path of this script file.
ACTUAL_CMD = os.path.realpath(sys.argv[0])
# Script base name (without extension); reused as tarball and staging-dir name.
INFO_TARBALL = "{}".format(os.path.splitext(os.path.basename(ACTUAL_CMD))[0])
# File recording the exact command line this script was invoked with.
EXECCMDLINEFILE = "execution-command-line-{}.txt".format(os.path.basename(ACTUAL_CMD))
# Staging directory (relative to --outdir) where gathered files are collected.
TMPDIR = INFO_TARBALL
class GatherInfoHostArgParser(argparse.ArgumentParser):
    """Argument parser for the host-side snapshot script.

    Besides parsing, parse_args() enforces that the script runs as root,
    since the docker commands it drives require elevated privileges.
    """
    def __init__(self):
        super(GatherInfoHostArgParser, self).__init__()
        self.add_argument('-o', '--outdir', type = str, default = './', help="""
    The output directory where all the files and other information will be stored.
    The output will be stored as an archive as well as the actual directory where
    all the contents are copied.
    """
    )
        self.add_argument('-c', '--clear', action="store_true", help="""
    Delete the existing contents of the output directory before copying over
    files from this run.
    """
    )
        self.add_argument('-i', '--container_id', type = str, required=True, help="""
    This is the Docker container ID for the running docker session, obtained
    by \"docker ps\" and selecting the CONTAINER ID for the session for
    which we want to run the snapshot script.
    """
    )
    def parse_args(self):
        """Parse CLI args, resolve outdir to <outdir>/<TMPDIR>, and require root."""
        args = super(GatherInfoHostArgParser, self).parse_args()
        # Stage everything under a script-named subdirectory of --outdir.
        args.outdir = str(get_canonical_path(args.outdir).joinpath(TMPDIR))
        # uid 0 check: docker inspect/stats need root (or sudo) on the host.
        if os.getuid() != 0:
            print("*** Rerun this script as user 'root' or as sudo ***\n\n")
            self.print_help()
            sys.exit(1)
        return args
class SnapshotScriptHost():
    """Collects docker diagnostics for one container and archives them.

    Runs `docker inspect`, `docker stats` and `docker ps` for the container
    given on the command line, stores the outputs under `outdir_path`, then
    packs that directory into a `<outdir>.tar.gz` next to it.
    """
    # Fixed output file name for each captured docker command.
    STANDARD_FILE_NAMES = {"docker_inspect" : "docker_inspect.txt", "docker_stats" : "docker_stats.txt", "docker_ps_cmd" : "docker_ps_cmd.txt"}
    def __init__(self, args, outdir_path):
        self.args = args                # parsed CLI namespace (uses .container_id)
        self.outdir_path = outdir_path  # pathlib.Path of the staging directory
    def generateHeader(self, text, sep='='):
        """Print `text` framed by two 80-repeat separator lines.

        BUG FIX: the parameter was named `str`, shadowing the builtin type.
        """
        print("\n")
        print(sep * 80)
        print(text)
        print(sep * 80)
    def get_outdir_filename(self, filename):
        """Return `filename` joined onto the output directory, as a string."""
        return str(self.outdir_path.joinpath(filename))
    def run_cmd(self, cmd: str):
        """Echo `cmd` and run it through bash, waiting for completion.

        BUG FIX: the signature was `cmd=str`, which made the *type object*
        `str` the default argument instead of annotating the parameter.
        """
        print(cmd)
        with subprocess.Popen(cmd, shell=True, executable='/bin/bash') as proc:
            proc.wait()
    def saveDockerContainerInfoFromHost(self):
        """Capture `docker inspect/stats/ps` output for the target container."""
        try:
            self.generateHeader('Saving docker inspect, docker stats, docker ps results')
            cmd = f"docker inspect {self.args.container_id} > " + self.get_outdir_filename(SnapshotScriptHost.STANDARD_FILE_NAMES["docker_inspect"])
            self.run_cmd(cmd)
            cmd = f"docker stats --no-stream --no-trunc {self.args.container_id} > " + self.get_outdir_filename(SnapshotScriptHost.STANDARD_FILE_NAMES["docker_stats"])
            self.run_cmd(cmd)
            # grep keeps both the `docker ps` header row and the target container's row.
            query_str = f"\'CONTAINER ID|{self.args.container_id}\'"
            cmd = f"docker ps|grep -E {query_str} > " + self.get_outdir_filename(SnapshotScriptHost.STANDARD_FILE_NAMES["docker_ps_cmd"])
            self.run_cmd(cmd)
        except Exception as exc:
            raise RuntimeError("Error in saveDockerContainerInfoFromHost") from exc
    # This generates a <args.outdir>.tar.gz in the parent directory of args.outdir
    def generateTarball(self):
        """Pack the staging directory into <outdir>.tar.gz beside it."""
        try:
            cmd = f"chmod -R 755 {str(self.outdir_path)}"
            self.run_cmd(cmd)
            parent_dir = os.path.dirname(self.outdir_path)
            tardir_name = os.path.basename(os.path.normpath(self.outdir_path))
            tarfile_name = f"{tardir_name}.tar.gz"
            self.generateHeader(f"Generating {parent_dir}/{tardir_name}.tar.gz")
            os.chdir(parent_dir)
            # 'x:gz' creates exclusively, so a stale archive makes this fail loudly.
            # BUG FIX: context manager ensures the archive is closed even on error.
            with tarfile.open(tarfile_name, 'x:gz') as tar:
                tar.add(tardir_name)
            cmd = f"chmod -R 755 {tarfile_name}"
            self.run_cmd(cmd)
        except Exception as exc:
            raise RuntimeError("Error in generateTarball") from exc
    def uploadTarball(self):
        """Placeholder for a future upload step; intentionally a no-op."""
        pass
    def run(self):
        """Execute the full snapshot pipeline: capture, archive, (upload)."""
        self.saveDockerContainerInfoFromHost()
        self.generateTarball()
        self.uploadTarball()
        print("Snapshot script completed successfully (from host)")
def main():
    """Entry point: parse arguments, record the invocation, run the snapshot."""
    parser = GatherInfoHostArgParser()
    # With no arguments at all, show usage and bail out.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    parsed = parser.parse_args()
    out_path = create_output_dir(parsed.outdir, parsed.clear)
    # Record exactly how this script was invoked, for later debugging.
    with open(out_path.joinpath(EXECCMDLINEFILE), "w") as handle:
        handle.write("Command executed as: python3 {}\n".format(" ".join(sys.argv)))
    SnapshotScriptHost(parsed, out_path).run()
if __name__ == "__main__":
    main()
|
<gh_stars>10-100
import random
from Levenshtein import distance
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from playwright.async_api import async_playwright, Playwright, Browser, Page
import shortuuid
import cv2
import numpy as np
from typing import Any, Dict, List, Optional, Tuple
app = FastAPI()
# Playwright driver handle; each session calls .start() on it to obtain an instance.
playwright = async_playwright()
# OpenCV QR detector used to extract the payment QR code from checkout screenshots.
qr_decoder = cv2.QRCodeDetector()
# In-memory session registry: session_id -> (playwright driver, browser context, page, state).
sessions: Dict[str, Tuple[Playwright, Browser, Page, str]] = {}
class SessionInput(BaseModel):
    """Request body for creating a browser session."""
    # Optional client-chosen session id; the handler falls back to a default when omitted.
    session_id: Optional[str] = None
    # Phone number used for the SMS login flow.
    phone: str
class LoginInput(BaseModel):
    """Request body for completing login: the SMS verification code."""
    code: str
class OrderInput(BaseModel):
    """Request body for placing an order."""
    # Free-text menu search query.
    query: str
    # How many items to add to the cart.
    quantity: int
    # True: pick the closest title match; False: sample results at random.
    is_exact: bool
class SessionOutput(BaseModel):
    """Response body shared by all session endpoints."""
    # Session identifier echoed back to the client.
    id: str
    # Current session state label (e.g. 'logged_in', 'ordered', 'payment_triggered').
    state: str
    # Optional endpoint-specific payload (items, cart texts, payment QR URL, ...).
    metadata: Optional[Dict[str, Any]] = None
@app.get('/')
async def root():
    """Health-check endpoint: report that the service is alive."""
    payload = {'status': 'OK'}
    return payload
@app.post('/sessions', response_model=SessionOutput)
async def create_session(input: SessionInput):
    """Launch a persistent browser context pointed at mcd.cn and register it.

    NOTE(review): the session id falls back to the literal 'test' and the SMS
    login steps are commented out — this looks like debug wiring that relies on
    a pre-authenticated ./browsercache profile; confirm before production use.
    """
    session_id = input.session_id if input.session_id is not None else 'test'
    # session_id = shortuuid.uuid()
    session = await playwright.start()
    # Persistent context so cookies/login survive across runs via ./browsercache.
    browser = await session.chromium.launch_persistent_context(
        headless=False,
        user_data_dir='./browsercache',
        devtools=True,
        viewport={
            'width': 1600,
            'height': 900,
        },
    )
    page = browser.pages[0]
    # page = await browser.new_page()
    await page.goto('https://mcd.cn')
    # Switch the site language, then enter the ordering flow.
    await page.click('div.language')
    await page.click('div.languageList > div.title:text("简体中文")')
    await page.wait_for_timeout(1000)
    await page.click('span:text("开始订餐")')
    state = 'logged_in'
    # await page.click('div.treaty-plane > img')
    # await page.fill('input.txt-phone', input.phone)
    # await page.click('button.countDownBtn')
    # state = 'requested_code'
    sessions[session_id] = (session, browser, page, state)
    return SessionOutput(id=session_id, state=state)
@app.post('/sessions/{session_id}/login', response_model=SessionOutput)
async def login_session(session_id: str, input: LoginInput):
    """Complete the SMS login: type the verification code and start ordering.

    Raises 404 when the session id is unknown.
    """
    try:
        session, browser, page, state = sessions[session_id]
        # Enter the SMS verification code and submit the login form.
        await page.fill('input.txt-sms', input.code)
        await page.click('button.login-button')
        await page.wait_for_timeout(1000)
        await page.click('span:text("开始订餐")')
        state = 'logged_in'
        sessions[session_id] = (session, browser, page, state)
        return SessionOutput(id=session_id, state=state)
    except KeyError:
        # BUG FIX: the keyword is `status_code`; `state_code` was an invalid
        # kwarg and raised TypeError instead of returning a 404.
        raise HTTPException(status_code=404, detail='session not found')
@app.post('/sessions/{session_id}/order', response_model=SessionOutput)
async def order_session(session_id: str, input: OrderInput):
    """Search the menu for `input.query` and add item(s) to the cart.

    Exact mode picks the closest title by Levenshtein distance and adds
    `input.quantity` of it; otherwise `input.quantity` distinct random
    results are added, one of each. Raises 404 for unknown sessions.
    """
    try:
        session, browser, page, state = sessions[session_id]
        await page.fill('//input[contains(@placeholder, "最多")]', input.query)
        await page.click('button.ant-input-search-button')
        await page.wait_for_timeout(1000)
        items: List[Dict[str, Any]] = []
        if input.is_exact is True:
            # loop through the items and try to match the name
            titles = await page.eval_on_selector_all(
                '//div[@class="buy-box"]/div[@class="left"]/p',
                '(boxes) => boxes.map(box => box.innerText)',
            )
            distances = [distance(input.query, x) for x in titles]
            # Indexes are 1-based because XPath positions start at 1.
            item_matches = sorted(zip(titles, distances, range(1, len(titles) + 1)), key=lambda x: x[1])
            print(item_matches)
            if len(item_matches) > 0:
                # select the closest one
                button_selector = f'(//div[@class="buy-box"])[{item_matches[0][2]}]/*[contains(@class, "button")]'
                button_class_dict: Dict[str, str] = await page.eval_on_selector(
                    button_selector,
                    '(button) => button.classList',
                )
                if 'custom' not in button_class_dict.values():
                    # Plain item: click the add button `quantity` times.
                    await page.click(button_selector, click_count=input.quantity)
                    item_title = await page.eval_on_selector(
                        '(//div[@class="buy-box"])[1]/div[@class="left"]/p',
                        '(title) => title.innerText',
                    )
                    items.append({'name': item_title, 'quantity': input.quantity})
                else:
                    # Customizable item: open the customization page, set the
                    # count field, add to cart, then return to the menu.
                    await page.click(button_selector)
                    await page.wait_for_timeout(500)
                    # get the first sub-item
                    item_title = await page.eval_on_selector(
                        '(//h1[@class="title"])[1]',
                        '(title) => title.innerText',
                    )
                    await page.fill('//input[contains(@class, "count")]', str(input.quantity))
                    await page.wait_for_timeout(500)
                    await page.click('button.to-cart')
                    await page.wait_for_timeout(1000)
                    await page.goto('https://mcd.cn/product')
                    # await page.click('div:text("餐品菜单")')
                    items.append({'name': item_title, 'quantity': input.quantity})
        else:
            result_count = await page.eval_on_selector_all(
                '//div[@class="buy-box"]/div[@class="left"]/p',
                '(boxes) => boxes.length',
            )
            # pick `quantity` distinct results at random, one unit of each
            item_idx = random.sample(range(1, result_count + 1), input.quantity)
            for idx in item_idx:
                button_selector = f'(//div[@class="buy-box"])[{idx}]/*[contains(@class, "button")]'
                button_class_dict: Dict[str, str] = await page.eval_on_selector(
                    button_selector,
                    '(button) => button.classList',
                )
                if 'custom' not in button_class_dict.values():
                    # just click on the button
                    await page.click(button_selector, click_count=1)
                    item_title = await page.eval_on_selector(
                        '(//div[@class="buy-box"])[1]/div[@class="left"]/p',
                        '(title) => title.innerText',
                    )
                    items.append({'name': item_title, 'quantity': 1})
                else:
                    # go into customization
                    await page.click(button_selector)
                    await page.wait_for_timeout(500)
                    # get the first sub-item
                    item_title = await page.eval_on_selector(
                        '(//h1[@class="title"])[1]',
                        '(title) => title.innerText',
                    )
                    await page.fill('//input[contains(@class, "count")]', '1')
                    await page.wait_for_timeout(500)
                    await page.click('button.to-cart')
                    await page.wait_for_timeout(1000)
                    items.append({'name': item_title, 'quantity': 1})
                    # await page.click('div:text("餐品菜单")')
                    # Re-run the search so the next random index is valid.
                    await page.goto('https://mcd.cn/product')
                    await page.wait_for_timeout(2000)
                    await page.fill('//input[contains(@placeholder, "最多")]', input.query)
                    await page.click('button.ant-input-search-button')
                    await page.wait_for_timeout(1000)
        state = 'ordered'
        metadata = {'items': items}
        sessions[session_id] = (session, browser, page, state)
        return SessionOutput(id=session_id, state=state, metadata=metadata)
    except KeyError:
        # BUG FIX: `status_code` (was `state_code`, an invalid kwarg → TypeError).
        raise HTTPException(status_code=404, detail='session not found')
@app.post('/sessions/{session_id}/cart/clear', response_model=SessionOutput)
async def clear_session_cart(session_id: str):
    """Empty the cart through the cart popover's clear-confirmation dialog."""
    try:
        session, browser, page, state = sessions[session_id]
        await page.click('//div[@class="car"]')
        await page.wait_for_timeout(500)
        await page.click('span:text("清空购物车")')
        await page.wait_for_timeout(500)
        # Confirm the "clear cart" popover.
        await page.click('//div[@class="ant-popover-buttons"]/button[contains(@class, "ant-btn-primary")]')
        state = 'cart_cleared'
        sessions[session_id] = (session, browser, page, state)
        return SessionOutput(id=session_id, state=state)
    except KeyError:
        # BUG FIX: `status_code` (was `state_code`, an invalid kwarg → TypeError).
        raise HTTPException(status_code=404, detail='session not found')
@app.get('/sessions/{session_id}/cart', response_model=SessionOutput)
async def get_session_cart(session_id: str):
    """Read the current cart: items, price texts, address and delivery time.

    A greyed-out checkout button is taken to mean the cart is empty.
    """
    try:
        session, browser, page, state = sessions[session_id]
        cart_price_texts = await page.eval_on_selector_all(
            '//div[@class="price-info"]/span',
            '(spans) => spans.map((span) => span.innerText)',
        )
        address_texts = await page.eval_on_selector_all(
            '//div[@class="othpart address"]/div[@class="center"]/div',
            '(spans) => spans.map((span) => span.innerText)',
        )
        deliver_time_texts = await page.eval_on_selector_all(
            '//div[@class="othpart time"]/div[@class="center"]/div',
            '(spans) => spans.map((span) => span.innerText)',
        )
        checkout_button_class_dict: Dict[str, str] = await page.eval_on_selector(
            '//button[contains(@class, "to-check")]',
            '(button) => button.classList',
        )
        if 'grey' in checkout_button_class_dict.values():
            # Checkout disabled -> nothing in the cart; skip opening the panel.
            state = 'cart_empty'
            sessions[session_id] = (session, browser, page, state)
            return SessionOutput(
                id=session_id,
                state=state,
                metadata={
                    'items': [],
                    'cart_price_texts': cart_price_texts,
                    'address_texts': address_texts,
                    'deliver_time_texts': deliver_time_texts,
                },
            )
        else:
            # Open the cart panel, scrape item names and quantities, close it.
            await page.click('//div[@class="car"]')
            await page.wait_for_timeout(500)
            item_titles = await page.eval_on_selector_all(
                '//div[contains(@class, "cart-panel-details")]/div[@class="main"]/div/div/div[@class="name"]',
                '(titles) => titles.map(title => title.innerText)',
            )
            item_quantities = await page.eval_on_selector_all(
                '//div[contains(@class, "cart-panel-details")]/div[@class="main"]/div/div/div[@class="count-panel"]/div/input',
                '(quantities) => quantities.map(q => q.value)',
            )
            await page.wait_for_timeout(500)
            await page.click('//div[@class="close"]')
            state = 'cart_viewed'
            sessions[session_id] = (session, browser, page, state)
            return SessionOutput(
                id=session_id,
                state=state,
                metadata={
                    'items': list(map(lambda item: { "name": item[0], "quantity": item[1] }, zip(item_titles, item_quantities))),
                    'cart_price_texts': cart_price_texts,
                    'address_texts': address_texts,
                    'deliver_time_texts': deliver_time_texts,
                },
            )
    except KeyError:
        # BUG FIX: `status_code` (was `state_code`, an invalid kwarg → TypeError).
        raise HTTPException(status_code=404, detail='session not found')
@app.post('/sessions/{session_id}/checkout', response_model=SessionOutput)
async def checkout_session(session_id: str):
    """Start checkout with Alipay and decode the payment QR from a screenshot."""
    try:
        session, browser, page, state = sessions[session_id]
        await page.click('//button[contains(@class, "to-check")]')
        await page.wait_for_timeout(2000)
        await page.click('button.btn-pay')
        await page.wait_for_timeout(500)
        await page.click('p:text("支付宝")')
        await page.wait_for_timeout(500)
        await page.click('button.sure')
        await page.wait_for_timeout(5000)
        # Alipay opens in a second tab; screenshot it and decode the QR code.
        payment_page: Page = browser.pages[1]
        screenshot = await payment_page.screenshot(full_page=True)
        bytes_as_np_array = np.frombuffer(screenshot, dtype=np.uint8)
        img = cv2.imdecode(bytes_as_np_array, cv2.IMREAD_ANYCOLOR)
        qr_data, bbox, rectified_image = qr_decoder.detectAndDecode(img)
        # if len(qr_data) > 0:
        #     rectified_image = np.uint8(rectified_image)
        #     cv2.startWindowThread()
        #     cv2.imshow('qr', rectified_image)
        #     cv2.waitKey()
        state = 'payment_triggered'
        metadata = {'payment_qr_url': qr_data}
        sessions[session_id] = (session, browser, page, state)
        # await page.wait_for_timeout(1000)
        # await page.click('div.payFooterContainer > button.sure') # on success
        # await page.click('div.payFooterContainer > button.defaultBtn') # on failure
        return SessionOutput(id=session_id, state=state, metadata=metadata)
    except KeyError:
        # BUG FIX: `status_code` (was `state_code`, an invalid kwarg → TypeError).
        raise HTTPException(status_code=404, detail='session not found')
@app.post('/sessions/{session_id}/checkout/retry', response_model=SessionOutput)
async def checkout_session_retry(session_id: str):
    """Re-capture the payment QR; re-trigger payment if the Alipay tab closed."""
    try:
        session, browser, page, state = sessions[session_id]
        if len(browser.pages) > 1:
            # Payment tab still open: just re-screenshot and re-decode the QR.
            payment_page: Page = browser.pages[1]
            screenshot = await payment_page.screenshot(full_page=True)
            bytes_as_np_array = np.frombuffer(screenshot, dtype=np.uint8)
            img = cv2.imdecode(bytes_as_np_array, cv2.IMREAD_ANYCOLOR)
            qr_data, bbox, rectified_image = qr_decoder.detectAndDecode(img)
            state = 'payment_triggered'
            metadata = {'payment_qr_url': qr_data}
            sessions[session_id] = (session, browser, page, state)
            # BUG FIX: metadata was computed but dropped from the response,
            # so the client never received the refreshed payment QR URL.
            return SessionOutput(id=session_id, state=state, metadata=metadata)
        else:
            # Payment tab gone: dismiss the failure dialog and re-trigger payment.
            await page.click('div.payFooterContainer > button.defaultBtn') # on failure
            await page.wait_for_timeout(500)
            await page.click('button.sure')
            await page.wait_for_timeout(5000)
            payment_page: Page = browser.pages[1]
            screenshot = await payment_page.screenshot(full_page=True)
            bytes_as_np_array = np.frombuffer(screenshot, dtype=np.uint8)
            img = cv2.imdecode(bytes_as_np_array, cv2.IMREAD_ANYCOLOR)
            qr_data, bbox, rectified_image = qr_decoder.detectAndDecode(img)
            state = 'payment_triggered'
            metadata = {'payment_qr_url': qr_data}
            sessions[session_id] = (session, browser, page, state)
            # BUG FIX: same as above — include the QR metadata in the response.
            return SessionOutput(id=session_id, state=state, metadata=metadata)
    except KeyError:
        # BUG FIX: `status_code` (was `state_code`, an invalid kwarg → TypeError).
        raise HTTPException(status_code=404, detail='session not found')
@app.post('/sessions/{session_id}/checkout/success', response_model=SessionOutput)
async def checkout_session_success(session_id: str):
    """Close the Alipay tab (if open) and confirm payment success on the site."""
    try:
        session, browser, page, state = sessions[session_id]
        if len(browser.pages) > 1:
            payment_page: Page = browser.pages[1]
            await payment_page.close()
        await page.wait_for_timeout(1000)
        await page.click('div.payFooterContainer > button.sure') # on success
        # await page.click('div.payFooterContainer > button.defaultBtn') # on failure
        state = 'payment_success'
        sessions[session_id] = (session, browser, page, state)
        return SessionOutput(id=session_id, state=state)
    except KeyError:
        # BUG FIX: `status_code` (was `state_code`, an invalid kwarg → TypeError).
        raise HTTPException(status_code=404, detail='session not found')
@app.get('/sessions/{session_id}', response_model=SessionOutput)
async def get_session(session_id: str):
    """Return the stored state label for a session, or 404 if unknown."""
    try:
        session, browser, page, state = sessions[session_id]
        return SessionOutput(id=session_id, state=state)
    except KeyError:
        # BUG FIX: `status_code` (was `state_code`, an invalid kwarg → TypeError).
        raise HTTPException(status_code=404, detail='session not found')
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# author: FesianXu
# 20210227
import json
import copy
import os
import sys
import argparse
# Placeholder value meaning "no sub-category" in the register JSON.
UNK_MARK = ""
# Separator for hierarchical category paths, e.g. "cv.detection".
CATE_SPLIT_MARK = '.'
# Separator between begin and end page numbers, e.g. "100--108".
PAGES_MARK = '--'
# Column separator used when writing the CSV table.
CSV_SPLIT_MARK = ','
# Synthetic root node name for category-tree traversals.
ROOT_MARK = "root"
# Repository-relative input/output locations (script is run from its own dir).
REGISTER_CATE_JSON = '../papers_categories/register_categories.json'
CATE_TREE_PATH = '../papers_categories/categories_tree.txt'
JSON_LIST_PATH = "../papers_categories/json_list/"
CSV_TABLE_PATH = "../papers_categories/csv_tabls.csv"
PAPERS_POOL = "../papers_pool/"
PAPERS_DIST = "../papers_dist/"
def parse_file(name):
    """Load a UTF-8 encoded JSON file and return the parsed object."""
    with open(name, 'r', encoding='utf-8') as handle:
        parsed = json.load(handle)
    return parsed
class key_type_enum:
    """Namespace of field-requirement levels used in register_keys."""
    required = "required"
    optional = "optional"
class key_dtype_enum:
    """Namespace of value dtypes used in register_keys (JSON lists arrive as dicts here)."""
    string = "string"
    list = "list"
class key_names:
    """Namespace of all recognized field names in a paper's register JSON."""
    paper_names = "paper_names"
    alias = "alias"
    author = "authors"
    categories = "categories"
    year = "year"
    publisher = "publisher"
    pages = "pages"
    brief = "brief"
    blogs = "blogs"
    maintainers = "maintainers"
    download_link = "download_link"
    register_name = "register_name"
# Schema table: every recognized field with its requirement level and dtype.
register_keys = [
    {"key_name": key_names.paper_names, "type": key_type_enum.required, "dtype": key_dtype_enum.string},
    {'key_name': key_names.register_name, "type": key_type_enum.optional, "dtype": key_dtype_enum.string},
    {"key_name": key_names.alias, "type": key_type_enum.optional, "dtype": key_dtype_enum.list},
    {"key_name": key_names.author, "type": key_type_enum.required, "dtype": key_dtype_enum.string},
    {"key_name": key_names.categories, "type": key_type_enum.required, "dtype": key_dtype_enum.list},
    {'key_name': key_names.year, "type": key_type_enum.required, "dtype": key_dtype_enum.string},
    {'key_name': key_names.publisher, "type": key_type_enum.required, "dtype": key_dtype_enum.string},
    {"key_name": key_names.pages, "type": key_type_enum.optional, "dtype": key_dtype_enum.string},
    {"key_name": key_names.brief, "type": key_type_enum.required, "dtype": key_dtype_enum.string},
    {"key_name": key_names.blogs, "type": key_type_enum.optional, "dtype": key_dtype_enum.list},
    {"key_name": key_names.maintainers, "type": key_type_enum.required, "dtype": key_dtype_enum.list},
    {"key_name": key_names.download_link, "type": key_type_enum.optional, "dtype": key_dtype_enum.string}
]
# Inclusive [min, max] bound accepted by _check_valid_year.
valid_year_range = [0,9999]
# Derived lookup tables: all field names, the required subset, and name -> dtype.
valid_keys = [x['key_name'] for x in register_keys]
required_keys = [x['key_name'] for x in register_keys if x['type'] == key_type_enum.required]
dtype_map = {}
for each in register_keys:
    dtype_map[each['key_name']] = each['dtype']
def _recur_builder(key, cate_tree, register_cates):
    """Depth-first expansion of `key`'s sub-categories into `cate_tree[key]`."""
    if key not in register_cates:
        return
    cate_tree[key] = dict()
    for _, child in register_cates[key].items():
        # UNK_MARK entries are placeholders meaning "no sub-category".
        if child == UNK_MARK:
            continue
        cate_tree[key][child] = dict()
        _recur_builder(child, cate_tree[key], register_cates)
def parse_register_cate_tree(json_file):
    """Build the nested category tree from the register-categories JSON file."""
    register_cates = parse_file(json_file)
    cate_tree = dict()
    # Seed the tree with every top-level ("root") category value.
    for _, root_value in register_cates['root'].items():
        cate_tree[root_value] = {}
    # Recursively expand each root category's sub-tree in place.
    for root_key in cate_tree:
        _recur_builder(root_key, cate_tree, register_cates)
    return cate_tree
def show(string):
    """Print an informational message with the standard INFO prefix."""
    message = "INFO:{}".format(string)
    print(message)
def _check_valid_pages(iv):
    """Validate a page-range string of the form 'begin--end'.

    Returns True when valid; raises ValueError describing the first problem.
    """
    if not (PAGES_MARK in iv):
        raise ValueError('The pages format error, should be begin_pages--end_pages, like 100--108')
    split_iv = iv.split(PAGES_MARK)
    begin_page = split_iv[0]
    end_page = split_iv[1]
    # A '.' in either half means a non-integer page number was given.
    if '.' in begin_page:
        raise ValueError('begin page should be an integer, but got {}'.format(begin_page))
    if '.' in end_page:
        # BUG FIX: this message previously reported begin_page instead of end_page.
        raise ValueError('end page should be an integer, but got {}'.format(end_page))
    begin_page = int(begin_page)
    end_page = int(end_page)
    if begin_page < 0 or end_page < 0:
        raise ValueError('pages out of range! check the page to ensure it larger than 0!')
    if begin_page > end_page:
        raise ValueError('Woo, begin_pages should less than end_page, but now got {} > {}'.format(begin_page, end_page))
    return True
def _check_valid_year(iv):
    """Validate that `iv` parses to a year inside `valid_year_range`."""
    year = int(iv)
    low = valid_year_range[0]
    high = valid_year_range[1]
    if low <= year <= high:
        return True
    raise ValueError('The year valud should limited at the range of [{}, {}]'.format(low, high))
def _check_valid_categories(cv):
    """Validate one dotted category path (e.g. 'cv.detection') against the tree.

    Walks each path segment down `valid_cate_tree`; raises ValueError on the
    first unknown segment, returns True otherwise.

    NOTE(review): `valid_cate_tree` is only assigned in the `__main__` block
    below, so calling this from an import context raises NameError — confirm
    this function is meant to be script-internal only.
    """
    hier_cv = cv.split(CATE_SPLIT_MARK)
    tmp_tree = valid_cate_tree
    for each in hier_cv:
        if each in tmp_tree:
            # Descend one level for each path segment.
            tmp_tree = tmp_tree[each]
        else:
            raise ValueError('Category error with [{}]. INFO: The category [{}] is NOT in valid categories list. \
    Check your json or contact administer to append a new category!'.format(cv, each))
    return True
def check_info_valid(info, file_name, silence=False):
    """Validate one paper's parsed register JSON against the schema.

    Checks, in order: the file name matches the register key, all required
    fields are present, dtypes are correct, and field contents (categories,
    year, pages) are valid. Raises ValueError on the first problem; returns
    True when everything passes.
    """
    assert isinstance(info, dict), 'Your json parsing result invalid, it should be a dict, check your json!'
    file_name = file_name.split('.')[0]
    key = list(info.keys())
    print("current process: [{}]".format(key[0]))
    # check whether the file name equal to json register key name
    if file_name != key[0]:
        # BUG FIX: format the key itself (key[0]), not the whole one-element list.
        raise ValueError('Your json register key name is [{}], while your file name is [{}]! They should be equal!'.format(key[0], file_name))
    # check if all required segments ready
    all_info_required_keys = set(copy.deepcopy(required_keys))
    for k,v in info.items():
        for ik, iv in v.items():
            if ik in all_info_required_keys:
                all_info_required_keys.remove(ik)
    if len(all_info_required_keys) != 0:
        raise ValueError("required fileds {} are missing! check again and commit!".format(str(all_info_required_keys)))
    # check all segments dtype valid (list-typed fields arrive as JSON objects/dicts)
    for k,v in info.items():
        for ik, iv in v.items():
            if dtype_map[ik] == key_dtype_enum.list:
                if not isinstance(iv, dict):
                    # BUG FIX: the message has three placeholders but only two
                    # arguments were supplied, so raising it crashed with
                    # IndexError and omitted the offending segment's name.
                    raise ValueError('The segment {} is expected to be dtype of {}, but got a {}, check and commit again!'.format(ik, key_dtype_enum.list, type(iv)))
    # check segments' content valid
    for k,v in info.items():
        for ik, iv in v.items():
            if ik not in valid_keys:
                raise ValueError("doc [{}] with key = [{}] is not in valid key list!".format(k, ik))
            if ik == key_names.categories:
                for ck,cv in iv.items():
                    _check_valid_categories(cv)
            if ik == key_names.year:
                _check_valid_year(iv)
            if ik == key_names.pages:
                _check_valid_pages(iv)
    if not silence:
        show("Thanks for your contribution ! This commit is valid! Please appending your paper in paper pools. (make sure your paper is open access.)")
    return True
def replace_comma(info):
    """Replace commas with semicolons in every string-typed field, in place.

    Keeps embedded commas from breaking the CSV columns written by dump2csv.
    """
    doc_key = list(info.keys())[0]
    fields = info[doc_key]
    for field_name in fields:
        if dtype_map[field_name] == key_dtype_enum.string:
            fields[field_name] = fields[field_name].replace(",", ";")
def dump2csv(root_path, csv_name):
    """Validate every paper JSON under `root_path` and dump them into one CSV.

    The CSV is written with 'gbk' encoding; commas inside string fields are
    replaced by semicolons beforehand so they do not break the columns.
    """
    all_jsons = os.listdir(root_path)
    keys_len = len(register_keys)
    keys_seq = []
    # BUG FIX: use a context manager so the file is closed even when one of
    # the validation calls below raises (previously the handle leaked).
    with open(csv_name, 'w', encoding='gbk') as f_write:
        # Header row: one column per registered key, in declaration order.
        for ind, key_entry in enumerate(register_keys):
            title = key_entry['key_name']
            keys_seq.append(title)
            f_write.write(title)
            if ind != keys_len-1:
                f_write.write(CSV_SPLIT_MARK)
        f_write.write('\n')
        # One row per paper JSON file.
        for json_name in all_jsons:
            path = root_path+json_name
            paper_info = parse_file(path)
            check_info_valid(paper_info, json_name, silence=True)
            replace_comma(paper_info)
            for ind, column in enumerate(keys_seq):
                for reg_name, fields in paper_info.items():
                    if column in fields:
                        if dtype_map[column] != key_dtype_enum.list:
                            f_write.write(str(fields[column]))
                        else:
                            # Stringified lists may still contain commas.
                            f_write.write(str(fields[column]).replace(",",";"))
                    if column == key_names.register_name and column not in fields:
                        # Fall back to the register key when no explicit
                        # register_name field is present.
                        f_write.write(reg_name)
                if ind != keys_len-1:
                    f_write.write(CSV_SPLIT_MARK)
            f_write.write('\n')
def check_exists(all_jsons):
    """Detect duplicate registrations across all parsed paper JSONs.

    Raises ValueError on: an exact duplicate register key, a register key that
    collides with another ignoring case, or a duplicate paper title
    (case-insensitive, whitespace-trimmed).
    """
    title_names = {}
    for each in all_jsons:
        key = list(each.keys())[0]
        print("processing :[{}]".format(key))
        if key in title_names:
            raise ValueError('[{}] is duplicated! You may be adding a existed paper. Please use another register name'.format(key))
        # Map register key -> normalized paper title for the later title check.
        title_names[key] = each[key][key_names.paper_names].strip().lower()
    # check the key name with all lower case
    keys_set = set()
    for k,v in title_names.items():
        if k.lower() in keys_set:
            raise ValueError("[{}] may be duplicated since the register name with lower case is duplicated!".format(k))
        else:
            keys_set.add(k.lower())
    # check the paper name
    paper_name_set = set()
    for k,v in title_names.items():
        if v in paper_name_set:
            raise ValueError('[{}], paper name is duplicated!'.format(v))
        else:
            paper_name_set.add(v)
def tree_hierachy(tree):
    """Walk the category tree depth-first and record, per depth, the longest
    key name encountered. Returns a dict: depth -> max key length.
    """
    wrapped = {ROOT_MARK: tree}
    stack = [(ROOT_MARK, wrapped, 0)]
    depth_bound = {}
    while stack:
        cur_k, cur_v, depth = stack.pop()
        previous = depth_bound.get(depth)
        depth_bound[depth] = len(cur_k) if previous is None else max(len(cur_k), previous)
        child_keys = list(cur_v[cur_k].keys())
        child_keys.reverse()  # push right-to-left so traversal visits left first (DLR)
        for child in child_keys:
            stack.append((child, {child: cur_v[cur_k][child]}, depth + 1))
    return depth_bound
def draw_tree(tree, root_mark, root_path = './all_categories/', tree_name='papers_categories_tree.txt'):
    """Render the category tree to a text file via the shell `tree` command.

    Materializes the tree as a temporary directory hierarchy under `root_path`,
    runs `tree` on it redirected into `tree_name`, then deletes the hierarchy.

    NOTE(review): `f_write` is opened but never written — the output file is
    produced entirely by the `tree > file` redirection; the handle looks like
    dead code. Also, paths are interpolated into `os.system` shell commands
    (rm -rf / tree) unquoted — safe only for trusted, space-free paths.
    """
    # pre-order
    assert isinstance(tree, dict), 'your passing json tree should be a parsed dict, check your code !'
    # Remove stale outputs from a previous run.
    if os.path.exists(tree_name):
        print('[Warning]: The tree name {} already existed, deleting it now!'.format(tree_name))
        os.system('rm -rf {}'.format(tree_name))
    if os.path.exists(root_path):
        print('[Warning]: The tmp folder {} already existed, deleting it now!'.format(root_path))
        os.system('rm -rf {}'.format(root_path))
    stack = []
    tree = {ROOT_MARK: tree}
    f_write = open('{}'.format(tree_name), 'w', encoding='utf-8')
    # Iterative DFS: each stack entry is (key, {key: subtree}, depth, folder path).
    stack.append((root_mark, tree, 0, root_path+root_mark+'/'))
    while len(stack) != 0:
        cur_node = stack.pop()
        cur_k, cur_v, depth, last_folder = cur_node
        # One directory per category node.
        if not os.path.exists(last_folder):
            os.makedirs(last_folder)
        keys = list(cur_v[cur_k].keys())
        keys.reverse() # make it begin from left, DLR
        for k in keys:
            cur_folder = last_folder+k+'/'
            v = {k: cur_v[cur_k][k]}
            stack.append((k, v, depth+1, cur_folder))
    # make dirs
    os.system('tree {} > ./{}'.format(root_path, tree_name))
    os.system('rm -rf {}'.format(root_path))
    print('[Warning]: Deleted the tmp root folder {}!'.format(root_path))
    f_write.close()
def paper_dist(all_jsons):
    """Copy each paper's PDF from the pool into every category folder it belongs to.

    Expects `<register_key>.pdf` to exist under PAPERS_POOL; the copy target is
    PAPERS_DIST/<category path>/<sanitized paper name>.pdf.

    NOTE(review): the copy goes through `os.system("cp ...")`, so a missing
    source PDF fails silently; paths with spaces would also break — confirm
    inputs are controlled.
    """
    for each in all_jsons:
        cur_key = list(each.keys())[0]
        cates = each[cur_key][key_names.categories]
        # Sanitize characters that are awkward in file names.
        paper_name = each[cur_key][key_names.paper_names].replace(":", "_")
        paper_name = paper_name.replace(" ", "_")
        print("Proccessing... [{}]".format(cur_key))
        for k, each_c in cates.items():
            src = PAPERS_POOL+cur_key+'.pdf'
            # Dotted category path becomes a nested directory path.
            each_c = each_c.replace('.', "/")
            dst_folder = PAPERS_DIST+each_c+"/"
            if not os.path.exists(dst_folder):
                os.makedirs(dst_folder)
            dst_name = dst_folder+paper_name+'.pdf'
            cmd = "cp {} {}".format(src, dst_name)
            os.system(cmd)
if __name__ == "__main__":
    # CLI front-end: each flag triggers one maintenance task over the repo.
    parser = argparse.ArgumentParser()
    parser.add_argument("--check",action="store_true", help="Check the new submissions valid or not")
    parser.add_argument("--gen_catetree", action="store_true", help="Geneate the categories tree")
    parser.add_argument("--gen_csv", action="store_true", help="Geneate the papers csv table")
    parser.add_argument("--dist", action="store_true", help="Distribute the papers from pool to seperate folders")
    args = parser.parse_args()
    # Global category tree consumed by _check_valid_categories (only defined here).
    valid_cate_tree = parse_register_cate_tree(REGISTER_CATE_JSON)
    print('Current args {}'.format(args))
    if args.gen_catetree:
        # Render the category hierarchy to a text file via draw_tree.
        tree = parse_register_cate_tree(REGISTER_CATE_JSON)
        draw_tree(tree, ROOT_MARK,root_path='../papers_categories/all_categories/', tree_name=CATE_TREE_PATH)
        print("Generate the categories tree at [{}]".format(CATE_TREE_PATH))
    if args.check:
        # Pass 1: duplicate detection across all register JSONs.
        dirs = os.listdir(JSON_LIST_PATH)
        all_jsons = []
        for each in dirs:
            all_jsons.append(parse_file(JSON_LIST_PATH+each))
        check_exists(all_jsons)
        print("[SUCCESS] There is not duplication")
        # Pass 2: per-file schema/content validation.
        for each in dirs:
            path = JSON_LIST_PATH+each
            paper_info = parse_file(path)
            check_info_valid(paper_info, each, silence=True)
        print('[SUCCESS] Json information is valid')
    if args.gen_csv:
        dump2csv(JSON_LIST_PATH, CSV_TABLE_PATH)
        print('[SUCCESS] generate the csv table at {}'.format(CSV_TABLE_PATH))
    if args.dist:
        # Copy PDFs from the pool into per-category folders.
        dirs = os.listdir(JSON_LIST_PATH)
        all_jsons = []
        for each in dirs:
            all_jsons.append(parse_file(JSON_LIST_PATH+each))
        paper_dist(all_jsons)
        print("[SUCCESS] The papers distributed to [{}] successed!".format(PAPERS_DIST))
|
<reponame>SayanGhoshBDA/code-backup<filename>python/advanced_sw/GIT-SCRAPER/scraper.py<gh_stars>10-100
import json
from pprint import pprint
from selenium import webdriver
from bs4 import BeautifulSoup
from pathlib import Path
from tqdm import tqdm
import json
import time
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import ssl
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# setting profile
profile = webdriver.FirefoxProfile()
# Spoof a desktop Firefox user agent for the scraping session.
profile.set_preference("general.useragent.override", "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0")
# Launch the browser with the custom profile; used by all code below.
driver = webdriver.Firefox(profile)
"""
lL16AQItG1
0ZuxGhiqo5U
https://www.youtube.com/watch?v=TEAGqUkQVdM&list=PLAwxTw4SYaPkQXg8TkVdIvYv4HfLG7SiH&index=80&t=0s
PLAwxTw4SYaPkQXg8TkVdIvYv4HfLG7SiH
pl-video-title-link yt-uix-tile-link yt-uix-sessionlink spf-link
https://r4---sn-npoeenez.googlevideo.com/videoplayback?id=o-AEzO4sSvQGyOsTYltnKEJyK5mG3yLzpuWO38lOvC-
R7L&itag=43&source=youtube&requiressl=yes&pl=18&ei=wmDDXNaLEoG8yAXRw4O4CA&mime=video%2Fwebm&gir=yes
&clen=1973658&ratebypass=yes&dur=0.000&lmt=1412609098134912&fvip=15&beids=9466586&c=WEB&ip=192.168.3.11
&ipbits=0&expire=1556329762&sparams=clen,dur,ei,expire,gir,id,ip,ipbits,itag,lmt,mime,mip,mm,mn,ms,mv,pl,
ratebypass,requiressl,source&signature=2BAA31A34BF0D4AA60FBBE2B7DDD1C80B711A9.045DFCDC120404084BD068D4C88842E91F8D2952
&key=cms1&video_id=TEAGqUkQVdM&title=SVM+Response+to+Outliers+-+Intro+to+Machine+Learning&rm=sn-xaxjugvn8t51-hjml7z
&fexp=9466586&req_id=b00e1d5a1b74a3ee&redirect_counter=2&cm2rm=sn-n8vyle6&cms_redirect=yes&mip=172.16.31.10&mm=34
&mn=sn-npoeenez&ms=ltu&mt=1556308486&mv=m
https://r5---sn-npoe7nes.googlevideo.com/videoplayback?id=o-AA25BzPZ9DbOQ8zFL0SKyTCLhrGbl8AMAP-yp5_
7y9Ml&itag=43&source=youtube&requiressl=yes&pl=18&ei=MWPDXOC6DumgzLUP_a2H2Ac&mime=video%2Fwebm&gir=yes
&clen=3466902&ratebypass=yes&dur=0.000&lmt=1411417282671471&fvip=5&c=WEB&ip=192.168.127.12
&ipbits=0&expire=1556330385&sparams=clen,dur,ei,expire,gir,id,ip,ipbits,itag,lmt,mime,mip,mm,mn,ms,mv,pl,
ratebypass,requiressl,source&signature=60AA22CBF4759B159A2D8AFF4246265A3E829CFE.3F4414689A5963849DFCE75C134D087DDDCBB3F3
&key=cms1&video_id=0ZuxGhiqo5U&title=Introduction+Pt.+III&redirect_counter=1&cm2rm=sn-vgqeey76&req_id=95566288b0e0a3ee
&cms_redirect=yes&mip=172.16.31.10&mm=34&mn=sn-npoe7nes&ms=ltu&mt=1556308658&mv=m
#TODO
3.Set option for download all?
"""
def is_playList(this_item):
    """Return 1 when the URL refers to a playlist, otherwise 0."""
    return 1 if 'playlist' in str(this_item) else 0
def download_video(this_link):
    """Open savefrom.net for `this_link` in the shared driver and dump the page.

    NOTE(review): this only navigates and prints the page source — no file is
    actually downloaded yet; looks like a work-in-progress stub.
    """
    # to download the video
    save_link = 'https://en.savefrom.net/#url='+str(this_link)
    driver.get(save_link)
    # Give the page time to load before reading its source.
    time.sleep(5)
    print(driver.page_source)
    pass
# Entry point: decide whether the target URL is a playlist (collect every
# video link from it) or a single video (hand it straight to the downloader).
#url = 'https://www.youtube.com/playlist?list=PLAwxTw4SYaPkQXg8TkVdIvYv4HfLG7SiH' # The first url that is entered, required for ignition
url = 'https://www.youtube.com/watch?v=TEAGqUkQVdM'
if is_playList(url) ==1:
    # it's playlist
    # get the links
    print('REQUESTING... please have patience!')
    driver.get(url)
    # Keep clicking YouTube's "Load more" button until it disappears; the
    # find_element_by_xpath call then raises and we fall out of the loop.
    while True:
        try:
            loadMoreButton = driver.find_element_by_xpath("//button[contains(@aria-label,'Load more')]")
            time.sleep(2)
            loadMoreButton.click()
            time.sleep(5)
        except Exception as e:
            print(e)
            break
    # Parse the fully expanded page and pull out every playlist-entry anchor.
    soup = BeautifulSoup(driver.page_source, 'html.parser')
    tags = soup.findAll("a", {"class": "pl-video-title-link"})
    get_all_href = []
    for item in tqdm(tags):
        # Manually slice the href="..." attribute out of the stringified tag
        # and rebuild an absolute YouTube URL from the relative path.
        item_ = str(item)
        first = str(item_).find('href="')
        #print(first)
        clean_item = item_[first+6:]
        #print(clean_item)
        sec = str(clean_item).find('"')
        #print(sec)
        total_clean = 'https://www.youtube.com'+clean_item[:sec]
        #print("TC::",total_clean)
        get_all_href.append(total_clean)
    NUM_LINKS = len(get_all_href)
    print(NUM_LINKS," video LINKS found")
else:
    # to download a particular video
    download_video(url)
<filename>hmlvaraus/api/hml_reservation.py<gh_stars>1-10
import arrow
import django_filters
import re
import hashlib
import json
import logging
from arrow.parser import ParserError
from django.core.exceptions import PermissionDenied, ImproperlyConfigured, SuspiciousOperation, ValidationError
from django.utils import timezone
from rest_framework import viewsets, serializers, filters, exceptions, permissions, pagination, status
from rest_framework.response import Response
from django_filters.rest_framework import DjangoFilterBackend
from munigeo import api as munigeo_api
from resources.models import Reservation
from hmlvaraus.api.reservation import ReservationSerializer
from hmlvaraus.api.berth import BerthSerializer
from hmlvaraus.models.hml_reservation import HMLReservation
from hmlvaraus.models.purchase import Purchase
from resources.api.base import TranslatedModelSerializer, register_view
from hmlvaraus.utils.utils import RelatedOrderingFilter
from django.utils.translation import ugettext_lazy as _
from hmlvaraus.models.berth import Berth, GroundBerthPrice
from resources.models.resource import Resource, ResourceType
from resources.models.unit import Unit
# NOTE: import order is deliberately preserved around the following star
# import -- regrouping could change which names it shadows.
from paytrailpayments.payments import *
from rest_framework.views import APIView
from django.conf import settings
from django.http import HttpResponseRedirect
from datetime import timedelta
from django.db.models import Q
from rest_framework.exceptions import ParseError
from hmlvaraus import tasks
from hmlvaraus.models.sms_message import SMSMessage
LOG = logging.getLogger(__name__)
class HMLReservationSerializer(TranslatedModelSerializer, munigeo_api.GeoModelSerializer):
    """Read/write serializer for HMLReservation rows.

    Wraps the nested Reservation and Berth serializers and exposes derived
    read-only flags (has_started, has_ended, is_renewed, reserved_by_citizen).
    """
    reservation = ReservationSerializer(required=True)
    is_paid = serializers.BooleanField(required=False)
    reserver_ssn = serializers.CharField(required=False)
    berth = BerthSerializer(required=True)
    has_ended = serializers.SerializerMethodField()
    is_renewed = serializers.SerializerMethodField()
    has_started = serializers.SerializerMethodField()
    reserved_by_citizen = serializers.SerializerMethodField()
    resend_renewal = serializers.BooleanField(required=False)
    # Allow updates with only a subset of fields present.
    partial = True

    class Meta:
        model = HMLReservation
        fields = ['id', 'berth', 'is_paid', 'reserver_ssn', 'reservation', 'state_updated_at', 'is_paid_at', 'key_returned', 'key_returned_at', 'has_ended', 'is_renewed', 'has_started', 'resend_renewal', 'reserved_by_citizen']

    def get_reserved_by_citizen(self, obj):
        # A related Purchase row exists only when the reservation was made
        # through the public purchase flow (PurchaseView).
        return hasattr(obj, 'purchase') and obj.purchase != None

    def get_has_started(self, obj):
        # NOTE(review): this returns True while the reservation has NOT yet
        # started (begin is still in the future) -- name and comparison appear
        # inverted. Confirm how the frontend consumes 'has_started' before
        # changing the operator.
        return obj.reservation.begin > timezone.now()

    def get_has_ended(self, obj):
        # True once the reservation's end time is in the past.
        return obj.reservation.end < timezone.now()

    def get_is_renewed(self, obj):
        # Renewed when any child reservation (created by RenewalView) is confirmed.
        return obj.child.filter(reservation__state=Reservation.CONFIRMED).exists()

    def validate(self, data):
        """Reject a create/update whose time span overlaps another CONFIRMED
        reservation on the same berth (excluding the instance being updated)."""
        request_user = self.context['request'].user
        reservation_data = data.get('reservation')
        hml_reservation_id = self.context['request'].data.get('id')
        if reservation_data != None:
            resource = reservation_data.get('resource')
            if resource:
                overlaps_existing = False
                if hml_reservation_id:
                    # Updating: ignore the row being edited in the overlap check.
                    overlaps_existing = HMLReservation.objects.filter(reservation__begin__lt=reservation_data.get('end'), reservation__end__gt=reservation_data.get('begin'), berth=resource.berth, reservation__state=Reservation.CONFIRMED).exclude(pk=hml_reservation_id).exists()
                else:
                    overlaps_existing = HMLReservation.objects.filter(reservation__begin__lt=reservation_data.get('end'), reservation__end__gt=reservation_data.get('begin'), berth=resource.berth, reservation__state=Reservation.CONFIRMED).exists()
                if overlaps_existing:
                    raise serializers.ValidationError(_('New reservation overlaps existing reservation'))
                # if request_user.is_staff:
                #     two_minutes_ago = timezone.now() - timedelta(minutes=2)
                #     if resource.berth.reserving and resource.berth.reserving > two_minutes_ago:
                #         raise serializers.ValidationError(_('Someone is reserving the berth at the moment'))
        return data

    def create(self, validated_data):
        """Create the nested Reservation plus the HMLReservation.

        Non-staff input is whitelisted field-by-field and forced to CONFIRMED
        state; staff input is passed through as-is. The reserved resource is
        marked non-reservable.
        """
        request_user = self.context['request'].user
        request_reservation_data = validated_data.pop('reservation')
        reservation_data = {}
        if not request_user.is_staff:
            # Copy only the allowed reserver fields; everything else is dropped.
            reservation_data['begin'] = request_reservation_data.get('begin')
            reservation_data['end'] = request_reservation_data.get('end')
            reservation_data['reserver_name'] = request_reservation_data.get('reserver_name', '')
            reservation_data['reserver_email_address'] = request_reservation_data.get('reserver_email_address', '')
            reservation_data['reserver_phone_number'] = request_reservation_data.get('reserver_phone_number', '')
            reservation_data['reserver_address_street'] = request_reservation_data.get('reserver_address_street', '')
            reservation_data['reserver_address_zip'] = request_reservation_data.get('reserver_address_zip', '')
            reservation_data['reserver_address_city'] = request_reservation_data.get('reserver_address_city', '')
            reservation_data['state'] = Reservation.CONFIRMED
            reservation_data['resource'] = request_reservation_data.get('resource')
        else:
            reservation_data = request_reservation_data
        reservation = Reservation.objects.create(**reservation_data)
        resource = reservation_data['resource']
        resource.reservable = False
        resource.save()
        validated_data.pop('berth')
        # Dock berths have no physical key to hand out, so the key counts as
        # already returned.
        key_returned = resource.berth.type == Berth.DOCK
        hmlReservation = HMLReservation.objects.create(reservation=reservation, key_returned=key_returned, berth=resource.berth, **validated_data)
        return hmlReservation

    def update(self, instance, validated_data):
        # PUT rewrites the reservation details; PATCH toggles status flags.
        if self.context['request'].method == 'PUT':
            return self.update_reservation_info(instance, validated_data)
        elif self.context['request'].method == 'PATCH':
            return self.update_reservation_status(instance, validated_data)

    def update_reservation_info(self, instance, validated_data):
        """PUT handler: overwrite the nested reservation's editable fields,
        keeping any field that was not supplied."""
        reservation_data = validated_data.pop('reservation')
        reservation = instance.reservation
        reservation.begin = reservation_data.get('begin', reservation.begin)
        reservation.end = reservation_data.get('end', reservation.end)
        reservation.event_description = reservation_data.get('event_description', reservation.event_description)
        reservation.reserver_name = reservation_data.get('reserver_name', reservation.reserver_name)
        reservation.reserver_email_address = reservation_data.get('reserver_email_address', reservation.reserver_email_address)
        reservation.reserver_phone_number = reservation_data.get('reserver_phone_number', reservation.reserver_phone_number)
        reservation.reserver_address_street = reservation_data.get('reserver_address_street', reservation.reserver_address_street)
        reservation.reserver_address_zip = reservation_data.get('reserver_address_zip', reservation.reserver_address_zip)
        reservation.reserver_address_city = reservation_data.get('reserver_address_city', reservation.reserver_address_city)
        reservation.save()
        return instance

    def update_reservation_status(self, instance, validated_data):
        """PATCH handler: toggle is_paid / key_returned, resend the renewal
        notification, or -- when no recognised flag is present -- cancel."""
        is_paid = validated_data.get('is_paid')
        key_returned = validated_data.get('key_returned')
        resend_renewal = validated_data.get('resend_renewal')
        if is_paid != None:
            if is_paid:
                instance.is_paid_at = timezone.now()
                instance.is_paid = True
            else:
                instance.is_paid_at = None
                instance.is_paid = False
        elif key_returned != None:
            if key_returned:
                instance.key_returned_at = timezone.now()
                instance.key_returned = True
            else:
                instance.key_returned_at = None
                instance.key_returned = False
        elif resend_renewal:
            # Sent asynchronously via a task queue.
            tasks.send_initial_renewal_notification.delay(instance.pk)
        else:
            # No known flag supplied: treat the PATCH as a cancellation.
            instance.cancel_reservation(self.context['request'].user)
        instance.save()
        return instance

    def to_representation(self, instance):
        # Pass-through; kept as an explicit hook for subclasses.
        data = super(HMLReservationSerializer, self).to_representation(instance)
        return data

    def validate_reserver_ssn(self, value):
        """Validate a Finnish social security number: 11 characters whose last
        character is the mod-31 check character of the concatenated digits."""
        number_array = re.findall(r'\d+', value[:-1])
        if not number_array or len(value) != 11:
            raise serializers.ValidationError(_('Social security number not valid'))
        ssn_numbers = int(''.join(str(x) for x in number_array))
        # Official check-character table for Finnish personal identity codes.
        test_array = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D',
                      'E', 'F', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y']
        check_char = test_array[ssn_numbers % 31]
        if not value.endswith(check_char):
            raise serializers.ValidationError(_('Social security number not valid'))
        return value
class HMLReservationGroundBerthSerializer(HMLReservationSerializer):
    """Serializer used when a ground-type berth is created together with the
    reservation (the berth does not exist yet, so nested dicts replace the
    regular nested serializers)."""

    reservation = serializers.DictField(required=True)
    berth = serializers.DictField(required=True)

    def validate(self, data):
        """For staff creating a non-ground berth reservation, reject it while
        someone else holds a fresh (< 2 min) reservation lock on the berth."""
        request_user = self.context['request'].user
        if data['berth']['type'] != Berth.GROUND and request_user.is_staff and data.get('reservation'):
            two_minutes_ago = timezone.now() - timedelta(minutes=2)
            reservation_data = data.get('reservation')
            resource = reservation_data['resource']
            if resource.berth.reserving and resource.berth.reserving > two_minutes_ago:
                raise serializers.ValidationError(_('Someone is reserving the berth at the moment'))
        return data

    def to_representation(self, instance):
        # Render with the full serializer so output matches the regular endpoint.
        serializer = HMLReservationSerializer(instance, context=self.context)
        return serializer.data

    def create(self, validated_data):
        """Create the Reservation, the (possibly new) ground Berth, and the
        HMLReservation tying them together.

        Non-staff input is whitelisted field-by-field; missing begin/end
        default to a one-year span starting now.
        """
        request_user = self.context['request'].user
        request_reservation_data = validated_data.pop('reservation')
        reservation_data = {}
        if not request_user.is_staff:
            reservation_data['begin'] = request_reservation_data.get('begin')
            reservation_data['end'] = request_reservation_data.get('end')
            reservation_data['reserver_name'] = request_reservation_data.get('reserver_name', '')
            reservation_data['reserver_email_address'] = request_reservation_data.get('reserver_email_address', '')
            reservation_data['reserver_phone_number'] = request_reservation_data.get('reserver_phone_number', '')
            reservation_data['reserver_address_street'] = request_reservation_data.get('reserver_address_street', '')
            reservation_data['reserver_address_zip'] = request_reservation_data.get('reserver_address_zip', '')
            reservation_data['reserver_address_city'] = request_reservation_data.get('reserver_address_city', '')
            reservation_data['state'] = Reservation.CONFIRMED
        else:
            reservation_data = request_reservation_data
        if not reservation_data.get('begin') or not reservation_data.get('end'):
            reservation_data['begin'] = timezone.now()
            reservation_data['end'] = timezone.now() + timedelta(days=365)
        berth_dict = validated_data.pop('berth')
        berth = None
        if not berth_dict.get('id'):
            if berth_dict.get('type') != Berth.GROUND:
                raise serializers.ValidationError(_('Only ground type berths can be created with reservation'))
            if request_user.is_staff:
                berth = self.create_berth(berth_dict)
            else:
                # Citizens get an auto-generated unnumbered ground berth at the
                # latest configured price (fallback 30.00 when none configured).
                ground_berth_price = 30.00
                try:
                    ground_berth_price = GroundBerthPrice.objects.latest('id').price
                except Exception:
                    pass
                berth = self.create_berth({
                    "price": ground_berth_price,
                    "type": "ground",
                    "resource": {
                        "name": "Numeroimaton",
                        "name_fi": "Numeroimaton",
                        "unit": Unit.objects.get(name__icontains='poletti'),
                        "reservable": True
                    },
                    "length_cm": 0,
                    "width_cm": 0,
                    "depth_cm": 0
                })
        reservation_data['resource'] = berth.resource
        reservation = Reservation.objects.create(**reservation_data)
        resource = reservation.resource
        resource.reservable = False
        resource.save()
        hmlReservation = HMLReservation.objects.create(reservation=reservation, berth=berth, **validated_data)
        return hmlReservation

    def create_berth(self, berth):
        """Create a Berth plus its backing Resource, filling in price, unit and
        resource type defaults when the caller omitted them."""
        resource_data = berth.pop('resource')
        if not berth.get('price'):
            ground_berth_price = 30.00
            try:
                ground_berth_price = GroundBerthPrice.objects.latest('id').price
            except Exception:
                # No price configured; fall back to the default.
                pass
            berth['price'] = ground_berth_price
        if not resource_data.get('unit_id') and not resource_data.get('unit'):
            resource_data['unit'] = Unit.objects.get(name__icontains='poletti')
        # BUG FIX: the original read `get('type_id' and not ...get('type'))` --
        # a misplaced parenthesis that overwrote a caller-supplied 'type'.
        # Only supply the default type when neither 'type_id' nor 'type' is set.
        if not resource_data.get('type_id') and not resource_data.get('type'):
            resource_data['type'] = ResourceType.objects.get(Q(name__icontains='vene') | Q(name__icontains='boat'))
        resource = Resource.objects.create(**resource_data)
        new_berth = Berth.objects.create(resource=resource, **berth)
        return new_berth
class PurchaseSerializer(TranslatedModelSerializer, munigeo_api.GeoModelSerializer):
    """Read-only-ish serializer for Purchase rows; embeds the full reservation."""
    # Nested read-only view of the reservation the purchase belongs to.
    hml_reservation = HMLReservationSerializer(read_only=True)

    class Meta:
        model = Purchase
        fields = ['id', 'hml_reservation', 'purchase_code', 'reserver_name', 'reserver_email_address', 'reserver_phone_number', 'reserver_address_street', 'reserver_address_zip', 'reserver_address_city', 'vat_percent', 'price_vat', 'product_name', 'purchase_process_started', 'purchase_process_success', 'purchase_process_failure', 'purchase_process_notified']
class HMLReservationFilter(django_filters.FilterSet):
    """Query-parameter filters for the HMLReservation list endpoint."""
    # Filter by the unit that owns the reserved resource.
    unit_id = django_filters.CharFilter(name="reservation__resource__unit_id")
    # NOTE(review): 'reservation__resource__begin' looks suspect -- a Resource
    # has no obvious 'begin'; 'reservation__begin' was probably intended.
    # Time filtering is otherwise handled by HMLReservationFilterBackend.
    begin = django_filters.DateTimeFromToRangeFilter(name="reservation__resource__begin")
    is_paid = django_filters.BooleanFilter(name="is_paid")

    class Meta:
        model = HMLReservation
        fields = ['unit_id', 'is_paid']
class HMLReservationFilterBackend(filters.BaseFilterBackend):
    """
    Filter reservations by time.
    """

    # Maps date_filter_type to the (begin-param lookup, end-param lookup) pair
    # applied to the queryset. 'all' matches any overlap; 'begin'/'end' match
    # on the reservation's start/end field alone.
    _LOOKUPS = {
        'all': ('reservation__end__gte', 'reservation__begin__lte'),
        'begin': ('reservation__begin__gte', 'reservation__begin__lte'),
        'end': ('reservation__end__gte', 'reservation__end__lte'),
    }

    def filter_queryset(self, request, queryset, view):
        """Apply show_cancelled, date_filter_type and begin/end params."""
        params = request.query_params
        if 'show_cancelled' not in params:
            queryset = queryset.exclude(reservation__state='cancelled')
        filter_type = params.get('date_filter_type', 'all')
        times = {}
        for name in ('begin', 'end'):
            if name in params:
                try:
                    times[name] = arrow.get(params[name]).to('utc').datetime
                except ParserError:
                    raise exceptions.ParseError("'%s' must be a timestamp in ISO 8601 format" % name)
        lookups = self._LOOKUPS.get(filter_type)
        if lookups is not None:
            begin_lookup, end_lookup = lookups
            if times.get('begin', None):
                queryset = queryset.filter(**{begin_lookup: times['begin']})
            if times.get('end', None):
                queryset = queryset.filter(**{end_lookup: times['end']})
        return queryset
class HMLReservationPagination(pagination.PageNumberPagination):
    """Page-number pagination that reports bare page numbers for next/previous
    (empty string when there is no such page) instead of full URLs."""

    page_size = 20
    page_size_query_param = 'page_size'
    max_page_size = 5000

    def get_paginated_response(self, data):
        """Wrap *data* with next/previous page numbers and the total count."""
        page = self.page
        next_page = page.next_page_number() if page.has_next() else ''
        previous_page = page.previous_page_number() if page.has_previous() else ''
        return Response({
            'next': next_page,
            'previous': previous_page,
            'count': page.paginator.count,
            'results': data
        })
class StaffWriteOnly(permissions.BasePermission):
    """Permission: anyone may read (safe methods); only staff may write."""

    def has_permission(self, request, view):
        # Safe (read-only) methods are always allowed.
        if request.method in permissions.SAFE_METHODS:
            return True
        return request.user.is_staff
class HMLReservationViewSet(munigeo_api.GeoModelAPIView, viewsets.ModelViewSet):
    """CRUD API for HMLReservation; reads are public to authenticated users,
    writes require staff (see StaffWriteOnly)."""

    queryset = HMLReservation.objects.all().select_related('reservation', 'reservation__user', 'reservation__resource', 'reservation__resource__unit')
    serializer_class = HMLReservationSerializer
    lookup_field = 'id'
    permission_classes = [StaffWriteOnly, permissions.IsAuthenticated]
    filter_class = HMLReservationFilter
    filter_backends = (DjangoFilterBackend, filters.SearchFilter, RelatedOrderingFilter, HMLReservationFilterBackend)
    # BUG FIX: was `('reserver_ssn')` -- a plain string, not a tuple, because
    # the trailing comma was missing; iterating it yielded single characters.
    filter_fields = ('reserver_ssn',)
    search_fields = ['reserver_ssn', 'reservation__billing_address_street', 'reservation__reserver_email_address', 'reservation__reserver_name', 'reservation__reserver_phone_number']
    # '__all__' lets RelatedOrderingFilter order on any (related) field.
    ordering_fields = '__all__'
    pagination_class = HMLReservationPagination

    def get_serializer_class(self):
        """Use the ground-berth serializer only when POSTing a new (id-less)
        ground-type berth together with the reservation."""
        if self.request.method == 'POST' and self.request.data.get('berth'):
            if self.request.data.get('berth').get('type') == Berth.GROUND and not self.request.data.get('berth').get('id'):
                return HMLReservationGroundBerthSerializer
            else:
                return HMLReservationSerializer
        return HMLReservationSerializer

    def perform_create(self, serializer):
        """Verify the client's reservation-lock code for existing berths, then
        save and queue the confirmation message."""
        request = self.request
        if request.data.get('berth').get('type') != Berth.GROUND:
            code = request.data.pop('code')
            berth = Berth.objects.get(pk=request.data['berth']['id'], is_deleted=False)
            # The lock code is the SHA-1 of the berth's 'reserving' timestamp,
            # handed out by PurchaseView.patch() when the lock was taken.
            if code != hashlib.sha1(str(berth.reserving).encode('utf-8')).hexdigest():
                raise ValidationError(_('Invalid meta data'))
        hml_reservation = serializer.save()
        tasks.send_confirmation.delay(hml_reservation.pk)
class PurchaseView(APIView):
    """Public Paytrail purchase endpoint.

    POST creates the reservation (and, for ground berths, the berth) and
    returns the Paytrail payment query string; GET handles the Paytrail
    success/failure/notification callbacks plus staff purchase listings;
    PATCH acquires a short-lived reservation lock on a berth.
    """
    permission_classes = (permissions.AllowAny,)

    def post(self, request, format=None):
        """Create a one-year reservation for a citizen and start the payment."""
        if request.user.is_authenticated():
            # BUG FIX: the exception was instantiated but never raised, so this
            # check was a no-op and authenticated users could use the endpoint.
            raise PermissionDenied(_('This API is only for non-authenticated users'))
        if not settings.PAYTRAIL_MERCHANT_ID or not settings.PAYTRAIL_MERCHANT_SECRET:
            raise ImproperlyConfigured(_('Paytrail credentials are incorrect or missing'))
        reservation = request.data['reservation']
        # Citizen purchases always run for exactly one year starting now.
        reservation['begin'] = timezone.now()
        reservation['end'] = timezone.now() + timedelta(days=365)
        request.data['reservation'] = reservation
        if request.data.get('berth').get('type') != Berth.GROUND:
            # Existing berth: the client must echo back the reservation-lock
            # code issued by PurchaseView.patch().
            code = request.data.pop('code')
            berth = Berth.objects.get(pk=request.data['berth']['id'], is_deleted=False)
            if code != hashlib.sha1(str(berth.reserving).encode('utf-8')).hexdigest():
                raise ValidationError(_('Invalid meta data'))
            serializer = HMLReservationSerializer(data=request.data, context={'request': request})
        else:
            serializer = HMLReservationGroundBerthSerializer(data=request.data, context={'request': request})
        if serializer.is_valid():
            reservation = serializer.save()
            url = request.build_absolute_uri()
            # Purchase code: SHA-1 of created_at + pk; also used in callback URLs.
            purchase_code = hashlib.sha1(str(reservation.reservation.created_at).encode('utf-8') + str(reservation.pk).encode('utf-8')).hexdigest()
            contact = PaytrailContact(**reservation.get_payment_contact_data())
            product = PaytrailProduct(**reservation.get_payment_product_data())
            url_set = PaytrailUrlset(success_url=url + '?success=' + purchase_code, failure_url=url + '?failure=' + purchase_code, notification_url=url + '?notification=' + purchase_code)
            purchase = Purchase.objects.create(hml_reservation=reservation,
                purchase_code=purchase_code,
                reserver_name=reservation.reservation.reserver_name,
                reserver_email_address=reservation.reservation.reserver_email_address,
                reserver_phone_number=reservation.reservation.reserver_phone_number,
                reserver_address_street=reservation.reservation.reserver_address_street,
                reserver_address_zip=reservation.reservation.reserver_address_zip,
                reserver_address_city=reservation.reservation.reserver_address_city,
                vat_percent=product.get_data()['vat'],
                price_vat=product.get_data()['price'],
                product_name=product.get_data()['title']
            )
            payment = PaytrailPaymentExtended(
                service='VARAUS',
                product='VENEPAIKKA',
                product_type=product.get_data()['berth_type'],
                order_number=purchase.pk,
                contact=contact,
                urlset=url_set
            )
            payment.add_product(product)
            # NOTE(review): 'payer_parson_lastname' and 'payer_person_add_postal_code'
            # look misspelled, but they are keyword names consumed by
            # PaytrailArguments -- do not rename without checking its signature.
            query_string = PaytrailArguments(
                merchant_auth_hash=settings.PAYTRAIL_MERCHANT_SECRET,
                merchant_id=settings.PAYTRAIL_MERCHANT_ID,
                url_success=url + '?success=' + purchase_code,
                url_cancel=url + '?failure=' + purchase_code,
                url_notify=url + '?notification=' + purchase_code,
                order_number=payment.get_data()['orderNumber'],
                params_in=(
                    'MERCHANT_ID,'
                    'URL_SUCCESS,'
                    'URL_CANCEL,'
                    'URL_NOTIFY,'
                    'ORDER_NUMBER,'
                    'PARAMS_IN,'
                    'PARAMS_OUT,'
                    'PAYMENT_METHODS,'
                    'ITEM_TITLE[0],'
                    'ITEM_ID[0],'
                    'ITEM_QUANTITY[0],'
                    'ITEM_UNIT_PRICE[0],'
                    'ITEM_VAT_PERCENT[0],'
                    'ITEM_DISCOUNT_PERCENT[0],'
                    'ITEM_TYPE[0],'
                    'PAYER_PERSON_PHONE,'
                    'PAYER_PERSON_EMAIL,'
                    'PAYER_PERSON_FIRSTNAME,'
                    'PAYER_PERSON_LASTNAME,'
                    'PAYER_PERSON_ADDR_STREET,'
                    'PAYER_PERSON_ADDR_POSTAL_CODE,'
                    'PAYER_PERSON_ADDR_TOWN'
                ),
                params_out='PAYMENT_ID,TIMESTAMP,STATUS',
                payment_methods='1,2,3,5,6,10,50,51,52,61',
                item_title=product.get_data()['title'],
                item_id=product.get_data()['code'],
                item_quantity=product.get_data()['amount'],
                item_unit_price=product.get_data()['price'],
                item_vat_percent=product.get_data()['vat'],
                item_discount_percent=product.get_data()['discount'],
                item_type=product.get_data()['type'],
                payer_person_phone=contact.get_data()['mobile'],
                payer_person_email=contact.get_data()['email'],
                payer_person_firstname=contact.get_data()['firstName'],
                payer_parson_lastname=contact.get_data()['lastName'],
                payer_person_addr_street=contact.get_data()['address']['street'],
                payer_person_add_postal_code=contact.get_data()['address']['postalCode'],
                payer_person_addr_town=contact.get_data()['address']['postalOffice'],
            )
            return Response({'query_string': query_string.get_data()}, status=status.HTTP_200_OK)
        else:
            LOG.info(serializer.errors)
            raise ValidationError(_('Invalid payment data'))

    def get(self, request, format=None):
        """Handle Paytrail return URLs (?success / ?failure / ?notification),
        single-purchase lookup by ?code, or a staff listing by date range."""
        if request.GET.get('success', None):
            if not settings.PAYTRAIL_MERCHANT_ID or not settings.PAYTRAIL_MERCHANT_SECRET:
                raise ImproperlyConfigured(_('Paytrail credentials are incorrect or missing'))
            client = PaytrailAPIClient(merchant_id=settings.PAYTRAIL_MERCHANT_ID, merchant_secret=settings.PAYTRAIL_MERCHANT_SECRET)
            # Verify Paytrail's RETURN_AUTHCODE checksum before trusting params.
            if not client.validate_callback_data(request.GET):
                raise ValidationError(_('Checksum failed. Invalid payment.'))
            purchase_code = request.GET.get('success', None)
            purchase = Purchase.objects.get(purchase_code=purchase_code)
            purchase.payment_service_order_number = request.GET.get('ORDER_NUMBER', None)
            purchase.payment_service_timestamp = request.GET.get('TIMESTAMP', None)
            purchase.payment_service_paid = request.GET.get('PAID', None)
            purchase.payment_service_method = request.GET.get('METHOD', None)
            purchase.payment_service_return_authcode = request.GET.get('RETURN_AUTHCODE', None)
            purchase.save()
            purchase.set_success()
            return HttpResponseRedirect('/#purchase/' + purchase_code)
        elif request.GET.get('failure', None):
            if not settings.PAYTRAIL_MERCHANT_ID or not settings.PAYTRAIL_MERCHANT_SECRET:
                raise ImproperlyConfigured(_('Paytrail credentials are incorrect or missing'))
            client = PaytrailAPIClient(merchant_id=settings.PAYTRAIL_MERCHANT_ID, merchant_secret=settings.PAYTRAIL_MERCHANT_SECRET)
            if not client.validate_callback_data(request.GET):
                raise ValidationError(_('Checksum failed. Invalid payment.'))
            purchase_code = request.GET.get('failure', None)
            purchase = Purchase.objects.get(purchase_code=purchase_code)
            purchase.payment_service_order_number = request.GET.get('ORDER_NUMBER', None)
            purchase.payment_service_timestamp = request.GET.get('TIMESTAMP', None)
            purchase.payment_service_paid = request.GET.get('PAID', None)
            purchase.payment_service_method = request.GET.get('METHOD', None)
            purchase.payment_service_return_authcode = request.GET.get('RETURN_AUTHCODE', None)
            purchase.save()
            purchase.set_failure()
            # Failed payment releases the reservation.
            purchase.hml_reservation.cancel_reservation(self.request.user)
            return HttpResponseRedirect('/#purchase/' + purchase_code)
        elif request.GET.get('notification', None):
            if not settings.PAYTRAIL_MERCHANT_ID or not settings.PAYTRAIL_MERCHANT_SECRET:
                raise ImproperlyConfigured(_('Paytrail credentials are incorrect or missing'))
            client = PaytrailAPIClient(merchant_id=settings.PAYTRAIL_MERCHANT_ID, merchant_secret=settings.PAYTRAIL_MERCHANT_SECRET)
            if not client.validate_callback_data(request.GET):
                raise ValidationError(_('Checksum failed. Invalid payment.'))
            purchase_code = request.GET.get('notification', None)
            purchase = Purchase.objects.get(purchase_code=purchase_code)
            # Server-to-server confirmation: mark the reservation paid.
            purchase.hml_reservation.set_paid(True)
            purchase.set_notification()
            return Response({}, status=status.HTTP_200_OK)
        elif request.GET.get('code', None):
            if not settings.PAYTRAIL_MERCHANT_ID or not settings.PAYTRAIL_MERCHANT_SECRET:
                raise ImproperlyConfigured(_('Paytrail credentials are incorrect or missing'))
            client = PaytrailAPIClient(merchant_id=settings.PAYTRAIL_MERCHANT_ID, merchant_secret=settings.PAYTRAIL_MERCHANT_SECRET)
            purchase_code = request.GET.get('code', None)
            purchase = Purchase.objects.get(purchase_code=purchase_code)
            # The purchase report may be viewed only once.
            if purchase.report_is_seen():
                raise PermissionDenied(_('Youre not allowed to see this purchase'))
            serializer = PurchaseSerializer(purchase, context={'request': request})
            return Response(serializer.data, status=status.HTTP_200_OK)
        else:
            # Staff listing of purchases in a date range.
            if not request.user.is_authenticated() or not request.user.is_staff:
                raise PermissionDenied(_('This API is only for authenticated users'))
            if 'start' not in self.request.GET or 'end' not in self.request.GET:
                raise ParseError(_('Invalid parameters provided'))
            start = self.request.GET.get('start')
            end = self.request.GET.get('end')
            show_failed = self.request.GET.get('show_failed')
            if show_failed == 'true':
                show_failed = True
            else:
                show_failed = False
            purchases = Purchase.objects.filter(purchase_process_started__gte=start, purchase_process_started__lte=end)
            if not show_failed:
                purchases = purchases.exclude(purchase_process_success__isnull=True)
            purchases = purchases.order_by('-purchase_process_started')
            serializer = PurchaseSerializer(purchases, many=True, context={'request': request})
            return Response(serializer.data, status=status.HTTP_200_OK)
        return Response({}, status=status.HTTP_404_NOT_FOUND)

    def patch(self, request, format=None):
        """Mark a purchase report as seen, or take/refresh a ~60 s reservation
        lock on a berth and return the lock code the client must echo back."""
        body_unicode = request.body.decode('utf-8')
        # Requires the module-level `import json`.
        body = json.loads(body_unicode)
        if body.get('report_seen', None) and body.get('code', None):
            purchase_code = body.get('code', None)
            purchase = Purchase.objects.get(purchase_code=purchase_code)
            purchase.set_report_seen()
            return Response({}, status=status.HTTP_200_OK)
        if body.get('resource', None):
            time = timezone.now()
            berth = Berth.objects.get(resource_id=body.get('resource', None), is_deleted=False)
            # Grant the lock when it is free, stale (>59 s), or held by this
            # same staff member.
            if not berth.reserving or (time - berth.reserving).total_seconds() > 59 or berth.reserving_staff_member == request.user:
                berth.reserving = time
                if request.user and request.user.is_staff:
                    berth.reserving_staff_member = request.user
                else:
                    berth.reserving_staff_member = None
                berth.save()
            else:
                return Response(None, status=status.HTTP_404_NOT_FOUND)
            return Response({'code': hashlib.sha1(str(berth.reserving).encode('utf-8')).hexdigest()}, status=status.HTTP_200_OK)
        return Response(None, status=status.HTTP_404_NOT_FOUND)
class RenewalView(APIView):
    """Renew an expiring reservation for a further year.

    POST with 'code' is the citizen flow (returns a Paytrail query string for
    payment); POST with 'reservation_id' is the staff flow (no payment).
    GET with 'code' previews the reservation attached to a renewal code.
    """
    permission_classes = (permissions.AllowAny,)

    def post(self, request, format=None):
        # NOTE(review): if neither 'code' nor 'reservation_id' is supplied the
        # method falls through and returns None -- confirm whether an explicit
        # error response was intended.
        if(request.data.get('code')):
            code = request.data.pop('code')
            if not code:
                raise ValidationError(_('Invalid renewal code'))
            # Exactly one confirmed, unexpired, not-yet-renewed reservation may
            # match the renewal code.
            old_hml_reservation_qs = HMLReservation.objects.filter(renewal_code=code, reservation__state=Reservation.CONFIRMED, reservation__end__gte=timezone.now()).exclude(child__reservation__state=Reservation.CONFIRMED).distinct()
            if len(old_hml_reservation_qs) != 1:
                raise ValidationError(_('Invalid reservation id'))
            old_hml_reservation = old_hml_reservation_qs.first()
            old_reservation = old_hml_reservation.reservation
            # The renewal spans one year starting where the old one ends.
            new_start = old_reservation.end
            new_end = new_start + timedelta(days=365)
            parent_id = old_hml_reservation.pk
            # Setting pk = None turns the loaded instances into copies: the
            # subsequent save() INSERTs new rows instead of updating. Order of
            # the following statements matters.
            old_reservation.pk = None
            old_hml_reservation.pk = None
            new_reservation = old_reservation
            new_hml_reservation = old_hml_reservation
            new_reservation.begin = new_start
            new_reservation.end = new_end
            overlaps_existing = HMLReservation.objects.filter(reservation__begin__lt=new_end, reservation__end__gt=new_start, berth=new_hml_reservation.berth, reservation__state=Reservation.CONFIRMED).exists()
            if overlaps_existing:
                raise serializers.ValidationError(_('New reservation overlaps existing reservation'))
            # Apply any updated contact details supplied with the renewal.
            if request.data.get('reserver_email_address'):
                new_reservation.reserver_email_address = request.data.get('reserver_email_address')
            if request.data.get('reserver_phone_number'):
                new_reservation.reserver_phone_number = request.data.get('reserver_phone_number')
            if request.data.get('reserver_address_street'):
                new_reservation.reserver_address_street = request.data.get('reserver_address_street')
            if request.data.get('reserver_address_zip'):
                new_reservation.reserver_address_zip = request.data.get('reserver_address_zip')
            if request.data.get('reserver_address_city'):
                new_reservation.reserver_address_city = request.data.get('reserver_address_city')
            new_reservation.save()
            new_hml_reservation.reservation = new_reservation
            new_hml_reservation.parent_id = parent_id
            # Reset per-cycle bookkeeping on the copy.
            new_hml_reservation.renewal_notification_day_sent_at = None
            new_hml_reservation.renewal_notification_week_sent_at = None
            new_hml_reservation.renewal_notification_month_sent_at = None
            new_hml_reservation.is_paid_at = None
            new_hml_reservation.is_paid = False
            new_hml_reservation.renewal_code = None
            new_hml_reservation.end_notification_sent_at = None
            new_hml_reservation.key_return_notification_sent_at = None
            new_hml_reservation.save()
            # Build the Paytrail payment request; callbacks land on /api/purchase/.
            location = '//%s' % '/api/purchase/'
            url = request.build_absolute_uri(location)
            purchase_code = hashlib.sha1(str(new_hml_reservation.reservation.created_at).encode('utf-8') + str(new_hml_reservation.pk).encode('utf-8')).hexdigest()
            contact = PaytrailContact(**new_hml_reservation.get_payment_contact_data())
            product = PaytrailProduct(**new_hml_reservation.get_payment_product_data())
            url_set = PaytrailUrlset(success_url=url + '?success=' + purchase_code, failure_url=url + '?failure=' + purchase_code, notification_url=url + '?notification=' + purchase_code)
            purchase = Purchase.objects.create(hml_reservation=new_hml_reservation,
                purchase_code=purchase_code,
                reserver_name=new_hml_reservation.reservation.reserver_name,
                reserver_email_address=new_hml_reservation.reservation.reserver_email_address,
                reserver_phone_number=new_hml_reservation.reservation.reserver_phone_number,
                reserver_address_street=new_hml_reservation.reservation.reserver_address_street,
                reserver_address_zip=new_hml_reservation.reservation.reserver_address_zip,
                reserver_address_city=new_hml_reservation.reservation.reserver_address_city,
                vat_percent=product.get_data()['vat'],
                price_vat=product.get_data()['price'],
                product_name=product.get_data()['title'])
            payment = PaytrailPaymentExtended(
                service='VARAUS',
                product='VENEPAIKKA',
                product_type=product.get_data()['berth_type'],
                order_number=purchase.pk,
                contact=contact,
                urlset=url_set
            )
            payment.add_product(product)
            # NOTE(review): 'payer_parson_lastname' and 'payer_person_add_postal_code'
            # look misspelled but are keyword names consumed by PaytrailArguments;
            # do not rename without checking its signature.
            query_string = PaytrailArguments(
                merchant_auth_hash=settings.PAYTRAIL_MERCHANT_SECRET,
                merchant_id=settings.PAYTRAIL_MERCHANT_ID,
                url_success=url + '?success=' + purchase_code,
                url_cancel=url + '?failure=' + purchase_code,
                url_notify=url + '?notification=' + purchase_code,
                order_number=payment.get_data()['orderNumber'],
                params_in=(
                    'MERCHANT_ID,'
                    'URL_SUCCESS,'
                    'URL_CANCEL,'
                    'URL_NOTIFY,'
                    'ORDER_NUMBER,'
                    'PARAMS_IN,'
                    'PARAMS_OUT,'
                    'PAYMENT_METHODS,'
                    'ITEM_TITLE[0],'
                    'ITEM_ID[0],'
                    'ITEM_QUANTITY[0],'
                    'ITEM_UNIT_PRICE[0],'
                    'ITEM_VAT_PERCENT[0],'
                    'ITEM_DISCOUNT_PERCENT[0],'
                    'ITEM_TYPE[0],'
                    'PAYER_PERSON_PHONE,'
                    'PAYER_PERSON_EMAIL,'
                    'PAYER_PERSON_FIRSTNAME,'
                    'PAYER_PERSON_LASTNAME,'
                    'PAYER_PERSON_ADDR_STREET,'
                    'PAYER_PERSON_ADDR_POSTAL_CODE,'
                    'PAYER_PERSON_ADDR_TOWN'
                ),
                params_out='PAYMENT_ID,TIMESTAMP,STATUS',
                payment_methods='1,2,3,5,6,10,50,51,52,61',
                item_title=product.get_data()['title'],
                item_id=product.get_data()['code'],
                item_quantity=product.get_data()['amount'],
                item_unit_price=product.get_data()['price'],
                item_vat_percent=product.get_data()['vat'],
                item_discount_percent=product.get_data()['discount'],
                item_type=product.get_data()['type'],
                payer_person_phone=contact.get_data()['mobile'],
                payer_person_email=contact.get_data()['email'],
                payer_person_firstname=contact.get_data()['firstName'],
                payer_parson_lastname=contact.get_data()['lastName'],
                payer_person_addr_street=contact.get_data()['address']['street'],
                payer_person_add_postal_code=contact.get_data()['address']['postalCode'],
                payer_person_addr_town=contact.get_data()['address']['postalOffice'],
            )
            return Response({'query_string': query_string.get_data()}, status=status.HTTP_200_OK)
        elif request.data.get('reservation_id'):
            # Staff flow: clone-and-renew by primary key, no payment involved.
            if not request.user.is_authenticated() or not request.user.is_staff:
                raise PermissionDenied(_('This API is only for authenticated users'))
            old_hml_reservation_qs = HMLReservation.objects.filter(pk=request.data.get('reservation_id'), reservation__state=Reservation.CONFIRMED, reservation__end__gte=timezone.now()).exclude(child__reservation__state=Reservation.CONFIRMED).distinct()
            if len(old_hml_reservation_qs) != 1:
                raise ValidationError(_('Invalid reservation id'))
            old_hml_reservation = old_hml_reservation_qs.first()
            old_reservation = old_hml_reservation.reservation
            new_start = old_reservation.end
            new_end = new_start + timedelta(days=365)
            parent_id = old_hml_reservation.pk
            # Same pk = None cloning trick as the citizen branch above.
            old_reservation.pk = None
            old_hml_reservation.pk = None
            new_reservation = old_reservation
            new_hml_reservation = old_hml_reservation
            new_reservation.begin = new_start
            new_reservation.end = new_end
            overlaps_existing = HMLReservation.objects.filter(reservation__begin__lt=new_end, reservation__end__gt=new_start, berth=new_hml_reservation.berth, reservation__state=Reservation.CONFIRMED).exists()
            if overlaps_existing:
                raise serializers.ValidationError(_('New reservation overlaps existing reservation'))
            if request.data.get('reserver_email_address'):
                new_reservation.reserver_email_address = request.data.get('reserver_email_address')
            if request.data.get('reserver_phone_number'):
                new_reservation.reserver_phone_number = request.data.get('reserver_phone_number')
            if request.data.get('reserver_address_street'):
                new_reservation.reserver_address_street = request.data.get('reserver_address_street')
            if request.data.get('reserver_address_zip'):
                new_reservation.reserver_address_zip = request.data.get('reserver_address_zip')
            if request.data.get('reserver_address_city'):
                new_reservation.reserver_address_city = request.data.get('reserver_address_city')
            new_reservation.save()
            new_hml_reservation.reservation = new_reservation
            new_hml_reservation.parent_id = parent_id
            new_hml_reservation.renewal_notification_day_sent_at = None
            new_hml_reservation.renewal_notification_week_sent_at = None
            new_hml_reservation.renewal_notification_month_sent_at = None
            new_hml_reservation.is_paid_at = None
            new_hml_reservation.is_paid = False
            new_hml_reservation.renewal_code = None
            new_hml_reservation.end_notification_sent_at = None
            new_hml_reservation.key_return_notification_sent_at = None
            new_hml_reservation.save()
            serializer = HMLReservationSerializer(new_hml_reservation, context={'request': request})
            return Response(serializer.data, status=status.HTTP_200_OK)

    def get(self, request, format=None):
        """Return the reservation attached to a (still valid) renewal code."""
        if request.GET.get('code', None):
            code = request.GET.get('code', None)
            reservation_qs = HMLReservation.objects.filter(reservation__state=Reservation.CONFIRMED, renewal_code=code).exclude(child__reservation__state=Reservation.CONFIRMED).distinct()
            if len(reservation_qs) != 1:
                raise ValidationError(_('Invalid renewal code'))
            reservation = reservation_qs.first()
            # Expired reservations can no longer be renewed.
            if reservation.reservation.end < timezone.now():
                return Response(None, status=status.HTTP_404_NOT_FOUND)
            serializer = HMLReservationSerializer(reservation, context={'request': request})
            return Response(serializer.data, status=status.HTTP_200_OK)
        return Response(None, status=status.HTTP_404_NOT_FOUND)
class SmsView(APIView):
    """Twilio SMS delivery-status callback endpoint.

    Unauthenticated; the request is validated by comparing the posted
    AccountSid against the configured Twilio account SID.
    """
    permission_classes = (permissions.AllowAny,)

    def post(self, request, format=None):
        if request.data.get('AccountSid') != settings.TWILIO_ACCOUNT_SID:
            raise PermissionDenied(_('Authentication failed'))
        # Only the final 'delivered' status flips the success flag.
        if request.data.get('SmsStatus') == 'delivered':
            message = SMSMessage.objects.get(twilio_id=request.data.get('SmsSid'))
            message.success = True
            message.save()
        return Response(status=status.HTTP_204_NO_CONTENT)
# Expose the HML reservation endpoints under the 'hml_reservation' route.
register_view(HMLReservationViewSet, 'hml_reservation')
|
"""Tests for command handling"""
from __future__ import annotations
import argparse
from contextlib import contextmanager
import os
import pytest
from sopel import config
from sopel.cli.run import (
build_parser,
get_configuration,
get_pid_filename,
get_running_pid,
)
# Minimal Sopel configuration fed to ``configfactory`` by the
# get_pid_filename tests below.
TMP_CONFIG = """
[core]
owner = testnick
nick = TestBot
enable = coretasks
"""
@contextmanager
def cd(newdir):
    """Temporarily change the working directory, restoring it on exit."""
    original = os.getcwd()
    os.chdir(os.path.expanduser(newdir))
    try:
        yield
    finally:
        # Always return to the previous directory, even on error.
        os.chdir(original)
@pytest.fixture
def config_dir(tmpdir):
    """Pytest fixture used to generate a temporary configuration directory"""
    test_dir = tmpdir.mkdir("config")
    for filename in ('config.cfg', 'extra.ini', 'module.cfg', 'README'):
        test_dir.join(filename).write('')
    return test_dir


@pytest.fixture(autouse=True)
def default_empty_config_env(monkeypatch):
    """Pytest fixture used to ensure dev ENV does not bleed into tests"""
    for variable in ("SOPEL_CONFIG", "SOPEL_CONFIG_DIR"):
        monkeypatch.delenv(variable, raising=False)
def test_build_parser_start():
    """Assert parser's namespace exposes start's options (default values)"""
    options = build_parser().parse_args(['start'])
    assert isinstance(options, argparse.Namespace)
    for attribute in ('config', 'configdir', 'daemonize', 'quiet'):
        assert hasattr(options, attribute)
    assert options.config == 'default'
    assert options.configdir == config.DEFAULT_HOMEDIR
    assert options.daemonize is False
    assert options.quiet is False


def test_build_parser_start_config():
    """Both the short and long config flags are honoured."""
    parser = build_parser()
    for flag in ('-c', '--config'):
        assert parser.parse_args(['start', flag, 'custom']).config == 'custom'


def test_build_parser_start_configdir():
    assert build_parser().parse_args(
        ['start', '--config-dir', 'custom']).configdir == 'custom'


def test_build_parser_start_daemonize():
    """Both daemonize spellings set the flag."""
    parser = build_parser()
    for flag in ('-d', '--fork'):
        assert parser.parse_args(['start', flag]).daemonize is True


def test_build_parser_start_quiet():
    assert build_parser().parse_args(['start', '--quiet']).quiet is True
def test_build_parser_stop():
    """Assert parser's namespace exposes stop's options (default values)"""
    options = build_parser().parse_args(['stop'])
    assert isinstance(options, argparse.Namespace)
    for attribute in ('config', 'configdir', 'kill', 'quiet'):
        assert hasattr(options, attribute)
    assert options.config == 'default'
    assert options.configdir == config.DEFAULT_HOMEDIR
    assert options.kill is False
    assert options.quiet is False


def test_build_parser_stop_config():
    """Both the short and long config flags are honoured."""
    parser = build_parser()
    for flag in ('-c', '--config'):
        assert parser.parse_args(['stop', flag, 'custom']).config == 'custom'


def test_build_parser_stop_configdir():
    assert build_parser().parse_args(
        ['stop', '--config-dir', 'custom']).configdir == 'custom'


def test_build_parser_stop_kill():
    """Both kill spellings set the flag."""
    parser = build_parser()
    for flag in ('-k', '--kill'):
        assert parser.parse_args(['stop', flag]).kill is True


def test_build_parser_stop_quiet():
    assert build_parser().parse_args(['stop', '--quiet']).quiet is True
def test_build_parser_restart():
    """Assert parser's namespace exposes restart's options (default values)"""
    options = build_parser().parse_args(['restart'])
    assert isinstance(options, argparse.Namespace)
    for attribute in ('config', 'configdir', 'quiet'):
        assert hasattr(options, attribute)
    assert options.config == 'default'
    assert options.configdir == config.DEFAULT_HOMEDIR
    assert options.quiet is False


def test_build_parser_restart_config():
    """Both the short and long config flags are honoured."""
    parser = build_parser()
    for flag in ('-c', '--config'):
        assert parser.parse_args(['restart', flag, 'custom']).config == 'custom'


def test_build_parser_restart_configdir():
    assert build_parser().parse_args(
        ['restart', '--config-dir', 'custom']).configdir == 'custom'


def test_build_parser_restart_quiet():
    assert build_parser().parse_args(['restart', '--quiet']).quiet is True
def test_build_parser_configure():
    """Assert parser's namespace exposes configure's options (default values)"""
    options = build_parser().parse_args(['configure'])
    assert isinstance(options, argparse.Namespace)
    for attribute in ('config', 'configdir', 'plugins'):
        assert hasattr(options, attribute)
    assert options.config == 'default'
    assert options.configdir == config.DEFAULT_HOMEDIR
    assert options.plugins is False


def test_build_parser_configure_config():
    """Both the short and long config flags are honoured."""
    parser = build_parser()
    for flag in ('-c', '--config'):
        assert parser.parse_args(['configure', flag, 'custom']).config == 'custom'


def test_build_parser_configure_configdir():
    assert build_parser().parse_args(
        ['configure', '--config-dir', 'custom']).configdir == 'custom'


def test_build_parser_configure_modules():
    assert build_parser().parse_args(['configure', '--plugins']).plugins is True
def test_get_configuration(tmpdir):
    """Assert function returns a Sopel ``Config`` object"""
    working_dir = tmpdir.mkdir("working")
    working_dir.join('default.cfg').write('\n'.join(['[core]',
                                                     'owner = TestName']))
    options = build_parser().parse_args(['start', '-c', 'default.cfg'])
    # The config name is relative, so it must be resolved from working_dir.
    with cd(working_dir.strpath):
        settings = get_configuration(options)
        assert isinstance(settings, config.Config)
        assert settings.core.owner == 'TestName'
def test_get_pid_filename_default(configfactory):
    """Assert function returns the default filename from given ``pid_dir``"""
    settings = configfactory('default.cfg', TMP_CONFIG)
    assert get_pid_filename(settings, '/pid') == '/pid/sopel.pid'


def test_get_pid_filename_named(configfactory):
    """Assert function returns a specific filename when config (with extension) is set"""
    settings = configfactory('test.cfg', TMP_CONFIG)
    assert get_pid_filename(settings, '/pid') == '/pid/sopel-test.pid'


def test_get_pid_filename_ext_not_cfg(configfactory):
    """Assert function keeps the config file extension when it is not cfg"""
    settings = configfactory('test.ini', TMP_CONFIG)
    assert get_pid_filename(settings, '/pid') == '/pid/sopel-test.ini.pid'
def test_get_running_pid(tmpdir):
    """Assert function retrieves an integer from a given filename"""
    pid_file = tmpdir.join('sopel.pid')
    pid_file.write('7814')
    assert get_running_pid(pid_file.strpath) == 7814


def test_get_running_pid_not_integer(tmpdir):
    """Assert function returns None when the content is not an Integer"""
    pid_file = tmpdir.join('sopel.pid')
    for content in ('', 'abcdefg'):
        pid_file.write(content)
        assert get_running_pid(pid_file.strpath) is None


def test_get_running_pid_no_file(tmpdir):
    """Assert function returns None when there is no such file"""
    missing = tmpdir.join('sopel.pid')
    assert get_running_pid(missing.strpath) is None
|
<filename>python_indent.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2013 <NAME> <<EMAIL>>
# This file is released under the terms of the MIT license.
""" Python PEP8 indent
Sublime text plugin for automatic PEP8-style indent.
"""
import re
import traceback
from itertools import izip
try:
    import sublime
    import sublime_plugin
except ImportError:
    # hack for import outside of sublime_text, for testing purposes:
    # stand-ins mimic the minimal Sublime API surface the module touches.
    class FakeSublime(object):
        # Mirrors sublime.Region as a plain (a, b) tuple.
        def Region(self, a, b):
            return tuple((a, b))

        # Settings are unavailable outside the editor.
        def load_settings(self, *args, **kwargs):
            return None
    sublime = FakeSublime()
    sublime_plugin = type('sublime_plugin', (), {'EventListener': object})
    sublime_plugin.TextCommand = object
    MAX_LINE_LOOKUP_COUNT = 1000
else:
    # maximum number of previous lines to lookup
    settings = sublime.load_settings('python_indent.sublime-settings')
    MAX_LINE_LOOKUP_COUNT = settings.get("max_line_lookup_count", 1000)
## new line indent
# Iterate (index, item) pairs from the end of a sequence, lazily (Python 2).
reverse_enumerate = lambda l: izip(reversed(xrange(len(l))), reversed(l))
# A line opening a new suite: the following line indents one level.
newblock_start_pattern = re.compile(
    r'^\s*(class|def|elif|else|except|finally|for|if|try|with|while)\b.*$')
# Negative lookbehind fragment; nested into itself below — presumably to
# skip backslash-escaped quote characters up to a few escapes deep (TODO
# confirm the effective nesting depth).
escape_pattern = r'(?<!(%s\\))'
string_regex = re.compile(r"""(?P<quote>["'])(?P<str>.*?)(%s)(?P=quote)""" % (
    escape_pattern % escape_pattern % escape_pattern % escape_pattern % ''))
# group(1): code portion of a line; group(3): comment body when present.
comment_regex = re.compile(r"([^#]*)(#(.*))?")
# Statements after which the next line conventionally dedents one level.
stopexecution_pattern = re.compile(
    r'^\s*(pass\b|return\b.*|continue\b|break\b|raise\b.*|yield\b.*)\s*$')
blankline_pattern = re.compile(r'^\s*$')
def _replace_reserved_char(s):
"""Return the given string with all its brackets and # replaced by _."""
for c in '()[]{}#':
s = s.replace(c, '_')
return s
def _filter_quote(m):
    """Rebuild a matched string literal with its reserved characters masked."""
    quote = m.group('quote')
    return quote + _replace_reserved_char(m.group('str')) + quote
def line_filter(s):
    """Remove brackets from literal string in lines. Remove comments."""
    without_strings = string_regex.sub(_filter_quote, s)
    return comment_regex.search(without_strings).group(1)
def get_line_current_indent(string, tab_size=4):
    """Return the indentation width of *string* in spaces.

    Counts leading blanks up to the first non-blank character; a tab
    contributes *tab_size* columns (default 4). A fully blank string
    yields the width of all its characters.
    """
    width = 0
    for ch in string:
        if ch == '\t':
            width += tab_size
        elif ch == ' ':
            width += 1
        else:
            break
    return width
def unmatched_bracket_lookup(s, in_counter=None):
    """Look for an unmatched bracket ((), [] and {}) in the given string.

    Arguments
    ---------
    s: string to analyze
    in_counter: dict containing the count of brackets from a following line

    Return
    ------
    ('balanced', None) if there is no unbalanced bracket.
    ('unmatch_close', counter_dict) if the string contain an unmatched closing
        bracket. counter_dict is the dict containing the count of brackets
    ('unmatch_open', index) if the string contains an unmatched opening bracket
        index is the last unmatched opening bracket index (int)
    If there is both opening and closing unmatched brackets, the last one takes
    the priority.
    """
    matching = {')': '(', '}': '{', ']': '['}
    counter = {'(': 0, '{': 0, '[': 0}
    if in_counter:
        counter.update(in_counter)
    # Scan right-to-left so the *last* unmatched opening bracket wins.
    # (Inlined the reversed enumeration: the old reverse_enumerate lambda
    # relied on Python-2-only izip/xrange.)
    for i in range(len(s) - 1, -1, -1):
        c = s[i]
        if c in '({[':
            # '==' instead of 'is': identity comparison of ints is a
            # CPython-cache accident, not a contract.
            if counter[c] == 0:
                return ('unmatch_open', i)
            counter[c] -= 1
        elif c in ')}]':
            counter[matching[c]] += 1
    # 'not any(...)' instead of comparing values() to a list: the old
    # check depended on dict ordering and breaks on Python 3 views.
    if not any(counter.values()):
        return ('balanced', None)
    return ('unmatch_close', counter)
def string_to_next_line_indent(line, brackets_counter=None):
    """Return the indentation for a line following the given string
    If the given line is not enough to determine the indentation (unmatched
    closing brackets), can be call again, with the brackets_counter
    returned as argument.

    Argument
    --------
    line: current line (string).
    brackets_counter: dict containing a brackets count (as returned by the
        function).

    Return
    ------
    ('increase_level', i) if the indentation level should be increased. i is
        the number of level increase needed.
    ('decrease_level', i) if the indentation level should be decreased. i is
        the number of level decrease needed
    ('absolute', i) to indicate an absolute level of indentation. i is the
        number of spaces to insert before the line.
    ('unchanged', 0) if the indentation level should not be changed
    ('unmatched_closing_bracket', brackets_counter) if there is unmatched
        closing brackets. The function must be called again with the previous
        line.
    """
    status, param = unmatched_bracket_lookup(line, brackets_counter)
    if status == 'balanced':
        if newblock_start_pattern.match(line):
            return ('increase_level', 1)
        elif stopexecution_pattern.match(line):
            return ('decrease_level', 1)
        else:
            return ('unchanged', None)
    elif status == 'unmatch_open':
        # BUG FIX: was `param is len(line)-1`. Int identity only works for
        # CPython's small-int cache (<= 256), so lines longer than ~257
        # characters silently took the wrong branch. Use equality.
        if param == len(line) - 1:
            # Bracket is the last character: open a hanging indent.
            if newblock_start_pattern.match(line):
                return ('increase_level', 2)
            else:
                return ('increase_level', 1)
        else:
            # Align with the column just after the opening bracket.
            return ('absolute', param + 1)
    elif status == 'unmatch_close':
        return 'unmatched_closing_bracket', param
    else:
        # print() call form is valid on both Python 2 and 3.
        print("unexpected return value from unmatched_bracket_lookup")
def get_new_line_indent(view, cursor):
    """Return the proper indentation of a new line inserted at the cursor.

    Arguments
    ---------
    view: sublime.View
    cursor: sublime text's cursor (int)

    Return
    ------
    Number of spaces to insert before a new line inserted at the cursor.
    """
    tab_size = view.settings().get('tab_size')
    start_line = view.line(cursor).begin()
    line = line_filter(view.substr(sublime.Region(start_line, cursor)))
    new_indent, indent_param = string_to_next_line_indent(line)
    line_lookup_count = MAX_LINE_LOOKUP_COUNT
    # Walk backward while brackets stay unbalanced, bounded by the cap.
    while new_indent == 'unmatched_closing_bracket' and line_lookup_count:
        line_lookup_count -= 1
        # '==' instead of 'is': int identity comparison is unreliable.
        if start_line == 0 or not line_lookup_count:
            # Ran out of buffer (or budget) without balancing the brackets.
            new_indent = 'error'
            indent_param = None
            break
        line = view.line(start_line - 1)
        start_line = line.begin()
        line = line_filter(view.substr(line))
        if blankline_pattern.match(line):
            # Blank lines carry no bracket information; skip them.
            continue
        new_indent, indent_param = string_to_next_line_indent(
            line, indent_param)
    current_indent = get_line_current_indent(
        line, view.settings().get('tab_size'))
    if new_indent == 'absolute':
        return indent_param
    elif new_indent == 'unchanged':
        return current_indent
    elif new_indent == 'increase_level':
        return current_indent + tab_size * indent_param
    elif new_indent == 'decrease_level':
        return max(0, current_indent - tab_size * indent_param)
    else:
        # 'error' (or anything unexpected): fall back to column 0.
        return 0
class NewPythonLine(sublime_plugin.TextCommand):
    """Insert a properly indented python line.

    This command should be mapped to any key or shortcut used to add a
    new line.
    """
    def run(self, edit, register='', full_line=False, forward=True):
        try:
            new_sel = []
            for region in self.view.sel():
                # set the insert point
                if full_line:
                    if forward:
                        cursor = self.view.line(region).end()
                    else:
                        cursor = self.view.line(region).begin() - 1
                else:
                    cursor = region.begin()
                indent = get_new_line_indent(self.view, cursor)
                if self.view.line_endings() == 'Windows':
                    new_line_char = '\r\n'
                elif self.view.line_endings() == 'CR':
                    new_line_char = '\r'
                else:
                    new_line_char = '\n'  # Linux is default
                row, col = self.view.rowcol(cursor)
                if not full_line:
                    # Move the remainder of the line onto the new,
                    # re-indented line.
                    to_replace = sublime.Region(
                        cursor, self.view.line(region).end())
                    if region.empty():
                        new_line_content = \
                            self.view.substr(to_replace).lstrip()
                    else:
                        # A non-empty selection is consumed by the newline.
                        new_line_content = self.view.substr(sublime.Region(
                            region.end(), to_replace.end())).lstrip()
                    self.view.replace(
                        edit, to_replace,
                        new_line_char + ' '*indent + new_line_content)
                    cursor += indent + 1
                    new_sel.append(sublime.Region(cursor, cursor))
                else:
                    self.view.insert(edit, cursor, new_line_char + ' '*indent)
                    cursor += indent + 1
                    new_sel.append(sublime.Region(cursor, cursor))
            if new_sel:
                self.view.sel().clear()
                for region in new_sel:
                    self.view.sel().add(region)
        except Exception:
            # fail safe: never leave the keystroke dead — fall back to a
            # plain newline. (Was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; print is now call-form so the
            # module stays importable on Python 3.)
            print(traceback.format_exc())
            if self.view.line_endings() == 'Windows':
                new_line_char = '\r\n'
            elif self.view.line_endings() == 'CR':
                new_line_char = '\r'
            else:
                new_line_char = '\n'  # Linux is default
            for sel in self.view.sel():
                self.view.insert(edit, sel.end(), new_line_char)
## deindent on keywords
def previous_keyword_lookup(view, cursor, keywords, ignore):
    """Search for a previous keyword.

    Arguments
    ---------
    view: sublime.View
    cursor: current sublime text cursor (int)
    keywords: keyword (string) or list of keywords to search
    ignore: list of keywords to ignore while scanning upward

    Return
    ------
    Indentation of the line with the searched keyword.
    If it is not found, return -1.
    """
    if isinstance(keywords, basestring):
        keywords = [keywords]
    tab_size = view.settings().get('tab_size')
    line_lookup_count = MAX_LINE_LOOKUP_COUNT
    kw_regex = re.compile(r'^\s*(%s)\b' % '|'.join(keywords))
    ignore_regex = re.compile(r'^\s*(%s)\b' % '|'.join(ignore))
    line = view.line(cursor)
    start_line = line.begin()
    max_indent = get_line_current_indent(view.substr(line), tab_size)
    while line_lookup_count:
        # '==' instead of 'is': int identity comparison is unreliable.
        if start_line == 0:
            return -1
        line_lookup_count -= 1
        line = view.line(start_line - 1)
        start_line = line.begin()
        str_line = view.substr(line)
        indent = get_line_current_indent(str_line, tab_size)
        if kw_regex.match(str_line):
            if indent <= max_indent:
                return indent
        elif not ignore_regex.match(str_line):
            # Any other statement caps how far left the keyword may sit.
            max_indent = min(indent - tab_size, max_indent)
            if max_indent < 0:
                return -1
    else:
        # Budget exhausted without a match (typo fixed; print call-form
        # keeps the module importable on Python 3).
        print("max line lookup reached")
        return -1
# Patterns used by PythonDeindenter to spot keywords that close a branch
# and must be re-aligned with their opening statement.
indent_regex = re.compile(r'^\s*')
else_pattern = re.compile(r'^\s*else\s*:')
finally_pattern = re.compile(r'^\s*finally\s*:')
except_pattern = re.compile(r'^\s*except\b')
elif_pattern = re.compile(r'^\s*elif\b')
class PythonDeindenter(sublime_plugin.EventListener):
    """Auto-deindentation on appropriated keywords.

    When the user finishes typing ``else:``, ``finally:``, ``except``/
    ``except:`` or ``elif ``, re-align that line with the matching
    opener (``if``, ``try``, ...) found by previous_keyword_lookup.
    """
    def change_indent(self, line, new_indent):
        """Return *line* re-indented to *new_indent* spaces."""
        # Parameter renamed from `str`, which shadowed the builtin.
        return indent_regex.sub(' '*new_indent, line, count=1)

    def on_modified(self, view):
        cmd, param, count = view.command_history(0, False)
        # Only react to plain typing that ends in ':' or ' '.
        if cmd != 'insert' or param['characters'][-1] not in ': ':
            return
        sel = view.sel()[0]  # XXX multi selection
        begin_line = view.substr(sublime.Region(view.line(sel).begin(),
                                                sel.end()))
        if sel.empty():
            pattern = ''
            if else_pattern.match(begin_line):
                if param['characters'].endswith(':'):
                    pattern = 'else'
                    align_with = ['if', 'except']
                    ignore = ['elif']
            elif finally_pattern.match(begin_line):
                if param['characters'].endswith(':'):
                    pattern = 'finally'
                    align_with = ['try']
                    ignore = ['except', 'else']
            elif except_pattern.match(begin_line):
                if (param['characters'].endswith(' ')
                        or param['characters'].endswith(':')):
                    pattern = 'except'
                    align_with = ['try']
                    ignore = ['except']
            elif elif_pattern.match(begin_line):
                if param['characters'].endswith(' '):
                    pattern = 'elif'
                    align_with = ['if']
                    ignore = ['elif']
            if pattern:
                str_line = view.substr(view.line(sel))
                indent = previous_keyword_lookup(view, sel.end(), align_with,
                                                 ignore)
                # '!=' instead of 'is not': int identity is unreliable.
                if indent != -1:
                    edit = view.begin_edit()
                    try:
                        new_line = self.change_indent(str_line, indent)
                        view.replace(edit, view.line(sel), new_line)
                    finally:
                        view.end_edit(edit)
|
<filename>lib/models/transformer_encoding.py
import copy
from turtle import forward
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional
from torch import Tensor
from .position_encoding import build_position_encoding, _onnx_nested_tensor_from_tensor_list
from .misc import NestedTensor
from einops import rearrange
class Reasoning(nn.Module):
    """Transformer-encoder reasoning head applied to a CNN feature map."""

    def __init__(self, d_model=256, nhead=4, num_encoder_layers=1,
                 dim_feedforward=2048, dropout=0.1, num_decoder_layers=6,
                 activation="relu", normalize_before=False,
                 return_intermediate_dec=False):
        super(Reasoning, self).__init__()
        layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,
                                        dropout, activation, normalize_before)
        final_norm = nn.LayerNorm(d_model) if normalize_before else None
        self.transformer_encoder = TransformerEncoder(layer,
                                                      num_encoder_layers,
                                                      final_norm)

    def forward(self, feature):
        # flatten NxCxHxW to HWxNxC, the layout nn.MultiheadAttention expects
        tokens = feature.flatten(2).permute(2, 0, 1)
        channels = feature.shape[1]
        height = feature.shape[2]
        # NOTE(review): a fresh position-encoding module is constructed on
        # every forward pass; if build_position_encoding ever holds learnable
        # parameters this should move to __init__ — confirm.
        position_embedding = build_position_encoding(channels)
        nested = _onnx_nested_tensor_from_tensor_list([feature])
        pos_embed = position_embedding(nested).flatten(2).permute(2, 0, 1)
        memory = self.transformer_encoder(tokens, pos=pos_embed)
        # Restore the spatial layout: (h w) n c -> n c h w.
        return rearrange(memory, '(h w) n c -> n c h w', h=height)
class TransformerEncoder(nn.Module):
    """Stack of *num_layers* independent copies of one encoder layer."""

    def __init__(self, encoder_layer, num_layers=1, norm=None):
        super(TransformerEncoder, self).__init__()
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(self, src,
                mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None):
        """Run *src* through every layer, then the optional final norm."""
        out = src
        for layer in self.layers:
            out = layer(out, src_mask=mask, pos=pos)
        return out if self.norm is None else self.norm(out)
class TransformerEncoderLayer(nn.Module):
    """DETR-style encoder layer: self-attention + FFN, pre- or post-norm."""

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False):
        super(TransformerEncoderLayer, self).__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Position-wise feed-forward network.
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # Positional encodings are added to queries/keys only, not values.
        return tensor if pos is None else tensor + pos

    def forward_post(self,
                     src,
                     src_mask: Optional[Tensor] = None,
                     src_key_padding_mask: Optional[Tensor] = None,
                     pos: Optional[Tensor] = None):
        """Post-norm variant: sublayer -> residual add -> LayerNorm."""
        query = key = self.with_pos_embed(src, pos)
        attn_out = self.self_attn(query, key, value=src, attn_mask=src_mask,
                                  key_padding_mask=src_key_padding_mask)[0]
        src = self.norm1(src + self.dropout1(attn_out))
        ffn_out = self.linear2(self.dropout(self.activation(self.linear1(src))))
        return self.norm2(src + self.dropout2(ffn_out))

    def forward_pre(self, src,
                    src_mask: Optional[Tensor] = None,
                    src_key_padding_mask: Optional[Tensor] = None,
                    pos: Optional[Tensor] = None):
        """Pre-norm variant: LayerNorm -> sublayer -> residual add."""
        normed = self.norm1(src)
        query = key = self.with_pos_embed(normed, pos)
        attn_out = self.self_attn(query, key, value=normed, attn_mask=src_mask,
                                  key_padding_mask=src_key_padding_mask)[0]
        src = src + self.dropout1(attn_out)
        normed = self.norm2(src)
        ffn_out = self.linear2(self.dropout(self.activation(self.linear1(normed))))
        return src + self.dropout2(ffn_out)

    def forward(self, src,
                src_mask: Optional[Tensor] = None,
                src_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None):
        """Dispatch to the pre-norm or post-norm implementation."""
        if self.normalize_before:
            return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
        return self.forward_post(src, src_mask, src_key_padding_mask, pos)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
|
<filename>test/test_bleu.py
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from collections import namedtuple
import pytest
import sacrebleu
from sacrebleu.metrics import BLEU
# Tolerance for floating-point score comparisons.
EPSILON = 1e-8

# common: clipped n-gram matches per order; total: n-gram counts per order.
Statistics = namedtuple('Statistics', ['common', 'total'])

# (hypotheses, references, expected BLEU in [0, 1]) triples.
test_raw_bleu_cases = [
    # This now returns 0.0 score (#141)
    (["this is a test", "another test"], [["ref1", "ref2"]], 0.0),
    (["this is a test"], [["this is a test"]], 1.0),
    (["this is a fest"], [["this is a test"]], 0.223606797749979)]

# test for README example with empty hypothesis strings check
_refs = [
    ['The dog bit the man.', 'It was not unexpected.', 'The man bit him first.'],
    ['The dog had bit the man.', 'No one was surprised.', 'The man had bitten the dog.'],
]
_hyps = [
    'The dog bit the man.',
    "It wasn't surprising.",
    'The man had just bitten him.',
]

# (hypotheses, references, corpus_bleu kwargs, expected score out of 100).
test_corpus_bleu_cases = [
    (_hyps, _refs, {}, 48.530827),  # test for default BLEU settings
    (('', '', ''), _refs, {}, 0.0),  # ensure that empty hypotheses are not removed
    (_hyps, _refs, {'tokenize': 'none'}, 49.1919566),
    (_hyps, _refs, {'tokenize': '13a'}, 48.530827),
    (_hyps, _refs, {'tokenize': 'intl'}, 43.91623493),
    (_hyps, _refs, {'smooth_method': 'none'}, 48.530827),
]

# (hypothesis, reference, expected score with smoothing offset, and without).
test_case_offset = [(["am I am a character sequence"], [["I am a symbol string sequence a a"]], 0.1555722182, 0)]

# statistic structure:
# - common counts
# - total counts
# - hyp_count
# - ref_count
test_case_statistics = [(["am I am a character sequence"], [["I am a symbol string sequence a a"]],
                         Statistics([4, 2, 1, 0], [6, 5, 4, 3]))]

test_case_scoring = [((Statistics([9, 7, 5, 3], [10, 8, 6, 4]), 11, 11), 0.8375922397)]

test_case_effective_order = [(["test"], [["a test"]], 0.3678794411714425),
                             (["a test"], [["a test"]], 1.0),
                             (["a little test"], [["a test"]], 0.03218297948685433)]

# testing that right score is returned for null statistics and different offsets
# format: stat, offset, expected score
test_case_degenerate_stats = [((Statistics([0, 0, 0, 0], [4, 4, 2, 1]), 0, 1), 0.0, 0.0),
                              ((Statistics([0, 0, 0, 0], [10, 11, 12, 0]), 14, 10), 0.0, 0.0),
                              ((Statistics([0, 0, 0, 0], [0, 0, 0, 0]), 0, 0), 0.0, 0.0),
                              ((Statistics([6, 5, 4, 0], [6, 5, 4, 3]), 6, 6), 0.0, 0.0),
                              ((Statistics([0, 0, 0, 0], [0, 0, 0, 0]), 0, 0), 0.1, 0.0),
                              ((Statistics([0, 0, 0, 0], [0, 0, 0, 0]), 1, 5), 0.01, 0.0)]
@pytest.mark.parametrize("hypotheses, references, expected_bleu", test_raw_bleu_cases)
def test_raw_bleu(hypotheses, references, expected_bleu):
    """Raw corpus BLEU (smooth floor .01) matches the expected score."""
    score = sacrebleu.raw_corpus_bleu(hypotheses, references, .01).score / 100
    assert abs(score - expected_bleu) < EPSILON


@pytest.mark.parametrize("hypotheses, references, kwargs, expected_bleu", test_corpus_bleu_cases)
def test_corpus_bleu(hypotheses, references, kwargs, expected_bleu):
    """corpus_bleu honours tokenizer and smoothing keyword arguments."""
    score = sacrebleu.corpus_bleu(hypotheses, references, **kwargs).score
    assert abs(score - expected_bleu) < EPSILON


@pytest.mark.parametrize("hypotheses, references, expected_bleu", test_case_effective_order)
def test_effective_order(hypotheses, references, expected_bleu):
    """Effective-order BLEU for very short hypotheses."""
    score = sacrebleu.raw_corpus_bleu(hypotheses, references, .01).score / 100
    assert abs(score - expected_bleu) < EPSILON


@pytest.mark.parametrize("hypothesis, reference, expected_stat", test_case_statistics)
def test_statistics(hypothesis, reference, expected_stat):
    """Clipped counts and totals match the hand-computed statistics."""
    result = sacrebleu.raw_corpus_bleu(hypothesis, reference, .01)
    assert Statistics(result.counts, result.totals) == expected_stat


@pytest.mark.parametrize("statistics, expected_score", test_case_scoring)
def test_scoring(statistics, expected_score):
    """compute_bleu reproduces the expected score from raw statistics."""
    stats, hyp_len, ref_len = statistics
    score = BLEU.compute_bleu(stats.common, stats.total, hyp_len, ref_len).score / 100
    assert abs(score - expected_score) < EPSILON
@pytest.mark.parametrize("hypothesis, reference, expected_with_offset, expected_without_offset",
                         test_case_offset)
def test_offset(hypothesis, reference, expected_with_offset, expected_without_offset):
    """A zero offset and the implicit 0.1 default produce different scores."""
    no_offset_score = sacrebleu.raw_corpus_bleu(hypothesis, reference, 0.0).score / 100
    assert abs(expected_without_offset - no_offset_score) < EPSILON
    # Both an explicit `None` and omitting the argument fall back to BLEU's
    # internal default of 0.1.
    for score in (sacrebleu.raw_corpus_bleu(hypothesis, reference, None).score / 100,
                  sacrebleu.raw_corpus_bleu(hypothesis, reference).score / 100):
        assert abs(expected_with_offset - score) < EPSILON


@pytest.mark.parametrize("statistics, offset, expected_score", test_case_degenerate_stats)
def test_degenerate_statistics(statistics, offset, expected_score):
    """All-zero match statistics floor-smooth to the expected score."""
    stats, hyp_len, ref_len = statistics
    score = BLEU.compute_bleu(stats.common, stats.total, hyp_len, ref_len,
                              smooth_method='floor', smooth_value=offset).score / 100
    assert score == expected_score
|
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QMainWindow, QTableWidget
from PyQt5.uic import loadUi
from src.Users.View.UserCardView import UserCardView
from src.Users.controllers.UserManager import UserManager
from src.Utils.UI import Popup, DeletePopup, EnableDisableUserPopup
class UserView(QMainWindow):
userM = UserManager()
    def __init__(self, widget):
        """Load the user-list UI, fill the table and wire up the widgets."""
        super(UserView, self).__init__()
        loadUi("../designer/Users/UserView.ui", self)
        self.users = self.userM.list()  # rows currently shown in the table
        self.widget = widget  # parent stacked widget used for navigation
        self.pop = ''  # keeps the last popup referenced so Qt doesn't GC it
        # Populate the table first, then connect signals.
        self.load_data()
        self.setup()
    def setup(self):
        """Connect the view's buttons, search fields and checkbox."""
        # Button actions
        self.userButton.clicked.connect(self.__go_new_user)
        self.backButton.clicked.connect(self.close)
        self.schedaButton.clicked.connect(self.__go_user_card)
        self.disableButton.clicked.connect(self.enable_disable)
        self.enableButton.clicked.connect(self.enable_disable)
        self.deleteButton.clicked.connect(self.delete)
        # Only one of disable/enable is visible at a time (see search()).
        self.disableButton.setVisible(True)
        self.enableButton.setVisible(False)
        #enableButton
        # Dynamic search: re-filter on every keystroke
        self.nameField.textChanged.connect(lambda: self.search())
        self.surnameField.textChanged.connect(lambda: self.search())
        self.style()
        # Checkbox toggles between enabled and disabled users
        self.checkBoxUserDisabled.stateChanged.connect(lambda: self.search())
def style(self):
self.schedaButton.setStyleSheet(open("../designer/style/ButtonTheme.txt", "r").read())
"""
Questo metodo setta lo stile della view
:return:
"""
self.userButton.setStyleSheet(open("../designer/style/ButtonTheme.txt", "r").read())
self.backButton.setStyleSheet(open("../designer/style/ButtonTheme.txt", "r").read())
self.userTable.setStyleSheet(open("../designer/style/TableTheme.txt", "r").read())
self.nameField.setStyleSheet(open("../designer/style/TextBoxTheme.txt", "r").read())
self.surnameField.setStyleSheet(open("../designer/style/TextBoxTheme.txt", "r").read())
def load_table(self, users):
"""
Questo metodo permette di rimpire la QTableWidget presente nella view con una lista di utenti
:param users:
:return: None
"""
row = 0
self.userTable.setRowCount(len(users))
for user in self.users:
self.userTable.setItem(row, 0, QtWidgets.QTableWidgetItem(user.name))
self.userTable.setItem(row, 1, QtWidgets.QTableWidgetItem(user.surname))
self.userTable.setItem(row, 2, QtWidgets.QTableWidgetItem(user.fiscal_code))
self.userTable.setItem(row, 3, QtWidgets.QTableWidgetItem(user.city))
row = row + 1
self.userTable.setEditTriggers(QTableWidget.NoEditTriggers)
def load_data(self, users=None):
if users is None:
self.users = self.userM.list(self.checkBoxUserDisabled.isChecked())
self.load_table(self.users)
else:
self.load_table(users)
# Region 'User Operation'
def search(self):
self.disableButton.setVisible(not self.checkBoxUserDisabled.isChecked())
self.enableButton.setVisible(self.checkBoxUserDisabled.isChecked())
if (self.nameField.text() == '') and (self.surnameField.text() == ''):
self.load_data()
# Search User by name
elif (self.nameField.text() != '') and (self.surnameField.text() == ''):
self.load_data_research(self.userM.findName(self.nameField.text(), self.checkBoxUserDisabled.isChecked()))
# Search User by surname
elif (self.nameField.text() == '') and (self.surnameField.text() != ''):
self.load_data_research(self.userM.findSurname(self.surnameField.text(), self.checkBoxUserDisabled.isChecked()))
# Search User by both
elif (self.nameField.text() != '') and (self.surnameField.text() != ''):
self.load_data_research(self.userM.findNameSurname(self.nameField.text(), self.surnameField.text(), self.checkBoxUserDisabled.isChecked()))
def delete(self):
rowtable = self.userTable.currentRow()
if rowtable == -1:
self.show_popup()
else:
self.pop = DeletePopup(self.delete_user)
self.pop.show()
def enable_disable(self):
rowtable = self.userTable.currentRow()
if rowtable == -1:
self.show_popup()
else:
if self.checkBoxUserDisabled.isChecked():
text = "Sicuro di voler abilitare l' utente?"
else:
text = "Sicuro di voler disabilitare l' utente?"
self.pop = EnableDisableUserPopup(self.enable_disable_user, text)
self.pop.show()
# endregion
def load_data_research(self, users):
"""
Questo metodo riempe la tabella con quegli utenti che sono il risultato della ricerca
:param users:
:return: None
"""
self.users = users
self.load_table(self.users)
def delete_user(self):
"""
Questo metodo permette di rimuovere l'utente selezionato dal sistema
:return: None
"""
row = self.userTable.currentRow()
if self.userM.count_movement_with_the_same_user_id(self.users[row].id) == 0:
self.userM.delete(self.users[row].id)
self.users.remove(self.users[row])
self.userTable.removeRow(row)
else:
self.pop = Popup("Utente già presente in almeno 1 movimento.")
self.pop.show()
def enable_disable_user(self):
"""
Questo metodo permette di disabilitare l'utente selezionato dal sistema
:return: None
"""
row = self.userTable.currentRow()
#self.userM.delete(self.users[row].id)
self.users[row].disabled = not self.checkBoxUserDisabled.isChecked()
self.userM.set(self.users[row])
self.users.remove(self.users[row])
self.userTable.removeRow(row)
def show_popup(self):
self.pop = Popup("Selezionare un utente")
self.pop.show()
# Region 'View Links'
def __go_new_user(self):
"""
Questo metodo consente di andare nella view che permette di creare un nuovo utente
:return: None
"""
user = None
self.view = UserCardView(self.widget, user, self.load_data)
self.view.show()
def __go_user_card(self):
"""
Questo metodo consente di andare nella view che permette di visualizzare le informazioni
dell'utente selezionato a schermo
:return: None
"""
rowtable = self.userTable.currentRow()
if rowtable == -1:
self.show_popup()
else:
user = self.users[rowtable]
self.view = UserCardView(self.widget, user, self.load_data)
self.view.show()
# endregion
|
<reponame>DeepRank/DeepRank_VariantPred<gh_stars>0
import os
from tempfile import mkdtemp, mkstemp
from shutil import rmtree
import h5py
import numpy
import torch.optim as optim
from nose.tools import ok_
from deeprank.models.variant import PdbVariantSelection
from deeprank.generate.DataGenerator import DataGenerator
from deeprank.learn.DataSet import DataSet
from deeprank.learn.NeuralNet import NeuralNet
from deeprank.learn.model3d import cnn_reg
from deeprank.domain.amino_acid import valine, cysteine, serine
import deeprank.config
deeprank.config.DEBUG = True
def test_learn():
    """Smoke-test deeprank's full generate-then-learn pipeline.

    It doesn't test any particular feature or target classes, and the
    result of the learning is not verified: the test only runs the code
    to be sure there are no exceptions thrown.
    """
    # Dummy feature/target modules resolved from the test package.
    feature_modules = ["test.feature.feature1", "test.feature.feature2"]
    target_modules = ["test.target.target1"]
    # Van der Waals-style per-element radii used for density mapping.
    atomic_densities = {'C': 1.7, 'N': 1.55, 'O': 1.52, 'S': 1.8}
    grid_info = {
        'number_of_points': [30,30,30],
        'resolution': [1.,1.,1.],
        'atomic_densities': atomic_densities,
    }
    # Two variants: a single-chain PDB and a multi-chain one (PSSM per chain).
    variants = [PdbVariantSelection("test/101m.pdb", "A", 10, valine, cysteine, {"A": "test/101M.A.pdb.pssm"},
                                    protein_accession="P02144", protein_residue_number=10),
                PdbVariantSelection("test/data/pdb/5EYU/5EYU.pdb", "A", 8, serine, cysteine, {"A": "test/data/pssm/5EYU/5eyu.A.pdb.pssm",
                                                                                             "B": "test/data/pssm/5EYU/5eyu.B.pdb.pssm",
                                                                                             "C": "test/data/pssm/5EYU/5eyu.C.pdb.pssm",
                                                                                             "D": "test/data/pssm/5EYU/5eyu.D.pdb.pssm"},
                                    protein_accession="Q9L4P8")]
    work_dir_path = mkdtemp()
    try:
        hdf5_path = os.path.join(work_dir_path, "test.hdf5")
        # data_augmentation has been set to a high number, so that
        # the train, valid and test set can be large enough.
        data_generator = DataGenerator(variants, data_augmentation=25,
                                       compute_targets=target_modules,
                                       compute_features=feature_modules,
                                       hdf5=hdf5_path)
        data_generator.create_database()
        data_generator.map_features(grid_info)
        dataset = DataSet(hdf5_path, grid_info=grid_info,
                          select_feature='all',
                          select_target='target1',
                          normalize_features=False)
        # Sanity checks that the generated database is non-empty and readable.
        ok_(len(dataset) > 0)
        ok_(dataset[0] is not None)
        net_output_dir_path = os.path.join(work_dir_path, 'net-output')
        neural_net = NeuralNet(dataset, cnn_reg, model_type='3d',task='reg',
                               cuda=False, plot=True, outdir=net_output_dir_path)
        neural_net.optimizer = optim.SGD(neural_net.net.parameters(),
                                         lr=0.001,
                                         momentum=0.9,
                                         weight_decay=0.005)
        # Relative name: NeuralNet writes it under its outdir (see check below).
        epoch_data_path = "epoch_data.hdf5"
        neural_net.train(nepoch = 50, divide_trainset=0.8, train_batch_size = 5, num_workers=0, hdf5=epoch_data_path)
        # Check the contents of the variant data output
        with h5py.File(os.path.join(work_dir_path, "net-output", epoch_data_path), 'r') as f5:
            variant_data = f5['epoch_0000/train/variant'][()]
            assert len(variant_data.shape) == 2, "unexpected variant data shape: {}".format(variant_data.shape)
            assert variant_data.shape[1] == 7, "unexpected variant data row format: {}".format(variant_data[0, :])
            assert variant_data[0, 0].decode().lower().endswith(".pdb"), "unexpected structure {}".format(variant_data[0, 0])
    finally:
        # Always remove the scratch directory, even on failure.
        rmtree(work_dir_path)
def test_plot_mcc():
    """Smoke-test NeuralNet.plot_mcc against stored epoch data."""
    handle, plot_path = mkstemp(prefix="plot-mcc", suffix=".png")
    # mkstemp returns an open file descriptor we do not need; close it now.
    os.close(handle)
    try:
        with h5py.File("test/data/epoch_data.hdf5", "r") as epoch_file:
            NeuralNet.plot_mcc(epoch_file, plot_path)
    finally:
        if os.path.isfile(plot_path):
            os.remove(plot_path)
|
import streamlit as st
st.set_page_config(layout="wide")
import argparse
import numpy as np
import torch
from additional_utils.models import LSeg_MultiEvalModule
from modules.lseg_module import LSegModule
from PIL import Image
import matplotlib.pyplot as plt
from encoding.models.sseg import BaseNet
import matplotlib.patches as mpatches
import torchvision.transforms as transforms
def get_new_pallete(num_cls):
    """Build a flat [r0, g0, b0, r1, g1, b1, ...] palette for num_cls classes.

    Each class index is expanded bit-by-bit into an RGB colour, taking three
    bits per round and shifting them into decreasing bit positions (the
    classic PASCAL-VOC colour-map construction).
    """
    pallete = [0] * (num_cls * 3)
    for cls_idx in range(num_cls):
        red = green = blue = 0
        remaining = cls_idx
        shift = 7
        while remaining > 0:
            red |= ((remaining >> 0) & 1) << shift
            green |= ((remaining >> 1) & 1) << shift
            blue |= ((remaining >> 2) & 1) << shift
            shift -= 1
            remaining >>= 3
        pallete[cls_idx * 3 + 0] = red
        pallete[cls_idx * 3 + 1] = green
        pallete[cls_idx * 3 + 2] = blue
    return pallete
def get_new_mask_pallete(npimg, new_palette, out_label_flag=False, labels=None):
    """Get image color pallete for visualizing masks.

    :param npimg: integer label mask (numpy array)
    :param new_palette: flat RGB palette as produced by get_new_pallete()
    :param out_label_flag: when True, also build matplotlib legend patches
    :param labels: class names indexed by label id (required when
        out_label_flag is True)
    :return: (PIL 'P'-mode image, list of legend patches — empty when
        out_label_flag is False)
    """
    # put colormap
    out_img = Image.fromarray(npimg.squeeze().astype('uint8'))
    print('out_img shape:', out_img)
    out_img.putpalette(new_palette)
    # Bug fix: `patches` used to be defined only inside the if-branch, so the
    # return statement raised NameError whenever out_label_flag was False.
    patches = []
    if out_label_flag:
        assert labels is not None
        u_index = np.unique(npimg)
        print('u_index:', u_index)
        for index in u_index:
            label = labels[index]
            # Scale the 0-255 palette entries to matplotlib's 0-1 floats.
            cur_color = [new_palette[index * 3] / 255.0,
                         new_palette[index * 3 + 1] / 255.0,
                         new_palette[index * 3 + 2] / 255.0]
            patches.append(mpatches.Patch(color=cur_color, label=label))
    return out_img, patches
@st.cache(allow_output_mutation=True)
def load_model():
    """Build the LSeg evaluator and its input transform (cached by streamlit).

    :return: (evaluator, transform) — a multi-scale evaluation wrapper on CPU
        and the torchvision transform that prepares an input image for it.
    """
    class Options:
        """Thin argparse wrapper reproducing the training CLI defaults."""
        def __init__(self):
            parser = argparse.ArgumentParser(description="PyTorch Segmentation")
            # model and dataset
            parser.add_argument(
                "--model", type=str, default="encnet", help="model name (default: encnet)"
            )
            parser.add_argument(
                "--backbone",
                type=str,
                default="clip_vitl16_384",
                help="backbone name (default: resnet50)",
            )
            parser.add_argument(
                "--dataset",
                type=str,
                default="vizwiz",
                help="dataset name (default: pascal12)",
            )
            parser.add_argument(
                "--workers", type=int, default=16, metavar="N", help="dataloader threads"
            )
            parser.add_argument(
                "--base-size", type=int, default=520, help="base image size"
            )
            parser.add_argument(
                "--crop-size", type=int, default=480, help="crop image size"
            )
            parser.add_argument(
                "--train-split",
                type=str,
                default="train",
                help="dataset train split (default: train)",
            )
            parser.add_argument(
                "--aux", action="store_true", default=False, help="Auxilary Loss"
            )
            parser.add_argument(
                "--se-loss",
                action="store_true",
                default=False,
                help="Semantic Encoding Loss SE-loss",
            )
            parser.add_argument(
                "--se-weight", type=float, default=0.2, help="SE-loss weight (default: 0.2)"
            )
            parser.add_argument(
                "--batch-size",
                type=int,
                default=16,
                metavar="N",
                help="input batch size for \
                                training (default: auto)",
            )
            parser.add_argument(
                "--test-batch-size",
                type=int,
                default=16,
                metavar="N",
                help="input batch size for \
                                testing (default: same as batch size)",
            )
            # cuda, seed and logging
            parser.add_argument(
                "--no-cuda",
                action="store_true",
                default=False,
                help="disables CUDA training",
            )
            parser.add_argument(
                "--seed", type=int, default=1, metavar="S", help="random seed (default: 1)"
            )
            # checking point
            parser.add_argument(
                "--weights", type=str, default='', help="checkpoint to test"
            )
            # evaluation option
            parser.add_argument(
                "--eval", action="store_true", default=False, help="evaluating mIoU"
            )
            parser.add_argument(
                "--export",
                type=str,
                default=None,
                help="put the path to resuming file if needed",
            )
            parser.add_argument(
                "--acc-bn",
                action="store_true",
                default=False,
                help="Re-accumulate BN statistics",
            )
            parser.add_argument(
                "--test-val",
                action="store_true",
                default=False,
                help="generate masks on val set",
            )
            parser.add_argument(
                "--no-val",
                action="store_true",
                default=False,
                help="skip validation during training",
            )
            parser.add_argument(
                "--module",
                default='lseg',
                help="select model definition",
            )
            # test option
            parser.add_argument(
                "--data-path", type=str, default='../datasets/', help="path to test image folder"
            )
            parser.add_argument(
                "--no-scaleinv",
                dest="scale_inv",
                default=True,
                action="store_false",
                help="turn off scaleinv layers",
            )
            parser.add_argument(
                "--widehead", default=False, action="store_true", help="wider output head"
            )
            parser.add_argument(
                "--widehead_hr",
                default=False,
                action="store_true",
                help="wider output head",
            )
            parser.add_argument(
                "--ignore_index",
                type=int,
                default=-1,
                help="numeric value of ignore label in gt",
            )
            parser.add_argument(
                "--label_src",
                type=str,
                default="default",
                help="how to get the labels",
            )
            parser.add_argument(
                "--arch_option",
                type=int,
                default=0,
                help="which kind of architecture to be used",
            )
            parser.add_argument(
                "--block_depth",
                type=int,
                default=0,
                help="how many blocks should be used",
            )
            parser.add_argument(
                "--activation",
                choices=['lrelu', 'tanh'],
                default="lrelu",
                help="use which activation to activate the block",
            )
            self.parser = parser

        def parse(self):
            # args=[] keeps argparse away from streamlit's own argv.
            args = self.parser.parse_args(args=[])
            args.cuda = not args.no_cuda and torch.cuda.is_available()
            print(args)
            return args

    args = Options().parse()
    print('args:', args)
    torch.manual_seed(args.seed)
    args.test_batch_size = 1
    alpha = 0.5
    # Demo-specific overrides of the CLI defaults.
    args.scale_inv = False
    args.widehead = True
    args.dataset = 'vizwiz'
    args.backbone = 'clip_vitl16_384'
    args.weights = 'checkpoints/result-epoch=10-val_acc_epoch=0.83.ckpt'
    args.ignore_index = 255
    print('dataset:', args.dataset)
    print('weight path:', args.weights)
    module = LSegModule.load_from_checkpoint(
        checkpoint_path=args.weights,
        data_path=args.data_path,
        dataset=args.dataset,
        backbone=args.backbone,
        aux=args.aux,
        num_features=256,
        aux_weight=0,
        se_loss=False,
        se_weight=0,
        base_lr=0,
        batch_size=1,
        max_epochs=0,
        ignore_index=args.ignore_index,
        dropout=0.0,
        scale_inv=args.scale_inv,
        augment=False,
        no_batchnorm=False,
        widehead=args.widehead,
        widehead_hr=args.widehead_hr,
        # Bug fix: this keyword was misspelled "map_locatin", so it was
        # swallowed as a stray hyperparameter instead of telling Lightning
        # to map the checkpoint tensors onto the CPU.
        map_location="cpu",
        arch_option=0,
        block_depth=0,
        activation='lrelu',
    )
    input_transform = module.val_transform
    # dataloader
    loader_kwargs = (
        {"num_workers": args.workers, "pin_memory": True} if args.cuda else {}
    )
    # model: unwrap the underlying network when the module wraps a BaseNet.
    if isinstance(module.net, BaseNet):
        model = module.net
    else:
        model = module
    model = model.eval()
    model = model.cpu()
    scales = (
        [0.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.25]
        if args.dataset == "citys"
        else [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
    )
    model.mean = [0.5, 0.5, 0.5]
    model.std = [0.5, 0.5, 0.5]
    # evaluator = LSeg_MultiEvalModule(
    #     model, scales=scales, flip=True
    # ).cuda()
    evaluator = LSeg_MultiEvalModule(
        model, scales=scales, flip=True
    ).cpu()
    evaluator.eval()
    # Normalization must mirror model.mean/model.std above.
    transform = transforms.Compose(
        [
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
            transforms.Resize([512, 512]),
        ]
    )
    return evaluator, transform
"""
# LSeg Demo
"""
lseg_model, lseg_transform = load_model()
uploaded_file = st.file_uploader("Choose an image...")
input_labels = st.text_input("Input labels", value="dog, grass, other")
st.write("The labels are", input_labels)
if uploaded_file is not None:
image = Image.open(uploaded_file)
pimage = lseg_transform(np.array(image)).unsqueeze(0)
labels = []
for label in input_labels.split(","):
labels.append(label.strip())
print('labels:', labels)
with torch.no_grad():
# outputs = lseg_model.parallel_forward(pimage, labels)
outputs = lseg_model.forward(pimage, labels)
print('output shape:', np.array(outputs).shape) # [bs=1, 3, h, w]
predicts = [
torch.max(output, 0)[1].cpu().numpy()
for output in outputs
]
# predicts = torch.max(outputs, 1).cpu().numpy()
print('predict shape:', np.array(predicts).shape)
image = pimage[0].permute(1, 2, 0)
image = image * 0.5 + 0.5
image = Image.fromarray(np.uint8(255 * image)).convert("RGBA")
pred = predicts[0]
print('pred shape:', np.array(pred).shape)
new_palette = get_new_pallete(len(labels))
mask, patches = get_new_mask_pallete(pred, new_palette, out_label_flag=True, labels=labels)
seg = mask.convert("RGBA")
fig = plt.figure()
plt.subplot(121)
plt.imshow(image)
plt.axis('off')
plt.subplot(122)
plt.imshow(seg)
plt.legend(handles=patches, loc='upper right', bbox_to_anchor=(1.3, 1), prop={'size': 5})
plt.axis('off')
plt.tight_layout()
# st.image([image,seg], width=700, caption=["Input image", "Segmentation"])
st.pyplot(fig)
|
<reponame>weihaosky/SMVmatching
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_ssim
import time
import IPython, cv2
SSIM_WIN = 5
class WrappedModel(nn.Module):
    """Transparent wrapper exposing an inner network as ``self.module``.

    Mirrors the attribute layout used by nn.DataParallel, which is handy
    when loading checkpoints saved with/without that wrapper.
    """

    def __init__(self, module):
        super(WrappedModel, self).__init__()
        # The wrapped network; forward() simply delegates to it.
        self.module = module

    def forward(self, x):
        return self.module(x)
def gradient_xy(img):
    """Forward differences of a (N, C, H, W) tensor along width and height.

    Returns (gx, gy) where gx[..., j] = img[..., j] - img[..., j + 1] and
    gy is the analogous difference over rows.
    """
    grad_x = img[:, :, :, :-1] - img[:, :, :, 1:]
    grad_y = img[:, :, :-1, :] - img[:, :, 1:, :]
    return grad_x, grad_y
def warp_disp(x, disp, args):
    """Warp image batch x by a per-pixel horizontal disparity.

    Samples x at (col + disp, row), i.e. result + flow(-disp) = x.

    :param x: (N, C, H, W) source images
    :param disp: (N, H, W) horizontal disparity in pixels
    :param args: namespace with a boolean ``cuda`` flag
    :return: (N, C, H, W) warped images (zero padding outside the frame)
    """
    N, _, H, W = x.shape
    # Identity sampling grid in pixel coordinates: channel 0 = column,
    # channel 1 = row.
    x_ = torch.arange(W).view(1, -1).expand(H, -1)
    y_ = torch.arange(H).view(-1, 1).expand(-1, W)
    grid = torch.stack([x_, y_], dim=0).float()
    if args.cuda:
        grid = grid.cuda()
    # Bug fix: expand() returns a view whose batch entries share storage, so
    # the in-place normalization below would fail (or silently alias) on it;
    # materialize a real per-batch copy first.
    grid = grid.unsqueeze(0).expand(N, -1, -1, -1).contiguous()
    # Normalize to [-1, 1] using the (size - 1) convention, which is the
    # align_corners=True convention of grid_sample.
    grid[:, 0, :, :] = 2 * grid[:, 0, :, :] / (W - 1) - 1
    grid[:, 1, :, :] = 2 * grid[:, 1, :, :] / (H - 1) - 1
    grid2 = grid.clone()
    grid2[:, 0, :, :] = grid[:, 0, :, :] + 2 * disp / W
    grid2 = grid2.permute(0, 2, 3, 1)
    # align_corners is stated explicitly: the normalization above matches
    # align_corners=True, while newer torch defaults to False.
    return F.grid_sample(x, grid2, padding_mode='zeros', align_corners=True)
# loss1
# appearance loss: the difference between reconstructed image and original image
def criterion1(imgC, imgR, imgL, outputR, outputL, maxdisp, args, down_factor=1):
    """Photometric reconstruction loss for the three-view (L, C, R) setup.

    Warps the left/right images towards the center view using both predicted
    disparities and compares them to the center image with a mix of SSIM and
    smooth-L1 (weighted by alpha2).

    :param imgC, imgR, imgL: (N, C, H, W) center/right/left images
    :param outputR, outputL: (N, H, W) disparities predicted for each pair
    :param maxdisp: currently unused by this function
    :param args: namespace with a boolean ``cuda`` flag (used by warp_disp)
    :param down_factor: optional downscaling of images and disparities
    :return: (loss, imgR2C, imgL2C, imgC, outputR)
    """
    if down_factor != 1:
        # Downscale both the images and the disparities; disparities are
        # divided by the factor because they are measured in pixels.
        imgC = F.interpolate(imgC, scale_factor=1.0/down_factor, mode='bicubic')
        imgR = F.interpolate(imgR, scale_factor=1.0/down_factor, mode='bicubic')
        imgL = F.interpolate(imgL, scale_factor=1.0/down_factor, mode='bicubic')
        outputR = F.interpolate(outputR.unsqueeze(1), scale_factor=1.0/down_factor, mode='bicubic') / down_factor
        outputL = F.interpolate(outputL.unsqueeze(1), scale_factor=1.0/down_factor, mode='bicubic') / down_factor
        outputR = outputR.squeeze(1)
        outputL = outputL.squeeze(1)
    # Reconstructions of the center view: both images warped with both
    # disparities (cross terms included for consistency).
    imgR2C = warp_disp(imgR, -outputR, args)
    imgL2C = warp_disp(imgL, outputL, args)
    imgR2C2 = warp_disp(imgR, -outputL, args)
    imgL2C2 = warp_disp(imgL, outputR, args)
    alpha2 = 0.85  # SSIM vs L1 mixing weight
    crop_edge = 200  # border columns excluded from the loss
    # SSIM window must not exceed the image height.
    if imgC.shape[2] > SSIM_WIN:
        ssim_loss = pytorch_ssim.SSIM(window_size = SSIM_WIN)
    else:
        ssim_loss = pytorch_ssim.SSIM(window_size = imgC.shape[2])
    if crop_edge == 0:
        diff_ssim = (1 - ssim_loss(imgC, imgR2C)) / 2.0 + \
                    (1 - ssim_loss(imgC, imgL2C)) / 2.0 + \
                    (1 - ssim_loss(imgC, imgR2C2)) / 2.0 + \
                    (1 - ssim_loss(imgC, imgL2C2)) / 2.0
        diff_L1 = (F.smooth_l1_loss(imgC, imgR2C, reduction='mean')) + \
                  (F.smooth_l1_loss(imgC, imgL2C, reduction='mean')) + \
                  (F.smooth_l1_loss(imgC, imgR2C2, reduction='mean')) + \
                  (F.smooth_l1_loss(imgC, imgL2C2, reduction='mean'))
    else:
        # Crop the side where the warp has no source pixels: the left edge
        # for right-to-center warps, the right edge for left-to-center.
        diff_ssim = (1 - ssim_loss(imgC[:,:,:,crop_edge:], imgR2C[:,:,:,crop_edge:])) / 2.0 + \
                    (1 - ssim_loss(imgC[:,:,:,:-crop_edge], imgL2C[:,:,:,:-crop_edge])) / 2.0 + \
                    (1 - ssim_loss(imgC[:,:,:,crop_edge:], imgR2C2[:,:,:,crop_edge:])) / 2.0 + \
                    (1 - ssim_loss(imgC[:,:,:,:-crop_edge], imgL2C2[:,:,:,:-crop_edge])) / 2.0
        diff_L1 = (F.smooth_l1_loss(imgC[:,:,:,crop_edge:], imgR2C[:,:,:,crop_edge:], reduction='mean')) + \
                  (F.smooth_l1_loss(imgC[:,:,:,:-crop_edge], imgL2C[:,:,:,:-crop_edge], reduction='mean')) + \
                  (F.smooth_l1_loss(imgC[:,:,:,crop_edge:], imgR2C2[:,:,:,crop_edge:], reduction='mean')) + \
                  (F.smooth_l1_loss(imgC[:,:,:,:-crop_edge], imgL2C2[:,:,:,:-crop_edge], reduction='mean'))
    # Average over the four reconstructions.
    loss1 = 1.0/4 * (alpha2 * diff_ssim + (1-alpha2) * diff_L1)
    return loss1, imgR2C, imgL2C, imgC, outputR
def criterion1_2frame(imgC, imgR, outputR, maxdisp, args, down_factor=1):
    """Two-frame variant of criterion1: single reconstruction imgR -> imgC.

    :param imgC, imgR: (N, C, H, W) center/right images
    :param outputR: (N, H, W) predicted disparity
    :param maxdisp: currently unused by this function
    :param args: namespace with a boolean ``cuda`` flag (used by warp_disp)
    :param down_factor: optional downscaling of images and disparity
    :return: (loss, imgR2C)
    """
    if down_factor != 1:
        # Disparity is in pixels, so it is divided by the same factor.
        imgC = F.interpolate(imgC, scale_factor=1.0/down_factor, mode='bicubic')
        imgR = F.interpolate(imgR, scale_factor=1.0/down_factor, mode='bicubic')
        outputR = F.interpolate(outputR.unsqueeze(1), scale_factor=1.0/down_factor, mode='bicubic') / down_factor
        outputR = outputR.squeeze(1)
    imgR2C = warp_disp(imgR, -outputR, args)
    alpha2 = 0.85  # SSIM vs L1 mixing weight
    crop_edge = 0  # unlike criterion1, no border cropping by default
    if imgC.shape[2] > SSIM_WIN:
        ssim_loss = pytorch_ssim.SSIM(window_size = SSIM_WIN)
    else:
        ssim_loss = pytorch_ssim.SSIM(window_size = imgC.shape[2])
    if crop_edge == 0:
        diff_ssim = (1 - ssim_loss(imgC, imgR2C)) / 2.0
        diff_L1 = (F.smooth_l1_loss(imgC, imgR2C, reduction='mean'))
    else:
        diff_ssim = (1 - ssim_loss(imgC[:,:,:,crop_edge:], imgR2C[:,:,:,crop_edge:])) / 2.0
        diff_L1 = (F.smooth_l1_loss(imgC[:,:,:,crop_edge:], imgR2C[:,:,:,crop_edge:], reduction='mean'))
    loss1 = (alpha2 * diff_ssim + (1-alpha2) * diff_L1)
    return loss1, imgR2C
# loss2
# consistency loss: the difference between left output and right output
def criterion2(R, L):
    """Consistency loss between the two disparity predictions.

    The element-wise smooth-L1 difference is truncated at ``tau`` so that
    occluded regions (where the two predictions legitimately disagree) do
    not dominate the loss.

    :param R: disparity predicted from the right pair
    :param L: disparity predicted from the left pair
    :return: scalar loss tensor
    """
    tau = 10  # truncation for occluded region
    # (An unused alpha1 constant and a block of commented-out
    # gradient-matching terms were removed here.)
    return F.smooth_l1_loss(R, L, reduction='none').clamp(min=0, max=tau).mean()
# loss3
# smooth loss: force the disparity gradient to be small
def criterion3(disp, img):
    """Edge-aware smoothness loss on the disparity map.

    Disparity gradients are weighted by exp(-10 * |image gradient|) so the
    penalty relaxes across intensity edges; gradients larger than 0.5 are
    additionally penalized by +10 to discourage large jumps.

    :param disp: (N, H, W) disparity
    :param img: (N, C, H, W) reference image
    :return: scalar loss tensor
    """
    disp = disp.unsqueeze(1)
    d_gx, d_gy = gradient_xy(disp)
    i_gx, i_gy = gradient_xy(img)
    # Weights decay where the image itself has strong gradients (edges).
    w_x = torch.exp(-10 * torch.abs(i_gx).mean(1).unsqueeze(1))
    w_y = torch.exp(-10 * torch.abs(i_gy).mean(1).unsqueeze(1))

    def penalize(grad):
        # Absolute gradient, with +10 added wherever it exceeds 0.5.
        grad = torch.abs(grad)
        boosted = grad.clone()
        boosted[boosted > 0.5] = grad[grad > 0.5] + 10
        return boosted

    return (penalize(d_gx) * w_x).mean() + (penalize(d_gy) * w_y).mean()
# loss4
# regularization term
def criterion4(disp, maxdisp):
    """Regularization pushing disparities away from the 0 / maxdisp extremes.

    Maps disp linearly onto [-1, 1] (0 -> -1, maxdisp -> +1) and penalizes
    the squared magnitude, which is minimized at disp == maxdisp / 2.

    :param disp: disparity tensor
    :param maxdisp: maximum representable disparity
    :return: scalar loss tensor
    """
    normalized = disp * 2 / maxdisp - 1
    return normalized.pow(2).mean()
def evaluate(model, imgL, imgC, imgR, gt, args, maxd):
    """Run the model on one sample and compute disparity error metrics.

    Supports two modes: multiscopic (imgC is not None; the center/right and
    rotated center/left pairs are both predicted and fused) and plain stereo
    (imgC is None).

    :param model: network called as model(left, right, maxdisp)
    :param imgL, imgC, imgR: (C, H, W) numpy images; imgC may be None
    :param gt: (H, W) ground-truth disparity, 0 where invalid
    :param args: namespace with ``cuda`` and ``model`` ('stackhourglass'/'basic')
    :param maxd: maximum disparity
    :return: ([avgerr, rms, bad0.5, bad1, bad2, bad3], disparity ndarray)
    """
    use_cuda = args.cuda
    # use_cuda = False
    height = imgL.shape[1]
    width = imgL.shape[2]
    maxdisp = maxd
    # Round the padded size up to the next multiple of 32.
    pad_h = (height // 32 + 1) * 32
    pad_w = (width // 32 + 1) * 32
    imgL = np.reshape(imgL, [1, imgL.shape[0], imgL.shape[1], imgL.shape[2]])
    imgR = np.reshape(imgR, [1, imgR.shape[0], imgR.shape[1], imgR.shape[2]])
    if imgC is not None:
        imgC = np.reshape(imgC, [1, imgC.shape[0], imgC.shape[1], imgC.shape[2]])
    # pad to (M x 32, N x 32): zeros at the top and on the right.
    top_pad = pad_h - imgL.shape[2]
    left_pad = pad_w - imgL.shape[3]
    imgL = np.lib.pad(imgL, ((0,0),(0,0),(top_pad,0),(0,left_pad)),mode='constant',constant_values=0)
    imgR = np.lib.pad(imgR, ((0,0),(0,0),(top_pad,0),(0,left_pad)),mode='constant',constant_values=0)
    if imgC is not None:
        imgC = np.lib.pad(imgC, ((0,0),(0,0),(top_pad,0),(0,left_pad)),mode='constant',constant_values=0)
    imgL = torch.from_numpy(imgL)
    imgR = torch.from_numpy(imgR)
    if imgC is not None:
        imgC = torch.from_numpy(imgC)
    model.eval()
    if imgC is not None:
        # multiscopic mode: the left pair is processed 180°-rotated so the
        # same left-to-right matching direction can be reused.
        imgC_rot = imgC.flip(2).flip(3)
        imgL_rot = imgL.flip(2).flip(3)
        if use_cuda:
            imgL, imgR, imgC, imgC_rot, imgL_rot = \
                imgL.cuda(), imgR.cuda(), imgC.cuda(), imgC_rot.cuda(), imgL_rot.cuda()
        if args.model == 'stackhourglass':
            outputR, outputR_prob, _, _ = model(imgC, imgR, maxdisp)
            if args.cuda and (not use_cuda):
                outputR = outputR.cpu()
                outputR_prob = outputR_prob.cpu()
            outputL_rot, outputL_prob_rot, _, _ = model(imgC_rot, imgL_rot, maxdisp)
            # Un-rotate the left-pair prediction back to the center frame.
            outputL = outputL_rot.flip(1).flip(2)
            outputL_prob = outputL_prob_rot.flip(2).flip(3)
            if args.cuda and (not use_cuda):
                outputL = outputL.cpu()
                outputL_prob = outputL_prob.cpu()
        elif args.model == 'basic':
            outputR = model(imgC, imgR, maxdisp)
            # NOTE(review): maxdisp is not passed here, unlike every other
            # model call in this file — confirm the intended signature.
            outputL_rot = model(imgC_rot, imgL_rot)
            outputL = outputL_rot.flip(1).flip(2)
        # Fuse the two predictions: where they disagree by more than 3 px,
        # fall back to the smaller (per-pixel minimum) disparity.
        mindisp = torch.min(torch.cat([outputR, outputL]), 0)[0]
        diff = (outputR - outputL).squeeze()
        outputR = outputR.squeeze()
        outputL = outputL.squeeze()
        outputR[diff>3] = mindisp[diff>3]
        # NOTE(review): the fused values are written into outputR, but the
        # returned disparity is taken from outputL — confirm this is intended.
        disp = outputL
        # Undo the padding applied above.
        disp = disp[top_pad:, :-left_pad]
    else:
        # stereo mode
        if use_cuda:
            imgL, imgR = imgL.cuda(), imgR.cuda()
        if args.model == 'stackhourglass':
            output, _, _, _ = model(imgL, imgR, maxdisp)
        elif args.model == 'basic':
            output = model(imgL, imgR, maxdisp)
        if args.cuda and (not use_cuda):
            output = output.cpu()
        disp = output.squeeze()[top_pad:, :-left_pad]
    gt = torch.from_numpy(gt).float()
    if(use_cuda): gt = gt.cuda()
    # Pixels with gt == 0 are invalid and excluded from all metrics.
    mask = (gt != 0)
    diff = torch.abs(disp[mask] - gt[mask])
    avgerr = torch.mean(diff)
    rms = torch.sqrt( (diff**2).mean() )
    # badX = fraction of valid pixels whose error exceeds X pixels.
    bad05 = len(diff[diff>0.5])/float(len(diff))
    bad1 = len(diff[diff>1])/float(len(diff))
    bad2 = len(diff[diff>2])/float(len(diff))
    bad3 = len(diff[diff>3])/float(len(diff))
    return [avgerr.data.item(), rms.data.item(), bad05, bad1, bad2, bad3], disp.cpu().numpy()
def evaluate_kitti(model, imgL, imgR, gt_occ, gt_noc, args, maxd=160):
    """Run the model on one KITTI stereo pair and compute bad-3 error rates.

    :param model: stereo network called as model(imgL, imgR, maxdisp)
    :param imgL, imgR: (C, H, W) numpy images
    :param gt_occ, gt_noc: ground-truth disparities ('all' / 'non-occluded'),
        0 where invalid; pass gt_noc=None to get only the prediction back
    :param args: namespace with ``cuda`` and ``model`` ('stackhourglass'/'basic')
    :param maxd: maximum disparity
    :return: ([bad3_occ, bad3_noc], disparity ndarray), or just the disparity
        ndarray when no ground truth is supplied
    """
    height = imgL.shape[1]
    width = imgL.shape[2]
    maxdisp = maxd
    # Bug fix: use integer division — plain `/` yields float pad sizes under
    # Python 3, which np.lib.pad rejects (evaluate() above already uses //).
    pad_h = (height // 32 + 1) * 32
    pad_w = (width // 32 + 1) * 32
    imgL = np.reshape(imgL, [1, imgL.shape[0], imgL.shape[1], imgL.shape[2]])
    imgR = np.reshape(imgR, [1, imgR.shape[0], imgR.shape[1], imgR.shape[2]])
    # pad to (M x 32, N x 32): zeros at the top and on the right.
    top_pad = pad_h - imgL.shape[2]
    left_pad = pad_w - imgL.shape[3]
    imgL = np.lib.pad(imgL, ((0,0),(0,0),(top_pad,0),(0,left_pad)),mode='constant',constant_values=0)
    imgR = np.lib.pad(imgR, ((0,0),(0,0),(top_pad,0),(0,left_pad)),mode='constant',constant_values=0)
    imgL = torch.from_numpy(imgL)
    imgR = torch.from_numpy(imgR)
    model.eval()
    if args.cuda:
        imgL, imgR = imgL.cuda(), imgR.cuda()
    if args.model == 'stackhourglass':
        output, _, _, _ = model(imgL, imgR, maxdisp)
    elif args.model == 'basic':
        output = model(imgL, imgR, maxdisp)
    disp = output.squeeze()[top_pad:, :-left_pad]
    # Bug fix: the old check `gt_noc.any() == None` could never be True and
    # crashed with AttributeError when gt_noc actually was None.
    if gt_noc is None:
        return disp.cpu().numpy()
    gt_occ = torch.from_numpy(gt_occ).float()
    gt_noc = torch.from_numpy(gt_noc).float()
    if args.cuda:
        gt_noc = gt_noc.cuda()
        gt_occ = gt_occ.cuda()
    # Pixels with gt == 0 are invalid and excluded.
    mask_occ = (gt_occ != 0)
    mask_noc = (gt_noc != 0)
    diff_occ = torch.abs(disp[mask_occ] - gt_occ[mask_occ])
    diff_noc = torch.abs(disp[mask_noc] - gt_noc[mask_noc])
    # KITTI bad-3 metric: error > 3 px AND > 5% of the true disparity.
    bad3_occ = torch.sum((diff_occ>3) & (diff_occ/gt_occ[mask_occ]>0.05)).float() / float(len(diff_occ))
    bad3_noc = torch.sum((diff_noc>3) & (diff_noc/gt_noc[mask_noc]>0.05)).float() / float(len(diff_noc))
    return [bad3_occ, bad3_noc], disp.cpu().numpy()
def predict(model, imgL, imgR, args, maxd):
    """Predict a disparity map for one stereo pair.

    :param model: stereo network called as model(imgL, imgR, maxd)
    :param imgL, imgR: (C, H, W) numpy images
    :param args: namespace with ``cuda`` and ``model`` ('stackhourglass'/'basic')
    :param maxd: maximum disparity
    :return: (H, W) numpy disparity map
    """
    height = imgL.shape[1]
    width = imgL.shape[2]
    # Bug fix: use integer division — plain `/` yields float pad sizes under
    # Python 3, which np.lib.pad rejects (evaluate() above already uses //).
    pad_h = (height // 32 + 1) * 32
    pad_w = (width // 32 + 1) * 32
    imgL = np.reshape(imgL, [1, imgL.shape[0], imgL.shape[1], imgL.shape[2]])
    imgR = np.reshape(imgR, [1, imgR.shape[0], imgR.shape[1], imgR.shape[2]])
    # pad to (M x 32, N x 32): zeros at the top and on the right.
    top_pad = pad_h - imgL.shape[2]
    left_pad = pad_w - imgL.shape[3]
    imgL = np.lib.pad(imgL, ((0,0),(0,0),(top_pad,0),(0,left_pad)),mode='constant',constant_values=0)
    imgR = np.lib.pad(imgR, ((0,0),(0,0),(top_pad,0),(0,left_pad)),mode='constant',constant_values=0)
    imgL = torch.from_numpy(imgL)
    imgR = torch.from_numpy(imgR)
    model.eval()
    if args.cuda:
        imgL, imgR = imgL.cuda(), imgR.cuda()
    if args.model == 'stackhourglass':
        output, _, _, _ = model(imgL, imgR, maxd)
    elif args.model == 'basic':
        output = model(imgL, imgR, maxd)
    # Undo the padding before returning.
    disp = output.squeeze()[top_pad:, :-left_pad]
    return disp.cpu().numpy()
"""
MIT License
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from datetime import datetime
import RPi.GPIO as GPIO
from time import sleep
import threading
import logging
# LED pin assignments. All except the white led are simple led connections.
# The white led pin is also connected to the relay so it energizes the
# relay as well as lights the white led.
# NOTE(review): pin numbers look like BCM numbering, but the GPIO.setmode
# call is not visible in this part of the file — confirm.
RELAY_AND_WHITE = 5
LED_BLUE = 26
LED_GREEN = 19
LED_YELLOW = 6
LED_RED = 20
leds = (RELAY_AND_WHITE, LED_BLUE, LED_GREEN, LED_YELLOW, LED_RED)
# Pushbutton pin assignments
# White: Single-step the clock one minute
PB_WHITE = 25
# Time in seconds between repeated pulses to the clock
REPEAT_INTERVAL_SECONDS = 1.2
# Time in seconds to hold the relay each time it is actuated
HOLD_SECONDS = 1.0
# Hold must be less than repeat interval (with some safety factor),
# otherwise repeat will not work!
if HOLD_SECONDS >= REPEAT_INTERVAL_SECONDS - 0.1:
    raise Exception('Invalid setting of hold and repeat times. Contact the developer!')
# Blue: Repeat-step the clock BLUE_COUNT times
PB_BLUE = 24
BLUE_COUNT = 30
# Black: Repeat-step the clock BLACK_COUNT times
PB_BLACK = 27
BLACK_COUNT = 180
# Yellow: Repeat-step the clock YELLOW_COUNT times
PB_YELLOW = 17
YELLOW_COUNT = 60
# Red: Stop the clock, toggle to restart. If stopped with no
# restart, remain stopped 60 minutes (RED_COUNT) before restarting automatically.
PB_RED = 14
RED_COUNT = 60
buttons = (PB_WHITE, PB_BLUE, PB_BLACK, PB_YELLOW, PB_RED)
# Flags that are accessed globally by call-back functions and functions that
# run in their own threads. These flags are used to prevent conflicts when
# two things try to run simultaneously. For example, when the red button is
# pressed, other actions that try to actuate the relay must be blocked.
# The relay is held down for a set amount of time when it is actuated. During
# this time the relay_busy flag indicates that it is energized. Other attempts
# to use the relay are blocked until it is released.
relay_busy = False
# The red button prevents anything from actuating the relay, effectively stopping
# the clock. The red button toggles on and off when pressed. It also times out
# 60 minutes after it is pressed. This is how the clock can be stopped for
# daylight saving time adjustment.
stopped = False
# The blue, black, and yellow buttons cause the clock to advance for predetermined
# number of minutes. During this time other attempts to actuate the relay are blocked.
# The red button stops the auto-advancing of the clock, as it should be expected to.
auto_advancing = False
def initialize_leds(leds):
    """Configure every LED pin as an output driven low, so all five leds
    start out off."""
    for pin in leds:
        GPIO.setup(pin, GPIO.OUT, initial=0)
def initialize_pushbuttons(buttons):
    """Configure button pins and register the shared press handler.

    Each pin is a pulled-down input (low until pressed, 3.3 V when pressed);
    the rising edge of a press invokes button_callback, with a 300 ms
    debounce, and the callback decides which button it was.
    """
    for pin in buttons:
        GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
        GPIO.add_event_detect(pin, GPIO.RISING, callback=button_callback, bouncetime=300)
def relay_pulse():
    """Energize the relay (and white LED), then schedule its release.

    Sets the global relay_busy flag; relay_reset() clears it when the
    HOLD_SECONDS timer fires on its own thread.
    """
    global relay_busy
    relay_busy = True
    GPIO.output(RELAY_AND_WHITE, 1)
    # Asynchronous timer: turn the relay and led back off after HOLD_SECONDS.
    threading.Timer(HOLD_SECONDS, relay_reset).start()
def relay_reset():
    """De-energize the relay (and white LED) and clear the busy flag."""
    global relay_busy
    GPIO.output(RELAY_AND_WHITE, 0)
    relay_busy = False
def stop_set():
    """Toggle the stopped state (red button handler).

    Entering the stopped state lights the red LED and arms a timer that
    clears the stop after RED_COUNT minutes — useful when the clock is
    ahead or when adjusting for Daylight Saving Time.
    """
    global stopped
    if stopped:
        # Second press: resume immediately.
        stopped = False
        GPIO.output(LED_RED, 0)
    else:
        stopped = True
        GPIO.output(LED_RED, 1)
        # Asynchronous timer: auto-restart after RED_COUNT minutes.
        threading.Timer(RED_COUNT * 60, stop_reset).start()
def stop_reset():
    """Timer callback: clear the stopped state if it is still set."""
    global stopped
    if stopped:
        stopped = False
        GPIO.output(LED_RED, 0)
def repeating_pulses(count, indicator_led):
    """Advance the clock `count` minutes by pulsing the relay repeatedly.

    Runs on a worker thread (see repeat_dispatcher). Lights indicator_led
    for the duration and aborts early if the red button sets `stopped`.

    :param count: number of one-minute pulses to send
    :param indicator_led: LED pin held on while the sequence runs
    """
    global auto_advancing
    # Flag that repeating pulses are being sent, blocking other relay users.
    auto_advancing = True
    GPIO.output(indicator_led, 1)
    for _ in range(count):
        # The red button sets the stopped flag when pressed; honor it
        # between pulses so a stop takes effect promptly.
        if stopped:
            break
        relay_pulse()
        if relay_busy:
            # The relay is held closed for HOLD_SECONDS seconds. Wait
            # at least that long so it opens before looping to close it
            # again. REPEAT_INTERVAL_SECONDS is enforced to be longer than
            # HOLD_SECONDS at program start-up time.
            sleep(REPEAT_INTERVAL_SECONDS)
        # (A dead trailing `pass` statement was removed here.)
    GPIO.output(indicator_led, 0)
    auto_advancing = False
def repeat_dispatcher(pulse_count, indicator_led):
    """Launch repeating_pulses on a worker thread.

    The multi-pulse behavior takes a long time, so it runs off the callback
    thread to keep button processing responsive; while it runs, only red
    button presses (which set the stopped flag) are honored.  The request is
    refused when the relay is busy, the clock is stopped, or a run is active.
    """
    global auto_advancing
    if relay_busy or stopped or auto_advancing:
        logging.info('repeat_dispatcher: Cannot process request')
        return
    worker = threading.Thread(target=repeating_pulses, args=(pulse_count, indicator_led,))
    worker.start()
# Handle button presses as they occur. Note that this callback can only handle
# one button press at a time. If processing for a button press takes a long time,
# the processing should be done on a separate thread so the buttons remain
# responsive.
def button_callback (channel):
    """GPIO edge callback: dispatch one debounced button press to its action.

    channel: BCM pin number reported by GPIO.add_event_detect.  Each branch
    re-reads the pin (GPIO.input) to filter out spurious edges.
    """
    # These globals are only read here; the declarations are harmless since
    # nothing in this function assigns them.
    global relay_busy
    global stopped
    global auto_advancing
    # White button sends a single pulse only when the relay is not already
    # picked and when there is not a repeating operation already running
    if channel == PB_WHITE and GPIO.input(PB_WHITE):
        if not relay_busy and not stopped and not auto_advancing:
            relay_pulse()
        else:
            logging.info(f'Button press {channel} not accepted because the relay was busy or stopped')
    # The red button toggles the stopped state on and off
    elif channel == PB_RED and GPIO.input(PB_RED):
        stop_set()
    # The blue button causes a fixed number of minutes to be advanced
    elif channel == PB_BLUE and GPIO.input(PB_BLUE):
        # Advance BLUE_COUNT minutes
        repeat_dispatcher(BLUE_COUNT, LED_BLUE)
    # Note that PB_BLACK is represented by the green led because I have no
    # green button caps
    # The black button causes a fixed number of minutes to be advanced
    elif channel == PB_BLACK and GPIO.input(PB_BLACK):
        # Advance BLACK_COUNT times
        repeat_dispatcher(BLACK_COUNT, LED_GREEN)
    # The yellow button causes a fixed number of minutes (usually 60)
    # to be advanced
    elif channel == PB_YELLOW and GPIO.input(PB_YELLOW):
        # Advance 1 hour (60 minutes-good for daylight savings time 'fall back')
        # YELLOW_COUNT must be 60 for 1 hour advance
        repeat_dispatcher(YELLOW_COUNT, LED_YELLOW)
    else:
        logging.info(f'button_callback: Unexpected input from channel: {channel}')
if __name__ == '__main__':
    try:
        logging.basicConfig(filename='clockrunner.log',
                            filemode='w+',
                            level=logging.INFO,
                            format='%(asctime)s - %(levelname)s - %(message)s')
        # Start with Broadcom pin numbering and be sure GPIO is cleaned up
        # from any previous settings
        GPIO.setmode(GPIO.BCM)
        initialize_leds(leds)
        initialize_pushbuttons(buttons)
        # Poll the wall clock; whenever the minute changes, send one relay
        # pulse (unless the relay is busy, the clock is stopped, or a
        # repeating run is already advancing it).
        current_minute = int(datetime.strftime(datetime.now(),"%M"))
        last_minute = current_minute
        while True:
            current_minute = int(datetime.strftime(datetime.now(),"%M"))
            # This inner `while` runs at most once per outer iteration — it
            # behaves like an `if minute changed` guard.
            while current_minute != last_minute:
                last_minute = current_minute
                if not relay_busy and not stopped and not auto_advancing:
                    relay_pulse()
                else:
                    logging.info('A regular automatic clock minute pulse was not accepted')
            sleep(5)
    # KeyboardInterrupt does not derive from Exception in Python 3, so the
    # handler below is still reachable.
    except Exception as e:
        print (e)
    except KeyboardInterrupt:
        pass
    finally:
        # Always release the GPIO pins, even on error or Ctrl-C.
        print(f'\nResetting all GPIO ports')
        GPIO.cleanup()
|
<reponame>cmlino/scraper
from dotenv import load_dotenv
import os
import requests
from bs4 import BeautifulSoup
import json
import re
load_dotenv()
def addConflicts(data):
    """Annotate every section in `data` in place with a `conflicts` map of
    CRNs whose meeting times overlap that section's timeslots."""
    for dept in data:
        for crs in dept["courses"]:
            for sec in crs["sections"]:
                sec["conflicts"] = getConflict(
                    data, sec["timeslots"], sec["subj"] + str(sec["crse"])
                )
def getConflict(data, check_timeslots, course_code):
    """Return {crn: True} for every section in `data` that meets at the same
    time as any of `check_timeslots`.

    Sections of the course identified by `course_code` (including the section
    being checked itself) are never reported.  JSON has no value-less sets,
    so membership is encoded as a dict with value True.
    """
    conflicts = {}
    for dept in data:
        for crs in dept["courses"]:
            for sec in crs["sections"]:
                # Other sections of the same course (or self) never conflict.
                if course_code == sec["subj"] + str(sec["crse"]):
                    continue
                for slot in sec["timeslots"]:
                    # A section with no scheduled time cannot conflict.
                    if slot["timeStart"] == -1 or slot["timeEnd"] == -1:
                        continue
                    for day in slot["days"]:
                        for check in check_timeslots:
                            # Skip unscheduled candidate slots too.
                            if check["timeStart"] == -1 or check["timeEnd"] == -1:
                                continue
                            # Must meet on the same weekday...
                            if day not in check["days"]:
                                continue
                            # ...with overlapping date ranges...
                            if not max(
                                check["dateStart"], slot["dateStart"]
                            ) < min(check["dateEnd"], slot["dateEnd"]):
                                continue
                            # ...and overlapping times of day.
                            if max(
                                check["timeStart"], slot["timeStart"]
                            ) < min(check["timeEnd"], slot["timeEnd"]):
                                conflicts[sec["crn"]] = True
    return conflicts
# We decided not to use this but I left it just in case
# def reformatJson(data):
# departments_copy = data
# reformat = {}
# for department in departments_copy:
# reformat[department['code']] = department
# course_copy = department['courses']
# reformat[department['code']]['courses'] = {}
# for course in course_copy:
# reformat[department['code']]['courses'][f"{course['subj']}-{course['crse']}"] = course
# sections_copy = course['sections']
# reformat[department['code']]['courses'][f"{course['subj']}-{course['crse']}"]['sections'] = {}
# for section in sections_copy:
# reformat[department['code']]['courses'][f"{course['subj']}-{course['crse']}"]['sections'][section['crn']] = section
#
#
# return reformat
#
def getContent(element):
    """Return the inner HTML of a BeautifulSoup element as a single
    whitespace-normalized string with '&amp;' entities unescaped."""
    # encode_contents() yields bytes with HTML entities intact; unescape the
    # ampersand entity.  BUG FIX: the original replace("&", "&") was a no-op.
    text = element.encode_contents().decode().strip().replace("&amp;", "&")
    return " ".join(text.split())
def getContentFromChild(element, childType):
    """Return the normalized text of the first `childType` child of
    `element`, or of `element` itself when it has no such child."""
    children = element.findAll(childType)
    if children:
        element = children[0]
    return getContent(element)
def cleanOutAbbr(text):
    """Strip <abbr> wrappers and instructor-name noise from a cell's HTML.

    FIX: the patterns are now raw strings — the originals ("<\\/abbr>",
    "\\s?...", "\\w+\\.\\s+") relied on invalid string escapes, which raise
    DeprecationWarning on modern Python; the matched text is unchanged.
    """
    text = re.sub(r"<abbr.*?>", "", text)
    text = re.sub(r"</abbr>", "", text)
    text = re.sub(
        r"\s?\([pP]\)", "", text
    )  # Remove primary instructor indicator (maybe we can use this data somewhere later but for now it is removed)
    # Drop abbreviated leading tokens such as "Dr. " / initials.
    text = re.sub(r"\w+\.\s+", "", text)
    return text
def timeToMilitary(time, useStartTime):
    """Convert one end of an "H:MM am-H:MM pm" range to military time.

    Returns -1 for TBA entries.  Afternoon times get +1200, except the
    "12:xx pm" hour which is already in the 12xx range.
    """
    if "TBA" in time:
        return -1
    part = time.split("-")[0] if useStartTime else time.split("-")[1]
    offset = 1200 if ("pm" in part and "12:" not in part) else 0
    # "1:00 pm" -> "100 pm" -> first four chars "100 " -> int 100 (+offset).
    return int("".join(part.strip().split(":"))[:4]) + offset
def toTitle(text):
    """Title-case `text`, upper-casing roman-numeral I runs (e.g. "Ii" ->
    "II") and restoring possessive "'s" that str.title() capitalizes."""
    text = text.title()
    # Replacements are the same length, so match positions stay valid.
    for match in re.finditer(r"\b[iI]+\b", text):
        text = (
            text[: match.start()]
            + text[match.start() : match.end()].upper()
            + text[match.end() :]
        )
    return text.replace("'S", "'s")
# Log into SIS with the credentials from the environment (.env via
# python-dotenv), then fetch the advanced course-search results page.
payload = f'sid={os.getenv("RIN")}&PIN={os.getenv("PASSWORD")}'
headers = {"Content-Type": "application/x-www-form-urlencoded"}
with requests.Session() as s:
    # Prime the session cookie, then POST the login form.
    s.get(url="https://sis.rpi.edu/rss/twbkwbis.P_WWWLogin")
    response = s.request(
        "POST",
        "https://sis.rpi.edu/rss/twbkwbis.P_ValLogin",
        headers=headers,
        data=payload,
    )
    # A successful login lands on a "Welcome" page.
    if b"Welcome" not in response.text.encode("utf8"):
        print("Failed to log into sis")
        exit(1)
    url = "https://sis.rpi.edu/rss/bwskfcls.P_GetCrse_Advanced"
    # Search form payload selecting every subject for the current term.
    payload = f'rsts=dummy&crn=dummy&term_in={os.getenv("CURRENT_TERM")}&sel_subj=dummy&sel_day=dummy&sel_schd=dummy&sel_insm=dummy&sel_camp=dummy&sel_levl=dummy&sel_sess=dummy&sel_instr=dummy&sel_ptrm=dummy&sel_attr=dummy&sel_subj=ADMN&sel_subj=USAF&sel_subj=ARCH&sel_subj=ARTS&sel_subj=ASTR&sel_subj=BCBP&sel_subj=BIOL&sel_subj=BMED&sel_subj=CHME&sel_subj=CHEM&sel_subj=CIVL&sel_subj=COGS&sel_subj=COMM&sel_subj=CSCI&sel_subj=ENGR&sel_subj=ERTH&sel_subj=ECON&sel_subj=ECSE&sel_subj=ESCI&sel_subj=ENVE&sel_subj=GSAS&sel_subj=ISYE&sel_subj=ITWS&sel_subj=IENV&sel_subj=IHSS&sel_subj=ISCI&sel_subj=LANG&sel_subj=LGHT&sel_subj=LITR&sel_subj=MGMT&sel_subj=MTLE&sel_subj=MATP&sel_subj=MATH&sel_subj=MANE&sel_subj=USAR&sel_subj=USNA&sel_subj=PHIL&sel_subj=PHYS&sel_subj=PSYC&sel_subj=STSH&sel_subj=STSS&sel_subj=WRIT&sel_crse=&sel_title=&sel_from_cred=&sel_to_cred=&sel_camp=%25&sel_ptrm=%25&begin_hh=0&begin_mi=0&begin_ap=a&end_hh=0&end_mi=0&end_ap=a&SUB_BTN=Section+Search&path=1'
    # This payload is for testing. It will only return CSCI classes and will therefore be a bit faster
    # payload = f'rsts=dummy&crn=dummy&term_in={os.getenv("CURRENT_TERM")}&sel_subj=dummy&sel_day=dummy&sel_schd=dummy&sel_insm=dummy&sel_camp=dummy&sel_levl=dummy&sel_sess=dummy&sel_instr=dummy&sel_ptrm=dummy&sel_attr=dummy&sel_subj=CSCI&sel_subj=LGHT&sel_crse=&sel_title=&sel_from_cred=&sel_to_cred=&sel_camp=%25&sel_ptrm=%25&begin_hh=0&begin_mi=0&begin_ap=a&end_hh=0&end_mi=0&end_ap=a&SUB_BTN=Section+Search&path=1'
    headers = {}
    response = s.request("POST", url, headers=headers, data=payload)
# Parse the results page; course rows live in the first "datadisplaytable".
data = []
# print(response.text.encode('utf8'))
soup = BeautifulSoup(response.text.encode("utf8"), "html.parser")
table = soup.findAll("table", {"class": "datadisplaytable"})[0]
rows = table.findAll("tr")
# NOTE(review): current_department/current_code/current_courses are
# initialized but never assigned again below — apparently vestigial.
current_department = None
current_code = None
current_courses = None
# Track the previous row's course so continuation rows append to it.
last_subject = None
last_course_code = None
# Walk every table row.  Header (<th>) rows start a new department; data
# (<td>) rows are either a new section or a continuation timeslot row.
for row in rows:
    th = row.findAll("th")
    if len(th) != 0:
        if "ddtitle" in th[0].attrs["class"]:
            # if(current_department):
            # Department header: open a new department bucket.
            data.append(
                {"name": toTitle(getContent(th[0])), "code": "", "courses": []}
            )
    else:
        td = row.findAll("td")
        # Column 8 holds the meeting days; "TBA" means unscheduled.
        if "TBA" not in getContent(td[8]):
            timeslot_data = {
                "days": list(getContent(td[8])),
                "timeStart": timeToMilitary(
                    getContentFromChild(td[9], "abbr"), True
                ),
                "timeEnd": timeToMilitary(
                    getContentFromChild(td[9], "abbr"), False
                ),
                "instructor": ", ".join(
                    [x.strip() for x in cleanOutAbbr(getContent(td[19])).split(",")]
                ),
                "dateStart": getContentFromChild(td[20], "abbr").split("-")[0],
                "dateEnd": getContentFromChild(td[20], "abbr").split("-")[1],
                "location": getContentFromChild(td[21], "abbr"),
            }
        else:
            # Unscheduled slot: sentinel times of -1, everything else empty.
            timeslot_data = {
                "dateEnd": "",
                "dateStart": "",
                "days": [],
                "instructor": "",
                "location": "",
                "timeEnd": -1,
                "timeStart": -1,
            }
        # An empty CRN cell means this row only adds a timeslot to the most
        # recently parsed section.
        if len(getContent(td[1])) == 0:
            data[-1]["courses"][-1]["sections"][-1]["timeslots"].append(
                timeslot_data
            )
            continue
        # Credits may be a single number or a "min-max" range.
        credit_min = float(getContent(td[6]).split("-")[0])
        credit_max = credit_min
        if len(getContent(td[6]).split("-")) > 1:
            credit_max = float(getContent(td[6]).split("-")[1])
        section_data = {
            # "select":getContentFromChild(td[0], 'abbr'),
            "crn": int(getContentFromChild(td[1], "a")),
            "subj": getContent(td[2]),
            "crse": int(getContent(td[3])),
            "sec": getContent(td[4]),
            # "cmp":getContent(td[5]),
            "credMin": credit_min,
            "credMax": credit_max,
            "title": toTitle(getContent(td[7])),
            # "cap": int(getContent(td[10])),
            # "act":int(getContent(td[11])),
            # "rem": int(getContent(td[12])),
            # "wlCap":int(getContent(td[13])),
            # "wlAct":int(getContent(td[14])),
            # "wlRem":int(getContent(td[15])),
            # "xlCap":getContent(td[16]),
            # "xlAct":getContent(td[17]),
            # "xlRem":getContent(td[18]),
            "attribute": getContent(td[22]) if 22 < len(td) else "",
            "timeslots": [timeslot_data],
        }
        # Same course as the previous row: append another section to it.
        if (
            section_data["subj"] == last_subject
            and section_data["crse"] == last_course_code
        ):
            data[-1]["courses"][-1]["sections"].append(section_data)
            continue
        # New course within the current department.
        last_subject = getContent(td[2])
        last_course_code = int(getContent(td[3]))
        data[-1]["courses"].append(
            {
                "title": toTitle(getContent(td[7])),
                "subj": getContent(td[2]),
                "crse": int(getContent(td[3])),
                "id": getContent(td[2]) + "-" + getContent(td[3]),
                "sections": [section_data],
            }
        )
        # Record the department's subject code from its first course row.
        if len(getContent(td[2])) > 0:
            data[-1]["code"] = getContent(td[2])
# This is for the old conflict method that has a list for each class that it conflicts with
# addConflicts(data)
# data = reformatJson(data)
# print(json.dumps(data,sort_keys=False,indent=2))
# Persist the scraped catalog.  (The f-string prefix is vestigial — the
# commented suffix once embedded the term in the filename.)
with open(f"courses.json", "w") as outfile:  # -{os.getenv("CURRENT_TERM")}
    json.dump(data, outfile, sort_keys=False, indent=2)
# Generate binary conflict output
# (32bit crn + 3*64bit conflicts 5am-midnight(by 30min))for every course
# Each day contributes 16 hours * 6 ten-minute buckets to the bitmap.
day_offsets = {
    "M": 0 * 16 * 6,
    "T": 1 * 16 * 6,
    "W": 2 * 16 * 6,
    "R": 3 * 16 * 6,
    "F": 4 * 16 * 6,
    "S": 5 * 16 * 6,
}
conflicts = {}
crn_to_courses = {}
for dept in data:
    for course in dept["courses"]:
        for section in course["sections"]:
            crn_to_courses[section["crn"]] = course["id"]
            # One bit per 10-minute bucket, packed later into 9 u64 words.
            conflict = [0] * (64 * 9)
            for time in section["timeslots"]:
                for day in time["days"]:
                    # Scan 7:00-22:50 in 10-minute steps; military "hour+minute"
                    # values compare directly against the timeslot bounds.
                    for hour in range(700, 2300, 100):
                        for minute in range(0, 60, 10):
                            if (
                                time["timeStart"] <= hour + minute
                                and time["timeEnd"] > hour + minute
                            ):
                                minute_idx = int(minute / 10)
                                hour_idx = int(hour / 100) - 7  # we start at 7am
                                conflict[
                                    day_offsets[day] + hour_idx * 6 + minute_idx
                                ] = 1
            # Serialize the bitmap as a "0101..." string for base-2 parsing.
            conflicts[section["crn"]] = "".join(str(e) for e in conflict)
# Emit the bitmaps as a generated Rust module using phf perfect-hash maps.
with open("mod.rs", "w") as f:  # -{os.getenv("CURRENT_TERM")}
    f.write(
        """\
//This file was automatically generated. Please do not modify it directly
use ::phf::{phf_map, Map};
pub static CRN_TIMES: Map<u32, [u64; 9]> = phf_map! {
"""
    )
    for crn, conflict in conflicts.items():
        rust_array = f"\t{crn}u32 => ["
        # Pack the 576-bit string into nine u64 literals.
        for i in range(0, 9 * 64, 64):
            if i != 0:
                rust_array += ", "
            rust_array += str(int(conflict[i : i + 64], 2))
        rust_array += "],\n"
        f.write(rust_array)
    f.write(
        """
};
pub static CRN_COURSES: Map<u32, &'static str> = phf_map! {
"""
    )
    for crn, course in crn_to_courses.items():
        f.write(f'\t{crn}u32 => "{course}",\n')
    f.write("};")
|
<reponame>ComputermindCorp/DeepEye<gh_stars>1-10
# django libs
from django.http import FileResponse
from django.core.serializers import serialize
from django.utils import translation
from django.utils.translation import gettext
# deepeye setting & models & form
from main.models import Project
from .models import ClassificationModel, Dataset, Result, Weight
from .models import TestResult, Pred, PredProbe, TrainLog
from .model.train import main as classification_train
from .model.test import main as classification_test
from .model import DA
from .dataset_util import *
# common libs
from channels.generic.websocket import WebsocketConsumer
import glob
import json
import logging
import numpy as np
import os
import shutil
import sys
from threading import Thread
import time
import urllib
from main.project_type import ProjectType
from main import file_action
from main.log import get_logger
logger = get_logger(__name__)
class Classification(WebsocketConsumer):
    """Websocket consumer driving classification training and testing.

    Receives JSON messages from the frontend, dispatches on their ``status``
    field, and pushes progress/result messages back over the same socket.
    """

    def connect(self):
        # Accept every incoming websocket connection unconditionally.
        self.accept()

    def disconnect(self, close_code):
        pass

    def websocket_receive(self, data):
        """Dispatch one frontend message on its 'status' field."""
        logger.debug("[websocket_receive] data: {}".format(data))
        data = json.loads(data['text'])
        self.status = data['status']
        logger.info(f"Data received from frontend with status of '{self.status}'")
        if 'project_type' in data:
            # NOTE(review): assigned but never used in this handler.
            project_type = data['project_type']
        if self.status == "lang-setting":
            translation.activate(data["user-lang"])
        # starting training
        elif self.status == 'train':
            # Training is long-running; run it on a worker thread so the
            # socket stays responsive.
            thread = Thread(target = self.train, args = (data,))
            thread.start()
            # self.train(data)
        elif self.status == 'stop':
            pass
        elif self.status == 'training-ended':
            pass
        # testing( upload_data / self_testdata / saved_dataset )
        elif self.status == 'test':
            self.predict(data)
        # memo update
        """
        elif self.status == 'memo_update':
            if data["target_type"] == "dataset":
                dataset = Dataset.objects.get(project=self.selected_project, name=data["selectedDatasetId"])
                dataset.memo = data["memo"]
                dataset.save()
            elif data["target_type"] == "model":
                model = ClassificationModel.objects.get(project=self.selected_project, name=data["selectedModelId"])
                model.memo = data["memo"]
                model.save()
            logger.debug("memo update finish")
        """

    def save_trainlog(self, data):
        """Persist one epoch's metrics as a TrainLog row (training callback)."""
        training_model = ClassificationModel.objects.get(id=data['model_id'])
        train_log_record = TrainLog(
            epoch = data['epoch'],
            train_loss = data['train_loss'],
            train_acc = data['train_acc'],
            val_loss = data['val_loss'],
            val_acc = data['val_acc'],
            model = training_model,
        )
        train_log_record.save()

    def train(self, data):
        """Run one training job (on a worker thread) and report the result.

        `data` is the parsed frontend message; results and progress are
        accumulated by the training script into ``self.train_log``.
        """
        self.train_log = {}
        # get Dataset param from DB
        training_model = ClassificationModel.objects.get(id=data['model_id'])
        project = Project.objects.get(name=data['project_name'])
        dataset = Dataset.objects.get(name=data['dataset_name'],project=project)
        base_dataset_path = dataset.dataset_path
        default_test_ratio = dataset.default_test_ratio
        default_val_ratio = dataset.default_val_ratio
        # get training param from from
        model_name = data['model_name']
        architecture = data['architecture'].lower()
        epochs = int(data['epoch'])
        batch_size = int(data['batch'])
        learning_rate = float(data['learning_rate'])
        optimizer = data['optimizer'].lower()
        fine_tuning = data['fine_tuning']
        use_default_ratio = data['use_default_ratio']
        val_ratio = int(data['val_ratio'])
        test_ratio = int(data['test_ratio'])
        memo = data['memo']
        weights_path = data['weights_path']
        weights_file_path = data['weights_file_path']
        image_list_unique_id = data['image_list_unique_id']
        logger.debug(f"image_list_unique_id: {image_list_unique_id}")
        # make path & dir
        # NOTE(review): weights_path from the message is overwritten here.
        model_root = file_action.get_model_path_by_model_name(model_name, project)
        weights_path = file_action.get_weights_directory_by_model_name(model_name, project)
        dataset_path = os.path.join(model_root, "dataset")
        # Start from clean weights/dataset directories for this model.
        if os.path.exists(weights_path):
            shutil.rmtree(weights_path)
        if os.path.exists(dataset_path):
            shutil.rmtree(dataset_path)
        os.makedirs(weights_path, exist_ok=True)
        os.makedirs(dataset_path, exist_ok=True)
        # copy files
        class_list = load_class_list(dataset)
        num_classes = len(class_list)
        # make finetuning waight path
        if fine_tuning:
            transfer_path = training_model.baseweight_set.get().weight.path
        else:
            transfer_path = 'none'
        # get Augmentation flags
        augmentation_flags = {
            'horizontal_flip': data["horizontal_flip"],
            'vertical_flip': data["vertical_flip"],
            'rotate_30': data["rotate_30"],
            'rotate_45': data["rotate_45"],
            'rotate_90': data["rotate_90"],
            'gaussian_noise': data["gaussian_noise"],
            'blur': data["blur"],
            'contrast': data["contrast"]
        }
        image_type = data['image_type'].lower()
        # Choose the dataset split: the stored default, or a custom split
        # identified by image_list_unique_id.
        if use_default_ratio:
            train_list, _ = get_dataset_list(project, dataset, DatasetDataType.Train)
            val_list, _ = get_dataset_list(project, dataset, DatasetDataType.Validation)
        else:
            train_list, _ = get_dataset_list(project, dataset, DatasetDataType.Train, image_list_unique_id)
            val_list, _ = get_dataset_list(project, dataset, DatasetDataType.Validation, image_list_unique_id)
        # Running training scripts
        try:
            classification_train(self,
                data['model_id'],
                model_root,
                num_classes,
                image_type,
                train_list,
                val_list,
                augmentation_flags,
                architecture,
                epochs,
                batch_size,
                optimizer,
                learning_rate,
                transfer_path,
                weights_path,
                weights_file_path,
                int(data["n_iter"]),
                self.save_trainlog)
        except Exception as e:
            logger.debug(e)
            logger.debug('The program is exiting...')
            trans_message = gettext('training failed please check terminal')
            self.send(text_data=json.dumps({'status': 'reload',
                                            'message_type':'error',
                                            'message':trans_message}))
        finally:
            logger.debug("Saving Model to dataset")
            logger.debug(f"epoch: {self.train_log.get('epoch', '---')}")
            logger.debug(f"status: {self.train_log.get('status', '---')}")
            logger.debug(f"train_loss: {self.train_log.get('train_loss', '---')}")
            logger.debug(f"train_acc: {self.train_log.get('train_acc', '---')}")
            logger.debug(f"val_loss: {self.train_log.get('val_loss', '---')}")
            logger.debug(f"val_acc: {self.train_log.get('val_acc', '---')}")
            logger.debug(f"best_train_loss: {self.train_log.get('best_train_loss', '---')}")
            logger.debug(f"best_val_loss: {self.train_log.get('best_val_loss', '---')}")
            logger.debug(f"best_train_epoch: {self.train_log.get('best_train_epoch', '---')}")
            logger.debug(f"best_val_epoch: {self.train_log.get('best_val_epoch', '---')}")
            try:
                training_model.epochs_runned = self.train_log['epoch']
                training_model.train_status = self.train_log['status']
                training_model.train_loss = self.train_log['train_loss']
                training_model.train_acc = self.train_log['train_acc']
                training_model.val_loss = self.train_log['val_loss']
                training_model.val_acc = self.train_log['val_acc']
                training_model.best_train_loss = self.train_log['best_train_loss']
                training_model.best_val_loss = self.train_log['best_val_loss']
                training_model.best_train_epoch = self.train_log['best_train_epoch']
                training_model.best_val_epoch = self.train_log['best_val_epoch']
                training_model.save()
            except:
                # Bare except keeps the cleanup path alive when the log is
                # incomplete (e.g. training aborted before the first epoch).
                logger.info("fail: training model save")
            if not use_default_ratio:
                new_dataset_data = DatasetData.objects.filter(unique_id=image_list_unique_id)
                # NOTE(review): the loop variable `data` shadows the method
                # argument of the same name from here on.
                for data in new_dataset_data:
                    data.model = training_model
                    data.save()
            trans_message = gettext('training : {} training ended')
            cancel = self.train_log.get('cancel', '')
            if cancel == '':
                # NOTE(review): this deletes the model precisely when no
                # 'cancel' flag was recorded — looks inverted; confirm the
                # intended semantics of train_log['cancel'].
                training_model.delete()
            self.send(text_data=json.dumps({'status': 'reload',
                                            'message_type':'success',
                                            'message': trans_message.format(model_name),
                                            'cancel': cancel,
                                            'project_id': project.id}))
            # Ends the worker thread (SystemExit is caught by threading).
            sys.exit(0)

    def predict(self, data):
        """Run inference for a trained model and store/report the results.

        predict_type selects the image source: self_dataset (the model's own
        dataset), save_dataset (another stored dataset), or upload_dataset.
        """
        # get common params from form
        project = Project.objects.get(pk=data["project_id"])
        project_type = ProjectType(project.project_type)
        model = ClassificationModel.objects.get(id=data["model_id"], project=project)
        weight = Weight.objects.get(model=model)
        predict_type = data['predict_type'] # self_dataset / save_dataset / upload_dataset
        if predict_type == "self_dataset":
            train_flag = data['train_flag']
            val_flag = data["val_flag"]
            test_flag = data["test_flag"]
        elif predict_type == "save_dataset":
            dataset = Dataset.objects.get(pk=data['database_id'])
            train_flag = data['train_flag']
            val_flag = data["val_flag"]
            test_flag = data["test_flag"]
        elif predict_type == "upload_dataset":
            train_flag = None
            val_flag = None
            test_flag = None
        # get model params from DB
        architecture = model.architecture_type
        num_classes = model.dataset.classes
        image_type = model.image_type
        model_root = file_action.get_model_path(model)
        training_class_list = load_class_list(model.dataset)
        #
        if predict_type == "self_dataset":
            dataset = model.dataset
            dataset_data_types = []
            if train_flag:
                dataset_data_types.append(DatasetDataType.Train)
            if val_flag:
                dataset_data_types.append(DatasetDataType.Validation)
            if test_flag:
                dataset_data_types.append(DatasetDataType.Test)
            predict_list, dataset_data_list = get_dataset_list(project, dataset, dataset_data_types)
        elif predict_type == "save_dataset":
            dataset_data_types = []
            if train_flag:
                dataset_data_types.append(DatasetDataType.Train)
            if val_flag:
                dataset_data_types.append(DatasetDataType.Validation)
            if test_flag:
                dataset_data_types.append(DatasetDataType.Test)
            predict_list, dataset_data_list = get_dataset_list(project, dataset, dataset_data_types)
        else:
            # NOTE(review): upload_dataset leaves `dataset`, `predict_list`
            # and `dataset_data_list` unbound — the code below would raise
            # NameError for that branch; confirm whether it is reachable.
            pass
        # run predict
        logger.debug("model.train_status: {}".format(model.train_status))
        if model.train_status == 'finished' or model.train_status == 'stopped':
            try:
                logger.debug(architecture)
                # NOTE(review): assigns the Result *class*, not an instance —
                # presumably consumed by classification_test; confirm.
                self.result = Result
                preds, pred_probs, labels = classification_test(
                    self,
                    model_root,
                    project,
                    model,
                    num_classes,
                    image_type,
                    architecture,
                    predict_list,
                    training_class_list,
                    weight.path)
                # delete test result database
                all_test_result = TestResult.objects.all()
                all_test_result.delete()
                # create database
                new_test_result = TestResult(model=model)
                new_test_result.save()
                for pred, pred_prob, label, dataset_data in zip(preds, pred_probs, labels, dataset_data_list):
                    new_pred = Pred(
                        test_result=new_test_result,
                        pred=pred,
                        model=model,
                        image_data=dataset_data.image_data
                    )
                    new_pred.save()
                    for p in pred_prob:
                        new_pred_prob = PredProbe(pred=new_pred, value=p)
                        new_pred_prob.save()
                self.send(text_data=json.dumps({
                    'status': 'test-complete',
                    'dataset_id': dataset.id,
                    'test_result_id': new_test_result.id,
                }))
            except Exception as e:
                logger.debug('Testing exiting on error...')
                logger.debug(e)
                # NOTE(review): Exception objects are not JSON-serializable;
                # json.dumps will raise here — should likely be str(e).
                self.send(text_data=json.dumps({'status': 'error',
                                                'text': e}))
            finally:
                if predict_type == "self_dataset":
                    pass
                elif predict_type == "save_dataset":
                    pass
                elif predict_type == "upload_dataset":
                    logger.debug("Deleting upload files")
                    # NOTE(review): tmp_dir is never defined in this method —
                    # NameError on this path; confirm where it should come from.
                    shutil.rmtree(tmp_dir, ignore_errors=True)
        else:
            # NOTE(review): `_` is not imported in this module (only gettext
            # is) — this branch would raise NameError; confirm.
            trans_message =_('Chosen model training not completed')
            self.send(text_data=json.dumps({'status': 'error',
                                            'text': trans_message}))
|
<filename>tests/unit/__init__.py
# -*- coding: utf-8 -*-
import json
import os
from mock import Mock
DIRPATH = os.path.dirname(__file__)
FIXTURES = os.path.join(DIRPATH, 'fixtures')
def create_response(status, fixture=None):
    """Return a fake request function producing a Mock response.

    status: HTTP status code placed on every response.
    fixture: optional filename under FIXTURES whose contents become the
        response body.
    """
    def request(*args, **kwargs):
        response = Mock()
        response.status_code = status
        if fixture:
            fixture_path = os.path.join(FIXTURES, fixture)
            # BUG FIX: close the fixture file (it was previously left open).
            with open(fixture_path) as fixture_file:
                response.content = fixture_file.read()
        return response
    return request
def local_response(**params):
    """Return a fake HTTP call that echoes its keyword arguments, merged
    with `params`, back as a JSON body with status 200."""
    def _call(*args, **kwargs):
        reply = dict(kwargs)
        reply.update(params)
        response = Mock()
        response.content = json.dumps(reply)
        response.status_code = 200
        return response
    return _call
def mock_response(content, status_code=200, encoding='utf-8', headers=None):
    """Build a Mock HTTP response; when `headers` is omitted, default
    rate-limit headers are supplied."""
    default_headers = {
        'x-ratelimit-limit': 500,
        'x-ratelimit-remaining': 500,
        'x-ratelimit-reset': 1427932858,
    }
    return Mock(
        content=content,
        status_code=status_code,
        encoding=encoding,
        headers=default_headers if headers is None else headers,
    )
def get_user(email="<EMAIL>", name="<NAME>"):
    """Return a full user fixture dict shaped like the Intercom user API
    payload, with the given email and name substituted in."""
    return {
        "type": "user",
        "id": "aaaaaaaaaaaaaaaaaaaaaaaa",
        "user_id": 'id-from-customers-app',
        "email": email,
        "name": name,
        "avatar": {
            "type": "avatar",
            "image_url": "https://graph.facebook.com/1/picture?width=24&height=24"
        },
        "app_id": "the-app-id",
        # NOTE(review): "created_at" appears twice in this literal; the later
        # value (1401970114) wins in Python dict construction.
        "created_at": 1323422442,
        "custom_attributes": {"a": "b", "b": 2},
        "companies": {
            "type": "company.list",
            "companies": [
                {
                    "type": "company",
                    "company_id": "123",
                    "id": "bbbbbbbbbbbbbbbbbbbbbbbb",
                    "app_id": "the-app-id",
                    "name": "Company 1",
                    "remote_created_at": 1390936440,
                    "created_at": 1401970114,
                    "updated_at": 1401970114,
                    "last_request_at": 1401970113,
                    "monthly_spend": 0,
                    "session_count": 0,
                    "user_count": 1,
                    "tag_ids": [],
                    "custom_attributes": {
                        "category": "Tech"
                    }
                }
            ]
        },
        "session_count": 123,
        "unsubscribed_from_emails": True,
        "last_request_at": 1401970113,
        "created_at": 1401970114,
        "remote_created_at": 1393613864,
        "updated_at": 1401970114,
        "user_agent_data": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
        "social_profiles": {
            "type": "social_profile.list",
            "social_profiles": [
                {
                    "type": "social_profile",
                    "name": "twitter",
                    "url": "http://twitter.com/abc",
                    "username": "abc",
                    "id": None
                },
                {
                    "type": "social_profile",
                    "name": "twitter",
                    "username": "abc2",
                    "url": "http://twitter.com/abc2",
                    "id": None
                },
                {
                    "type": "social_profile",
                    "name": "facebook",
                    "url": "http://facebook.com/abc",
                    "username": "abc",
                    "id": "1234242"
                },
                {
                    "type": "social_profile",
                    "name": "quora",
                    "url": "http://facebook.com/abc",
                    "username": "abc",
                    "id": "1234242"
                }
            ]
        },
        "location_data": {
            "type": "location_data",
            "city_name": 'Dublin',
            "continent_code": 'EU',
            "country_name": 'Ireland',
            "latitude": '90',
            "longitude": '10',
            "postal_code": 'IE',
            "region_name": 'Europe',
            "timezone": '+1000',
            "country_code": "IRL"
        }
    }
def get_company(name):
    """Return a company fixture dict (Intercom company API shape) with the
    given display name."""
    return {
        "type": "company",
        "id": "531ee472cce572a6ec000006",
        "name": name,
        "plan": {
            "type": "plan",
            "id": "1",
            "name": "Paid"
        },
        "company_id": "6",
        "remote_created_at": 1394531169,
        "created_at": 1394533506,
        "updated_at": 1396874658,
        "monthly_spend": 49,
        "session_count": 26,
        "user_count": 10,
        "custom_attributes": {
            "paid_subscriber": True,
            "team_mates": 0
        }
    }
def get_event(name="the-event-name"):
    """Return an event fixture dict (Intercom event API shape) with `name`
    as its event_name."""
    return dict(
        type="event",
        event_name=name,
        created_at=1389913941,
        user_id="314159",
        metadata=dict(
            type="user",
            invitee_email="<EMAIL>",
            invite_code="ADDAFRIEND",
        ),
    )
def page_of_users(include_next_link=False):
    """Return one fake page of the user-list API; `include_next_link`
    populates pages.next with the URL of the following page."""
    pages = {
        "type": "pages",
        "page": 1,
        "next": "https://api.intercom.io/users?per_page=50&page=2" if include_next_link else None,
        "per_page": 50,
        "total_pages": 7
    }
    return {
        "type": "user.list",
        "pages": pages,
        "users": [
            get_user("<EMAIL>"),
            get_user("<EMAIL>"),
            get_user("<EMAIL>")],
        "total_count": 314
    }
def users_scroll(include_users=False):  # noqa
    """Return one fake "page" of Scroll API results; `include_users` adds
    three identical user fixtures, otherwise the page is empty."""
    users = [get_user("<EMAIL>") for _ in range(3)] if include_users else []
    return {
        "type": "user.list",
        "scroll_param": "da6bbbac-25f6-4f07-866b-b911082d7",
        "users": users
    }
def page_of_events(include_next_link=False):
    """Return one fake page of the events API; `include_next_link` sets the
    pages.next cursor URL."""
    next_link = None
    if include_next_link:
        next_link = "https://api.intercom.io/events?type=user&intercom_user_id=55a3b&before=144474756550"  # noqa
    return {
        "type": "event.list",
        "pages": {"next": next_link},
        "events": [
            get_event("invited-friend"),
            get_event("bought-sub")],
    }
def page_of_companies(include_next_link=False):
    """Return one fake page of the company-list API; `include_next_link`
    populates pages.next with the following page's URL."""
    pages = {
        "type": "pages",
        "page": 1,
        "next": "https://api.intercom.io/companies?per_page=50&page=2" if include_next_link else None,
        "per_page": 50,
        "total_pages": 7
    }
    return {
        "type": "company.list",
        "pages": pages,
        "companies": [
            get_company('ACME A'),
            get_company('ACME B'),
            get_company('ACME C')
        ],
        "total_count": 3
    }
# Tag fixture matching Intercom's tag JSON shape.
test_tag = {
    "id": "4f73428b5e4dfc000b000112",
    "name": "Test Tag",
    "segment": False,
    "tagged_user_count": 2
}
# Webhook notification-subscription fixture.
test_subscription = {
    "type": "notification_subscription",
    "id": "nsub_123456789",
    "created_at": 1410368642,
    "updated_at": 1410368642,
    "service_type": "web",
    "app_id": "3qmk5gyg",
    "url": "http://example.com",
    "self": "https://api.intercom.io/subscriptions/nsub_123456789",
    "topics": ["user.created", "conversation.user.replied", "conversation.admin.replied"],
    "active": True,
    "metadata": {},
    "hub_secret": None,
    "mode": "point",
    "links": {
        "sent": "https://api.intercom.io/subscriptions/nsub_123456789/sent",
        "retry": "https://api.intercom.io/subscriptions/nsub_123456789/retry",
        "errors": "https://api.intercom.io/subscriptions/nsub_123456789/errors"
    },
    "notes": []
}
# "user.created" webhook notification fixture; `data.item` is the new user.
test_user_notification = {
    "type": "notification_event",
    "id": "notif_123456-56465-546546",
    "topic": "user.created",
    "app_id": "aaaaaa",
    "data": {
        "type": "notification_event_data",
        "item": {
            "type": "user",
            "id": "aaaaaaaaaaaaaaaaaaaaaaaa",
            "user_id": None,
            "email": "<EMAIL>",
            "name": "<NAME>",
            "avatar": {
                "type": "avatar",
                "image_url": None
            },
            "app_id": "aaaaa",
            "companies": {
                "type": "company.list",
                "companies": []
            },
            "location_data": {
            },
            "last_request_at": None,
            # Timestamps arrive as strings in webhook payloads.
            "created_at": "1401970114",
            "remote_created_at": None,
            "updated_at": "1401970114",
            "session_count": 0,
            "social_profiles": {
                "type": "social_profile.list",
                "social_profiles": []
            },
            "unsubscribed_from_emails": False,
            "user_agent_data": None,
            "tags": {
                "type": "tag.list",
                "tags": []
            },
            "segments": {
                "type": "segment.list",
                "segments": []
            },
            "custom_attributes": {
            }
        }
    },
    "delivery_status": None,
    "delivery_attempts": 1,
    "delivered_at": 0,
    "first_sent_at": 1410188629,
    "created_at": 1410188628,
    "links": {},
    "self": None
}
# "conversation.user.created" webhook notification fixture; `data.item` is
# the conversation with its opening message and one reply part.
test_conversation_notification = {
    "type": "notification_event",
    "id": "notif_123456-56465-546546",
    "topic": "conversation.user.created",
    "app_id": "aaaaa",
    "data": {
        "type": "notification_event_data",
        "item": {
            "type": "conversation",
            "id": "123456789",
            "created_at": "1410335293",
            "updated_at": "1410335293",
            "user": {
                "type": "user",
                "id": "540f1de7112d3d1d51001637",
                "name": "<NAME>",
                "email": "<EMAIL>"
            },
            "assignee": {
                "type": "nobody_admin",
                "id": None
            },
            "conversation_message": {
                "type": "conversation_message",
                "id": "321546",
                "subject": "",
                "body": "<p>An important message</p>",
                "author": {
                    "type": "user",
                    "id": "aaaaaaaaaaaaaaaaaaaaaa",
                    "name": "<NAME>",
                    "email": "<EMAIL>"
                },
                "attachments": []
            },
            "conversation_parts": {
                "type": "conversation_part.list",
                "conversation_parts": [
                    {
                        "type": "conversation_part",
                        "id": "4412",
                        "part_type": "comment",
                        "body": "<p>Hi Jane, it's all great thanks!</p>",
                        "created_at": 1400857494,
                        "updated_at": 1400857494,
                        "notified_at": 1400857587,
                        "assigned_to": None,
                        "author": {
                            "type": "user",
                            "id": "<PASSWORD>"
                        },
                        "attachments": []
                    }
                ]
            },
            "open": None,
            "read": True,
            "links": {
                "conversation_web": "https://app.intercom.io/a/apps/aaaaaa/inbox/all/conversations/123456789"
            }
        }
    },
    "delivery_status": None,
    "delivery_attempts": 1,
    "delivered_at": 0,
    "first_sent_at": 1410335293,
    "created_at": 1410335293,
    "links": {},
    "self": "http://example.com/resource/url/"
}
|
#!/bin/env python
#encoding=utf-8
import re

import lxml
import lxml.html

try:
    from urllib.parse import urlparse, urljoin
except ImportError:
    from urlparse import urlparse, urljoin

from .tags_util import clean_tags_only, clean_tags_hasprop, clean_tags
from .region import Region
class PageModel(object):
    """Extracts the title and main-content region from an HTML page.

    The raw HTML is pre-cleaned (style/script tags, hidden divs and inline
    wrapper tags removed), parsed with lxml, and a candidate content
    ``Region`` is located for extraction.
    """

    def __init__(self, page, url=""):
        """
        :param page: the HTML source as text (must be ``unicode`` on Python 2).
        :param url: optional base URL used to absolutize relative image srcs.
        """
        try:
            # Python 2: enforce unicode input. On Python 3 the name
            # `unicode` does not exist, so the check is skipped.
            assert type(page) is unicode
        except NameError:
            pass
        for tag in ['style', 'script']:
            page = clean_tags(page, tag)
        page = clean_tags_hasprop(page, "div", "(display:.?none|comment|measure)")
        page = clean_tags_only(page, "(span|section|font|em)")
        self.doc = lxml.html.fromstring(page)
        self.url = url
        self.region = Region(self.doc)
        # Text nodes outside the dominant tag shorter than this (after
        # whitespace removal) are treated as noise and dropped.
        self.impurity_threshold = 30
        # Paragraphs/list items whose anchor-text ratio exceeds this are
        # considered link farms and dropped.
        self.anchor_ratio_limit = 0.3
        self.stripper = re.compile(r'\s+')

    def extract_content(self, region):
        """Return a list of ``{"type": ..., "data": ...}`` items (text, html
        tables, images) found inside *region*."""
        # BUG FIX: the original called `urlparse.urljoin(...)`, but `urlparse`
        # here is the *function* imported via `from urllib.parse import
        # urlparse`, which has no `urljoin` attribute -> AttributeError.
        try:
            from urllib.parse import urljoin  # Python 3
        except ImportError:
            from urlparse import urljoin  # Python 2
        items = region.xpath('.//text()|.//img|./table')
        # Histogram of text length per parent tag; the tag holding the most
        # text "wins" and its text is always kept.
        tag_hist = {}
        for item in items:
            if hasattr(item, 'tag'):
                continue
            t = item.getparent().tag
            if t not in tag_hist:
                tag_hist[t] = 0
            tag_hist[t] += len(item.strip())
        winner_tag = None
        try:
            if len(tag_hist) > 0:
                winner_tag = max((c, k) for k, c in tag_hist.items())[1]
        # Tie-breaking tuple comparison can raise TypeError on some inputs
        # (original note: "problem here in Python3"); fall back to None.
        except TypeError:
            pass
        contents = []
        for item in items:
            if not hasattr(item, 'tag'):
                txt = item.strip()
                parent_tag = item.getparent().tag
                # Short stray text outside the winner tag (and not a list
                # item) is treated as impurity.
                if parent_tag != winner_tag \
                        and len(self.stripper.sub("", txt)) < self.impurity_threshold \
                        and parent_tag != 'li':
                    continue
                if txt == "":
                    continue
                contents.append({"type": "text", "data": txt})
            elif item.tag == 'table':
                if winner_tag == 'td':
                    continue
                if item != region:
                    # Strip links but keep the table markup verbatim.
                    for el in item.xpath(".//a"):
                        el.drop_tag()
                    table_s = lxml.html.tostring(item)
                    contents.append({"type": "html", "data": table_s})
                else:
                    # FIX: use a relative xpath; the original "//td/text()"
                    # searched the whole document, not just this table.
                    for sub_item in item.xpath(".//td/text()"):
                        contents.append({"type": "text", "data": sub_item})
            elif item.tag == 'img':
                # Lazy-load attributes first, plain src last.
                for img_prop in ('original', 'file', 'data-original', 'src-info', 'data-src', 'src'):
                    src = item.get(img_prop)
                    if src is not None:
                        break
                if src is None:
                    # Robustness fix: an <img> with no usable source would
                    # have crashed on src.startswith() below.
                    continue
                if self.url != "":
                    if not src.startswith("/") and not src.startswith("http") and not src.startswith("./"):
                        src = "/" + src
                    src = urljoin(self.url, src, False)
                contents.append({"type": "image", "data": {"src": src}})
            else:
                pass
        return contents

    def extract_title(self):
        """Return the page title, preferring an on-page heading that matches
        a prefix/suffix of the <title> tag."""
        doc = self.doc
        tag_title = doc.xpath("/html/head/title/text()")
        # Keep only the first segment of the <title> (before '_' or '-',
        # which usually separate the site name).
        s_tag_title = "".join(re.split(r'_|-', "".join(tag_title))[:1])
        title_candidates = doc.xpath('//h1/text()|//h2/text()|//h3/text()|//p[@class="title"]/text()')
        for c_title in title_candidates:
            c_title = c_title.strip()
            if c_title != "" and (s_tag_title.startswith(c_title) or s_tag_title.endswith(c_title)):
                return c_title
        # No heading matched: fall back to the longest candidate overall.
        sort_by_len_list = sorted((-1*len(x.strip()), x) for x in ([s_tag_title] + title_candidates))
        return sort_by_len_list[0][1]

    def extract(self):
        """Locate the content region and return {"title": ..., "content": [...]}."""
        title = self.extract_title()
        region = self.region.locate()
        if region is None:
            return {'title': '', 'content': []}
        rm_tag_set = set([])
        # Drop paragraphs/list items dominated by anchor text (link lists).
        for p_el in region.xpath(".//p|.//li"):
            child_links = p_el.xpath(".//a/text()")
            count_p = len(" ".join(p_el.xpath(".//text()")))
            count_a = len(" ".join(child_links))
            if float(count_a) / (count_p + 1.0) > self.anchor_ratio_limit:
                p_el.drop_tree()
        # Unwrap remaining inline formatting tags, keeping their text.
        for el in region.xpath(".//a"):
            rm_tag_set.add(el)
        # FIX: the original used ".//strong|//b" — the absolute "//b" matched
        # <b> elements in the whole document, not just inside the region.
        for el in region.xpath(".//strong|.//b"):
            rm_tag_set.add(el)
        for el in rm_tag_set:
            el.drop_tag()
        content = self.extract_content(region)
        return {"title": title, "content": content}
|
<filename>serie2/summe.py
# coding=utf-8
# Authors: <NAME> (lambertt) and <NAME> (odafaluy)
from __future__ import print_function
import time
import numpy as np
import Console as Console
import Sum as Sum
import warnings
warnings.filterwarnings("ignore")
class AddendGenerator:
    """Produces the individual addends of several numeric series.

    Every operand is funnelled through a user-supplied conversion callable
    (e.g. ``numpy.float32``) so the same series can be evaluated in
    different floating-point precisions.
    """

    def __init__(self, delegate):
        """Store the numeric-type conversion used for every operand.

        :param delegate: callable turning a plain number into the numeric
            type the series should be computed with.
        """
        self.delegate = delegate

    def factorial(self, k):
        """Return k! accumulated in the delegate's numeric type."""
        product = self.delegate(1)
        for factor in range(2, k + 1):
            product *= self.delegate(factor)
        return product

    def get_harmonic_series_addends(self, k):
        """Return the first k harmonic terms 1/1, 1/2, ..., 1/k."""
        return [self.delegate(1) / self.delegate(n) for n in range(1, k + 1)]

    def get_e_taylor_series_1_addends(self, x, k):
        """Return the Taylor terms x**i / i! for i = 0..k (sum ~ e**x)."""
        return [(self.delegate(x) ** self.delegate(i)) / self.factorial(i)
                for i in range(0, k + 1)]

    def get_e_taylor_series_2_addends(self, x, k):
        """Return the terms (-x)**i / i! for i = 0..k (sum ~ 1 / e**x).

        Terms whose factorial has overflowed to infinity are emitted as 0
        instead of dividing by infinity.
        """
        terms = []
        for i in range(0, k + 1):
            sign = self.delegate(1) if i % 2 == 0 else self.delegate(-1)
            numerator = sign * (self.delegate(x) ** self.delegate(i))
            denominator = self.delegate(self.factorial(i))
            terms.append(0 if np.isinf(denominator) else numerator / denominator)
        return terms
def _now():
    """Return a timestamp in seconds for elapsed-time measurement.

    BUG FIX: ``time.clock()`` was removed in Python 3.8; prefer
    ``time.perf_counter()`` and fall back to ``time.clock`` only on
    ancient interpreters that lack it.
    """
    counter = getattr(time, "perf_counter", None)
    return counter() if counter is not None else time.clock()


def _problem_note(problems, spaced=False):
    """Render the '(an addend was ...)' suffix if a problem was recorded.

    *spaced* reproduces the original "( an addend was" variant used by
    the reciprocal report.
    """
    if len(problems) == 0:
        return ""
    prefix = " ( an addend was " if spaced else " (an addend was "
    return prefix + problems[0] + ")"


def _print_sums(addends):
    """Print the results of the three summation strategies for *addends*."""
    for label, algorithm in ((" added by indices: ", Sum.sum_indices),
                             (" added by size: ", Sum.sum_ordered),
                             (" added by sign: ", Sum.sum_ordered_grouped_by_sign)):
        problems = []
        result = algorithm(addends, problems)
        print(label + str(result) + _problem_note(problems))


def _print_reciprocal_sums(addends, delegate):
    """Print 1/sum for the three strategies (the series approximates 1/e**x).

    CONSISTENCY FIX: the original tested np.isnan for two of the three
    strategies while printing "1 / ±infinity = 0", contradicting the first
    strategy's np.isinf check; np.isinf is used consistently here.
    """
    for label, algorithm in ((" added by indices: ", Sum.sum_indices),
                             (" added by size: ", Sum.sum_ordered),
                             (" added by sign: ", Sum.sum_ordered_grouped_by_sign)):
        problems = []
        s = algorithm(addends, problems)
        text = "1 / ±infinity = 0" if np.isinf(s) else str(delegate(1) / s)
        print(label + text + _problem_note(problems, spaced=True))


def main():
    """Executes the main program including user-interaction."""
    default_counts = [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192]
    while True:
        print("This program calculates sums.")
        print(" [0] Exit")
        print(" [1] Calculate partial sum of harmonic series")
        print(" [2] Calculate Taylor series for e^x")
        kind = Console.read_integer_interval("Please Choose: ", "Please input 1 or 2", 0, 2)
        if kind == 0:
            return
        print("")
        print("How many addends do you like to sum? You can specify multiple values separated by commas (,).")
        # The original duplicated an identical prompt/default for kind 1 and
        # kind 2; merged here (kind is always 1 or 2 at this point).
        print("Press [Enter] to use 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192")
        addend_counts = Console.read_integer_list_in_range("", 1, None, default_counts)
        x_values = []
        if kind == 2:
            print()
            print("For which x in e^x do you want to calculate?"
                  "You can specify multiple values separated by commas (,)")
            print("Press [Enter] to use -20,-1,1,20")
            x_values = Console.read_integer_list_in_range("", None, None, [-20, -1, 1, 20])
        print()
        print("Which data type do you like to use for the calculation? You can specify multiple values separated by"
              " commas" "(,)")
        print(" [1] Numpy.float16")
        print(" [2] Numpy.float32")
        print(" [3] Numpy.float64")
        print("Press [Enter] to use all.")
        types = Console.read_integer_list_in_range("", 1, 3, [1, 2, 3])
        Console.print_separator1()
        for type_choice in types:
            delegate = [lambda y: np.float16(y), lambda y: np.float32(y), lambda y: np.float64(y)][type_choice - 1]
            type_name = ["float16", "float32", "float64"][type_choice - 1]
            for addend_count in addend_counts:
                ag = AddendGenerator(delegate)
                if kind == 1:
                    print("Type: {0}; Addend Count: {1}".format(type_name, addend_count))
                    start = _now()
                    addends = ag.get_harmonic_series_addends(addend_count)
                    _print_sums(addends)
                    print(" > elapsed time: " + str(_now() - start) + "s")
                    print()
                if kind == 2:
                    for x in x_values:
                        print("Type: {0}; Addend Count: {1}; x = {2}".format(type_name, addend_count, x))
                        start = _now()
                        print("Algorithm 1:")
                        addends = ag.get_e_taylor_series_1_addends(x, addend_count)
                        _print_sums(addends)
                        print(" > elapsed time: " + str(_now() - start) + "s")
                        print()
                        start = _now()
                        print("Algorithm 2:")
                        addends = ag.get_e_taylor_series_2_addends(x, addend_count)
                        _print_reciprocal_sums(addends, delegate)
                        print(" > elapsed time: " + str(_now() - start) + "s")
                        print()
                # NOTE(review): the source dump lost indentation; separator
                # placement (per addend count / per type / per menu round)
                # reconstructed from the separator numbering — confirm.
                Console.print_separator3()
            Console.print_separator2()
        Console.print_separator1()


if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for traffic control library."""
import unittest
import traffic_control
class TrafficControlUnitTests(unittest.TestCase):
    """Unit tests for traffic control.

    traffic_control._Exec is monkey-patched so no real `tc`/`iptables`
    commands run; each test asserts the exact command strings produced.
    """

    # Stores commands called by the traffic control _Exec function.
    # (Reset per-test in setUp; the class-level default is never shared.)
    commands = []

    def _ExecMock(self, command, **kwargs):
        """Mocks traffic_control._Exec and adds the command to commands list."""
        cmd_list = [str(x) for x in command]
        self.commands.append(' '.join(cmd_list))
        return ''

    def setUp(self):
        """Resets the commands list and set the _Exec mock function."""
        self.commands = []
        self._old_Exec = traffic_control._Exec
        traffic_control._Exec = self._ExecMock

    def tearDown(self):
        """Resets the _Exec mock function to the original."""
        traffic_control._Exec = self._old_Exec

    def testCreateConstrainedPort(self):
        """Fully-specified config: bandwidth, latency and loss all present."""
        config = {
            'interface': 'fakeeth',
            'port': 12345,
            'server_port': 8888,
            'bandwidth': 256,
            'latency': 100,
            'loss': 2
        }
        traffic_control.CreateConstrainedPort(config)
        expected = [
            'sudo tc qdisc add dev fakeeth root handle 1: htb',
            'sudo tc class add dev fakeeth parent 1: classid 1:3039 htb rate '
            '256kbit ceil 256kbit',
            'sudo tc qdisc add dev fakeeth parent 1:3039 handle 3039:0 netem loss '
            '2% delay 100ms',
            'sudo tc filter add dev fakeeth protocol ip parent 1: prio 1 u32 match '
            'ip sport 12345 0xffff flowid 1:3039',
            'sudo iptables -t nat -A PREROUTING -i fakeeth -p tcp --dport 12345 -j '
            'REDIRECT --to-port 8888',
            'sudo iptables -t nat -A OUTPUT -p tcp --dport 12345 -j REDIRECT '
            '--to-port 8888'
        ]
        self.assertEqual(expected, self.commands)

    def testCreateConstrainedPortDefaults(self):
        """Missing bandwidth/loss fall back to module defaults; None latency
        produces a bare netem qdisc."""
        config = {
            'interface': 'fakeeth',
            'port': 12345,
            'server_port': 8888,
            'latency': None
        }
        traffic_control.CreateConstrainedPort(config)
        expected = [
            'sudo tc qdisc add dev fakeeth root handle 1: htb',
            'sudo tc class add dev fakeeth parent 1: classid 1:3039 htb rate '
            '%dkbit ceil %dkbit' % (traffic_control._DEFAULT_MAX_BANDWIDTH_KBIT,
                                    traffic_control._DEFAULT_MAX_BANDWIDTH_KBIT),
            'sudo tc qdisc add dev fakeeth parent 1:3039 handle 3039:0 netem',
            'sudo tc filter add dev fakeeth protocol ip parent 1: prio 1 u32 '
            'match ip sport 12345 0xffff flowid 1:3039',
            'sudo iptables -t nat -A PREROUTING -i fakeeth -p tcp --dport 12345 -j '
            'REDIRECT --to-port 8888',
            'sudo iptables -t nat -A OUTPUT -p tcp --dport 12345 -j REDIRECT '
            '--to-port 8888'
        ]
        self.assertEqual(expected, self.commands)

    def testDeleteConstrainedPort(self):
        """Deleting a port issues filter/class/iptables removals, using the
        handle ID looked up via _GetFilterHandleId (stubbed here)."""
        config = {
            'interface': 'fakeeth',
            'port': 12345,
            'server_port': 8888,
            'bandwidth': 256,
        }
        _old_GetFilterHandleId = traffic_control._GetFilterHandleId
        traffic_control._GetFilterHandleId = lambda interface, port: 'fc00:e968:6179::de52:7100'
        try:
            traffic_control.DeleteConstrainedPort(config)
            expected = [
                'sudo tc filter del dev fakeeth protocol ip parent 1:0 handle '
                'fc00:e968:6179::de52:7100 prio 1 u32',
                'sudo tc class del dev fakeeth parent 1: classid 1:3039 htb rate '
                '256kbit ceil 256kbit',
                'sudo iptables -t nat -D PREROUTING -i fakeeth -p tcp --dport 12345 '
                '-j REDIRECT --to-port 8888',
                'sudo iptables -t nat -D OUTPUT -p tcp --dport 12345 -j REDIRECT '
                '--to-port 8888']
            self.assertEqual(expected, self.commands)
        finally:
            traffic_control._GetFilterHandleId = _old_GetFilterHandleId

    def testTearDown(self):
        """TearDown removes the root qdisc and flushes the NAT table."""
        config = {'interface': 'fakeeth'}
        traffic_control.TearDown(config)
        expected = [
            'sudo tc qdisc del dev fakeeth root',
            'sudo iptables -t nat -F'
        ]
        self.assertEqual(expected, self.commands)

    def testGetFilterHandleID(self):
        """_GetFilterHandleId parses the handle out of `tc filter list` output
        and raises TrafficControlError when it cannot."""
        # Check seach for handle ID command.
        self.assertRaises(traffic_control.TrafficControlError,
                          traffic_control._GetFilterHandleId, 'fakeeth', 1)
        # FIX: assertEquals is a deprecated alias removed in Python 3.12.
        self.assertEqual(self.commands, ['sudo tc filter list dev fakeeth parent '
                                         '1:'])
        # Check with handle ID available. (_Exec is re-stubbed here; tearDown
        # restores the original regardless.)
        traffic_control._Exec = (lambda command, msg:
                                 'filter parent 1: protocol ip pref 1 u32 fh fc00:e968:6179::de52:7100 order 2048 key ht '
                                 '800 bkt 0 flowid 1:1\nmatch 08ae0000/ffff0000 at 20')
        output = traffic_control._GetFilterHandleId('fakeeth', 1)
        self.assertEqual(output, 'fc00:e968:6179::de52:7100')
        # Check with handle ID not available.
        traffic_control._Exec = (lambda command, msg:
                                 'filter parent 1: protocol ip pref 1 u32 fh fc00:e968:6179::de52:7100 order 2048 key ht '
                                 '800 bkt 0 flowid 1:11\nmatch 08ae0000/ffff0000 at 20')
        self.assertRaises(traffic_control.TrafficControlError,
                          traffic_control._GetFilterHandleId, 'fakeeth', 1)
        traffic_control._Exec = lambda command, msg: 'NO ID IN HERE'
        self.assertRaises(traffic_control.TrafficControlError,
                          traffic_control._GetFilterHandleId, 'fakeeth', 1)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
# coding: utf-8
"""
Base for developing external Rocketbot modules.

To get the module/function being called:
    GetParams("module")
To get the variables sent from a Rocketbot form/command:
    var = GetParams(variable)
The "variable" names are defined in the forms of the package.json file.
To modify a Rocketbot variable:
    SetVar(Variable_Rocketbot, "data")
To read a Rocketbot variable:
    var = GetVar(Variable_Rocketbot)
To get the selected option:
    opcion = GetParams("option")
To install libraries, enter the "libs" folder from a terminal and run:
    pip install <package> -t .
"""
import os
import sys

# Make this module's bundled "libs" folder importable. Rocketbot injects
# tmp_global_obj (with the installation base path), GetParams, SetVar and
# PrintException into the execution scope.
base_path = tmp_global_obj["basepath"]
cur_path = base_path + 'modules' + os.sep + 'MicrosoftWord' + os.sep + 'libs' + os.sep
sys.path.append(cur_path)

# Import local libraries
import win32com.client

# Which sub-command of this module was invoked.
module = GetParams("module")

# NOTE(review): `global` at module level is a no-op; kept as documentation
# that these names are shared by the command blocks below.
global word_document
global ms_word
def alignments(WdParagraphAlignment):
    """Map a WdParagraphAlignment enum value (0-3) to its English name.

    0 -> "Left", 1 -> "Center", 2 -> "Right", 3 -> "Justify".
    BUG FIX: the original returned the misspelling "Rigth" for value 2.
    """
    return ["Left", "Center", "Right", "Justify"][WdParagraphAlignment]
# Map from this module's style names to Word's built-in style constants
# (the WdBuiltinStyle enumeration used through the COM API). Negative
# values are the constants Word itself defines.
WdBuiltinStyle = {
    "paragraph": -1,
    "heading1": -2,
    "heading2": -3,
    "heading3": -4,
    "heading4": -5,
    "heading5": -6,
    "heading6": -7,
    "heading7": -8,
    "heading8": -9,
    "heading9": -10,
    "caption": -35,
    "bullet1": -49,
    "number1": -50,
    "bullet2": -55,
    "bullet3": -56,
    "bullet4": -57,
    "bullet5": -58,
    "number2": -59,
    "number3": -60,
    "number4": -61,
    "number5": -62,
    "title": -63,
    "subtitle": -75,
    "quote": -181,
    "intense_quote": -182,
    "book": -265
}
# --- Command "new": start Word and create a fresh, visible document. ---
if module == "new":
    try:
        ms_word = win32com.client.DispatchEx("Word.Application")
        word_document = ms_word.Documents.Add()
        ms_word.Visible = True
        print("test")
    except Exception as e:
        # Red "Error↓" marker for the Rocketbot console.
        print("\x1B[" + "31;40mError\u2193\x1B[" + "0m")
        PrintException()
        raise e

# --- Command "open": open an existing document at the given path. ---
if module == "open":
    path = GetParams("path")
    try:
        # The COM API wants OS-native separators.
        path = path.replace("/", os.sep)
        ms_word = win32com.client.DispatchEx("Word.Application")
        word_document = ms_word.Documents.Open(path)
        ms_word.Visible = True
    except Exception as e:
        print("\x1B[" + "31;40mError\u2193\x1B[" + "0m")
        PrintException()
        raise e

# --- Command "read": collect every paragraph's text, optionally with
# style/alignment/font details, into the named Rocketbot variable. ---
if module == "read":
    # Rocketbot params
    result = GetParams("result")
    details = GetParams("details")
    try:
        text = []
        paragraphs = word_document.Paragraphs
        for paragraph in paragraphs:
            range_ = paragraph.Range
            font = range_.Font
            if details:
                text.append({
                    "text": range_.Text,
                    "style": str(paragraph.Style),
                    "alignment": alignments(paragraph.Alignment),
                    "font": {
                        "name": font.Name,
                        "size": int(font.Size),
                        "bold": bool(font.Bold),
                        "italic": bool(font.Italic),
                        "underline": bool(font.Underline)
                    }
                })
            else:
                text.append("" + range_.Text)
        if result:
            SetVar(result, text)
    except Exception as e:
        print("\x1B[" + "31;40mError\u2193\x1B[" + "0m")
        PrintException()
        raise e
# --- Command "readTable": read every table in the document into a list of
# 2-D lists of cell strings and store it in the named Rocketbot variable. ---
if module == "readTable":
    try:
        result = GetParams("result")
        tablesDoc = []
        for table in word_document.tables:
            table_ = []
            for row in table.rows:
                array_row = []
                for cell in row.cells:
                    information = cell.range.text
                    # BUG FIX: str.replace returns a new string; the original
                    # discarded the result, so carriage returns and Word's
                    # cell-end markers (\x07) survived in the output.
                    information = information.replace("\r", "").replace("\x07", "")
                    array_row.append(information)
                table_.append(array_row)
            tablesDoc.append(table_)
        if result:
            SetVar(result, tablesDoc)
    except Exception as e:
        print("\x1B[" + "31;40mError\u2193\x1B[" + "0m")
        PrintException()
        raise e
# --- Command "addTextBookmark": replace the text of a named bookmark. ---
# Cleanup: removed an unused `import copy` and a large dead block of
# docx-based code that was kept as an inert triple-quoted string literal.
if module == "addTextBookmark":
    bookmark_searched = GetParams("bookmark")
    text = GetParams("text")
    try:
        if word_document.Bookmarks.Exists(bookmark_searched):
            # Renamed from `range`, which shadowed the builtin.
            bookmark_range = word_document.Bookmarks(bookmark_searched).Range
            bookmark_range.Text = text
        else:
            # Bookmark absent: do nothing, matching the original behavior.
            pass
    except Exception as e:
        PrintException()
        raise e
# --- Command "save": save the document, optionally under a new path. ---
if module == "save":
    path = GetParams("path")
    try:
        if path:
            word_document.SaveAs2(path)
        else:
            word_document.SaveAs2()
    except Exception as e:
        print("\x1B[" + "31;40mError\u2193\x1B[" + "0m")
        PrintException()
        raise e

# --- Command "to_pdf": export a document to PDF. If "from" is given, a
# fresh Word instance opens it; otherwise the already-open document is
# exported. ---
if module == "to_pdf":
    path = GetParams("from")
    to = GetParams("to")
    # Word's wdExportFormatPDF constant.
    wdFormatPDF = 17
    try:
        if path:
            ms_word = win32com.client.DispatchEx("Word.Application")
            word_document = ms_word.Documents.Open(path)
        # NOTE(review): indentation was lost in the dump; the export is
        # placed at try level so it also works on an already-open document —
        # confirm against the original file.
        word_document.ExportAsFixedFormat(OutputFileName=to, ExportFormat=wdFormatPDF, IncludeDocProps=True)
        if path:
            word_document.Close()
            ms_word.Quit()
    except Exception as e:
        print("\x1B[" + "31;40mError\x1B[" + "0m")
        PrintException()
        raise e

# --- Command "write": append a paragraph with font/style/alignment. ---
if module == "write":
    text = GetParams("text")
    type_ = GetParams("type")
    level = GetParams("level")
    align = GetParams("align")
    size = GetParams("size")
    bold = GetParams("bold")
    italic = GetParams("italic")
    underline = GetParams("underline")
    try:
        word_document.Paragraphs.Add()
        paragraph = word_document.Paragraphs.Last
        range_ = paragraph.Range
        range_.Text = text
        font = paragraph.Range.Font
        size = float(size) if size else 12
        font.Size = size
        # COM booleans: -1 is True, 0 is False.
        if bold == "True":
            boldInt = -1
        else:
            boldInt = 0
        font.Bold = boldInt
        if italic == "True":
            italicInt = -1
        else:
            italicInt = 0
        font.Italic = italicInt
        if underline == "True":
            underlineInt = -1
        else:
            underlineInt = 0
        font.Underline = underlineInt
        paragraph.Alignment = int(align) if align else 0
        # Build the style key, e.g. "heading" + "1"; list styles above
        # level 5 are clamped to 5.
        style = type_ + level if level is not None else ""
        if style in WdBuiltinStyle:
            paragraph.Style = WdBuiltinStyle[style]
        elif (type_ == "number" or type_ == "bullet") and int(level) > 5:
            level = 5
            style = type_ + str(level)
            paragraph.Style = WdBuiltinStyle[style]
        else:
            style = type_
            paragraph.Style = WdBuiltinStyle[style]
    except Exception as e:
        PrintException()
        raise e
# --- Command "close": close the document and quit Word. ---
if module == "close":
    try:
        word_document.Close()
        ms_word.Quit()
        word_document = None
        ms_word = None
    except Exception as e:
        PrintException()
        raise e

# --- Command "new_page": append a paragraph and insert a page break. ---
if module == "new_page":
    try:
        word_document.Paragraphs.Add()
        paragraph = word_document.Paragraphs.Last
        paragraph.Range.InsertBreak()
    except Exception as e:
        PrintException()
        raise e

# --- Command "add_pic": insert an inline picture at the document end. ---
if module == "add_pic":
    img_path = GetParams("img_path")
    try:
        # Only work with \  (the COM API needs backslash separators)
        img_path = img_path.replace("/", os.sep)
        count = word_document.Paragraphs.Count  # Count number paragraphs
        if count > 1:
            word_document.Paragraphs.Add()
        paragraph = word_document.Paragraphs.Last
        img = paragraph.Range.InlineShapes.AddPicture(FileName=img_path, LinkToFile=False, SaveWithDocument=True)
        print(img)
    except Exception as e:
        print("\x1B[" + "31;40mError\u2193\x1B[" + "0m")
        PrintException()
        raise e

# --- Command "count_paragraphs": store the paragraph count in a variable. ---
if module == "count_paragraphs":
    number = GetParams("variable")
    try:
        count = word_document.Paragraphs.count
        SetVar(number, count)
    except Exception as e:
        print("\x1B[" + "31;40mError\u2193\x1B[" + "0m")
        PrintException()
        raise e
# --- Command "search_replace_text": replace text either in specific
# paragraphs (comma-separated 1-based indices) or document-wide via Word's
# Find/Replace (Replace=2 is wdReplaceAll). ---
if module == "search_replace_text":
    text_search = GetParams("text_search")
    text_replace = GetParams("text_replace")
    numParagraphs = GetParams("numParagraphs")
    if text_search == text_replace:
        # Nothing to do when search and replacement are identical.
        pass
    else:
        if numParagraphs:
            paragraphList = [int(s) for s in numParagraphs.split(',')]
            for i in paragraphList:
                paragraph = word_document.Paragraphs(i)
                range_ = paragraph.Range
                if text_search in range_.Text:
                    range_.Text = range_.Text.replace(text_search, text_replace)
        else:
            paragraphs = word_document.Paragraphs
            #fullRange = word_document.content
            for paragraph in paragraphs:
                range_ = paragraph.Range
                print(range_.Find.Text)
                range_.Find.Text = text_search
                range_.Find.Replacement.Text = text_replace
                range_.Find.Execute(Replace=2,Forward=True,MatchWholeWord=True)
                #print(range_.Find.Execute(FindText=text_search, ReplaceWith="text_replace", Replace=2))
                #if text_search in range_.Text:
                #range_.Text = range_.Text.replace(text_search,text_replace)

# --- Command "search_text": store the 1-based indices of paragraphs that
# contain the search text (whole-word match) in a Rocketbot variable. ---
if module == "search_text":
    try:
        text_search = GetParams("text_search")
        whichParagraph = GetParams("variable")
        paragraphList = []
        count = 1
        for paragraph in word_document.Paragraphs:
            range_ = paragraph.Range
            range_.Find.Text = text_search
            if range_.Find.Execute(Forward=True, MatchWholeWord=True):
                paragraphList.append(count)
            count += 1
        SetVar(whichParagraph, paragraphList)
        print(paragraphList)
    except Exception as e:
        print("\x1B[" + "31;40mError\u2193\x1B[" + "0m")
        PrintException()
        raise e
|
<reponame>Thomasjkeel/Exploratory-Data-Analysis-GUI<gh_stars>0
## useful references:
# http://www.science.smith.edu/dftwiki/index.php/PyQt5_Tutorial:_A_Window_Application_with_File_IO
# https://blog.aaronhktan.com/posts/2018/05/14/pyqt5-pyinstaller-executable
# https://realpython.com/pyinstaller-python/
import os
import sys
from pathlib import Path
import time
# for developing the PyQt5 app
from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QPushButton, QAction, QLineEdit, QMessageBox, QFileDialog, QVBoxLayout, QLabel, QComboBox, QProgressBar, QCheckBox
from PyQt5.QtGui import QIcon, QPixmap
from PyQt5.QtCore import pyqtSlot, QThread, pyqtSignal, Qt
# for the Exploratory Data Analysis
import pandas
import processes # location with the apps processes
TIME_LIMIT = 100
def resource_path(relative_path):
    """Resolve *relative_path* against the application's asset base.

    PyInstaller one-file bundles unpack assets into ``sys._MEIPASS``; when
    that attribute exists we resolve against it, otherwise against the
    current working directory.
    """
    base = getattr(sys, '_MEIPASS', None)
    if base is None:
        base = os.path.abspath('.')
    return os.path.join(base, relative_path)
class App(QMainWindow):
    """Main window: pick a CSV, choose a process, run it with a progress bar."""

    def __init__(self):
        # testing run == 'will automatically fill the inputs'
        super().__init__()
        self.title = 'Simple Exploratory Data Analysis GUI'
        self.left = 10
        self.top = 100
        self.width = 700
        self.height = 200
        # Only set to True once a process that produces output has run.
        self.save_the_file = False
        try:
            assert sys.argv[1]
            # will automatically fill in the inputs for testing
            self.testing_run = sys.argv[1]
        except Exception:  # no CLI argument given (was a bare except)
            self.testing_run = ""
        self.initUI()

    def loadFile(self):
        """Validate the chosen paths and read the CSV into self.data.

        Returns True on success; shows a message box and returns False
        otherwise.
        """
        fileName = self.textbox.text()
        print(fileName[-4:])
        # replace escaped backslash-space sequences from drag/drop paths
        fileName = fileName.replace("\\ ", " ")
        try:
            assert fileName[-4:] == '.csv'
        except Exception:
            QMessageBox.question(self, 'Unknown File',
                                 "Please enter a file that exists and ends in \'.csv\'", QMessageBox.Ok, QMessageBox.Ok)
            return False
        try:
            assert len(self.output_box.text()) > 0
        except Exception:
            QMessageBox.question(self, 'Error',
                                 "Please enter a path for the output folder", QMessageBox.Ok, QMessageBox.Ok)
            return False
        try:
            self.data = pandas.read_csv(fileName)  # ,parse_dates=['DocDate']
            QMessageBox.question(self, 'Please Wait', "Starting \'%s\', please wait" % (self.comboBox.currentText()), QMessageBox.Ok, QMessageBox.Ok)
            print(self.data.head())
        except Exception:
            QMessageBox.question(self, 'Unknown File', "File doesn't exist", QMessageBox.Ok, QMessageBox.Ok)
            return False
        return True

    def initUI(self):
        """Build all widgets and wire their signals."""
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.inputLabel = QLabel(self)
        self.outputLabel = QLabel(self)
        # Input-file textbox (read-only; filled via the file dialog)
        self.textbox = QLineEdit(self)
        self.textbox.move(120, 20)
        self.textbox.resize(200, 20)
        self.textbox.setReadOnly(True)
        self.inputLabel.setText("Input file path:")
        self.inputLabel.move(10, 15)
        # Output-folder textbox (read-only; filled via the directory dialog)
        self.output_box = QLineEdit(self)
        self.output_box.move(120, 80)
        self.output_box.resize(200, 20)
        self.output_box.setReadOnly(True)
        self.outputLabel.setText("Output folder:")
        self.outputLabel.move(10, 75)
        if self.testing_run == 'test':  # TODO: remove for full version
            self.textbox.setText("test.csv")
            self.output_box.setText("test_output_folder")
        # Start button
        self.button = QPushButton('Start Process', self)
        self.button.resize(130, 35)
        self.button.move(360, 17)
        # File-explorer buttons for input file and output folder
        self.fileExplore = QPushButton('...', self)
        self.fileExplore.resize(35, 30)
        self.fileExplore.move(325, 20)
        self.outputDialog = QPushButton('...', self)
        self.outputDialog.resize(35, 30)
        self.outputDialog.move(325, 80)
        # Dropdown selecting which process to run
        self.comboBox = QComboBox(self)
        self.comboBox.addItem(" ")
        self.comboBox.addItem("Process 1")
        self.comboBox.addItem("Process 2")
        self.comboBox.addItem("Process 3")
        self.comboBox.addItem("Process 4")
        self.comboBox.resize(150, 30)
        self.comboBox.move(510, 17)
        # Progress bar driven by the External counter thread
        self.progress = QProgressBar(self)
        self.progress.setGeometry(200, 150, 250, 20)
        self.progress.setMaximum(100)
        # Checkbox: open file after completion
        self.open_in_web = QCheckBox("Open file?", self)
        self.open_in_web.stateChanged.connect(self.clickBox)
        self.open_in_web.move(520, 70)
        self.open_in_web.resize(300, 40)
        # Checkbox: allow interactive plots
        self.multiple = QCheckBox("Allow interactivity?", self)
        self.multiple.stateChanged.connect(self.clickBox)
        self.multiple.move(370, 70)
        self.multiple.resize(150, 40)
        # Wire signals to handlers
        self.button.clicked.connect(self.on_click)
        self.fileExplore.clicked.connect(self.chooseFile)
        self.outputDialog.clicked.connect(self.openDirectoryDialog)
        self.show()

    def clickBox(self, state):
        """Checkbox handler (currently a no-op placeholder)."""
        if state == Qt.Checked:
            pass
        else:
            pass

    @pyqtSlot()
    def chooseFile(self):
        """Open a file dialog and put the chosen path into the input box."""
        self.textbox.setText(self.openFileNameDialog())

    @pyqtSlot()
    def on_click(self):
        """Validate inputs, start the progress thread and run the process."""
        # make sure file is not going to be saved by app unless specified
        self.save_the_file = False
        process = self.comboBox.currentText()
        print("TO REMOVE: Process ==", process)
        if process != " ":
            self.calc = External()  # progress-bar counter thread
            self.calc.start()
        else:
            QMessageBox.question(self, 'Error!', "Please select a process", QMessageBox.Ok, QMessageBox.Ok)
            return
        try:
            assert self.loadFile()
        except Exception:
            print("Unable to load file")
            return
        # BUG FIX: the original chain tested "Process 1" three times, so
        # selecting "Process 2" or "Process 3" silently did nothing. All
        # four branches had identical placeholder bodies, merged here.
        if process in ("Process 1", "Process 2", "Process 3", "Process 4"):
            self.save_the_file = True
            try:
                self.calc.countChanged.connect(self.onCountChanged)
                self.new_data = processes.testAnalysis(self.data)
            except Exception:
                QMessageBox.information(self, 'Operation Failed', "Operation Failed!", QMessageBox.Ok, QMessageBox.Ok)
                self.new_data = pandas.DataFrame([])
                return
        if self.save_the_file:
            self.saveFile()

    def onCountChanged(self, value):
        """Slot: advance the progress bar."""
        self.progress.setValue(value)

    def openDirectoryDialog(self):
        """Pick the output directory and show it in the output box."""
        directory_chosen = str(
            QFileDialog.getExistingDirectory(self, "Select Directory"))
        self.output_box.setText(directory_chosen)

    def openFileNameDialog(self):
        """Return a file path chosen by the user (None if cancelled)."""
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        fileName, _ = QFileDialog.getOpenFileName(
            self, "QFileDialog.getOpenFileName()", "", "All Files (*);;CSV files (*.csv)", options=options)
        if fileName:
            return fileName

    def saveFile(self):
        """Write self.new_data to <output folder>/output.csv."""
        print("TO REMOVE: SAVING FILE...")
        output_dir = Path(self.output_box.text())
        try:
            self.new_data.to_csv(str(output_dir / 'output.csv'))
            QMessageBox.question(self, 'Operation complete – ExDag',
                                 "Operation Complete!", QMessageBox.Ok, QMessageBox.Ok)
        except Exception:
            QMessageBox.question(self, 'Incorrect output – ExDag',
                                 "Unable to create new file", QMessageBox.Ok, QMessageBox.Ok)
            return
class External(QThread):
    """Counter thread that drives the progress bar.

    Emits ``countChanged`` once per tick (every 0.1 s) with the values
    1..TIME_LIMIT, then finishes.
    """

    countChanged = pyqtSignal(int)

    def run(self):
        """Tick from 1 to TIME_LIMIT, sleeping before each emission."""
        for tick in range(1, TIME_LIMIT + 1):
            time.sleep(.1)
            self.countChanged.emit(tick)
if __name__ == '__main__':
    # Launch the Qt event loop; exec_() blocks until the window is closed.
    app = QApplication(sys.argv)
    ex = App()
    sys.exit(app.exec_())
|
<gh_stars>10-100
#!/usr/bin/python3
r'''Tests special-case projection functions
Simple things like project_lonlat(), project_stereographic(), etc.
I do 3 things:
Here I make sure the projection functions return the correct values. This is a
regression test, so the "right" values were recorded at some point, and any
deviation is flagged.
I make sure that project(unproject(x)) == x
I run a gradient check. I do these for the simple project_...()
function AND the generic project() function.
'''
import sys
import numpy as np
import numpysane as nps
import os
testdir = os.path.dirname(os.path.realpath(__file__))
# I import the LOCAL mrcal since that's what I'm testing
sys.path[:0] = f"{testdir}/..",
import mrcal
import testutils
from test_calibration_helpers import grad
# The single commandline argument selects which projection family to test
if len(sys.argv) != 2:
    raise Exception("Need one argument on the commandline: the projection type. Currently I support 'pinhole','latlon','lonlat','stereographic'")
# pinhole/latlon/lonlat share one set of intrinsics and test points;
# stereographic defines its own further below
if sys.argv[1] == 'pinhole' or \
   sys.argv[1] == 'latlon' or \
   sys.argv[1] == 'lonlat':
    # pixels/rad
    fx,fy = 3000., 2000.
    # pixel where latlon = (0,0) projects to. May be negative
    cx,cy = (-10000., 4000.)
    # a few points, some wide, some not. Some behind the camera
    p = np.array(((1.0, 2.0, 10.0),
                  (-1.1, 0.3, -1.0),
                  (-0.9, -1.5, -1.0)))
# pinhole is the only model here whose unprojection is NOT normalized
unproject_is_normalized = (sys.argv[1] != 'pinhole')
if sys.argv[1] == 'pinhole':
    # pinhole projects ahead only, so flip the test points onto +z
    p[:,2] = abs(p[:,2])
# Dispatch on the requested projection type: pick the mrcal lens model, the
# matching special-case project/unproject functions, and the recorded
# reference projections (regression values from a known-good run).
if sys.argv[1] == 'pinhole':
    lensmodel = 'LENSMODEL_PINHOLE'
    func_project = mrcal.project_pinhole
    func_unproject = mrcal.unproject_pinhole
    name = 'pinhole'
    q_projected_ref = np.array([[ -9700., 4400.],
                                [ -13300., 4600.],
                                [ -12700., 1000.]])
elif sys.argv[1] == 'lonlat':
    lensmodel = 'LENSMODEL_LONLAT'
    func_project = mrcal.project_lonlat
    func_unproject = mrcal.unproject_lonlat
    name = 'lonlat'
    q_projected_ref = np.array([[ -9700.99404253, 4392.88198287],
                                [-16925.83416075, 4398.25498944],
                                [-17226.33265541, 2320.61601685]])
elif sys.argv[1] == 'latlon':
    lensmodel = 'LENSMODEL_LATLON'
    func_project = mrcal.project_latlon
    func_unproject = mrcal.unproject_latlon
    name = 'latlon'
    q_projected_ref = np.array([[ -9706.7632608 , 4394.7911197 ],
                                [-12434.4909092 , 9700.27171822],
                                [-11389.09468198, -317.59786068]])
elif sys.argv[1] == 'stereographic':
    lensmodel = 'LENSMODEL_STEREOGRAPHIC'
    func_project = mrcal.project_stereographic
    func_unproject = mrcal.unproject_stereographic
    name = 'stereographic'
    # stereographic uses its own intrinsics and test points
    fx,fy,cx,cy = 1512., 1112, 500., 333.
    # a few points, some wide, some not. Some behind the camera
    p = np.array(((1.0, 2.0, 10.0),
                  (-1.1, 0.3, -1.0),
                  (-0.9, -1.5, -1.0)))
    q_projected_ref = np.array([[ 649.35582325, 552.6874014 ],
                                [-5939.33490417, 1624.58376866],
                                [-2181.52681292, -2953.8803086 ]])
    unproject_is_normalized = False
else:
    # BUGFIX: the message previously omitted 'pinhole' and 'latlon', both of
    # which ARE handled above (and listed by the argv check at the top)
    raise Exception("Unknown projection type. Currently I support 'pinhole','latlon','lonlat','stereographic'")
intrinsics = (lensmodel, np.array((fx,fy,cx,cy)))
# Regression: the special-case projection must match the recorded reference
q_projected = func_project(p, intrinsics[1])
testutils.confirm_equal(q_projected,
                        q_projected_ref,
                        msg = f"project_{name}()",
                        worstcase = True,
                        relative = True)
# The generic project() must agree with the special-case function
testutils.confirm_equal(mrcal.project(p, *intrinsics),
                        q_projected,
                        msg = f"project({name}) returns the same as project_{name}()",
                        worstcase = True,
                        relative = True)
v_unprojected = func_unproject(q_projected, intrinsics[1])
# Unprojection check: normalized models must return unit vectors equal to
# p/|p|; non-normalized models must at least point along p (zero angle)
if unproject_is_normalized:
    testutils.confirm_equal( nps.mag(v_unprojected),
                             1.,
                             msg = f"unproject_{name}() returns normalized vectors",
                             worstcase = True,
                             relative = True)
    testutils.confirm_equal( v_unprojected,
                             p / nps.dummy(nps.mag(p), axis=-1),
                             msg = f"unproject_{name}()",
                             worstcase = True,
                             relative = True)
else:
    cos = nps.inner(v_unprojected, p) / (nps.mag(p)*nps.mag(v_unprojected))
    cos = np.clip(cos, -1, 1)
    testutils.confirm_equal( np.arccos(cos),
                             np.zeros((p.shape[0],), dtype=float),
                             msg = f"unproject_{name}()",
                             worstcase = True)
    # Not normalized by default. Make sure that if I ask for it to be
    # normalized, that it is
    testutils.confirm_equal( nps.mag( mrcal.unproject(q_projected, *intrinsics, normalize = True) ),
                             1.,
                             msg = f"unproject({name},normalize = True) returns normalized vectors",
                             worstcase = True,
                             relative = True)
    testutils.confirm_equal( nps.mag( mrcal.unproject(q_projected, *intrinsics, normalize = True, get_gradients = True)[0] ),
                             1.,
                             msg = f"unproject({name},normalize = True, get_gradients=True) returns normalized vectors",
                             worstcase = True,
                             relative = True)
# Generic unproject() must agree with the special-case function, and
# project(unproject()) must be an identity both ways
testutils.confirm_equal( mrcal.unproject(q_projected, *intrinsics),
                         v_unprojected,
                         msg = f"unproject({name}) returns the same as unproject_{name}()",
                         worstcase = True,
                         relative = True)
testutils.confirm_equal( mrcal.project(mrcal.unproject(q_projected, *intrinsics),*intrinsics),
                         q_projected,
                         msg = f"project(unproject()) is an identity",
                         worstcase = True,
                         relative = True)
testutils.confirm_equal( func_project(func_unproject(q_projected,intrinsics[1]),intrinsics[1]),
                         q_projected,
                         msg = f"project_{name}(unproject_{name}()) is an identity",
                         worstcase = True,
                         relative = True)
# Now gradients for project()
ipt = 1
_,dq_dp_reported = func_project(p[ipt], intrinsics[1], get_gradients=True)
dq_dp_observed = grad(lambda p: func_project(p, intrinsics[1]),
                      p[ipt])
testutils.confirm_equal(dq_dp_reported,
                        dq_dp_observed,
                        msg = f"project_{name}() dq/dp",
                        worstcase = True,
                        relative = True)
_,dq_dp_reported,dq_di_reported = mrcal.project(p[ipt], *intrinsics, get_gradients=True)
dq_dp_observed = grad(lambda p: mrcal.project(p, *intrinsics),
                      p[ipt])
dq_di_observed = grad(lambda intrinsics_data: mrcal.project(p[ipt], intrinsics[0],intrinsics_data),
                      intrinsics[1])
testutils.confirm_equal(dq_dp_reported,
                        dq_dp_observed,
                        msg = f"project({name}) dq/dp",
                        worstcase = True,
                        relative = True)
testutils.confirm_equal(dq_di_reported,
                        dq_di_observed,
                        msg = f"project({name}) dq/di",
                        worstcase = True,
                        relative = True,
                        eps = 1e-5)
# Now gradients for unproject()
ipt = 1
_,dv_dq_reported = func_unproject(q_projected[ipt], intrinsics[1], get_gradients=True)
dv_dq_observed = grad(lambda q: func_unproject(q, intrinsics[1]),
                      q_projected[ipt])
testutils.confirm_equal(dv_dq_reported,
                        dv_dq_observed,
                        msg = f"unproject_{name}() dv/dq",
                        worstcase = True,
                        relative = True,
                        eps = 2e-6)
# Generic unproject() gradients, with and without normalization, checked
# against finite differences; the in-place (out=) path must match the
# allocating path
for normalize in (False, True):
    v_unprojected,dv_dq_reported,dv_di_reported = \
        mrcal.unproject(q_projected[ipt], *intrinsics,
                        get_gradients = True,
                        normalize = normalize)
    dv_dq_observed = grad(lambda q: mrcal.unproject(q, *intrinsics, normalize=normalize),
                          q_projected[ipt])
    dv_di_observed = grad(lambda intrinsics_data: mrcal.unproject(q_projected[ipt], intrinsics[0],intrinsics_data, normalize=normalize),
                          intrinsics[1])
    testutils.confirm_equal(dv_dq_reported,
                            dv_dq_observed,
                            msg = f"unproject({name}, normalize={normalize}) dv/dq",
                            worstcase = True,
                            relative = True,
                            eps = 1e-5)
    testutils.confirm_equal(dv_di_reported,
                            dv_di_observed,
                            msg = f"unproject({name}, normalize={normalize}) dv/di",
                            worstcase = True,
                            relative = True,
                            eps = 1e-5)
    # zeroed copies guarantee the in-place call actually wrote its outputs
    v_unprojected_inplace = v_unprojected.copy() *0
    dv_dq_reported_inplace = dv_dq_reported.copy()*0
    dv_di_reported_inplace = dv_di_reported.copy()*0
    mrcal.unproject(q_projected[ipt], *intrinsics, get_gradients=True, normalize=normalize,
                    out = [v_unprojected_inplace,dv_dq_reported_inplace,dv_di_reported_inplace])
    testutils.confirm_equal(v_unprojected_inplace,
                            v_unprojected,
                            msg = f"unproject({name}, normalize={normalize}) works in-place: v_unprojected",
                            worstcase = True,
                            relative = True)
    testutils.confirm_equal(dv_dq_reported_inplace,
                            dv_dq_reported,
                            msg = f"unproject({name}, normalize={normalize}) works in-place: dv_dq",
                            worstcase = True,
                            relative = True)
    testutils.confirm_equal(dv_di_reported_inplace,
                            dv_di_reported,
                            msg = f"unproject({name}, normalize={normalize}) works in-place: dv_di",
                            worstcase = True,
                            relative = True)
testutils.finish()
|
<reponame>tradenity/python-sdk<filename>tradenity/resources/shopping_cart.py
# coding: utf-8
"""
Tradenity API
Tradenity eCommerce Rest API
Contact: <EMAIL>
"""
from __future__ import absolute_import
import re
import pprint
# python 2 and python 3 compatibility library
import six
from tradenity.api_client import ApiClient
class ShoppingCart(object):
swagger_types = {
'id': 'str',
'meta': 'InstanceMeta',
'items': 'list[LineItem]',
'subtotal': 'int',
'total': 'int',
'shipping_cost': 'int',
'items_tax_amount': 'int',
'total_items_discount': 'int',
'promotions': 'list[Promotion]'
}
attribute_map = {
'id': 'id',
'meta': '__meta',
'items': 'items',
'subtotal': 'subtotal',
'total': 'total',
'shipping_cost': 'shippingCost',
'items_tax_amount': 'itemsTaxAmount',
'total_items_discount': 'totalItemsDiscount',
'promotions': 'promotions'
}
api_client = None
def __init__(self, id=None, meta=None, items=None, subtotal=None, total=None, shipping_cost=None, items_tax_amount=None, total_items_discount=None, promotions=None):
"""ShoppingCart - a model defined in Swagger"""
self._id = id
self._meta = None
self._items = None
self._subtotal = None
self._total = None
self._shipping_cost = None
self._items_tax_amount = None
self._total_items_discount = None
self._promotions = None
self.discriminator = None
if meta is not None:
self.meta = meta
if items is not None:
self.items = items
self.subtotal = subtotal
self.total = total
if shipping_cost is not None:
self.shipping_cost = shipping_cost
if items_tax_amount is not None:
self.items_tax_amount = items_tax_amount
if total_items_discount is not None:
self.total_items_discount = total_items_discount
if promotions is not None:
self.promotions = promotions
    @property
    def id(self):
        # Lazily derive the id from the meta href's last path segment when no
        # explicit id was assigned; caches the derived value in _id.
        if self._id:
            return self._id
        elif self.meta is None:
            return None
        else:
            self._id = self.meta.href.split("/")[-1]
            return self._id
    @id.setter
    def id(self, new_id):
        # Explicit assignment overrides any meta-derived id.
        self._id = new_id
    # --- Generated passthrough accessors: one property + setter per Swagger
    # --- field; no validation is performed.
    @property
    def meta(self):
        """Gets the meta of this ShoppingCart.
        :return: The meta of this ShoppingCart.
        :rtype: InstanceMeta
        """
        return self._meta
    @meta.setter
    def meta(self, meta):
        """Sets the meta of this ShoppingCart.
        :param meta: The meta of this ShoppingCart.
        :type: InstanceMeta
        """
        self._meta = meta
    @property
    def items(self):
        """Gets the items of this ShoppingCart.
        :return: The items of this ShoppingCart.
        :rtype: list[LineItem]
        """
        return self._items
    @items.setter
    def items(self, items):
        """Sets the items of this ShoppingCart.
        :param items: The items of this ShoppingCart.
        :type: list[LineItem]
        """
        self._items = items
    @property
    def subtotal(self):
        """Gets the subtotal of this ShoppingCart.
        :return: The subtotal of this ShoppingCart.
        :rtype: int
        """
        return self._subtotal
    @subtotal.setter
    def subtotal(self, subtotal):
        """Sets the subtotal of this ShoppingCart.
        :param subtotal: The subtotal of this ShoppingCart.
        :type: int
        """
        self._subtotal = subtotal
    @property
    def total(self):
        """Gets the total of this ShoppingCart.
        :return: The total of this ShoppingCart.
        :rtype: int
        """
        return self._total
    @total.setter
    def total(self, total):
        """Sets the total of this ShoppingCart.
        :param total: The total of this ShoppingCart.
        :type: int
        """
        self._total = total
    @property
    def shipping_cost(self):
        """Gets the shipping_cost of this ShoppingCart.
        :return: The shipping_cost of this ShoppingCart.
        :rtype: int
        """
        return self._shipping_cost
    @shipping_cost.setter
    def shipping_cost(self, shipping_cost):
        """Sets the shipping_cost of this ShoppingCart.
        :param shipping_cost: The shipping_cost of this ShoppingCart.
        :type: int
        """
        self._shipping_cost = shipping_cost
    @property
    def items_tax_amount(self):
        """Gets the items_tax_amount of this ShoppingCart.
        :return: The items_tax_amount of this ShoppingCart.
        :rtype: int
        """
        return self._items_tax_amount
    @items_tax_amount.setter
    def items_tax_amount(self, items_tax_amount):
        """Sets the items_tax_amount of this ShoppingCart.
        :param items_tax_amount: The items_tax_amount of this ShoppingCart.
        :type: int
        """
        self._items_tax_amount = items_tax_amount
    @property
    def total_items_discount(self):
        """Gets the total_items_discount of this ShoppingCart.
        :return: The total_items_discount of this ShoppingCart.
        :rtype: int
        """
        return self._total_items_discount
    @total_items_discount.setter
    def total_items_discount(self, total_items_discount):
        """Sets the total_items_discount of this ShoppingCart.
        :param total_items_discount: The total_items_discount of this ShoppingCart.
        :type: int
        """
        self._total_items_discount = total_items_discount
    @property
    def promotions(self):
        """Gets the promotions of this ShoppingCart.
        :return: The promotions of this ShoppingCart.
        :rtype: list[Promotion]
        """
        return self._promotions
    @promotions.setter
    def promotions(self, promotions):
        """Sets the promotions of this ShoppingCart.
        :param promotions: The promotions of this ShoppingCart.
        :type: list[Promotion]
        """
        self._promotions = promotions
    def to_dict(self):
        """Returns the model properties as a dict, recursing into nested
        models (anything exposing to_dict) inside lists and dicts."""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generator boilerplate: dead for this model, since ShoppingCart does
        # not subclass dict.
        if issubclass(ShoppingCart, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # NOTE(review): returns False (not NotImplemented) for foreign types,
        # so comparisons defined by the other operand never get a chance.
        if not isinstance(other, ShoppingCart):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
    @classmethod
    def get_api_client(cls):
        # Lazily create and cache one shared ApiClient for all API calls on
        # this model class.
        if cls.api_client is None:
            cls.api_client = ApiClient.instance()
        return cls.api_client
    # --- Generated API operations --------------------------------------------
    # Each public method delegates to a _*_with_http_info twin that assembles
    # and performs the actual REST call. NOTE(review): these pass `async` as a
    # keyword argument; `async` became a reserved word in Python 3.7, so this
    # module can only run on Python 2 / <= 3.6.
    @classmethod
    def add_item(cls, item, **kwargs):
        """Add item.
        Add new item to the shopping cart.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.add_item(item, async=True)
        >>> result = thread.get()
        :param async bool
        :param LineItem item: Line item to add to cart (required)
        :return: ShoppingCart
        If the method is called asynchronously,
        returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async'):
            return cls._add_item_with_http_info(item, **kwargs)
        else:
            (data) = cls._add_item_with_http_info(item, **kwargs)
            return data
    @classmethod
    def _add_item_with_http_info(cls, item, **kwargs):
        """Add item.
        Add new item to the shopping cart.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.add_item_with_http_info(item, async=True)
        >>> result = thread.get()
        :param async bool
        :param LineItem item: Line item to add to cart (required)
        :return: ShoppingCart
        If the method is called asynchronously,
        returns the request thread.
        """
        all_params = ['item']
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        query_params = []
        # Unknown kwargs are forwarded as extra query parameters.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                query_params.append((key, val))
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'item' is set
        if ('item' not in params or
                params['item'] is None):
            raise ValueError("Missing the required parameter `item` when calling `add_item`")
        collection_formats = {}
        path_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'item' in params:
            body_params = params['item']
        # HTTP header `Accept`
        header_params['Accept'] = cls.get_api_client().select_header_accept(
            ['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
            ['application/json'])
        # Authentication setting
        auth_settings = []
        return cls.get_api_client().call_api(
            '/shoppingCarts', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ShoppingCart',
            auth_settings=auth_settings,
            async=params.get('async'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    @classmethod
    def checkout(cls, order, **kwargs):
        """Checkout cart.
        Checkout cart, Making an order.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.checkout(order, async=True)
        >>> result = thread.get()
        :param async bool
        :param Order order: Required order details. (required)
        :return: Order
        If the method is called asynchronously,
        returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async'):
            return cls._checkout_with_http_info(order, **kwargs)
        else:
            (data) = cls._checkout_with_http_info(order, **kwargs)
            return data
    @classmethod
    def _checkout_with_http_info(cls, order, **kwargs):
        """Checkout cart.
        Checkout cart, Making an order.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.checkout_with_http_info(order, async=True)
        >>> result = thread.get()
        :param async bool
        :param Order order: Required order details. (required)
        :return: Order
        If the method is called asynchronously,
        returns the request thread.
        """
        all_params = ['order']
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        query_params = []
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                query_params.append((key, val))
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'order' is set
        if ('order' not in params or
                params['order'] is None):
            raise ValueError("Missing the required parameter `order` when calling `checkout`")
        collection_formats = {}
        path_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'order' in params:
            body_params = params['order']
        # HTTP header `Accept`
        header_params['Accept'] = cls.get_api_client().select_header_accept(
            ['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
            ['application/json'])
        # Authentication setting
        auth_settings = []
        return cls.get_api_client().call_api(
            '/shoppingCarts/checkout', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Order',
            auth_settings=auth_settings,
            async=params.get('async'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    @classmethod
    def delete_item(cls, item_id, **kwargs):
        """Remove item.
        Remove item from shopping cart
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.delete_item(item_id, async=True)
        >>> result = thread.get()
        :param async bool
        :param str item_id: Item ID to delete. (required)
        :return: ShoppingCart
        If the method is called asynchronously,
        returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async'):
            return cls._delete_item_with_http_info(item_id, **kwargs)
        else:
            (data) = cls._delete_item_with_http_info(item_id, **kwargs)
            return data
    @classmethod
    def _delete_item_with_http_info(cls, item_id, **kwargs):
        """Remove item.
        Remove item from shopping cart
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.delete_item_with_http_info(item_id, async=True)
        >>> result = thread.get()
        :param async bool
        :param str item_id: Item ID to delete. (required)
        :return: ShoppingCart
        If the method is called asynchronously,
        returns the request thread.
        """
        all_params = ['item_id']
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        query_params = []
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                query_params.append((key, val))
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'item_id' is set
        if ('item_id' not in params or
                params['item_id'] is None):
            raise ValueError("Missing the required parameter `item_id` when calling `delete_item`")
        collection_formats = {}
        path_params = {}
        if 'item_id' in params:
            path_params['itemId'] = params['item_id']
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = cls.get_api_client().select_header_accept(
            ['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
            ['application/json'])
        # Authentication setting
        auth_settings = []
        return cls.get_api_client().call_api(
            '/shoppingCarts/{itemId}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ShoppingCart',
            auth_settings=auth_settings,
            async=params.get('async'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    @classmethod
    def empty(cls, **kwargs):
        """Empty cart.
        Empty the shopping cart.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.empty(async=True)
        >>> result = thread.get()
        :param async bool
        :return: ShoppingCart
        If the method is called asynchronously,
        returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async'):
            return cls._empty_with_http_info(**kwargs)
        else:
            (data) = cls._empty_with_http_info(**kwargs)
            return data
    @classmethod
    def _empty_with_http_info(cls, **kwargs):
        """Empty cart.
        Empty the shopping cart.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.empty_with_http_info(async=True)
        >>> result = thread.get()
        :param async bool
        :return: ShoppingCart
        If the method is called asynchronously,
        returns the request thread.
        """
        all_params = []
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        query_params = []
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                query_params.append((key, val))
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = cls.get_api_client().select_header_accept(
            ['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
            ['application/json'])
        # Authentication setting
        auth_settings = []
        return cls.get_api_client().call_api(
            '/shoppingCarts', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ShoppingCart',
            auth_settings=auth_settings,
            async=params.get('async'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    @classmethod
    def get(cls, **kwargs):
        """Get cart.
        Retrieve the shopping cart of the current session.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.get(async=True)
        >>> result = thread.get()
        :param async bool
        :return: ShoppingCart
        If the method is called asynchronously,
        returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async'):
            return cls._get_with_http_info(**kwargs)
        else:
            (data) = cls._get_with_http_info(**kwargs)
            return data
    @classmethod
    def _get_with_http_info(cls, **kwargs):
        """Get cart.
        Retrieve the shopping cart of the current session.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.get_with_http_info(async=True)
        >>> result = thread.get()
        :param async bool
        :return: ShoppingCart
        If the method is called asynchronously,
        returns the request thread.
        """
        all_params = []
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        query_params = []
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                query_params.append((key, val))
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = cls.get_api_client().select_header_accept(
            ['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
            ['application/json'])
        # Authentication setting
        auth_settings = []
        return cls.get_api_client().call_api(
            '/shoppingCarts', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ShoppingCart',
            auth_settings=auth_settings,
            async=params.get('async'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    @classmethod
    def update_item(cls, item_id, item, **kwargs):
        """Update cart.
        Update cart item.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.update_item(item_id, item, async=True)
        >>> result = thread.get()
        :param async bool
        :param str item_id: Item ID to update. (required)
        :param LineItem item: Line item to update. (required)
        :return: ShoppingCart
        If the method is called asynchronously,
        returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async'):
            return cls._update_item_with_http_info(item_id, item, **kwargs)
        else:
            (data) = cls._update_item_with_http_info(item_id, item, **kwargs)
            return data
    @classmethod
    def _update_item_with_http_info(cls, item_id, item, **kwargs):
        """Update cart.
        Update cart item.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.update_item_with_http_info(item_id, item, async=True)
        >>> result = thread.get()
        :param async bool
        :param str item_id: Item ID to update. (required)
        :param LineItem item: Line item to update. (required)
        :return: ShoppingCart
        If the method is called asynchronously,
        returns the request thread.
        """
        all_params = ['item_id', 'item']
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        query_params = []
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                query_params.append((key, val))
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'item_id' is set
        if ('item_id' not in params or
                params['item_id'] is None):
            raise ValueError("Missing the required parameter `item_id` when calling `update_item`")
        # verify the required parameter 'item' is set
        if ('item' not in params or
                params['item'] is None):
            raise ValueError("Missing the required parameter `item` when calling `update_item`")
        collection_formats = {}
        path_params = {}
        if 'item_id' in params:
            path_params['itemId'] = params['item_id']
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'item' in params:
            body_params = params['item']
        # HTTP header `Accept`
        header_params['Accept'] = cls.get_api_client().select_header_accept(
            ['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
            ['application/json'])
        # Authentication setting
        auth_settings = []
        return cls.get_api_client().call_api(
            '/shoppingCarts/{itemId}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ShoppingCart',
            auth_settings=auth_settings,
            async=params.get('async'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
|
<gh_stars>0
import os
import sys
import time
import json
import socket
import base64
import struct
import logging
import platform
import subprocess
import threading
from enum import Enum
from types import SimpleNamespace
# ==============================================================================
# DEFINITIONS
# ==============================================================================
#
# definition of unit
#
_1KB_ = 1024
_1MB_ = _1KB_*1024
#
# definition for pyrc
#
_WAIT_TIMEOUT_ = 60        # default wait, seconds (presumably — confirm at call sites)
_HEADER_SIZE_ = 16
_CHUNK_SIZE_ = _1KB_*512   # 512 KiB per transfer chunk
_BUFFER_SIZE_ = _1MB_*2    # 2 MiB buffer
#
# Signature of headers
#
# 8-byte magic values identifying each message type on the wire
_SIGNATURE_ECHO___ = b'$SiGEcH$'
_SIGNATURE_UPLOAD_ = b'$SiGUpL$'
_SIGNATURE_DOWNLO_ = b'$SiGDoW$'
_SIGNATURE_EXECUT_ = b'$SiGExE$'
_SIGNATURE_LIST___ = b'$SiGLiS$'
_SIGNATURE_TEXT___ = b'$SiGTex$'
class CONFIG():
    """Mixin adding JSON round-trip helpers to plain attribute-bag objects."""
    def _try(self, o):
        # Serialize via the instance __dict__ when available; otherwise fall
        # back to a single-line string rendering.
        try:
            return o.__dict__
        except Exception:
            flattened = str(o)
            return flattened.replace('\n', '')
    def toTEXT(self):
        """Render this object as a stripped JSON string."""
        # json.dumps delegates any non-serializable object to _try.
        text = json.dumps(self, default=self._try)
        return text.strip()
    def toJSON(self):
        """Render this object as a plain dict (via the JSON text form)."""
        return json.loads(self.toTEXT())
    def toCLASS(self, text=None):
        """Parse JSON text (or this object's own JSON) into SimpleNamespaces."""
        source = text if text else self.toTEXT()
        return json.loads(source, object_hook=lambda fields: SimpleNamespace(**fields))
#
# Exception definitions
#
class rcresult(CONFIG):
    # Result/error container: errcode 0 means success; `text` carries the
    # human-readable message (note the errmsg -> text rename) and `data`
    # carries any payload bytes.
    def __init__(self, errcode: int = 0, errmsg: str = ''):
        self.errcode = errcode
        self.text = errmsg
        self.data = b''
# Predefined error results. NOTE(review): these are shared module-level
# instances — mutating e.g. `.data` on one mutates it for every user.
# General error
error_unknown = rcresult(1, 'Unknown error')
error_file_already_exist = rcresult(2, 'File already exist')
error_file_not_found = rcresult(3, 'File not found')
error_path_not_exist = rcresult(4, 'Path is not exist')
error_not_a_file = rcresult(5, 'The specific path is not a file')
error_not_a_folder = rcresult(6, 'The specific path is not a folder')
error_file_not_identical = rcresult(7, 'File length is not identical')
error_exception = rcresult(9, 'An exception rised')
# Streaming
error_wait_timeout_streaming = rcresult(50, 'Wait streaming timeout')
error_wait_timeout_done = rcresult(51, 'Wait done timeout')
# Process
error_exception_proc_wait_timeout = rcresult(60, 'Wait timeout a process')
#
# Enumeration definitions
#
class action_name(Enum):
    # Top-level operation carried by a message
    unknown = 0
    upload = 1
    download = 2
    list = 3
    execute = 4
    message = 5
    echo = 99
class action_kind(Enum):
    # Phase of an operation: request (ask), payload (data), completion (done)
    unknown = 0
    ask = 1
    data = 2
    done = 3
class execute_subcmd(Enum):
    # Sub-commands of the execute action
    unknown = 0
    start = 1
    query = 2
    kill = 3
class proc_status(Enum):
    # Lifecycle states of a remotely-executed process
    unknown = 0
    unstart = 1
    running = 2
    killing = 3
    killed = 4
    terminated = 5
    exception = 6
# ==============================================================================
# CLASSES
# ==============================================================================
#
# Inner Commands
#
class inncmd_sysinfo(CONFIG):
    # Payload for a system-info inner command: OS name plus home directory.
    def __init__(self, osname: str = 'unknown', homedir: str = ''):
        self.osname = osname
        self.homedir = homedir
class inncmd_mkdir(CONFIG):
    # Payload for a make-directory inner command; `result` reports success.
    def __init__(self, path: str, result: bool = False):
        self.path = path
        self.result = result
#
# Basic classes
#
class execmdarg(CONFIG):
    """Arguments describing a command to execute remotely."""
    # BUGFIX: the parameters were annotated `bytes`, but every default value
    # ('' and '.') and the async_process.run() call path use `str`; the
    # annotations now match actual usage. Runtime behavior is unchanged.
    def __init__(self,
                 program: str,
                 argument: str = '',
                 workdir: str = '.',
                 isbase64: bool = False):
        self.program = program
        self.argument = argument
        self.workdir = workdir
        self.isbase64 = isbase64
class execresult(CONFIG):
    # Aggregated outcome of one command execution: exit code plus the
    # captured stdout/stderr lines and optional raw payload bytes.
    def __init__(self):
        self.errcode = 0
        self.stdout = []
        self.stderr = []
        self.data = b''
class async_process():
    """Run a shell command on a daemon thread, collecting its exit code and
    decoded stdout/stderr into an execresult.
    """
    def __init__(self, tag):
        self.program = ''
        self.argument = ''
        self.workdir = '.'
        self.status = proc_status.unknown
        self.execrs = execresult()
        self.tag: int = tag
        # BUGFIX: initialize the Popen handle so kill() before run() no longer
        # raises AttributeError (self.proc was previously only created inside
        # the worker thread).
        self.proc = None
        self.thread = threading.Thread(target=self._thread_start)
        self.thread.daemon = True
    def _thread_start(self):
        """Thread body: launch the child process, wait for it, capture output."""
        self.status = proc_status.running
        try:
            fullcmd = self.program
            if '' != self.argument:
                fullcmd += ' ' + self.argument
            # NOTE(review): shell=True with a concatenated command string is a
            # shell-injection risk if program/argument come from untrusted input.
            self.proc = subprocess.Popen(fullcmd,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE,
                                         shell=True,
                                         cwd=self.workdir)
            try:
                # BUGFIX: was misnamed _WAIT_TIMEOUT_10_MINS_; the value is
                # and remains 5 minutes.
                _WAIT_TIMEOUT_5_MINS_ = 60 * 5
                self.execrs.errcode = self.proc.wait(_WAIT_TIMEOUT_5_MINS_)
                logfmt = 'fullcmd={} errcode={}'
                # Routine completion: log at info level (was logging.error).
                logging.info(logfmt.format(fullcmd, self.execrs.errcode))
                stdout_lines = [line.decode('utf-8', errors="ignore").rstrip() for line in self.proc.stdout.readlines()]
                self.execrs.stdout.extend(stdout_lines)
                stderr_lines = [line.decode('utf-8', errors="ignore").rstrip() for line in self.proc.stderr.readlines()]
                self.execrs.stderr.extend(stderr_lines)
                self.status = proc_status.terminated
            except subprocess.TimeoutExpired:
                # BUGFIX: previously the generic handler below swallowed
                # TimeoutExpired (a subclass of Exception), which made the
                # dedicated timeout handler in the outer try unreachable.
                raise
            except Exception as Err:
                self.execrs.errcode = 100000
                self.status = proc_status.exception
                # BUGFIX: append the message, not the exception object.
                self.execrs.stderr.append(str(Err))
        except subprocess.TimeoutExpired as Err:
            logging.exception(Err)
            self.status = proc_status.exception
            self.execrs.errcode = error_exception_proc_wait_timeout.errcode
            self.execrs.stderr.append(error_exception_proc_wait_timeout.text)
            self.execrs.stderr.append(str(Err))
        except Exception as Err:
            logging.exception(Err)
            self.status = proc_status.exception
            self.execrs.errcode = error_exception.errcode
            self.execrs.stderr.append(error_exception.text)
            self.execrs.stderr.append(str(Err))
        finally:
            pass
    def run(self, program: str,
            argument: str = '',
            workdir: str = '.'):
        """Start the command asynchronously; no-op if already started."""
        self.program = program
        self.argument = argument
        self.workdir = workdir
        if self.status != proc_status.unknown:
            return
        if self.thread:
            # BUGFIX: mark running BEFORE start() so a fast-exiting child
            # cannot have its terminal status overwritten afterwards (the
            # original assigned `running` after start(), racing the worker).
            self.status = proc_status.running
            self.thread.start()
    def kill(self):
        """Terminate the child process, if one was ever started."""
        if self.proc:
            self.proc.terminate()
            self.status = proc_status.killed
        self.thread = None
    def get_status(self):
        """Return the current proc_status."""
        return self.status
    def get_tag(self):
        """Return the caller-supplied identifying tag."""
        return self.tag
class header_echo():
    """Wire header for the 'echo' handshake action (opaque payload)."""

    def __init__(self, kind: action_kind = action_kind.unknown, data: bytes = b''):
        self.data = b''
        # signature + (header/total/payload sizes, action name/kind)
        # + (chunk size/count/index) + errcode
        self._STRUCT_FORMAT_ = '8s' + 'iiiii' + 'iii' + 'i'
        self.signature: bytes = _SIGNATURE_ECHO___
        self.header_size: int = 0
        self.total_size: int = 0
        self.payload_size: int = 0
        self.action_name: int = action_name.echo.value
        self.action_kind: int = kind.value
        self.chunk_size: int = len(data)
        self.chunk_count: int = 0
        self.chunk_index: int = 0
        self.errcode: int = 0
        self.payload: bytes = data

    def pack(self):
        """Serialize header + payload into one bytes blob."""
        self.header_size = struct.calcsize(self._STRUCT_FORMAT_)
        self.payload_size = self.chunk_size
        self.total_size = self.header_size + self.payload_size
        rawdata = struct.pack(self._STRUCT_FORMAT_,
                              self.signature,
                              self.header_size,
                              self.total_size,
                              self.payload_size,
                              self.action_name,
                              self.action_kind,
                              self.chunk_size,
                              self.chunk_count,
                              self.chunk_index,
                              self.errcode)
        rawdata += self.payload
        return rawdata

    def unpack(self, data: bytes):
        """Decode a pack()ed blob; returns a new header_echo, or None if short."""
        if len(data) <= _HEADER_SIZE_:
            return None
        # Fix: header_size is a 4-byte little-endian int at bytes 8..12;
        # the old [8:11] slice silently dropped its high byte.
        hdr_size: int = int.from_bytes(data[8:12], 'little')
        hdr_only: bytes = data[:hdr_size]
        hdr = header_echo()
        # Header fields
        unpack = struct.unpack(self._STRUCT_FORMAT_, hdr_only)
        hdr.signature = unpack[0]
        hdr.header_size = unpack[1]
        hdr.total_size = unpack[2]
        hdr.payload_size = unpack[3]
        hdr.action_name = unpack[4]
        hdr.action_kind = unpack[5]
        hdr.chunk_size = unpack[6]
        hdr.chunk_count = unpack[7]
        hdr.chunk_index = unpack[8]
        hdr.errcode = unpack[9]
        # Payload
        hdr.payload = data[hdr_size:]
        hdr.data = hdr.payload
        return hdr
class header_upload():
    """Wire header for the 'upload' action (client pushes a file chunk)."""

    def __init__(self, kind: action_kind = action_kind.unknown,
                 filename: str = '',
                 filesize: int = 0,
                 dstdirpath: str = '.',
                 data: bytes = b''):
        if filename is None:
            filename = ''
        if dstdirpath is None:
            dstdirpath = '.'
        # signature + (header/total/payload/file sizes, action name/kind)
        # + (chunk size/count/index) + errcode + (filename/dirpath lengths)
        self._STRUCT_FORMAT_ = '8s' + 'iiiiii' + 'iii' + 'i' + 'ii'
        # Fields decoded from the payload by unpack()
        self.filename = b''
        self.dstdirpath = b''
        self.data = b''
        self.signature: bytes = _SIGNATURE_UPLOAD_
        self.header_size: int = 0
        self.total_size: int = 0
        self.payload_size: int = 0
        self.file_size: int = filesize
        self.action_name: int = action_name.upload.value
        self.action_kind: int = kind.value
        self.chunk_size: int = len(data)
        self.chunk_count: int = 0
        self.chunk_index: int = 0
        self.errcode: int = 0
        # Fix: record byte lengths of the encoded strings; len(str) disagrees
        # with the UTF-8 payload for non-ASCII names.
        filename_raw = filename.encode('utf-8')
        dstdirpath_raw = dstdirpath.encode('utf-8')
        self.length_filename: int = len(filename_raw)
        self.length_dirpath: int = len(dstdirpath_raw)
        self.payload: bytes = filename_raw + dstdirpath_raw + data

    def pack(self):
        """Serialize header + payload into one bytes blob."""
        self.header_size = struct.calcsize(self._STRUCT_FORMAT_)
        self.payload_size = (self.length_filename +
                             self.length_dirpath +
                             self.chunk_size)
        self.total_size = self.header_size + self.payload_size
        rawdata = struct.pack(self._STRUCT_FORMAT_,
                              self.signature,
                              self.header_size,
                              self.total_size,
                              self.payload_size,
                              self.file_size,
                              self.action_name,
                              self.action_kind,
                              self.chunk_size,
                              self.chunk_count,
                              self.chunk_index,
                              self.errcode,
                              self.length_filename,
                              self.length_dirpath)
        rawdata += self.payload
        return rawdata

    def unpack(self, data: bytes):
        """Decode a pack()ed blob; returns a new header_upload, or None if short."""
        if len(data) <= _HEADER_SIZE_:
            return None
        # Fix: read all 4 size bytes ([8:12]); [8:11] dropped the high byte.
        hdr_size: int = int.from_bytes(data[8:12], 'little')
        hdr_only: bytes = data[:hdr_size]
        hdr = header_upload()
        # Header fields
        unpack = struct.unpack(self._STRUCT_FORMAT_, hdr_only)
        hdr.signature = unpack[0]
        hdr.header_size = unpack[1]
        hdr.total_size = unpack[2]
        hdr.payload_size = unpack[3]
        hdr.file_size = unpack[4]
        hdr.action_name = unpack[5]
        hdr.action_kind = unpack[6]
        hdr.chunk_size = unpack[7]
        hdr.chunk_count = unpack[8]
        hdr.chunk_index = unpack[9]
        # Fix: was mistakenly assigned to self, leaving hdr.errcode at 0.
        hdr.errcode = unpack[10]
        hdr.length_filename = unpack[11]
        hdr.length_dirpath = unpack[12]
        # Payload
        hdr.payload = data[hdr_size:]
        # Unpack fields from payload: [filename][dstdirpath][chunk data]
        pos1 = 0
        pos2 = hdr.length_filename
        if pos2 - pos1 > 0:
            hdr.filename = str(hdr.payload[:pos2], 'utf-8')
        pos1 = hdr.length_filename
        pos2 = hdr.length_filename + hdr.length_dirpath
        if pos2 - pos1 > 0:
            hdr.dstdirpath = str(hdr.payload[pos1:pos2], 'utf-8')
        pos1 = hdr.length_filename + hdr.length_dirpath
        hdr.data = hdr.payload[pos1:]
        return hdr
class header_download():
    """Wire header for the 'download' action (server streams a file back)."""

    def __init__(self, kind: action_kind = action_kind.unknown,
                 filepath: str = '',
                 filesize: int = 0,
                 data: bytes = b''):
        # signature + (header/total/payload/file sizes, action name/kind)
        # + (chunk size/count/index) + errcode + filepath length
        self._STRUCT_FORMAT_ = '8s' + 'iiiiii' + 'iii' + 'i' + 'i'
        # Fields decoded from the payload by unpack()
        self.filepath = b''
        self.data = b''
        self.signature: bytes = _SIGNATURE_DOWNLO_
        self.header_size: int = 0
        self.total_size: int = 0
        self.payload_size: int = 0
        self.file_size: int = filesize
        self.action_name: int = action_name.download.value
        self.action_kind: int = kind.value
        self.chunk_size: int = len(data)
        self.chunk_count: int = 0
        self.chunk_index: int = 0
        self.errcode: int = 0
        # Fix: record the byte length of the encoded path; len(str) disagrees
        # with the UTF-8 payload for non-ASCII paths.
        filepath_raw = filepath.encode('utf-8') if filepath else b''
        self.length_filepath: int = len(filepath_raw)
        self.payload: bytes = filepath_raw
        if data:
            self.payload += data

    def pack(self):
        """Serialize header + payload into one bytes blob."""
        self.header_size = struct.calcsize(self._STRUCT_FORMAT_)
        self.payload_size = (self.length_filepath +
                             self.chunk_size)
        self.total_size = self.header_size + self.payload_size
        rawdata = struct.pack(self._STRUCT_FORMAT_,
                              self.signature,
                              self.header_size,
                              self.total_size,
                              self.payload_size,
                              self.file_size,
                              self.action_name,
                              self.action_kind,
                              self.chunk_size,
                              self.chunk_count,
                              self.chunk_index,
                              self.errcode,
                              self.length_filepath)
        rawdata += self.payload
        return rawdata

    def unpack(self, data: bytes):
        """Decode a pack()ed blob; returns a new header_download, or None if short."""
        if len(data) <= _HEADER_SIZE_:
            return None
        # Fix: read all 4 size bytes ([8:12]); [8:11] dropped the high byte.
        hdr_size: int = int.from_bytes(data[8:12], 'little')
        hdr_only: bytes = data[:hdr_size]
        hdr = header_download()
        # Header fields
        unpack = struct.unpack(self._STRUCT_FORMAT_, hdr_only)
        hdr.signature = unpack[0]
        hdr.header_size = unpack[1]
        hdr.total_size = unpack[2]
        hdr.payload_size = unpack[3]
        hdr.file_size = unpack[4]
        hdr.action_name = unpack[5]
        hdr.action_kind = unpack[6]
        hdr.chunk_size = unpack[7]
        hdr.chunk_count = unpack[8]
        hdr.chunk_index = unpack[9]
        # Fix: was mistakenly assigned to self, leaving hdr.errcode at 0.
        hdr.errcode = unpack[10]
        hdr.length_filepath = unpack[11]
        # Payload
        hdr.payload = data[hdr_size:]
        # Unpack fields from payload: [filepath][chunk data]
        pos1 = 0
        pos2 = hdr.length_filepath
        if pos2 - pos1 > 0:
            hdr.filepath = str(hdr.payload[:pos2], 'utf-8')
        pos1 = hdr.length_filepath
        hdr.data = hdr.payload[pos1:]
        return hdr
class header_list():
    """Wire header for the 'list' action (directory listing request/reply)."""

    def __init__(self,
                 kind: action_kind = action_kind.unknown,
                 dstdirpath: str = '',
                 data: bytes = b''):
        if data is None:
            data = b''
        # signature + (header/total/payload sizes, action name/kind)
        # + (chunk size/count/index) + errcode + dirpath length
        self._STRUCT_FORMAT_ = '8s' + 'iiiii' + 'iii' + 'i' + 'i'
        # Fields decoded from the payload by unpack()
        self.dstdirpath = b''
        self.data = b''
        self.signature: bytes = _SIGNATURE_LIST___
        self.header_size: int = 0
        self.total_size: int = 0
        self.payload_size: int = 0
        self.action_name: int = action_name.list.value
        self.action_kind: int = kind.value
        self.chunk_size: int = len(data)
        self.chunk_count: int = 0
        self.chunk_index: int = 0
        self.errcode: int = 0
        # Fix: record the byte length of the encoded path; len(str) disagrees
        # with the UTF-8 payload for non-ASCII paths.
        dstdirpath_raw = dstdirpath.encode('utf-8')
        self.length_dirpath: int = len(dstdirpath_raw)
        self.payload: bytes = dstdirpath_raw + data

    def pack(self):
        """Serialize header + payload into one bytes blob."""
        self.header_size = struct.calcsize(self._STRUCT_FORMAT_)
        self.payload_size = (self.length_dirpath +
                             self.chunk_size)
        self.total_size = self.header_size + self.payload_size
        rawdata = struct.pack(self._STRUCT_FORMAT_,
                              self.signature,
                              self.header_size,
                              self.total_size,
                              self.payload_size,
                              self.action_name,
                              self.action_kind,
                              self.chunk_size,
                              self.chunk_count,
                              self.chunk_index,
                              self.errcode,
                              self.length_dirpath)
        rawdata += self.payload
        return rawdata

    def unpack(self, data: bytes):
        """Decode a pack()ed blob; returns a new header_list, or None if short."""
        if len(data) <= _HEADER_SIZE_:
            return None
        # Fix: read all 4 size bytes ([8:12]); [8:11] dropped the high byte.
        hdr_size: int = int.from_bytes(data[8:12], 'little')
        hdr_only: bytes = data[:hdr_size]
        hdr = header_list()
        # Header fields
        unpack = struct.unpack(self._STRUCT_FORMAT_, hdr_only)
        hdr.signature = unpack[0]
        hdr.header_size = unpack[1]
        hdr.total_size = unpack[2]
        hdr.payload_size = unpack[3]
        hdr.action_name = unpack[4]
        hdr.action_kind = unpack[5]
        hdr.chunk_size = unpack[6]
        hdr.chunk_count = unpack[7]
        hdr.chunk_index = unpack[8]
        hdr.errcode = unpack[9]
        hdr.length_dirpath = unpack[10]
        # Payload
        hdr.payload = data[hdr_size:]
        # Unpack fields from payload: [dstdirpath][chunk data]
        pos1 = 0
        pos2 = hdr.length_dirpath
        if pos2 - pos1 > 0:
            hdr.dstdirpath = str(hdr.payload[pos1:pos2], 'utf-8')
        pos1 = hdr.length_dirpath
        hdr.data = hdr.payload[pos1:]
        return hdr
class header_execute():
    """Wire header for the 'execute' action (start/query/kill a remote process)."""

    def __init__(self,
                 kind: action_kind = action_kind.unknown,
                 subcmd: execute_subcmd = execute_subcmd.unknown,
                 program: bytes = b'',
                 argument: bytes = b'',
                 workdir: bytes = b'.',
                 isbase64: bool = False,
                 chunk_data: bytes = b''):
        # signature + (header/total/payload sizes, action name/kind)
        # + (subcmd/status/tag) + (chunk size/count/index) + errcode
        # + isbase64 flag byte + (program/argument/workdir lengths)
        self._STRUCT_FORMAT_ = '8s' + 'iiiii' + 'iii' + 'iii' + 'i' + 'Biii'
        #
        # payload_data
        #
        self.cmdresult = execresult()
        self.exec = execmdarg(program, argument, workdir, isbase64)
        #
        # chunk_data: serialized execresult on the reply path
        #
        self.chunk_data = chunk_data
        #
        # Header content
        #
        self.signature: bytes = _SIGNATURE_EXECUT_
        self.header_size: int = 0
        self.total_size: int = 0
        self.payload_size: int = 0
        self.action_name: int = action_name.execute.value
        self.action_kind: int = kind.value
        self.subcmd_value: int = 0
        self.status_value: int = proc_status.unknown.value
        self.tag_value = 0
        if subcmd:
            self.subcmd_value = subcmd.value
        self.chunk_size: int = len(chunk_data)
        self.chunk_count: int = 0
        self.chunk_index: int = 0
        if self.chunk_size > 0:
            self.chunk_count = 1
        self.errcode: int = 0
        # program/argument/workdir are already bytes, so these are byte counts.
        self.length_isbase64: int = 1
        self.length_program: int = len(program)
        self.length_argument: int = len(argument)
        self.length_workdir: int = len(workdir)

    def pack(self):
        """Serialize header + payload ([flag][program][argument][workdir][chunk])."""
        self.header_size = struct.calcsize(self._STRUCT_FORMAT_)
        self.payload_size = (self.length_isbase64 +
                             self.length_program +
                             self.length_argument +
                             self.length_workdir +
                             self.chunk_size)
        self.total_size = self.header_size + self.payload_size
        packed_data = struct.pack(self._STRUCT_FORMAT_,
                                  self.signature,
                                  self.header_size,
                                  self.total_size,
                                  self.payload_size,
                                  self.action_name,
                                  self.action_kind,
                                  self.subcmd_value,
                                  self.status_value,
                                  self.tag_value,
                                  self.chunk_size,
                                  self.chunk_count,
                                  self.chunk_index,
                                  self.errcode,
                                  self.length_isbase64,
                                  self.length_program,
                                  self.length_argument,
                                  self.length_workdir)
        # The isbase64 flag travels as a single ASCII digit.
        isbase = b'1' if self.exec.isbase64 else b'0'
        data = (isbase +
                self.exec.program +
                self.exec.argument +
                self.exec.workdir +
                self.chunk_data)
        packed_data = packed_data + data
        assert len(packed_data) == self.total_size, 'data lengths are not identical !!!'
        return packed_data

    def unpack(self, chunk_data_raw: bytes):
        """Decode a pack()ed blob; returns a new header_execute, or None if short."""
        packed_data_len = len(chunk_data_raw)
        if packed_data_len <= _HEADER_SIZE_:
            logfmt = '[header_execute] buffer is insufficient !!! (data_len={})'
            logging.info(logfmt.format(packed_data_len))
            return None
        # Fix: header_size/total_size are 4-byte little-endian ints at 8..12
        # and 12..16; the old [8:11]/[12:15] slices dropped the high bytes.
        hdr_size: int = int.from_bytes(chunk_data_raw[8:12], 'little')
        total_size: int = int.from_bytes(chunk_data_raw[12:16], 'little')
        logging.info('[header_execute] hdr_size={}'.format(hdr_size))
        logging.info('[header_execute] total_size={}'.format(total_size))
        if packed_data_len < total_size:
            logfmt = '[header_execute] buffer is insufficient !!! ' + \
                     '(data_len={} less than total_size={})'
            logging.info(logfmt.format(packed_data_len, total_size))
            return None
        header_content: bytes = chunk_data_raw[:hdr_size]
        hdr = header_execute()
        # Header fields
        unpack = struct.unpack(self._STRUCT_FORMAT_, header_content)
        hdr.signature = unpack[0]
        hdr.header_size = unpack[1]
        hdr.total_size = unpack[2]
        hdr.payload_size = unpack[3]
        hdr.action_name = unpack[4]
        hdr.action_kind = unpack[5]
        hdr.subcmd_value = unpack[6]
        hdr.status_value = unpack[7]
        hdr.tag_value = unpack[8]
        hdr.chunk_size = unpack[9]
        hdr.chunk_count = unpack[10]
        hdr.chunk_index = unpack[11]
        hdr.errcode = unpack[12]
        hdr.length_isbase64 = unpack[13]
        hdr.length_program = unpack[14]
        hdr.length_argument = unpack[15]
        hdr.length_workdir = unpack[16]
        #
        # Payload: [flag][program][argument][workdir][chunk]
        #
        payload_content = chunk_data_raw[hdr.header_size: hdr.total_size]
        pos1 = 0
        pos2 = (pos1 + 1)
        isbase64 = payload_content[pos1:pos2]
        hdr.exec.isbase64 = bool(int(isbase64))
        pos1 = pos2
        pos2 = pos2 + hdr.length_program
        hdr.exec.program = payload_content[pos1:pos2]
        pos1 = pos2
        pos2 = pos2 + hdr.length_argument
        hdr.exec.argument = payload_content[pos1:pos2]
        pos1 = pos2
        pos2 = pos2 + hdr.length_workdir
        hdr.exec.workdir = payload_content[pos1:pos2]
        # chunk_data: a serialized execresult, if present
        pos1 = pos2
        pos2 = pos2 + hdr.chunk_size
        if pos2 - pos1 > 0:
            chunk_blob = payload_content[pos1:pos2]
            chunk_data_ori: execresult = CONFIG().toCLASS(chunk_blob)
            hdr.chunk_data = chunk_data_ori
        return hdr
class header_message():
    """Wire header for the 'message' action (a titled text blob)."""

    def __init__(self, kind: action_kind = action_kind.unknown, title: str = 'default', data: bytes = b''):
        # signature + (header/total/payload sizes, action name/kind)
        # + (chunk size/count/index) + errcode + title length
        self._STRUCT_FORMAT_ = '8s' + 'iiiii' + 'iii' + 'i' + 'i'
        #
        # payload_data
        #
        self.title: str = title
        #
        # payload_chunk
        #
        self.payload_chunk = data
        #
        # Header content
        #
        self.signature: bytes = _SIGNATURE_TEXT___
        self.header_size: int = 0
        self.total_size: int = 0
        self.payload_size: int = 0
        self.action_name: int = action_name.message.value
        self.action_kind: int = kind.value
        self.chunk_size: int = len(data)
        self.chunk_count: int = 0
        self.chunk_index: int = 0
        self.errcode: int = 0
        # pack() encodes the title as ASCII, so character count == byte count;
        # a non-ASCII title would raise UnicodeEncodeError in pack().
        self.length_title: int = len(title)
        if len(data) > 0:
            self.chunk_count = 1

    def pack(self):
        """Serialize header + payload ([title][chunk]) into one bytes blob."""
        self.header_size = struct.calcsize(self._STRUCT_FORMAT_)
        self.payload_size = self.length_title + self.chunk_size
        self.total_size = self.header_size + self.payload_size
        rawdata = struct.pack(self._STRUCT_FORMAT_,
                              self.signature,
                              self.header_size,
                              self.total_size,
                              self.payload_size,
                              self.action_name,
                              self.action_kind,
                              self.chunk_size,
                              self.chunk_count,
                              self.chunk_index,
                              self.errcode,
                              self.length_title)
        payload = (self.title.encode('ascii') + self.payload_chunk)
        rawdata += payload
        return rawdata

    def unpack(self, data: bytes):
        """Decode a pack()ed blob; returns a new header_message, or None if short."""
        if len(data) <= _HEADER_SIZE_:
            return None
        # Fix: read all 4 size bytes ([8:12]); [8:11] dropped the high byte.
        hdr_size: int = int.from_bytes(data[8:12], 'little')
        hdr_only: bytes = data[:hdr_size]
        hdr = header_message()
        # Header fields
        unpack = struct.unpack(self._STRUCT_FORMAT_, hdr_only)
        hdr.signature = unpack[0]
        hdr.header_size = unpack[1]
        hdr.total_size = unpack[2]
        hdr.payload_size = unpack[3]
        hdr.action_name = unpack[4]
        hdr.action_kind = unpack[5]
        hdr.chunk_size = unpack[6]
        hdr.chunk_count = unpack[7]
        hdr.chunk_index = unpack[8]
        hdr.errcode = unpack[9]
        hdr.length_title = unpack[10]
        #
        # Payload.  Fix: the old code sliced with self.header_size /
        # self.total_size / self.length_title / self.chunk_size of the fresh
        # decoder instance (0 / 0 / len('default') / 0), so the decoded title
        # and chunk were always empty/garbage.  Use the decoded hdr fields.
        #
        payload = data[hdr.header_size:hdr.total_size]
        # payload_data
        pos1 = 0
        pos2 = hdr.length_title
        hdr.title = str(payload[pos1:pos2], encoding='ascii')
        # payload_chunk
        pos1 = pos2
        pos2 = pos1 + hdr.chunk_size
        hdr.payload_chunk = payload[pos1:pos2]
        return hdr
class header():
    """Scans a raw receive buffer for the first complete protocol chunk."""

    def __init__(self):
        pass

    def find_header(self, data: bytes):
        """Return (decoded_header, consumed_bytes), or (None, 0) if incomplete.

        consumed_bytes counts from the start of *data* through the end of the
        matched chunk, so the caller can drop data[:consumed_bytes] from its
        stream buffer.
        """
        data_len = len(data)
        if data_len < _HEADER_SIZE_:
            logging.info('buffer is insufficient !!! (data_len={})'.format(data_len))
            return None, 0
        # Signature -> decoder class, in match-priority order.
        targets = [(_SIGNATURE_UPLOAD_, header_upload, 'header_upload'),
                   (_SIGNATURE_DOWNLO_, header_download, 'header_download'),
                   (_SIGNATURE_EXECUT_, header_execute, 'header_execute'),
                   (_SIGNATURE_LIST___, header_list, 'header_list'),
                   (_SIGNATURE_ECHO___, header_echo, 'header_echo'),
                   (_SIGNATURE_TEXT___, header_message, 'header_text')]
        signature_pos = -1
        decoder = None
        label = ''
        for signature, cls, name in targets:
            signature_pos = data.find(signature)
            if signature_pos >= 0:
                decoder, label = cls, name
                logging.info('signature matched ({}).'.format(label))
                break
        if decoder is None:
            # Fix: the old code fell through with signature_pos == -1 and read
            # size fields from garbage offsets before giving up.
            logging.warning('buffer missed matching, len(data)={}'.format(len(data)))
            return None, 0
        # header_size / total_size are 4-byte little-endian ints immediately
        # after the 8-byte signature.
        pos = signature_pos + 8
        header_size: int = int.from_bytes(data[pos:pos + 4], 'little')
        if data_len < header_size:
            logging.info('buffer is insufficient !!! (data_len is less than header_size)')
            return None, 0
        pos = signature_pos + 12
        total_size: int = int.from_bytes(data[pos:pos + 4], 'little')
        chunk_end_pos = signature_pos + total_size
        # Fix: compare against the chunk end (accounting for any bytes before
        # the signature), not against total_size alone.
        if data_len < chunk_end_pos:
            logging.info('buffer is insufficient !!! (data_len is less than total_size)')
            return None, 0
        # Fix: this line used to log signature_pos under the total_size label.
        logging.info('total_size={}'.format(total_size))
        logging.info('chunk_end_pos={}'.format(chunk_end_pos))
        full_header = data[signature_pos:chunk_end_pos]
        logging.info('full_header_size={}'.format(len(full_header)))
        logging.info('unpacking {} ...'.format(label))
        found_hdr = decoder().unpack(full_header)
        if found_hdr is None:
            logging.warning('buffer is insufficient !!! (failed to unpack)')
            return None, 0
        logfmt = '{} action_kind={} chunk_index={}/{} chunk_size={}'
        logging.info(logfmt.format(label,
                                   found_hdr.action_kind,
                                   found_hdr.chunk_index + 1,
                                   found_hdr.chunk_count,
                                   found_hdr.chunk_size))
        # Per-type extras: logged only when the decoded header carries them.
        for attr in ('filename', 'dstdirpath', 'filepath', 'file_size'):
            if hasattr(found_hdr, attr):
                logging.info('{}={}'.format(attr, getattr(found_hdr, attr)))
        if hasattr(found_hdr, 'exec'):
            logging.info('program={}'.format(found_hdr.exec.program))
            logging.info('argument={}'.format(found_hdr.exec.argument))
            logging.info('workdir={}'.format(found_hdr.exec.workdir))
        # Fix: consume up to the chunk end so any garbage bytes preceding the
        # signature are discarded with the chunk; the old return value
        # (total_size) left the stream misaligned whenever signature_pos > 0.
        return found_hdr, chunk_end_pos
class actor_callbacks():
    """Bag of per-action handler callbacks.

    Each slot holds a callable (sock, chunk) -> result, or None when the
    corresponding action should be ignored.
    """
    def __init__(self):
        # All handlers start unset; the owner wires them in afterwards.
        self.list = self.upload = self.download = None
        self.execute = self.message = None
class rcsock():
    """One connected client socket plus its receive/parse/dispatch loop.

    A daemon thread reads the socket into ``stream_pool``, slices complete
    protocol chunks out of it, and dispatches each decoded chunk to the
    matching callback in ``server_callback`` (an actor_callbacks).
    """

    def __init__(self, conn, actors: actor_callbacks = None):
        self.BUFF_SIZE = _BUFFER_SIZE_
        # Chunk scanner/decoder shared by the receive loop.
        self.header = header()
        self.conn: socket.socket = conn
        # Raw bytes received but not yet parsed into chunks.
        self.stream_pool = b''
        # Decoded-but-not-yet-dispatched chunks (FIFO).
        self.chunk_list = list()
        self.server_callback = actors
        # Upload state: path/handle of the file currently being written
        # across multiple upload chunks (see _handle_upload_command).
        self.file_path = ''
        self.file_handle = None
        self.conn.setblocking(True)
        # The receive loop starts immediately on construction.
        self.thread = threading.Thread(target=self._receive_stream)
        self.thread.daemon = True
        self.thread.start()

    def _send(self, data):
        """Send all of *data*; returns sendall()'s result (None) or None on error."""
        data_len = len(data)
        logging.info('data_len={}'.format(data_len))
        ret = None
        try:
            ret = self.conn.sendall(data)
        except Exception as Err:
            # Best effort: log and fall through so the caller keeps running.
            logging.exception(Err)
        finally:
            return ret

    def _wait_until(self, condition, interval=0.1, timeout=1, *args):
        """Poll *condition(*args)* until true or *timeout* seconds elapse.

        NOTE(review): *args follows the defaulted interval/timeout, so
        positional condition arguments cannot be passed without overriding
        both — confirm intended signature before relying on *args.
        """
        start = time.time()
        while not condition(*args) and time.time() - start < timeout:
            time.sleep(interval)
        return condition(*args)

    def _receive_stream(self):
        """Thread body: read the socket until EOF, parsing and dispatching chunks."""
        try:
            while chunk := self.conn.recv(self.BUFF_SIZE):
                chunklen = len(chunk)
                logging.info('chunklen={}'.format(chunklen))
                self.stream_pool += chunk
                self._parse_complete_chunk()
                if self.server_callback:
                    self._consume_chunks()
        except socket.timeout:
            logging.exception('socket.timeout')
        except ConnectionResetError:
            # Peer vanished; treated as a normal disconnect.
            logging.warning('ConnectionResetError')
        except Exception as Err:
            print(Err)
            logging.exception(Err)
        finally:
            pass

    def _consume_chunks(self):
        """Dispatch every queued chunk to the callback matching its action."""
        while len(self.chunk_list) > 0:
            logging.info('There are {} chunk in the chunk_list.'.format(
                len(self.chunk_list)))
            chunk = self.chunk_list.pop(0)
            logfmt = 'action_kind={} chunk_index={}/{} chunk_size={}'
            logging.info(logfmt.format(chunk.action_kind,
                                       chunk.chunk_index + 1,
                                       chunk.chunk_count,
                                       chunk.chunk_size))
            # Each callback receives (this rcsock, decoded chunk).
            if chunk.action_name == action_name.list.value:
                self.server_callback.list(self, chunk)
            elif chunk.action_name == action_name.upload.value:
                self.server_callback.upload(self, chunk)
            elif chunk.action_name == action_name.download.value:
                self.server_callback.download(self, chunk)
            elif chunk.action_name == action_name.execute.value:
                self.server_callback.execute(self, chunk)
            elif chunk.action_name == action_name.message.value:
                self.server_callback.message(self, chunk)
            else:
                # Unknown action: silently dropped.
                pass

    def _parse_complete_chunk(self):
        """Move every complete chunk from stream_pool into chunk_list."""
        while True:
            logfmt = 'b4 len(self.stream_pool)={}'
            logging.info(logfmt.format(len(self.stream_pool)))
            logfmt = 'b4 len(self.chunk_list)={}'
            logging.info(logfmt.format(len(self.chunk_list)))
            found_header, size = self.header.find_header(self.stream_pool)
            if 0 == size:
                # No complete chunk buffered yet.
                logging.info('Nothing found !!!')
                break
            logging.info('Found a new header, ' +
                         'will be insertted to chunk_list.')
            self.chunk_list.append(found_header)
            # Drop the consumed bytes from the stream buffer.
            self.stream_pool = self.stream_pool[size:]
            logfmt = 'ft len(self.stream_pool)={}'
            logging.info(logfmt.format(len(self.stream_pool)))
            logfmt = 'ft len(self.chunk_list)={}'
            logging.info(logfmt.format(len(self.chunk_list)))
class rcserver():
def __init__(self, host: str, port: int, workdir: str = '~',
debug_enabled: bool = False):
self.CHUNK_SIZE = _CHUNK_SIZE_
self.client_list = list()
self.chunk_list = list()
self.stream_pool = b''
self.proc_list = list()
self.server_callback = actor_callbacks()
self.server_callback.download = self._handle_download_command
self.server_callback.list = self._handle_list_command
self.server_callback.upload = self._handle_upload_command
self.server_callback.execute = self._handle_execute_command
self.server_callback.message = self._handle_message_command
self.__HOST__ = host
self.__PORT__ = port
self.__WORKDIR__ = workdir
if debug_enabled:
self._enable_debug()
def _enable_debug(self):
prefix = '[%(asctime)s][%(levelname)s]' + \
'[%(filename)s!%(funcName)s:%(lineno)d] %(message)s'
format = logging.Formatter(prefix, datefmt='%Y%m%d %H:%M:%S')
logger = logging.getLogger()
# Write to file
logfile = logging.FileHandler('rcserver.log')
logfile.setLevel(logging.INFO)
logfile.setFormatter(format)
logger.addHandler(logfile)
# # Write to screen
# screen = logging.StreamHandler()
# screen.setLevel(logging.INFO)
# screen.setFormatter(format)
# logger.addHandler(screen)
    def start(self):
        """Accept clients forever (until stop()); blocks the calling thread.

        Each accepted connection is greeted with an echo chunk and wrapped in
        an rcsock, whose receive thread then drives the protocol.
        """
        self._listening = True
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # NOTE(review): binds all interfaces; self.__HOST__ is stored but not
        # used here — confirm whether binding to the configured host was meant.
        self.sock.bind(('0.0.0.0', self.__PORT__))
        self.sock.listen(10)
        try:
            while self._listening:
                conn, _ = self.sock.accept()
                # Greet the client so it can sync on the echo signature.
                conn.sendall(header_echo().pack())
                self.client_list.append(rcsock(conn, self.server_callback))
        except Exception as e:
            logging.exception(e)
        finally:
            self.sock.close()
def stop(self):
self._listening = False
for client in self.client_list:
client: rcsock = client
client.conn.close()
def get_addr_info(self):
return (self.__HOST__, self.__PORT__)
def _handle_list_command(self, conn: rcsock, ask_chunk: header_list):
logging.info('-------------------------------------------')
filepath = os.path.abspath(ask_chunk.dstdirpath)
logfmt = 'filepath={}'
logging.info(logfmt.format(filepath))
if not os.path.exists(filepath):
data_chunk = header_list(action_kind.data,
filepath,
None)
data_chunk.errcode = error_path_not_exist.errcode
conn._send(data_chunk.pack())
return error_file_not_found
listdir = []
if os.path.isdir(filepath):
listdir = os.listdir(filepath)
else:
listdir.append(os.path.basename(filepath))
index = 0
for file in listdir:
index += 1
logging.info('file[{}/{}]={}'.format(index, len(listdir), file))
data = json.dumps(listdir).encode()
data_chunk = header_list(action_kind.data,
filepath,
data)
data_chunk.chunk_count = 1
data_chunk.chunk_index = 0
data_chunk.chunk_size = len(data)
conn._send(data_chunk.pack())
# done_chunk = header_list(action_kind.done, ask_chunk.dstdirpath)
# conn._send(done_chunk.pack())
return True
def _handle_download_command(self,
conn: rcsock,
ask_chunk: header_download):
logging.info('-------------------------------------------')
fileloc = os.path.abspath(ask_chunk.filepath)
logging.info("fileloc={}".format(fileloc))
rcrs = rcresult()
# if ask_chunk.action_kind == action_kind.done.value:
# return rcrs
if not os.path.exists(fileloc):
logging.error("The spcific path is not found !!!")
logging.error("fileloc={}".format(fileloc))
rcrs = error_file_not_found
if os.path.isdir(fileloc):
logging.error("The spcific path should be a file !!!")
logging.error("fileloc={}".format(fileloc))
rcrs = error_not_a_file
# data + done for error
if 0 != rcrs.errcode:
data_chunk = header_download(action_kind.data,
ask_chunk.filepath,
ask_chunk.file_size)
conn._send(data_chunk.pack())
done_chunk = header_download(action_kind.done,
ask_chunk.filepath,
ask_chunk.file_size)
conn._send(done_chunk.pack())
return rcrs
filesize = os.path.getsize(fileloc)
index = 0
chunk_count = int(filesize / self.CHUNK_SIZE)
if (filesize % self.CHUNK_SIZE) > 0:
chunk_count += 1
# data
file = open(fileloc, "rb")
while data := file.read(self.CHUNK_SIZE):
datalen = len(data)
logging.info('chunklen={}'.format(datalen))
data_chunk = header_download(action_kind.data,
ask_chunk.filepath,
filesize,
data)
data_chunk.chunk_size = min(self.CHUNK_SIZE, datalen)
data_chunk.chunk_count = chunk_count
data_chunk.chunk_index = index
logfmt = 'header_download action_kind={} chunk_index={}/{} ' + \
'chunk_size={}'
logging.info(logfmt.format(data_chunk.action_kind,
data_chunk.chunk_index + 1,
data_chunk.chunk_count,
data_chunk.chunk_size))
conn._send(data_chunk.pack())
index += 1
file.close()
# done by complete
done_chunk = header_download(action_kind.done,
ask_chunk.filepath,
filesize)
conn._send(done_chunk.pack())
return True
    def _handle_upload_command(self,
                               sock: rcsock,
                               data_chunk: header_upload,
                               overwrite: bool = True):
        """Receive one upload chunk; open/write/close the target file by phase.

        The upload spans several chunks: 'ask' opens the destination file
        (handle kept on the rcsock), 'data' appends a chunk, 'done' closes
        the file and acknowledges with a header_upload(done) reply.

        NOTE(review): 'overwrite' is currently unused.
        NOTE(review): dstdirpath/filename come from the network and are joined
        without sanitization — path traversal is possible; confirm trust model.
        """
        logging.info('-------------------------------------------')
        logfmt = 'chunk_index={}/{} file_size={} chunk_size={}'
        logging.info(logfmt.format(data_chunk.chunk_index + 1,
                                   data_chunk.chunk_count,
                                   data_chunk.file_size,
                                   data_chunk.chunk_size))
        try:
            # open: first chunk of a new transfer
            if not sock.file_handle and \
               data_chunk.action_kind == action_kind.ask.value:
                filepath = os.path.join(data_chunk.dstdirpath,
                                        data_chunk.filename)
                fullpath = os.path.abspath(filepath)
                logging.info('open file (fullpath={})'.format(fullpath))
                sock.file_path = filepath
                sock.file_handle = open(filepath, "wb")
            # write: append this chunk's bytes
            if data_chunk.action_kind == action_kind.data.value:
                sock.file_handle.write(data_chunk.data)
                diff = (data_chunk.chunk_count - data_chunk.chunk_index)
                is_last_data = (1 == diff)
                logging.info('last_chunk={}'.format(is_last_data))
                logging.info('filepath={}'.format(sock.file_path))
            # close: flush, release the handle, and acknowledge
            if data_chunk.action_kind == action_kind.done.value:
                if sock.file_handle:
                    logfmt = 'close file (fullpath={})'
                    logging.info(logfmt.format(sock.file_path))
                    sock.file_handle.flush()
                    sock.file_handle.close()
                    sock.file_handle = None
                    sock.file_path = ''
                # done (reply)
                chunk_done = header_upload(action_kind.done,
                                           data_chunk.filename,
                                           data_chunk.file_size,
                                           data_chunk.dstdirpath)
                sock._send(chunk_done.pack())
        except Exception as err:
            logging.exception(err)
        return True
    def _handle_execute_command(self,
                                sock: rcsock,
                                ask_chunk: header_execute):
        """Handle an EXECUTE command from the client.

        Dispatches on ``ask_chunk.subcmd_value``:

        * ``start`` -- spawn the requested program via ``async_process``
          and reply with its status and process tag,
        * ``kill``  -- kill the process whose tag matches ``tag_value``,
        * ``query`` -- reply with the tagged process' status and captured
          output,
        * anything else -- reply with ``proc_status.exception``.

        A ``data`` chunk and then a ``done`` chunk are always sent back
        (from the ``finally`` block), so the client is never left waiting.
        """
        try:
            data_chunk = None
            logging.info('-------------------------------------------')
            logging.info('[UTF8] program={}'.format(ask_chunk.exec.program))
            logging.info('[UTF8] argument={}'.format(ask_chunk.exec.argument))
            logging.info('[UTF8] workdir={}'.format(ask_chunk.exec.workdir))
            # decode the raw UTF-8 fields; a base64 argument is decoded first
            program = str(ask_chunk.exec.program, encoding='utf-8')
            argument = ''
            if ask_chunk.exec.isbase64:
                argument = base64.b64decode(ask_chunk.exec.argument).decode('utf-8')
            else:
                argument = str(ask_chunk.exec.argument, encoding='utf-8')
            workdir = str(ask_chunk.exec.workdir, encoding='utf-8')
            logging.info('[ORIGIN] program={}'.format(program))
            logging.info('[ORIGIN] argument={}'.format(argument))
            logging.info('[ORIGIN] workdir={}'.format(workdir))
            # subcmd: start
            if ask_chunk.subcmd_value == execute_subcmd.start.value:
                logging.info('Before opening a process')
                # tags start at 100, so 0 can be used to mean "no process"
                async_proc = async_process(len(self.proc_list) + 100)
                self.proc_list.append(async_proc)
                async_proc.run(program, argument, workdir)
                logging.info('After opening a process')
                subcmd = execute_subcmd(ask_chunk.subcmd_value)
                data_chunk = header_execute(action_kind.data,
                                            subcmd,
                                            ask_chunk.exec.program,
                                            ask_chunk.exec.argument,
                                            ask_chunk.exec.workdir,
                                            ask_chunk.exec.isbase64)
                data_chunk.status_value = async_proc.get_status().value
                data_chunk.tag_value = async_proc.get_tag()
            # subcmd: kill
            elif ask_chunk.subcmd_value == execute_subcmd.kill.value:
                subcmd = execute_subcmd(ask_chunk.subcmd_value)
                data_chunk = header_execute(action_kind.data,
                                            subcmd,
                                            ask_chunk.exec.program,
                                            ask_chunk.exec.argument,
                                            ask_chunk.exec.workdir,
                                            ask_chunk.exec.isbase64)
                # NOTE(review): other branches assign `.value` here but this
                # one stores the raw enum (and `item.get_status()` below does
                # too) -- confirm pack() accepts a proc_status enum as well.
                data_chunk.status_value = proc_status.killing
                for item in self.proc_list:
                    item: async_process = item
                    if item.get_tag() == ask_chunk.tag_value:
                        item.kill()
                        data_chunk.status_value = item.get_status()
                data_chunk.tag_value = ask_chunk.tag_value
            # subcmd: query
            elif ask_chunk.subcmd_value == execute_subcmd.query.value:
                data = b''
                status = proc_status.unknown
                # find the tagged process and serialize its captured output
                for item in self.proc_list:
                    item: async_process = item
                    if item.get_tag() == ask_chunk.tag_value:
                        status = item.get_status()
                        data = item.execrs.toTEXT().encode()
                        break
                subcmd = execute_subcmd(ask_chunk.subcmd_value)
                data_chunk = header_execute(action_kind.data,
                                            subcmd,
                                            ask_chunk.exec.program,
                                            ask_chunk.exec.argument,
                                            ask_chunk.exec.workdir,
                                            ask_chunk.exec.isbase64,
                                            data)
                data_chunk.tag_value = ask_chunk.tag_value
                data_chunk.status_value = status.value
            else:
                # unknown sub-command: report an exception status back
                subcmd = execute_subcmd(ask_chunk.subcmd_value)
                data_chunk = header_execute(action_kind.data,
                                            subcmd,
                                            ask_chunk.exec.program,
                                            ask_chunk.exec.argument,
                                            ask_chunk.exec.workdir,
                                            ask_chunk.exec.isbase64)
                data_chunk.tag_value = ask_chunk.tag_value
                data_chunk.status_value = proc_status.exception
        except Exception as Err:
            logging.exception(Err)
            # best-effort error reply
            # NOTE(review): unlike the branches above, isbase64 is not
            # forwarded here -- confirm header_execute's default is safe.
            subcmd = execute_subcmd(ask_chunk.subcmd_value)
            data_chunk = header_execute(action_kind.data,
                                        subcmd,
                                        ask_chunk.exec.program,
                                        ask_chunk.exec.argument,
                                        ask_chunk.exec.workdir)
            data_chunk.tag_value = ask_chunk.tag_value
            data_chunk.status_value = proc_status.exception
        finally:
            # send data
            packed_data_chunk = data_chunk.pack()
            sock._send(packed_data_chunk)
            logging.info('send data ({})'.format(ask_chunk.exec.program))
            # send done
            subcmd = execute_subcmd(ask_chunk.subcmd_value)
            done_chunk = header_execute(action_kind.done,
                                        subcmd,
                                        ask_chunk.exec.program,
                                        ask_chunk.exec.argument,
                                        ask_chunk.exec.workdir)
            done_chunk.tag_value = data_chunk.tag_value
            logging.info('send done ({})'.format(ask_chunk.exec.program))
            sock._send(done_chunk.pack())
def _handle_message_command(self,
sock: rcsock,
ask_chunk: header_message):
logging.info('-------------------------------------------')
logging.info('title={}'.format(ask_chunk.title))
data = None
if 'default' == ask_chunk.title:
data = 'Hello from server with default'.encode()
elif 'inncmd_sysinfo' == ask_chunk.title:
osname = platform.system().lower()
homedir = os.path.expanduser('~')
data = inncmd_sysinfo(osname, homedir).toTEXT().encode()
done_chunk = header_message(action_kind.done, ask_chunk.title, data)
sock._send(done_chunk.pack())
elif 'inncmd_mkdir' == ask_chunk.title:
result: bool = True
path = str(ask_chunk.payload_chunk, encoding='utf-8')
try:
pos1 = path.find('/')
pos2 = path.find('\\')
if pos1 >= 0 or pos2 >= 0:
os.makedirs(path)
else:
os.mkdir(path)
except Exception as Err:
logging.error(Err)
result = False
data = inncmd_mkdir(path, result).toTEXT().encode()
done_chunk = header_message(action_kind.done, ask_chunk.title, data)
sock._send(done_chunk.pack())
else:
data = 'Hello from server with UNKNOWN'.encode()
done_chunk = header_message(action_kind.done, ask_chunk.title, data)
sock._send(done_chunk.pack())
return True
class rcclient():
    """Client side of the remote-control protocol.

    Connects to an ``rcserver`` over TCP and offers file upload/download,
    directory listing, remote process execution and free-form messages.
    Every public ``cmd_*`` method returns an ``rcresult`` (either a fresh
    one or one of the predefined ``error_*`` results).
    """

    def __init__(self):
        self.CHUNK_SIZE = _CHUNK_SIZE_    # max payload bytes per data chunk
        self.BUFF_SIZE = _BUFFER_SIZE_    # socket receive buffer size
        self.TIMEOUT_TIMES = 10
        self.__HOST__ = None
        self.__PORT__ = 0
        self.sock = None                  # becomes an rcsock after connect()
        self._connected = False

    def _wait_until(self, condition, interval=0.1, timeout=1, *args):
        """Poll ``condition(*args)`` every `interval` seconds until it is
        truthy or `timeout` seconds elapsed; return its final truthiness."""
        start = time.time()
        while not condition(*args) and time.time() - start < timeout:
            time.sleep(interval)
        return condition(*args)

    def _await_chunk(self, what, program):
        """Wait for the next protocol chunk from the server's queue.

        Returns the popped chunk, or None on timeout (an error is logged).
        `what` ('data' / 'done') and `program` are used only for logging.
        """
        logging.info('wait {} ({})'.format(what, program))
        arrived = self._wait_until(len, 0.1, _WAIT_TIMEOUT_,
                                   self.sock.chunk_list)
        logging.info('is_there_a_chunk={}'.format(arrived))
        if not arrived:
            logging.error('wait {} timeout !!! ({})'.format(what, program))
            return None
        logging.info('fetch the {} ({})'.format(what, program))
        return self.sock.chunk_list.pop(0)

    def connect(self, host: str, port: int):
        """Open a TCP connection and validate the server's ECHO handshake.

        Returns True when connected and the 8-byte ECHO signature matched.
        """
        self.__HOST__ = host
        self.__PORT__ = port
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        ret = conn.connect_ex((self.__HOST__, self.__PORT__))
        if ret:
            # connect_ex returns a nonzero errno on failure
            self._connected = False
        else:
            chunk = conn.recv(self.BUFF_SIZE)
            echo_chunk = header_echo().unpack(chunk)
            try:
                # compare the 8-byte ECHO signature element by element
                if all(echo_chunk.signature[i] == _SIGNATURE_ECHO___[i]
                       for i in range(8)):
                    self._connected = True
                    conn.setblocking(True)
                    self.sock = rcsock(conn)
            except Exception:
                self._connected = False
                logging.error('Failed to receive an ECHO from server !!!')
        return self._connected

    def is_connected(self):
        """Return True while the client believes the link is up."""
        return self._connected

    def stop(self):
        """Mark the client as disconnected."""
        self._connected = False

    def _send(self, data):
        """Forward raw bytes to the underlying rcsock."""
        self.sock._send(data)

    def _execute_start(self, cmdarg: execmdarg):
        """Ask the server to start a process.

        Returns an ``execresult`` whose ``data`` carries the server-side
        process tag on success; on timeout the errcode/stderr are filled.
        """
        execrs = execresult()
        ask_chunk = header_execute(action_kind.ask,
                                   execute_subcmd.start,
                                   cmdarg.program,
                                   cmdarg.argument,
                                   cmdarg.workdir,
                                   cmdarg.isbase64)
        self._send(ask_chunk.pack())
        program = ask_chunk.exec.program
        chunk = self._await_chunk('data', program)
        if chunk is None:
            execrs.errcode = error_wait_timeout_streaming.errcode
            execrs.stderr.append(error_wait_timeout_streaming.text
                                 + '(_execute_start, wait data)')
        else:
            logging.info('chunk.data={}'.format(str(chunk.chunk_data)))
        chunk = self._await_chunk('done', program)
        if chunk is None:
            execrs.errcode = error_wait_timeout_streaming.errcode
            execrs.stderr.append(error_wait_timeout_streaming.text
                                 + '(_execute_start, wait done)')
        else:
            # the done chunk carries the tag of the spawned process
            execrs.data = chunk.tag_value
        return execrs

    def _execute_query(self, cmdarg: execmdarg, proc_tag: int):
        """Poll the server for the result of process `proc_tag`.

        Retries while the process is still running, 2 seconds apart.
        BUG FIX: the retry counter was never decremented, so the loop
        could spin forever on a long-running process; it now gives up
        after the configured number of polls.
        """
        retry_times = 3
        while retry_times > 0:
            execrs, status = self._execute_query_and_wait(cmdarg, proc_tag)
            if status != proc_status.running:
                break
            retry_times -= 1
            time.sleep(2)
        return execrs

    def _execute_query_and_wait(self, cmdarg: execmdarg, proc_tag: int):
        """Issue one QUERY sub-command.

        Returns a ``(execresult, proc_status)`` pair; on timeout the
        execresult carries the timeout error and status stays `unknown`.
        """
        ask_chunk = header_execute(action_kind.ask,
                                   execute_subcmd.query,
                                   cmdarg.program,
                                   cmdarg.argument,
                                   cmdarg.workdir,
                                   cmdarg.isbase64)
        execrs = execresult()
        status = proc_status.unknown
        ask_chunk.tag_value = proc_tag
        self._send(ask_chunk.pack())
        program = ask_chunk.exec.program
        chunk = self._await_chunk('data', program)
        if chunk is None:
            execrs.errcode = error_wait_timeout_streaming.errcode
            execrs.stderr.append(error_wait_timeout_streaming.text
                                 + '(_execute_query_and_wait, wait data)')
        else:
            logging.info('chunk.data={}'.format(str(chunk.chunk_data)))
            if chunk.chunk_data:
                execrs = chunk.chunk_data
            status = proc_status(chunk.status_value)
        chunk = self._await_chunk('done', program)
        if chunk is None:
            execrs.errcode = error_wait_timeout_streaming.errcode
            execrs.stderr.append(error_wait_timeout_streaming.text
                                 + '(_execute_query_and_wait, wait done)')
        return execrs, status

    def _execute_kill(self, cmdarg: execmdarg, proc_tag: int):
        """Ask the server to kill the process identified by `proc_tag`."""
        ask_chunk = header_execute(action_kind.ask,
                                   execute_subcmd.kill,
                                   cmdarg.program,
                                   cmdarg.argument,
                                   cmdarg.workdir,
                                   cmdarg.isbase64)
        execrs = execresult()
        ask_chunk.tag_value = proc_tag
        self._send(ask_chunk.pack())
        program = ask_chunk.exec.program
        chunk = self._await_chunk('data', program)
        if chunk is None:
            execrs.errcode = error_wait_timeout_streaming.errcode
            execrs.stderr.append(error_wait_timeout_streaming.text
                                 + '(kill, wait data)')
        else:
            logging.info('chunk.data={}'.format(str(chunk.chunk_data)))
            if chunk.chunk_data:
                execrs = chunk.chunk_data
        chunk = self._await_chunk('done', program)
        if chunk is None:
            execrs.errcode = error_wait_timeout_streaming.errcode
            execrs.stderr.append(error_wait_timeout_streaming.text
                                 + '(kill, wait done)')
        # BUG FIX: previously returned an undefined name `result`
        # (NameError); return the collected execresult instead.
        return execrs

    def classify_execresult(self, cmdrs: execresult) -> rcresult:
        """Wrap an ``execresult`` into an ``rcresult``, propagating the
        error code and joining stderr lines into the result text."""
        result = rcresult()
        result.data = cmdrs
        if 0 != cmdrs.errcode:
            result.errcode = cmdrs.errcode
            if isinstance(cmdrs.stderr, list):
                result.text = '\n'.join(cmdrs.stderr)
            else:
                result.text = str(cmdrs.stderr)
        return result

    def cmd_upload(self, local_filepath: str, remote_dirpath: str = '.'):
        """Upload a local file into `remote_dirpath` on the server.

        The file is streamed as ask/data/done chunks of at most
        ``CHUNK_SIZE`` bytes; waits for the server's done reply.
        """
        filepath = os.path.abspath(local_filepath)
        if not os.path.exists(filepath):
            return error_path_not_exist
        filename = os.path.basename(filepath)
        filesize = os.path.getsize(filepath)
        # ask
        hdr = header_upload(action_kind.ask,
                            filename,
                            filesize,
                            remote_dirpath)
        self._send(hdr.pack())
        logging.info('filename={}'.format(filename))
        logging.info('filesize={}'.format(filesize))
        logging.info('filepath={}'.format(filepath))
        index = 0
        sentsize = 0
        # number of chunks, rounded up to cover a partial last chunk
        chunk_count = filesize // self.CHUNK_SIZE
        if filesize % self.CHUNK_SIZE > 0:
            chunk_count += 1
        # data: the file handle is closed even if _send raises
        with open(filepath, "rb") as file:
            while data := file.read(self.CHUNK_SIZE):
                hdr = header_upload(action_kind.data,
                                    filename,
                                    filesize,
                                    remote_dirpath,
                                    data)
                hdr.chunk_size = min(self.CHUNK_SIZE, len(data))
                hdr.chunk_index = index
                hdr.chunk_count = chunk_count
                self._send(hdr.pack())
                index += 1
                sentsize += hdr.chunk_size
                logging.info('index={}/{} size={} sentsize={} name={}'.format(
                             hdr.chunk_index + 1,
                             hdr.chunk_count,
                             hdr.chunk_size,
                             sentsize,
                             hdr.filename))
        # done
        hdr = header_upload(action_kind.done,
                            filename,
                            filesize,
                            remote_dirpath)
        self._send(hdr.pack())
        if self._wait_until(len, 0.1, _WAIT_TIMEOUT_, self.sock.chunk_list):
            self.sock.chunk_list.pop(0)
        else:
            return error_wait_timeout_done
        return rcresult()

    def cmd_download(self,
                     remote_filepath: str,
                     local_dirpath: str,
                     overwrite: bool = True):
        """Download `remote_filepath` from the server into `local_dirpath`.

        When `overwrite` is False and the destination exists, the download
        is refused with ``error_file_already_exist``.
        """
        if not os.path.exists(local_dirpath):
            return error_path_not_exist
        filepath = remote_filepath
        filename = os.path.basename(filepath)
        fileloc = os.path.join(local_dirpath, filename)
        logging.info('filepath={}'.format(filepath))
        logging.info('filename={}'.format(filename))
        logging.info('fileloc={}'.format(fileloc))
        if (not overwrite) and os.path.exists(fileloc):
            return error_file_already_exist
        hdr = header_download(action_kind.ask, remote_filepath)
        self._send(hdr.pack())
        filepath_dst = os.path.join(local_dirpath, filename)
        logging.info('filepath_dst={}'.format(filepath_dst))
        index = 0
        recvsize = 0
        result = rcresult()
        keep_going = True
        file_size = 0
        file = None
        while keep_going:
            if not self._wait_until(len, 0.1, _WAIT_TIMEOUT_,
                                    self.sock.chunk_list):
                result = error_wait_timeout_streaming
                break
            while len(self.sock.chunk_list) > 0:
                # a non-data chunk (e.g. done) terminates the stream
                if self.sock.chunk_list[0].action_kind != action_kind.data.value:
                    keep_going = False
                    break
                data_chunk: header_download = self.sock.chunk_list.pop(0)
                file_size = data_chunk.file_size
                # a zero-size data chunk signals a missing remote file
                if 0 == data_chunk.chunk_size:
                    result = error_file_not_found
                    keep_going = False
                    break
                if not file:
                    file = open(filepath_dst, "wb")
                file.write(data_chunk.data)
                index += 1
                recvsize += data_chunk.chunk_size
                # BUG FIX: progress used to be logged from the ask header
                # `hdr`, whose counters are never updated; log the chunk
                # that was actually received instead.
                logging.info('index={}/{} size={} recvsize={} name={}'.format(
                             data_chunk.chunk_index + 1,
                             data_chunk.chunk_count,
                             data_chunk.chunk_size,
                             recvsize,
                             data_chunk.filepath))
                if recvsize == file_size:
                    break
        if file:
            file.flush()
            file.close()
            file = None  # BUG FIX: was `False`; keep this a file-or-None slot
        # wait done
        if self._wait_until(len, 0.1, _WAIT_TIMEOUT_, self.sock.chunk_list):
            self.sock.chunk_list.pop(0)
        else:
            return error_wait_timeout_done
        return result

    def cmd_list(self, dstdirpath: str):
        """List the files under `dstdirpath` on the server.

        On success ``rcresult.data`` holds the decoded JSON listing.
        """
        ask_chunk = header_list(action_kind.ask, dstdirpath)
        self._send(ask_chunk.pack())
        if not self._wait_until(len, 0.1, _WAIT_TIMEOUT_,
                                self.sock.chunk_list):
            return error_wait_timeout_streaming
        result = rcresult()
        data_chunk: header_list = self.sock.chunk_list.pop(0)
        if 0 != data_chunk.errcode:
            result.errcode = data_chunk.errcode
            # (typo fix: 'specifc' -> 'specific')
            logfmt = 'the specific path is not there (dstdirpath={})'
            result.text = logfmt.format(dstdirpath)
            logging.error(result.text)
        elif data_chunk.data:
            result.data = json.loads(data_chunk.data)
            logging.info('type(result.data) = {}'.format(type(result.data)))
            logging.info('result.data = {}'.format(result.data))
            total = len(result.data)
            for index, file in enumerate(result.data, start=1):
                logging.info('file[{}/{}]={}'.format(index, total, file))
        return result

    def cmd_execute(self,
                    program: str,
                    argument: str = '',
                    workdir: str = '.',
                    isbase64: bool = False):
        """Run `program` on the server and collect its result.

        Starts the remote process, then polls for its outcome.  When
        `isbase64` is set, `argument` is expected to already be base64
        text and is sent as ASCII.
        """
        if isbase64:
            argument_encoded = argument.encode('ascii')
        else:
            argument_encoded = argument.encode('utf-8')
        proc_tag = 0
        cmdarg = execmdarg(program.encode('utf-8'),
                           argument_encoded,
                           workdir.encode('utf-8'),
                           isbase64)
        cmdrs = self._execute_start(cmdarg)
        result: rcresult = self.classify_execresult(cmdrs)
        if 0 == result.errcode:
            proc_tag = cmdrs.data
            if 0 == proc_tag:
                # the server could not spawn the process
                result.errcode = -1
                result.text = 'failed to run the process !!!'
            elif proc_tag > 0:
                cmdrs = self._execute_query(cmdarg, proc_tag)
                result = self.classify_execresult(cmdrs)
        return result

    def cmd_message(self, title: str, data: bytes = b''):
        """Send a titled message; the reply payload is returned in
        ``rcresult.data`` and ``rcresult.text`` echoes the title."""
        ask_chunk = header_message(action_kind.ask, title, data)
        self._send(ask_chunk.pack())
        if not self._wait_until(len, 0.1, _WAIT_TIMEOUT_,
                                self.sock.chunk_list):
            return error_wait_timeout_streaming
        done_chunk: header_message = self.sock.chunk_list.pop(0)
        result = rcresult()
        result.data = done_chunk.payload_chunk
        result.text = title
        return result

    def inncmd_get_sysinfo(self):
        """Fetch the server's system information (inncmd_sysinfo)."""
        result: rcresult = self.cmd_message('inncmd_sysinfo')
        # NOTE(review): assumes the reply carries UTF-8 bytes; a timeout
        # result would not -- confirm upstream handling.
        text = str(result.data, encoding='utf-8')
        data: inncmd_sysinfo = CONFIG().toCLASS(text)
        return data

    def inncmd_make_dir(self, path: str):
        """Create `path` on the server side (inncmd_mkdir)."""
        result: rcresult = self.cmd_message('inncmd_mkdir', path.encode())
        text = str(result.data, encoding='utf-8')
        data: inncmd_mkdir = CONFIG().toCLASS(text)
        return data
if __name__ == '__main__':
    # Wire the root logger to stderr with a timestamped, call-site prefix.
    prefix = '[%(asctime)s][%(levelname)s]' + \
             '[%(filename)s!%(funcName)s:%(lineno)d] %(message)s'
    # renamed from `format`, which shadowed the built-in of the same name
    formatter = logging.Formatter(prefix, datefmt='%Y%m%d %H:%M:%S')
    screen = logging.StreamHandler()
    screen.setFormatter(formatter)
    logger = logging.getLogger()
    logger.addHandler(screen)
    logger.setLevel(logging.INFO)
    _HOST_ = 'localhost'
    _PORT_ = 12345
    # Usage: run with 'server' to host, or 'client' to run a demo command.
    if len(sys.argv) > 1:
        if sys.argv[1] == 'server':
            rcsrv = rcserver(_HOST_, _PORT_)
            rcsrv.start()
        elif sys.argv[1] == 'client':
            rcclt = rcclient()
            if rcclt.connect('localhost', 10013):
                # Other examples: rcclt.cmd_upload('../MyApp.exe', '.'),
                # rcclt.cmd_list('.'), rcclt.cmd_execute('ipconfig').
                result = rcclt.cmd_execute('pwd')
                if 0 == result.errcode:
                    logging.info("errcode={}".format(result.errcode))
                    logging.info("data={}".format(result.data))
                else:
                    logging.error("errcode={}".format(result.errcode))
                    logging.error("text={}".format(result.text))
            else:
                logging.error("Failed to connect to server !!!")
|
<reponame>leniartek/trino-admin
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from mock import patch
from prestoadmin import config
from prestoadmin.util.exception import ConfigurationError, \
ConfigFileNotFoundError
from tests.base_test_case import BaseTestCase
DIR = os.path.abspath(os.path.dirname(__file__))
class TestConfiguration(BaseTestCase):
    """Tests for the prestoadmin.config file parsing helpers."""

    def test_file_does_not_exist_json(self):
        # Pointing the JSON loader at a missing path must raise.
        self.assertRaisesRegexp(ConfigFileNotFoundError,
                                'Missing configuration file ',
                                config.get_conf_from_json_file,
                                'does/not/exist/conf.json')

    def test_file_is_empty_json(self):
        parsed = config.get_conf_from_json_file(DIR + '/resources/empty.txt')
        self.assertEqual(parsed, {})

    def test_file_is_empty_properties(self):
        parsed = config.get_conf_from_properties_file(
            DIR + '/resources/empty.txt')
        self.assertEqual(parsed, {})

    def test_file_is_empty_config(self):
        parsed = config.get_conf_from_config_file(DIR + '/resources/empty.txt')
        self.assertEqual(parsed, [])

    def test_invalid_json(self):
        # The underlying json error message is surfaced verbatim.
        self.assertRaisesRegexp(ConfigurationError,
                                'Expecting , delimiter: line 3 column 3 '
                                '\(char 19\)',
                                config.get_conf_from_json_file,
                                DIR + '/resources/invalid_json_conf.json')

    def test_get_config(self):
        path = os.path.join(DIR, 'resources', 'valid.config')
        parsed = config.get_conf_from_config_file(path)
        self.assertEqual(parsed, ['prop1', 'prop2', 'prop3'])

    def test_get_properties(self):
        path = os.path.join(DIR, 'resources', 'valid.properties')
        expected = {'a': '1', 'b': '2', 'c': '3',
                    'd\\=': '4', 'e\\:': '5', 'f': '==6',
                    'g': '= 7', 'h': ':8', 'i': '9'}
        self.assertEqual(config.get_conf_from_properties_file(path), expected)

    @patch('__builtin__.open')
    def test_get_properties_ignores_whitespace(self, open_mock):
        # Stub the file object handed out by the context manager.
        fake_file = open_mock.return_value.__enter__.return_value
        fake_file.read.return_value = ' key1 =value1 \n \n key2= value2'
        parsed = config.get_conf_from_properties_file('/dummy/path')
        self.assertEqual(parsed, {'key1': 'value1', 'key2': 'value2'})

    def test_get_properties_invalid(self):
        path = os.path.join(DIR, 'resources', 'invalid.properties')
        self.assertRaisesRegexp(ConfigurationError,
                                'abcd is not in the expected format: '
                                '<property>=<value>, <property>:<value> or '
                                '<property> <value>',
                                config.get_conf_from_properties_file,
                                path)

    def test_fill_defaults_no_missing(self):
        # Nothing changes when every key already has a value.
        original = {'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}
        filled = dict(original)
        config.fill_defaults(filled, {'key1': 'default1', 'key2': 'default2'})
        self.assertEqual(filled, original)

    def test_fill_defaults(self):
        # Only the missing key2 is filled from the defaults.
        filled = {'key1': 'val1', 'key3': 'val3'}
        config.fill_defaults(filled, {'key1': 'default1', 'key2': 'default2'})
        self.assertEqual(
            filled, {'key1': 'val1', 'key2': 'default2', 'key3': 'val3'})
|
<reponame>tcosmo/coreli
""" This file contains the relevant tools to study predecessor sets in the Collatz graph.\
In particular, it is proven in `https://arxiv.org/abs/1907.00775 <https://arxiv.org/abs/1907.00775>`_\
that these sets can be partionned in **Regular Languages**.\
We provide the tools to construct the associated regular expressions.
"""
import random
import enum
import copy
from typing import List, Callable, Tuple
from coreli.base_conversion_routines import *
from coreli.modular_routines import *
class SpanPredNodeType(enum.Enum):
    """ Kinds of nodes in the span-predecessor regular expression tree.

    Each node of a ``SpanPredRegularTree`` carries exactly one of these
    types. More details in
    `https://arxiv.org/abs/1907.00775 <https://arxiv.org/abs/1907.00775>`_.
    """
    EMPTY = 0             # empty language (used when x is a multiple of 3)
    INITIAL_SEGMENT = 1   # fixed binary prefix (int_to_binary of x)
    KLEENE = 2            # node whose str_value is under a Kleene star
    JOIN = 3              # parity string joining a branch to its parent
    BIT = 4               # single bit emitted between tree levels
class SpanPredNode(object):
    """ A node in the `SpanPredRegularTree`. """

    def __init__(self, node_type: 'SpanPredNodeType', annotation: Tuple[int],
                 str_value: str, children: List['SpanPredNode']) -> None:
        """Construct a node of the Span Regular Expression tree.

        Args:
            node_type (SpanPredNodeType): which information is the node encoding
            annotation (Tuple[int]): extra information related to the construction,
                contains at least the `x` and `k` for which the node was constructed
            str_value (str): the bit string retained by the node
            children (List[SpanPredNode]): list of the node's children
        """
        self.node_type = node_type
        self.annotation = annotation
        self.str_value = str_value
        self.children = children

    def copy_without_children(self) -> 'SpanPredNode':
        """ It can be useful to copy all the information of a node but\
        its children."""
        return SpanPredNode(self.node_type, self.annotation,
                            self.str_value, [])

    @classmethod
    def chain_link(cls, chain: List['SpanPredNode']) -> 'SpanPredNode':
        """ Links a chain of `SpanPredNode`: each node of `chain` becomes
        the single child of a copy of its predecessor. Returns the head.

        (Annotation fixed: `chain` is a list of nodes, not a single node.)
        """
        def construct_root(i):
            # the last element is reused as-is; the others are copied so
            # the input nodes are never mutated
            if i == len(chain)-1:
                return chain[-1]
            to_return = chain[i].copy_without_children()
            to_return.children = [construct_root(i+1)]
            return to_return
        return construct_root(0)

    def sample_chain(self, kleene_choice: Tuple[int]) -> str:
        """ Sample a regular expression chain tree using Kleene star\
        as per number given by `kleene_choice`.
        """
        current_sample = ''
        new_kleene_choice = kleene_choice
        if self.node_type == SpanPredNodeType.KLEENE:
            if len(kleene_choice) == 0:
                # BUG FIX: added the missing space between the two halves
                # of the error message.
                raise ValueError('There are more Kleene stars in {} '.format(self.simpler_str())
                                 + 'than given by `kleene_choice`')
            # repeat the starred string as many times as requested and
            # consume one entry of the choice tuple
            current_choice = kleene_choice[0]
            current_sample += self.str_value*current_choice
            new_kleene_choice = kleene_choice[1:]
        else:
            current_sample += self.str_value
        if len(self.children) == 0:
            return current_sample
        if len(self.children) > 1:
            raise ValueError('Can only operate on a chain tree. If you have a'
                             + 'non chain tree please operate on branches.')
        return current_sample + self.children[0].sample_chain(new_kleene_choice)

    def __str__(self):
        """Fully parenthesised rendering of the (sub)expression."""
        if self.node_type == SpanPredNodeType.EMPTY:
            return ''
        me = '({})'.format(self.str_value)
        if self.node_type == SpanPredNodeType.KLEENE:
            me += '*'
        if len(self.children) == 0:
            return me
        if len(self.children) == 1:
            me += str(self.children[0])
            return me
        # several children: render them as alternatives
        me += '('
        for i, child in enumerate(self.children):
            me += '('+str(child)+')'
            if i != len(self.children)-1:
                me += '|'
        me += ')'
        return me

    def simpler_str(self, only_me: bool = False) -> str:
        """ Returns a less pedantic str than __str__.
        If `only_me` is set, the children wont be appended.
        """
        if self.node_type == SpanPredNodeType.EMPTY:
            return 'Empty'
        if self.node_type == SpanPredNodeType.KLEENE:
            me = '({})*'.format(self.str_value)
        else:
            me = '{}'.format(self.str_value)
        if len(self.children) == 0 or only_me:
            return me
        if len(self.children) == 1:
            me += self.children[0].simpler_str()
            return me
        me += '('
        for i, child in enumerate(self.children):
            # CONSISTENCY FIX: recurse with simpler_str() instead of the
            # pedantic str(child), matching this method's purpose.
            me += child.simpler_str()
            if i != len(self.children)-1:
                me += '|'
        me += ')'
        return me
class SpanPredRegularTree(object):
    """ The set of predecessors of :math:`x` at "span-distance" :math:`k`\
    (i.e. any :math:`y` that goes to `x` following a parity vector of span `k`)\
    is a Regular Language. The associated Regular Expression is a tree and this class\
    constructs it.
    """
    def __init__(self, x: int, k: int):
        """ We are computing the expression of the predecessors of :math:`x` at\
        "span-distance" distance :math:`k`."""
        self.x = x
        self.k = k
        self.branches = None  # lazily populated by extract_branches()
        self.root = self.construct_tree()
    @staticmethod
    def get_nb_branches(k: int) -> int:
        """ The number of branches of the tree only depends on k and is
        given by a closed formula.
        """
        return 2**(k-1)*3**((k*(k-3)+2)//2)
    @property
    def nb_branches(self) -> int:
        """ Instance wrapper around `get_nb_branches`. """
        if self.x %3 == 0:
            return 1 #Tree is 'Empty' which gives 1 branch instead of 0 awkwardly enough
        return self.get_nb_branches(self.k)
    @staticmethod
    def parity_string(x: int, y: int, k: int, n_cycle: int = 0) -> str:
        """ For :math:`x` and :math:`y`, elements of Z/3^kZ the function computes\
        the parities of all elements of the form\
        :math:`2^{-i}*x` until :math:`y` is reached `n_cycle+1` times (excluded).

        Raises:
            ValueError: if `x` or `y` is a multiple of 3 (no inverse of 2
                orbit) or not an element of Z/3^kZ.
        """
        if (x%3)*(y%3) == 0:
            raise ValueError('{} or {} is a multiple of three!'.format(x,y))
        if x >= 3**k or y >= 3**k:
            raise ValueError('{} or {} is not in Z/3^{}Z'.format(x,y,k))
        parity_str = ''
        curr_cycle = 0
        x_0 = x
        while not (x == y and curr_cycle == n_cycle):
            parity_str += str(x%2)
            # step x <- 2^{-1} x in Z/3^kZ (project helper mul_inv2)
            x = mul_inv2(x,k)
            if x == x_0:
                curr_cycle += 1
        return parity_str[::-1]#encodings reverse the order
    def construct_tree(self) -> 'SpanPredNode':
        """ Constructs the regular expression tree, it follows the construction\
        of Th. 4.16 in `https://arxiv.org/abs/1907.00775 <https://arxiv.org/abs/1907.00775>`_."""
        x,k = self.x, self.k
        if k == 0:
            # base case: span 0 -- leading zeros, optionally prefixed by
            # the binary encoding of x
            str_value = '0'
            node0 = SpanPredNode(SpanPredNodeType.KLEENE,
                                 (0, 0), str_value, [])
            if x == 0:
                return node0
            else:
                node = SpanPredNode(SpanPredNodeType.INITIAL_SEGMENT,
                                    (x, 0), int_to_binary(x), [node0])
                return node
        if x%3 == 0:
            ''' The predecessor set of a multiple of 3, when k!=0 is empty'''
            node = SpanPredNode(SpanPredNodeType.EMPTY,
                                (x, k),
                                '', [])
            return node
        if x >= 3**k:
            # reduce x modulo 3^k and prepend the quotient's binary form
            tree_below = SpanPredRegularTree(x%(3**k),k).root
            return SpanPredNode(SpanPredNodeType.INITIAL_SEGMENT,
                                (x, k),
                                int_to_binary(x//(3**k)), [tree_below])
        # recursive case: one JOIN/BIT branch per element of the group at
        # level k-1, all hanging under a KLEENE node for x's own cycle
        children_nodes = []
        group_k = enumerate_group_k(k-1)
        for y in group_k:
            tree_below = SpanPredRegularTree(y,k-1).root
            bit_node = SpanPredNode(SpanPredNodeType.BIT,
                                    (x,k,y),
                                    str(1-y%2), [tree_below])
            join_node = SpanPredNode(SpanPredNodeType.JOIN,
                                     (x,k,T1_k(y,k)), SpanPredRegularTree.parity_string(T1_k(y,k),x,k),
                                     [bit_node])
            children_nodes.append(join_node)
        kleene_node = SpanPredNode(SpanPredNodeType.KLEENE,
                                   (x,k), SpanPredRegularTree.parity_string(x,x,k,1), children_nodes)
        return kleene_node
    def get_random_samples(self, n_samples:int = 1, max_kleene:int = 3) -> List[str]:
        """ Returns random samples of strings matching the tree's regexp.

        (Annotation fixed: a list of strings is returned, not a single str.)

        Args:
            n_samples (int): number of samples to generate
            max_kleene (int): number of maximum application of kleene star
                in samples
        """
        to_return = []
        self.extract_branches()
        for _ in range(n_samples):
            # pick a branch at random, then a random repeat count for each
            # of its Kleene stars (one per tree level, hence k+1 of them)
            i_branch = random.randint(0,self.nb_branches-1)
            kleene_choice = tuple(random.randint(0,max_kleene) for _ in range(self.k+1))
            to_return.append(self.branches[i_branch].sample_chain(kleene_choice))
        return to_return
    def extract_branches(self) -> None:
        """ Populate `self.branches` with every root-to-leaf chain of the
        tree (no-op if already computed).
        """
        if not self.branches is None:
            return
        self.branches = []
        def explore_branch(curr_node: 'SpanPredNode',
                           curr_branch: List['SpanPredNode']) -> None:
            # depth-first walk; each leaf closes one chain
            if len(curr_node.children) == 0:
                to_link = curr_branch + [curr_node]
                self.branches.append(SpanPredNode.chain_link(to_link))
                return
            for child in curr_node.children:
                curr_node_mod = curr_node.copy_without_children()
                explore_branch(child, curr_branch + [curr_node_mod])
        explore_branch(self.root, [])
    def pprint_branches(self, print_root_once: bool = False,
                        print_in_custom_order: bool = True) -> None:
        """ Pretty prints all the branches of the tree.

        Args:
            print_root_once (bool): all branches starts with the same root\
                if set to true, the root will be printed only once to avoid\
                redundancy.
            print_in_custom_order (bool): The order in which the branches where\
                computed is not necessarily the most convenient to work with. There is\
                a custom order which is simpler to read and this boolean implements it.
        """
        self.extract_branches()
        if print_root_once:
            print(self.branches[0].simpler_str(only_me=True))
            print()
        if self.branches[0].node_type == SpanPredNodeType.EMPTY:
            return
        if not print_in_custom_order:
            for branch in self.branches:
                if not print_root_once:
                    print(branch.simpler_str())
                else:
                    print(branch.children[0].simpler_str())
            return
        # custom order: group branches into blocks of the sub-tree at k-1
        nb_branches_inferior = self.get_nb_branches(self.k-1)
        nb_elem_per_block = self.nb_branches//nb_branches_inferior
        for i_block in range(nb_branches_inferior):
            for j_elem in range(nb_elem_per_block):
                branch = self.branches[j_elem*nb_branches_inferior + i_block]
                if not print_root_once:
                    print(branch.simpler_str())
                else:
                    print(branch.children[0].simpler_str())
            print()
    def __str__(self):
        return str(self.root)
<filename>src/python/pants/rules/core/strip_source_roots_test.py
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pathlib import PurePath
from typing import List, Optional, Union
from unittest.mock import Mock
import pytest
from pants.build_graph.address import Address
from pants.build_graph.files import Files
from pants.engine.legacy.graph import HydratedTarget
from pants.engine.rules import RootRule
from pants.engine.scheduler import ExecutionError
from pants.engine.selectors import Params
from pants.rules.core.strip_source_roots import SourceRootStrippedSources, StripSourceRootsRequest
from pants.rules.core.strip_source_roots import rules as strip_source_root_rules
from pants.testutil.option.util import create_options_bootstrapper
from pants.testutil.test_base import TestBase
class StripSourceRootsTest(TestBase):
    """Tests for the ``strip_source_roots`` rules.

    Covers both request shapes: stripping a raw snapshot via
    ``StripSourceRootsRequest`` (with or without a representative path) and
    stripping the ``sources`` of a ``HydratedTarget``.
    """

    @classmethod
    def rules(cls):
        # Register the rules under test plus root rules for the param types
        # these tests inject directly into the engine.
        return (
            *super().rules(),
            *strip_source_root_rules(),
            RootRule(HydratedTarget),
            RootRule(StripSourceRootsRequest),
        )

    def get_stripped_files(
        self,
        request: Union[StripSourceRootsRequest, HydratedTarget],
        *,
        args: Optional[List[str]] = None,
    ) -> List[str]:
        """Run the strip rule for ``request`` and return the stripped paths, sorted."""
        result = self.request_single_product(
            SourceRootStrippedSources, Params(request, create_options_bootstrapper(args=args))
        )
        return sorted(result.snapshot.files)

    def test_strip_snapshot(self) -> None:
        def get_stripped_files_for_snapshot(
            paths: List[str],
            *,
            use_representative_path: bool = True,
            args: Optional[List[str]] = None,
        ) -> List[str]:
            # Build a snapshot of empty files; optionally use the first path
            # as the representative path for the entire snapshot.
            input_snapshot = self.make_snapshot({fp: "" for fp in paths})
            request = StripSourceRootsRequest(
                input_snapshot, representative_path=paths[0] if use_representative_path else None
            )
            return self.get_stripped_files(request, args=args)

        # Normal source roots
        assert get_stripped_files_for_snapshot(["src/python/project/example.py"]) == [
            "project/example.py"
        ]
        assert get_stripped_files_for_snapshot(["src/java/com/project/example.java"]) == [
            "com/project/example.java"
        ]
        assert get_stripped_files_for_snapshot(["tests/python/project_test/example.py"]) == [
            "project_test/example.py"
        ]
        # Unrecognized source root: by default the leading component is still
        # stripped; with --source-unmatched=fail the rule must raise instead.
        unrecognized_source_root = "no-source-root/example.txt"
        assert get_stripped_files_for_snapshot([unrecognized_source_root]) == ["example.txt"]
        with pytest.raises(ExecutionError) as exc:
            get_stripped_files_for_snapshot(
                [unrecognized_source_root], args=["--source-unmatched=fail"]
            )
        assert (
            f"NoSourceRootError: Could not find a source root for `{unrecognized_source_root}`"
            in str(exc.value)
        )
        # Support for multiple source roots: a single representative path
        # cannot describe two distinct roots, so it must fail; passing no
        # representative path strips each file against its own root.
        file_names = ["src/python/project/example.py", "src/java/com/project/example.java"]
        with pytest.raises(ExecutionError) as exc:
            get_stripped_files_for_snapshot(file_names, use_representative_path=True)
        assert "Cannot strip prefix src/python" in str(exc.value)
        assert sorted(
            get_stripped_files_for_snapshot(file_names, use_representative_path=False)
        ) == sorted(["project/example.py", "com/project/example.java"])

    def test_strip_target(self) -> None:
        def get_stripped_files_for_target(
            *, source_paths: Optional[List[str]], type_alias: Optional[str] = None,
        ) -> List[str]:
            # Fake a target adaptor.  When source_paths is None the adaptor
            # has no `sources` attribute at all (deleted from the Mock).
            adaptor = Mock()
            adaptor.type_alias = type_alias
            if source_paths is None:
                del adaptor.sources
                return self.get_stripped_files(
                    HydratedTarget(address=Mock(), adaptor=adaptor, dependencies=()),
                )
            adaptor.sources = Mock()
            adaptor.sources.snapshot = self.make_snapshot({fp: "" for fp in source_paths})
            address = Address(
                spec_path=PurePath(source_paths[0]).parent.as_posix(), target_name="target"
            )
            return self.get_stripped_files(
                HydratedTarget(address=address, adaptor=adaptor, dependencies=()),
            )

        # normal target
        assert get_stripped_files_for_target(
            source_paths=["src/python/project/f1.py", "src/python/project/f2.py"]
        ) == sorted(["project/f1.py", "project/f2.py"])
        # empty target
        assert get_stripped_files_for_target(source_paths=None) == []
        # files targets are not stripped
        assert get_stripped_files_for_target(
            source_paths=["src/python/project/f1.py"], type_alias=Files.alias(),
        ) == ["src/python/project/f1.py"]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import types
import cv2
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from models import get_model
from losses import get_loss
from transforms import from_norm_bgr
class LandmarkDetector(object):
    """Anchor-based single-object box and landmark detector.

    A backbone from ``get_model`` is re-headed into a dense predictor: the
    global pooling layer is replaced by adaptive pooling to a
    ``feature_size`` x ``feature_size`` grid, and the final linear layer by a
    1x1 convolution that emits, for every anchor at every grid cell,
    1 objectness logit, 4 box-regression targets and 2*4 landmark-regression
    targets.  All coordinates are normalized to [0, 1]; boxes use
    center-format (x, y, w, h).
    """

    # (X, Y, W, H)
    SCALE = [0.5, 1.0, 2.0]
    RATIO = [[0.30, 0.30],
             [0.60, 0.15],
             [0.15, 0.60]]
    # 1 objectness + 4 box targets + 4 landmark points (x, y each)
    NUM_OUTPUTS = 1+4+2*4

    def __init__(self, config):
        # Anchor (w, h) templates: every RATIO at every SCALE.
        self.anchors = [np.array(LandmarkDetector.RATIO) * s for s in LandmarkDetector.SCALE]
        self.anchors = np.concatenate(self.anchors, axis=0)
        assert self.anchors.shape == (len(LandmarkDetector.SCALE) * len(LandmarkDetector.RATIO), 2)
        self.feature_size = config.model.params.feature_size
        self.num_anchors = len(LandmarkDetector.SCALE) * len(LandmarkDetector.RATIO)
        num_outputs = LandmarkDetector.NUM_OUTPUTS
        self.model = get_model(config, num_outputs=num_outputs)
        # Re-purpose the classifier head as a dense per-cell predictor.
        self.model.avgpool = nn.AdaptiveAvgPool2d(self.feature_size)
        in_features = self.model.last_linear.in_features
        self.model.last_linear = nn.Conv2d(in_channels=in_features,
                                           out_channels=len(self.anchors)*num_outputs,
                                           kernel_size=1)

        def logits(self, features):
            # Bound onto the model below, so `self` here is the model itself.
            x = self.avgpool(features)
            x = self.last_linear(x)
            return x
        self.model.logits = types.MethodType(logits, self.model)
        if torch.cuda.device_count() > 1:
            self.model = torch.nn.DataParallel(self.model)
        self.model = self.model.cuda()
        self.preprocess_opt = {'mean': self.model.mean,
                               'std': self.model.std,
                               'input_range': self.model.input_range,
                               'input_space': self.model.input_space}
        self.criterion = get_loss(config)
        self.cls_criterion = F.binary_cross_entropy_with_logits

    def get_model(self):
        """Return the underlying (possibly DataParallel-wrapped) model."""
        return self.model

    def get_preprocess_opt(self):
        """Return normalization options matching the backbone's pretraining."""
        return self.preprocess_opt

    def forward(self, images, labels=None, **_):
        """Run the backbone; `labels` is accepted for interface uniformity."""
        return self.model(images)

    def inference(self, images=None, outputs=None, labels=None, **_):
        """Decode raw outputs into the single best box/landmarks per image.

        Either `images` or precomputed `outputs` must be given.  Returns a
        dict with 'boxes' (B,4), 'landmarks' (B,8) and 'probabilities' (B,).
        """
        if outputs is None:
            assert images is not None
            outputs = self.model(images)
        num_outputs = LandmarkDetector.NUM_OUTPUTS
        outputs = outputs.view(-1,num_outputs,self.num_anchors,self.feature_size,self.feature_size)
        anchors = self._get_anchors()
        B,C,A,H,W = outputs.size()
        # Flatten the anchor grid and pick, per image, the cell/anchor with
        # the highest objectness score.
        outputs = outputs.view(B,C,A*H*W)
        anchors = torch.stack([anchors]*B, dim=0)
        anchors = anchors.view(B,-1,A*H*W)
        scores, indices = torch.max(outputs[:,0], dim=1)
        outputs = outputs[torch.arange(B), :, indices]
        anchors = anchors[torch.arange(B), :, indices]
        boxes = self._targets_to_boxes(outputs[:,1:5], anchors)
        landmarks = self._targets_to_landmarks(outputs[:,5:], anchors)
        # torch.sigmoid replaces the deprecated F.sigmoid (same computation).
        probabilities = torch.sigmoid(scores)
        return {'boxes': boxes, 'landmarks': landmarks, 'probabilities': probabilities}

    def _get_anchors(self):
        """Build the (4, num_anchors, H, W) anchor tensor on the GPU.

        Cell centers are evenly spaced in (0, 1); each cell carries every
        (w, h) template from SCALE x RATIO.
        """
        anchors = []
        denom = self.feature_size*2
        for y in np.arange(1/denom, 1.0, 2/denom):
            for x in np.arange(1/denom, 1.0, 2/denom):
                for w, h in self.anchors:
                    anchors.append([x, y, w, h])
        # row x column x num_anchors x 4
        anchors = np.array(anchors).reshape((self.feature_size,self.feature_size,self.num_anchors,4))
        # row x column x num_anchors x 4 => 4 x num_anchors x row x col
        anchors = np.transpose(anchors, (3,2,0,1))
        anchors = torch.FloatTensor(anchors).cuda()
        assert anchors.size() == (4,self.num_anchors,self.feature_size,self.feature_size)
        return anchors

    def loss(self, outputs, labels, **_):
        """Weighted sum of box, landmark and objectness losses.

        Regression outputs are masked by `target_on_off` so only confident
        positive/negative anchors contribute.
        NOTE(review): the regression *targets* are not masked the same way
        as the outputs -- confirm this asymmetry is intended.
        """
        num_outputs = LandmarkDetector.NUM_OUTPUTS
        outputs = outputs.view(-1,num_outputs,self.num_anchors,self.feature_size,self.feature_size)
        anchors = self._get_anchors()
        output_boxes = self._targets_to_boxes(outputs[:,1:5], anchors.unsqueeze(0))
        output_landmarks = self._targets_to_landmarks(outputs[:,5:], anchors.unsqueeze(0))
        box_targets = self._boxes_to_targets(labels[:,:4], anchors)
        landmark_targets = self._landmarks_to_targets(labels[:,4:], anchors)
        cls_targets, target_on_off = self._get_cls_targets(labels, anchors.unsqueeze(0))
        assert cls_targets.size() == target_on_off.size()
        assert cls_targets.size() == outputs[:,:1].size()
        outputs = outputs * target_on_off
        loss_box = self.criterion(outputs[:,1:5], box_targets)
        loss_landmark = self.criterion(outputs[:,5:], landmark_targets)
        loss_cls = self.cls_criterion(outputs[:,:1], cls_targets)
        return (loss_box + loss_landmark) * 5 + loss_cls * 0.5

    def metrics(self, boxes, landmarks, probabilities, labels, **_):
        """Return mean IoU and mean landmark L2 distance as plain floats."""
        iou = torch.mean(self._get_iou(boxes, labels[:,:4])).item()
        # .item() added so 'l2' is a float like 'score'/'iou' (was a tensor).
        l2 = torch.mean(torch.sqrt(torch.sum(torch.pow(landmarks - labels[:,4:], 2), dim=1))).item()
        return {'score': iou, 'iou': iou, 'l2': l2}

    def annotate_to_images(self, images, labels, predicts, **_):
        """Draw GT (red) and predicted (blue) boxes/landmarks on a sample.

        Picks the two worst and two best IoU examples from the batch and
        returns them as CHW uint8 arrays.
        """
        assert images.dim() == 4
        assert labels.dim() == 2
        boxes = predicts['boxes']
        landmarks = predicts['landmarks']
        probabilities = predicts['probabilities']
        ious = self._get_iou(boxes, labels[:,:4])
        iou_1, indices_1 = torch.topk(ious, 2, largest=False)
        iou_2, indices_2 = torch.topk(ious, 2, largest=True)
        indices = torch.cat([indices_1, indices_2], dim=0)
        images = images.detach().cpu().numpy()
        labels = labels.detach().cpu().numpy()
        boxes = boxes.detach().cpu().numpy()
        landmarks = landmarks.detach().cpu().numpy()
        probabilities = probabilities.detach().cpu().numpy()
        ious = ious.detach().cpu().numpy()
        indices = indices.detach().cpu().numpy()
        images = images[indices]
        labels = labels[indices]
        boxes = boxes[indices]
        landmarks = landmarks[indices]
        probabilities = probabilities[indices]
        ious = ious[indices]
        annotated_images = []
        for item in zip(images, labels, boxes, landmarks, probabilities, ious):
            image, label, box, landmark, probability, iou = item
            if image.shape[0] == 3:
                image = np.transpose(image, [1,2,0])
            H, W, _ = image.shape
            # Scale normalized coordinates back to pixels.
            label = label * [W,H,W,H, W,H,W,H,W,H,W,H]
            label = label.astype(np.int32)
            box = box * [W,H,W,H]
            box = box.astype(np.int32)
            landmark = landmark * [W,H,W,H,W,H,W,H]
            landmark = landmark.astype(np.int32)
            # Convert center format to corner format for cv2.rectangle.
            label_box_x1 = int(label[0] - label[2] / 2)
            label_box_y1 = int(label[1] - label[3] / 2)
            label_box_x2 = int(label[0] + label[2] / 2)
            label_box_y2 = int(label[1] + label[3] / 2)
            predict_box_x1 = int(box[0] - box[2] / 2)
            predict_box_y1 = int(box[1] - box[3] / 2)
            predict_box_x2 = int(box[0] + box[2] / 2)
            predict_box_y2 = int(box[1] + box[3] / 2)
            label_landmarks = [(int(label[4]), int(label[5])),
                               (int(label[6]), int(label[7])),
                               (int(label[8]), int(label[9])),
                               (int(label[10]), int(label[11]))]
            predict_landmarks = [(int(landmark[0]), int(landmark[1])),
                                 (int(landmark[2]), int(landmark[3])),
                                 (int(landmark[4]), int(landmark[5])),
                                 (int(landmark[6]), int(landmark[7]))]
            image = from_norm_bgr(image, **self.preprocess_opt)
            image = image.astype('uint8')
            image = image.copy()
            cv2.rectangle(image,
                          (label_box_x1, label_box_y1), (label_box_x2, label_box_y2),
                          (0,0,255), thickness=3)
            cv2.rectangle(image,
                          (predict_box_x1, predict_box_y1), (predict_box_x2, predict_box_y2),
                          (255,0,0), thickness=3)
            for i, (x, y) in enumerate(label_landmarks):
                if i == 0:
                    cv2.circle(image, (x,y), 4, (0,255,0), thickness=-1)
                elif i == 2:
                    cv2.circle(image, (x,y), 4, (0,0,255), thickness=-1)
                else:
                    cv2.circle(image, (x,y), 4, (0,255,255), thickness=-1)
            for x, y in predict_landmarks:
                cv2.circle(image, (x,y), 4, (255,0,0), thickness=-1)
            image = image.copy()
            cv2.putText(image, '{:.04f}, {:.04f}'.format(iou, probability),
                        (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
                        1.0, (255, 0, 0), lineType=cv2.LINE_AA)
            image = np.array(image)
            image = np.transpose(image, [2,0,1])
            annotated_images.append(image)
        return annotated_images

    def to_dataframe(self, key_list, boxes, probabilities):
        """Build a DataFrame indexed by key with box coords and probability.

        (Leftover debug prints of the first entries were removed.)
        """
        records = []
        for key, box, probability in zip(key_list, boxes, probabilities):
            x, y, w, h = box
            records.append((key, x, y, w, h, probability))
        df = pd.DataFrame.from_records(
            records, columns=['key', 'x', 'y', 'w', 'h', 'probability'])
        df = df.set_index('key')
        return df

    def _get_cls_targets(self, labels, anchors):
        """Return (cls_targets, on_off) masks over the anchor grid.

        An anchor is a positive target when its IoU with the label box
        exceeds 0.75; anchors with IoU in [0.40, 0.75] are ignored
        (on_off == 0) so they contribute no gradient.
        """
        B, _ = labels.size()
        ious = torch.zeros((labels.size(0), anchors.size(2), anchors.size(3), anchors.size(4))).cuda()
        for i in range(anchors.size(2)):
            for y in range(anchors.size(3)):
                for x in range(anchors.size(4)):
                    ious[:,i,y,x] = self._get_iou(labels, anchors[:,:,i,y,x])
        # ious: (B, num_anchors, H, W)
        ious_max, _ = torch.max(ious, dim=1, keepdim=False)
        ious_max = ious_max.view(B, -1)
        _, ious_max_indices = torch.max(ious_max, dim=1, keepdim=False)
        targets = torch.zeros_like(ious)
        on_off = torch.zeros_like(ious)
        thres_pos = 0.75
        thres_neg = 0.40
        targets[ious > thres_pos] = 1.0
        on_off[ious > thres_pos] = 1.0
        on_off[ious < thres_neg] = 1.0
        targets = targets.float()
        on_off = on_off.float()
        return (targets.view(labels.size(0),1,anchors.size(2),anchors.size(3),anchors.size(4)),
                on_off.view(labels.size(0),1,anchors.size(2),anchors.size(3),anchors.size(4)))

    def _boxes_to_targets(self, boxes, anchors):
        """Encode GT boxes as anchor-relative regression targets."""
        if len(boxes.size()) == 2:
            assert boxes.size(1) == anchors.size(0)
            boxes = boxes.view(boxes.size(0), boxes.size(1), 1, 1, 1)
        tx = (boxes[:,0,:,:,:] - anchors[0,:,:,:]) / anchors[2,:,:,:]
        ty = (boxes[:,1,:,:,:] - anchors[1,:,:,:]) / anchors[3,:,:,:]
        tw = torch.log(boxes[:,2,:,:,:] / anchors[2,:,:,:])
        th = torch.log(boxes[:,3,:,:,:] / anchors[3,:,:,:])
        return torch.stack([tx,ty,tw,th], dim=1)

    def _targets_to_boxes(self, targets, anchors):
        """Decode regression targets back to (x, y, w, h) boxes."""
        x = anchors[:,2] * targets[:,0] + anchors[:,0]
        y = anchors[:,3] * targets[:,1] + anchors[:,1]
        w = anchors[:,2] * torch.exp(targets[:,2])
        h = anchors[:,3] * torch.exp(targets[:,3])
        return torch.stack([x,y,w,h], dim=1)

    def _landmarks_to_targets(self, landmarks, anchors):
        """Encode 4 GT landmark points as anchor-relative targets."""
        if len(landmarks.size()) == 2:
            assert landmarks.size(1) == 8
            landmarks = landmarks.view(landmarks.size(0), landmarks.size(1), 1, 1, 1)
        points = [
            (landmarks[:,0,:,:,:] - anchors[0,:,:,:]) / anchors[2,:,:,:],
            (landmarks[:,1,:,:,:] - anchors[1,:,:,:]) / anchors[3,:,:,:],
            (landmarks[:,2,:,:,:] - anchors[0,:,:,:]) / anchors[2,:,:,:],
            (landmarks[:,3,:,:,:] - anchors[1,:,:,:]) / anchors[3,:,:,:],
            (landmarks[:,4,:,:,:] - anchors[0,:,:,:]) / anchors[2,:,:,:],
            (landmarks[:,5,:,:,:] - anchors[1,:,:,:]) / anchors[3,:,:,:],
            (landmarks[:,6,:,:,:] - anchors[0,:,:,:]) / anchors[2,:,:,:],
            (landmarks[:,7,:,:,:] - anchors[1,:,:,:]) / anchors[3,:,:,:]]
        return torch.stack(points, dim=1)

    def _targets_to_landmarks(self, targets, anchors):
        """Decode anchor-relative landmark targets back to coordinates."""
        points = [
            anchors[:,2] * targets[:,0] + anchors[:,0],
            anchors[:,3] * targets[:,1] + anchors[:,1],
            anchors[:,2] * targets[:,2] + anchors[:,0],
            anchors[:,3] * targets[:,3] + anchors[:,1],
            anchors[:,2] * targets[:,4] + anchors[:,0],
            anchors[:,3] * targets[:,5] + anchors[:,1],
            anchors[:,2] * targets[:,6] + anchors[:,0],
            anchors[:,3] * targets[:,7] + anchors[:,1]]
        return torch.stack(points, dim=1)

    def _get_iou(self, coords_a, coords_b):
        """Element-wise IoU of center-format (x, y, w, h) boxes in [0, 1]."""
        def clamp(v):
            return torch.clamp(v, min=0.0, max=1.0)
        # BUGFIX: area_a previously mixed both boxes' dimensions
        # (coords_a[:,2] * coords_b[:,3]); each area must use its own w*h.
        area_a = coords_a[:,2] * coords_a[:,3]
        area_b = coords_b[:,2] * coords_b[:,3]
        left_tops_x_a = clamp(coords_a[:,0] - coords_a[:,2] / 2)
        left_tops_y_a = clamp(coords_a[:,1] - coords_a[:,3] / 2)
        right_bottoms_x_a = clamp(coords_a[:,0] + coords_a[:,2] / 2)
        right_bottoms_y_a = clamp(coords_a[:,1] + coords_a[:,3] / 2)
        left_tops_x_b = clamp(coords_b[:,0] - coords_b[:,2] / 2)
        left_tops_y_b = clamp(coords_b[:,1] - coords_b[:,3] / 2)
        right_bottoms_x_b = clamp(coords_b[:,0] + coords_b[:,2] / 2)
        right_bottoms_y_b = clamp(coords_b[:,1] + coords_b[:,3] / 2)
        left_tops_x = torch.max(left_tops_x_a, left_tops_x_b)
        left_tops_y = torch.max(left_tops_y_a, left_tops_y_b)
        right_bottoms_x = torch.min(right_bottoms_x_a, right_bottoms_x_b)
        right_bottoms_y = torch.min(right_bottoms_y_a, right_bottoms_y_b)
        width = clamp(right_bottoms_x - left_tops_x)
        height = clamp(right_bottoms_y - left_tops_y)
        intersection = width * height
        return intersection / (area_a + area_b - intersection)
def main():
    """Smoke test: build a LandmarkDetector from the default configuration."""
    print('main')
    from utils.config import _get_default_config

    cfg = _get_default_config()
    cfg.model.params.num_outputs = 4
    cfg.loss.name = 'mse_loss'
    detector = LandmarkDetector(cfg)  # construction itself is the test
# NOTE(review): this guard imports cProfile but never calls main() nor starts
# the profiler -- the file appears to be truncated here; confirm upstream.
if __name__ == '__main__':
    import cProfile
|
<gh_stars>0
_base_ = '../../_base_/models/movinet/movinetA2.py'
# Override the MoViNet-A2 base model's classification head.
# NOTE(review): num_classes=600 while the dataset configured below is UCF101
# (101 classes) -- confirm the head size is intended (e.g. Kinetics-600
# checkpoint evaluation) before training.
model = dict(
    cls_head=dict(
        type='MoViNetHead',
        in_channels=640,
        hidden_dim=2048,
        num_classes=600,
        spatial_type='avg',
        tf_like=True,      # TensorFlow-compatible ops/padding
        causal=False,      # non-streaming variant
        conv_type='3d',
        dropout_ratio=0.,
        # label_smooth_eps=0.1,
        topk=(1,5),
        # loss_cls=dict(type='BCELossWithLogits')
        # loss_cls=dict(type='AsymmetricLossOptimized', gamma_neg=4, gamma_pos=1, disable_torch_grad_focal_loss=True)
        ),
    train_cfg=None,
    #test_cfg=dict(maximize_clips='score')
    test_cfg=dict(average_clips='prob')
)
# Dataset settings: UCF101 videos with the official split files.
dataset_type = 'VideoDataset'
data_root = 'data/ucf101/videos'
data_root_val = 'data/ucf101/videos'
split = 1  # official train/test splits. valid numbers: 1, 2, 3
ann_file_train = f'data/ucf101/ucf101_train_split_{split}_videos.txt'
ann_file_val = f'data/ucf101/ucf101_val_split_{split}_videos.txt'
#ann_file_test = f'data/ucf101/ucf101_val_split_{split}_videos.txt'
# NOTE: testing currently runs over the *train* video list, not the val split.
ann_file_test = 'data/ucf101/ucf101_train_videos.txt'
# BUGFIX: a first img_norm_cfg (mean=[104, 117, 128], std=[1, 1, 1]) was
# assigned and immediately overwritten; the dead assignment is removed so
# only the effective normalization remains.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
# Training pipeline: decode one 32-frame clip (stride 3), resize the short
# side to 256, take three 256x256 crops, normalize, and pack as NCTHW.
# NOTE(review): ThreeCrop with flip_ratio=0 means no random augmentation at
# train time -- confirm this is intended.
train_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=3,
        num_clips=1),
    dict(type='DecordDecode'),
    #dict(type='RandomRescale', scale_range=(256, 320)),
    #dict(type='RandomCrop', size=256),
    #dict(type='Flip', flip_ratio=0.5),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
# Validation pipeline: same sampling but deterministic (test_mode=True) and
# only 'imgs' is collected (no labels).
val_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=3,
        num_clips=1,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    #dict(type='CenterCrop', crop_size=256),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
# Original (heavier) test pipeline kept for reference; disabled due to OOM.
#test_pipeline = [
#    dict(type='DecordInit'),
#    dict(
#        type='SampleFrames',
#        clip_len=32,
#        frame_interval=3,
#        #num_clips=10,
#        num_clips=1,
#        test_mode=True),
#    dict(type='DecordDecode'),
#    dict(type='Resize', scale=(-1, 256)),
#    dict(type='ThreeCrop', crop_size=256),
#    dict(type='Flip', flip_ratio=0),
#    dict(type='Normalize', **img_norm_cfg),
#    dict(type='FormatShape', input_format='NCTHW'),
#    dict(type='Collect', keys=['imgs'], meta_keys=[]),
#    dict(type='ToTensor', keys=['imgs'])
#]
# Active test pipeline: shorter clips and a small fixed resize to avoid the
# out-of-memory errors noted below.
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        #clip_len=32, #32 frames is causing OOM error
        clip_len=16,
        frame_interval=3,
        #num_clips=10,
        num_clips=1,
        test_mode=True),
    dict(type='DecordDecode'),
    #dict(type='Resize', scale=(256, 256)),
    #dict(type='Resize', scale=(224, 224), keep_ratio=False), # 224x224 is causing OOM errors
    dict(type='Resize', scale=(128,128), keep_ratio=False),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
# Dataloaders: batch of 8 clips per GPU for training, 1 for val/test.
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=2,
    val_dataloader=dict(
        videos_per_gpu=1,
        workers_per_gpu=2
    ),
    test_dataloader=dict(
        videos_per_gpu=1,
        workers_per_gpu=2
    ),
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline,
        test_mode=True),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline,
        test_mode=True))
# NOTE(review): mean_average_precision is a multi-label metric while
# VideoDataset/UCF101 is single-label -- confirm this is intended.
evaluation = dict(
    interval=1, metrics=['mean_average_precision'])
# optimizer
optimizer = dict(type='AdamW',
                 lr=0.0001,
                 betas=(0.9, 0.9999),
                 weight_decay=0.05
                 )
# optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy: cosine annealing with a 2-epoch linear warmup
lr_config = dict(
    policy='CosineAnnealing',
    min_lr=0,
    warmup='linear',
    warmup_by_epoch=True,
    warmup_iters=2,
    warmup_ratio=0.01)
total_epochs = 20
# do not use mmdet version fp16
fp16 = None
# fp16 hook; update_interval=4 presumably accumulates gradients over 4
# iterations -- verify against the DistOptimizerHook implementation.
optimizer_config = dict(
    type="DistOptimizerHook",
    update_interval=4,
    grad_clip=None,
    coalesce=True,
    bucket_size_mb=-1,
    use_fp16=True,
)
# runtime settings
checkpoint_config = dict(interval=5)  # checkpoint every 5 epochs
workflow = [('train', 1)]
log_config = dict(
    interval=10,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
log_level = 'INFO'
work_dir = './work_dirs/movinetA2_ucf101/'
# load_from = ('/home/ckai/project/mmaction2/model_zoo/movinet/modelA4_statedict_mm')
# load_from = ('work_dirs/movinetA4/best_mean_average_precision_epoch_38.pth')
load_from = None
find_unused_parameters = False
resume_from = None
dist_params = dict(backend='nccl')
<reponame>ComputerSystemsLaboratory/YaCoS
#! /usr/bin/env python3
"""
Copyright 2021 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#
# Classify applications into 104 classes given their raw code.
#
# The representation (graph) is created from IR.
#
import os
import sys
import glob
import numpy as np
import pandas as pd
from absl import app, flags, logging
from yacos.essential import IO
from yacos.info import compy as R
from yacos.info.compy.extractors import LLVMDriver
from sklearn import model_selection
import matplotlib.pyplot as plt
from silence_tensorflow import silence_tensorflow
silence_tensorflow()
import stellargraph as sg
from stellargraph.mapper import PaddedGraphGenerator
from stellargraph.layer import DeepGraphCNN
from stellargraph import StellarDiGraph
from tensorflow.keras import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Dense, Conv1D, MaxPool1D, Dropout, Flatten
from tensorflow.keras.losses import categorical_crossentropy
def graph2stellar(data):
    """Convert {label: [graphs]} into StellarGraph form.

    Returns the list of StellarDiGraph objects plus a categorical Series of
    the matching labels (one entry per graph, in iteration order).
    """
    stellar_graphs = []
    stellar_labels = []
    for label, graph_list in data.items():
        for g in graph_list:
            node_info = g.get_nodes_inst2vec_embeddings()
            index = [idx for idx, _, _ in node_info]
            embeddings = [features for _, _, features in node_info]
            node_frame = pd.DataFrame(embeddings, index=index)
            stellar_graphs.append(
                StellarDiGraph(node_frame,
                               edges=g.get_edges_dataFrame(),
                               edge_type_column="type"))
            stellar_labels.append(label)
    return stellar_graphs, pd.Series(stellar_labels, name='label', dtype="category")
def prepare_data(data_directory,
                 graph_type):
    """Build one graph per ``.ll`` file, grouped by label (subdirectory name).

    ``graph_type`` selects which LLVM graph visitor to apply.
    """
    # LLVM driver and graph builder.
    driver = LLVMDriver()
    builder = R.LLVMGraphBuilder(driver)
    # Map graph-type name -> visitor class.
    visitors = {'programl': R.LLVMProGraMLVisitor,
                'programlnoroot': R.LLVMProGraMLNoRootVisitor,
                'cfg': R.LLVMCFGVisitor,
                'cfgcompact': R.LLVMCFGCompactVisitor,
                'cfgcall': R.LLVMCFGCallVisitor,
                'cfgcallnoroot': R.LLVMCFGCallNoRootVisitor,
                'cfgcallcompact': R.LLVMCFGCallCompactVisitor,
                'cfgcallcompactnoroot': R.LLVMCFGCallCompactNoRootVisitor,
                'cdfg': R.LLVMCDFGVisitor,
                'cdfgcompact': R.LLVMCDFGCompactVisitor,
                'cdfgcall': R.LLVMCDFGCallVisitor,
                'cdfgcallnoroot': R.LLVMCDFGCallNoRootVisitor,
                'cdfgcallcompact': R.LLVMCDFGCallCompactVisitor,
                'cdfgcallcompactnoroot': R.LLVMCDFGCallCompactNoRootVisitor,
                'cdfgplus': R.LLVMCDFGPlusVisitor,
                'cdfgplusnoroot': R.LLVMCDFGPlusNoRootVisitor}
    representation = {}
    # Each subdirectory of the dataset is one class.
    for subdir in os.listdir(data_directory):
        folder = os.path.join(data_directory, subdir)
        if not os.path.isdir(folder):
            continue
        label = folder.replace('{}/'.format(data_directory), '')
        graphs = []
        for source in glob.glob('{}/*.ll'.format(folder)):
            # Extract the info needed to build the graph, then build it.
            extraction_info = builder.ir_to_info(source)
            graphs.append(builder.info_to_representation(extraction_info,
                                                         visitors[graph_type]))
        representation[label] = graphs
    return representation
def execute(argv):
    """Train and evaluate a DGCNN classifier on graphs extracted from IR.

    Reads the absl flags declared at the bottom of this file, trains a
    DeepGraphCNN on the graphs found under --dataset_directory, and writes
    the training history, dataset summary, test metrics and raw predictions
    into --results_directory.
    """
    del argv  # unused; everything is read from FLAGS

    # Print summary
    print('='*80, flush=True)
    print('Classify applications into 104 classes given their raw code.')
    print('='*80, flush=True)
    FLAGS = flags.FLAGS
    print('Deep Graph Convolutional Neural Network')
    print('='*80, flush=True)

    # Verify dataset directory.
    if not os.path.isdir(FLAGS.dataset_directory):
        logging.error('Dataset directory {} does not exist.'.format(
            FLAGS.dataset_directory)
        )
        sys.exit(1)

    #
    # IMPORT THE DATA
    #
    dataset = prepare_data(FLAGS.dataset_directory, FLAGS.graph)
    graphs, graph_labels = graph2stellar(dataset)

    # Summary statistics of the sizes of the graphs.
    print('Dataset', flush=True)
    summary = pd.DataFrame(
        [(g.number_of_nodes(), g.number_of_edges()) for g in graphs],
        columns=['nodes', 'edges'],
    )
    print('\n', summary.describe().round(1), flush=True)
    print('\n', graph_labels.value_counts().to_frame(), flush=True)
    print('='*80, flush=True)

    # One-hot encode the class values.  The 0-based row index still
    # identifies each graph's position in `graphs`.
    graph_labels = pd.get_dummies(graph_labels)
    classes = graph_labels.shape[1]

    #
    # PREPARE GRAPH GENERATOR (reused below for the train/val/test flows)
    #
    generator = PaddedGraphGenerator(graphs=graphs)

    #
    # CREATE THE KERAS GRAPH CLASSIFICATION MODEL
    #
    # Base DGCNN model with graph convolutional and SortPooling layers.
    k = 35  # number of rows kept by SortPooling
    layer_sizes = [256, 256, 256, classes]
    dgcnn_model = DeepGraphCNN(
        layer_sizes=layer_sizes,
        activations=["relu", "relu", "relu", "relu"],
        k=k,
        bias=False,
        generator=generator,
    )
    x_inp, x_out = dgcnn_model.in_out_tensors()

    # Add the convolutional, max pooling, and dense layers.
    x_out = Conv1D(filters=16,
                   kernel_size=sum(layer_sizes),
                   strides=sum(layer_sizes))(x_out)
    x_out = MaxPool1D(pool_size=2)(x_out)
    x_out = Conv1D(filters=32, kernel_size=5, strides=1)(x_out)
    x_out = Flatten()(x_out)
    x_out = Dense(units=128, activation="relu")(x_out)
    x_out = Dropout(rate=0.5)(x_out)
    outputs = Dense(units=classes, activation="sigmoid")(x_out)

    # Create the model and prepare it for training by specifying
    # the loss and optimisation algorithm.
    model = Model(inputs=x_inp, outputs=outputs)
    model.compile(
        optimizer=Adam(lr=0.0001),
        loss=categorical_crossentropy,
        metrics=["acc"]
    )

    #
    # TRAIN THE MODEL
    #
    # Split the label rows into training, validation and test sets; the
    # DataFrame index of each subset identifies the member graphs.
    train_graphs, test_graphs = model_selection.train_test_split(
        graph_labels,
        train_size=1.0-FLAGS.test_ratio,
        test_size=FLAGS.test_ratio,
        stratify=graph_labels
    )
    train_graphs, val_graphs = model_selection.train_test_split(
        train_graphs,
        train_size=FLAGS.train_ratio,
        test_size=FLAGS.val_ratio,
        stratify=train_graphs
    )
    print('Training:', train_graphs.shape[0], flush=True)
    print('Validation:', val_graphs.shape[0], flush=True)
    print('Test:', test_graphs.shape[0], flush=True)
    print('='*80, flush=True)

    # Prepare the data for training.  BUGFIX: graph_labels carries a 0-based
    # RangeIndex, so index values are already valid positions into `graphs`.
    # The previous `index - 1` (a 1-based idiom copied from the StellarGraph
    # demos) shifted every sample by one, mapping row 0 to the *last* graph.
    # The generator built above is also reused instead of constructing a
    # second identical PaddedGraphGenerator.
    train_gen = generator.flow(
        list(train_graphs.index),
        targets=train_graphs.values,
        batch_size=50,
        symmetric_normalization=False,
    )
    val_gen = generator.flow(
        list(val_graphs.index),
        targets=val_graphs.values,
        batch_size=1,
        symmetric_normalization=False,
    )
    test_gen = generator.flow(
        list(test_graphs.index),
        targets=test_graphs.values,
        batch_size=1,
        symmetric_normalization=False,
    )

    # Train the model
    verbose = 1 if FLAGS.verbose else 0
    history = model.fit(
        train_gen,
        epochs=FLAGS.epochs,
        verbose=verbose,
        validation_data=val_gen,
        shuffle=True
    )

    #
    # EVALUATE THE MODEL
    #
    # Calculate the performance of the trained model on the test data.
    test_metrics = model.evaluate(test_gen)
    if verbose:
        print('='*80, flush=True)
        print('Test Set Metrics', flush=True)
        print('='*80)
    test_metrics_dict = {}
    for name, val in zip(model.metrics_names, test_metrics):
        print('{}: {:0.4f}'.format(name, val), flush=True)
        test_metrics_dict[name] = val

    #
    # PREDICT
    #
    predicted = model.predict(test_gen)

    #
    # STORE THE RESULTS
    #
    # Create the output directory.
    os.makedirs(FLAGS.results_directory, exist_ok=True)
    # Save the history
    IO.dump_yaml(history.history,
                 '{}/history.yaml'.format(FLAGS.results_directory))
    # Save the summary
    IO.dump_yaml(summary.describe().to_dict(),
                 '{}/summary.yaml'.format(FLAGS.results_directory))
    # Save the metrics
    IO.dump_yaml(test_metrics_dict,
                 '{}/test_metrics.yaml'.format(FLAGS.results_directory))
    # Save the prediction.  BUGFIX: `predicted` is row-aligned with test_gen
    # (i.e. with test_graphs); the previous positional slice
    # graph_labels[test_graphs.shape[0]:] selected rows unrelated to the
    # test set.
    np.savez_compressed('{}/predict.npz'.format(FLAGS.results_directory),
                        encoded=test_graphs.values,
                        predicted=predicted)
    # Plot the training history
    # (losses and accuracies for the train and test data).
    figure = sg.utils.plot_history(history, return_figure=True)
    plt.figure(figure)
    plt.savefig('{}/history.pdf'.format(FLAGS.results_directory))
# Execute
if __name__ == '__main__':
    # Command-line flags (absl).  dataset_directory and results_directory
    # are required; the remaining flags have defaults.
    flags.DEFINE_string('results_directory',
                        None,
                        'Results directory')
    flags.DEFINE_string('dataset_directory',
                        None,
                        'Dataset directory')
    # train_ratio/val_ratio apply to the second split (train vs. validation
    # of the non-test portion); test_ratio applies to the first split.
    flags.DEFINE_float('train_ratio',
                       0.75,
                       'Training ratio')
    flags.DEFINE_float('val_ratio',
                       0.25,
                       'Validation ratio')
    flags.DEFINE_float('test_ratio',
                       0.20,
                       'Test ratio')
    flags.DEFINE_integer('epochs',
                         100,
                         'Epochs')
    # Graph representation to extract; must match a key in prepare_data's
    # visitor table.
    flags.DEFINE_enum('graph',
                      'cdfgcallcompactnoroot',
                      [
                          'programl',
                          'programlnoroot',
                          'cfg',
                          'cfgcompact',
                          'cfgcall',
                          'cfgcallnoroot',
                          'cfgcallcompact',
                          'cfgcallcompactnoroot',
                          'cdfg',
                          'cdfgcompact',
                          'cdfgcall',
                          'cdfgcallnoroot',
                          'cdfgcallcompact',
                          'cdfgcallcompactnoroot',
                          'cdfgplus',
                          'cdfgplusnoroot'
                      ],
                      'The type of the graph')
    flags.DEFINE_boolean('verbose',
                         False,
                         'Verbose')
    flags.mark_flag_as_required('dataset_directory')
    flags.mark_flag_as_required('results_directory')
    app.run(execute)
|
# coding: utf-8
'''
magi.machine
~~~~~~~~~~~~
A simple virtual machine implement.
'''
from magi import const, utils
from magi.instruction import Instruction
class MachineRunError(RuntimeError):
    '''Machine runtime error.'''

class MachineHalt(StopIteration):
    '''Halt machine.'''

# Template used by Machine.dump(): registers in hex, the decoded current
# instruction, and a full memory dump.
machine_state_tmpl = '''Machine State:
    PC: 0x{m.pc:02x} ACC: 0x{m.acc:02x} IR: {m.instruction}
    AX: 0x{m.ax:02x} BX: 0x{m.bx:02x}
    CX: 0x{m.cx:02x} DX: 0x{m.dx:02x}
Memory:
{m.mem}
'''
class Machine(object):
DS_SIZE = 1 << 8
def __init__(self):
# PC: points to current instruction.
self.pc = None
# ACC: stores last execution result.
self.acc = None
# Common usage registers.
self.ax = None
self.bx = None
self.cx = None
self.dx = None
# Data storage.
self.mem = None
self.reset()
def reset(self, bytecodes=None):
'''Reset machine.
:param bytecodes: instructions.
'''
# Reset registers.
self.pc = 0
self.acc = 0
self.ax = 0
self.bx = 0
self.cx = 0
self.dx = 0
# Reset data storage.
self.mem = Mem(self.DS_SIZE)
self.mem[0] = 0
# Set instructions stream.
self.bytecodes = bytecodes
@property
def ir(self):
'''Current instruction.'''
if not self.bytecodes or self.pc >= len(self.bytecodes):
return None
return self.bytecodes[self.pc]
@property
def instruction(self):
'''Decoded instruction.'''
ir = self.ir
if ir is None:
return None
return Instruction.from_bytecode(ir)
def get_instruction(self):
'''Get an instruction and advance pc.'''
instruction = self.instruction
if instruction is None:
raise MachineRunError('cannot get instruction')
self.pc = self.pc + 1
return instruction
def get_immediate(self):
'''Get bytecode as immediate value and advance pc.'''
immediate = self.ir
if immediate is None:
raise MachineRunError('cannot get immediate')
self.pc = self.pc + 1
return immediate
def get_register(self, register):
'''Get register value.
:param register: register to be loaded.
'''
if register == const.Registers.ax:
return self.ax
elif register == const.Registers.bx:
return self.bx
elif register == const.Registers.cx:
return self.cx
elif register == const.Registers.dx:
return self.dx
elif register == const.SPRegisters.PC:
return self.pc
elif register == const.SPRegisters.ACC:
return self.pc
elif register == const.SPRegisters.IR:
return self.ir
else:
raise MachineRunError('unsupported register: {0}'.format(register))
def update_register(self, register, value=None):
'''Update an regiseter.
:param register: register to be updated.
:param value: new value, defaults to immediate value.
'''
if value is None:
value = self.get_immediate()
if not isinstance(value, int):
raise MachineRunError('unsupported immediate value type')
if register == const.Registers.ax:
self.ax = value
elif register == const.Registers.bx:
self.bx = value
elif register == const.Registers.cx:
self.cx = value
elif register == const.Registers.dx:
self.dx = value
elif register == const.SPRegisters.PC:
self.pc = value
elif register == const.SPRegisters.ACC:
self.acc = value
else:
raise MachineRunError('unsupported register: {0}'.format(register))
def run(self, bytecodes):
    """Execute *bytecodes* until the machine halts, then dump state.

    :param bytecodes: instruction bytecodes to load and run.
    """
    self.reset(bytecodes)
    while True:
        try:
            # NOTE: attribute name (with its original spelling) is kept,
            # as external code may inspect it.
            current = self.get_instruction()
            self._executing_instrution = current
            current.execute(self)
        except MachineHalt:
            self.dump()
            return
def halt(self):
    """Stop execution by raising :class:`MachineHalt`."""
    raise MachineHalt
def dump(self):
    """Print the machine-state template rendered with this machine."""
    rendered = machine_state_tmpl.format(m=self)
    print(rendered)
def __call__(self, *args, **kwargs):
    """Make the machine callable; delegates directly to :meth:`run`."""
    return self.run(*args, **kwargs)
class Mem(list):
    """Fixed-capacity memory storage, zero-initialised.

    Behaves as a list of ``capacity`` integer cells.
    """

    def __init__(self, capacity):
        # Initialise the underlying list in one shot instead of appending
        # cell-by-cell; also call the superclass initialiser properly.
        super().__init__([0] * capacity)
        self.capacity = capacity

    def __str__(self):
        # Render 16 cells per row as zero-padded hex bytes.
        return '\n'.join(
            ' '.join('0x{0:02x}'.format(cell) for cell in row)
            for row in utils.chunks(self, 16)
        )

    def __repr__(self):
        return self.__str__()
|
<gh_stars>0
"""
Data base to save and recall motor positions
Author: <NAME>
Date created: 2019-05-24
Date last modified: 2019-05-31
"""
__version__ = "1.0.5" # issue: __builtins__.getattr: 'dict' object has no attribute 'getattr'
# Solution: made setattr, getattr static methods
from logging import debug,info,warn,error
from traceback import format_exc
class Configuration_Server(object):
    """Publish configuration and motor properties as EPICS process variables.

    Mirrors the properties of ``configuration_driver.configuration`` through
    CAServer ``casput`` calls and applies remote change requests received via
    ``casmonitor`` callbacks.

    NOTE: the original module was written for Python 2 (``except Exception,msg``);
    the exception syntax has been updated to Python 3 (``except Exception as msg``).
    """

    prefix = "NIH:CONF"

    # Properties of the top-level configuration object.
    global_properties = [
        "configuration_names",
    ]

    # Per-configuration properties mirrored as PVs.
    # (A duplicate "formats" entry has been removed.)
    configuration_properties = [
        "value",
        "values",
        "command_value",
        "title",
        "description",
        "matching_description",
        "closest_descriptions",
        "command_description",
        "command_rows",
        "matching_rows",
        "closest_rows",
        "n_motors",
        "descriptions",
        "updated",
        "formats",
        "nrows",
        "name",
        "motor_names",
        "names",
        "motor_labels",
        "widths",
        "tolerance",
        "description_width",
        "row_height",
        "show_apply_buttons",
        "apply_button_label",
        "show_define_buttons",
        "define_button_label",
        "show_stop_button",
        "serial",
        "vertical",
        "multiple_selections",
        "are_configuration",
        "motor_configuration_names",
        "are_numeric",
        "current_timestamp",
        "applying",
        "show_in_list",
    ]

    # Per-motor properties mirrored as PVs (one PV per motor index).
    motor_properties = [
        "current_position",
        "positions",
        "positions_match",
    ]

    def run(self):
        """Republish all PVs forever at roughly 50 Hz."""
        from time import sleep
        while True:
            self.update()
            sleep(0.02)

    def update(self):
        """Push current property values to their PVs and (re)register monitors."""
        from CAServer import casput, casmonitor
        from configuration_driver import configuration
        for prop in self.global_properties:
            # Use the PV-name helpers consistently instead of inlining the format.
            PV_name = self.global_PV_name(prop)
            value = self.getattr(configuration, prop, expand=True)
            if value is not None:
                casput(PV_name, value, update=False)
            casmonitor(PV_name, callback=self.monitor)
        for conf in configuration.configurations:
            for prop in self.configuration_properties:
                PV_name = self.configuration_PV_name(conf, prop)
                value = self.getattr(conf, prop, expand=True)
                if value is not None:
                    casput(PV_name, value, update=False)
                casmonitor(PV_name, callback=self.monitor)
            for prop in self.motor_properties:
                for motor_num in range(0, conf.n_motors):
                    PV_name = self.motor_PV_name(conf, prop, motor_num)
                    value = self.getitem(self.getattr(conf, prop), motor_num)
                    if value is not None:
                        casput(PV_name, value, update=False)
                    casmonitor(PV_name, callback=self.monitor)

    def monitor(self, PV_name, value, char_value):
        """Handle PV change requests: write the value back to the matching
        property and republish the resulting value."""
        info("%s = %r" % (PV_name, value))
        from configuration_driver import configuration
        from CAServer import casput
        for conf in configuration.configurations:
            for prop in self.configuration_properties:
                if PV_name == self.configuration_PV_name(conf, prop):
                    self.setattr(conf, prop, value)
                    value = self.getattr(conf, prop, expand=True)
                    if value is not None:
                        casput(PV_name, value, update=False)
            for motor_num in range(0, conf.n_motors):
                for prop in self.motor_properties:
                    if PV_name == self.motor_PV_name(conf, prop, motor_num):
                        self.setitem(self.getattr(conf, prop), motor_num, value)
                        value = self.getitem(self.getattr(conf, prop), motor_num)
                        if value is not None:
                            casput(PV_name, value, update=False)

    def global_PV_name(self, prop):
        """PV name for a top-level configuration property."""
        return (self.prefix + "." + prop).upper()

    def configuration_PV_name(self, conf, prop):
        """PV name for a property of one configuration."""
        return (self.prefix + "." + conf.name + "." + prop).upper()

    def motor_PV_name(self, conf, prop, motor_num):
        """PV name for one motor's property (motor numbers are 1-based)."""
        return (self.prefix + "." + conf.name + ".MOTOR" + str(motor_num + 1) + "." + prop).upper()

    # The following are static methods so that the builtin getattr/setattr
    # remain reachable inside their bodies (see module version history).

    @staticmethod
    def getattr(obj, property_name, expand=False):
        """Fault-tolerant getattr: log and return None on failure.

        :param expand: when True, copy sliceable values (``value[:]``) so a
            snapshot rather than a live object is published.
        """
        try:
            value = getattr(obj, property_name)
        except Exception as msg:
            error("%s.%s: %s\n%s" % (obj, property_name, msg, format_exc()))
            value = None
        if expand:
            if hasattr(value, "__getitem__"):
                try:
                    value = value[:]
                except Exception:
                    # BUG FIX: previously referenced ``msg``, which is unbound
                    # when the attribute lookup above succeeded.
                    warn("%s.%s[:]: %s" % (obj, property_name, format_exc()))
        return value

    @staticmethod
    def setattr(obj, property_name, value):
        """Fault-tolerant setattr: log failures instead of raising."""
        debug("setattr(%r,%r,%r)" % (obj, property_name, value))
        try:
            setattr(obj, property_name, value)
        except Exception as msg:
            error("%s.%s = %r: %s\n%s" % (obj, property_name, value, msg, format_exc()))

    @staticmethod
    def getitem(obj, i):
        """Fault-tolerant item access: log and return None on failure."""
        try:
            value = obj[i]
        except Exception as msg:
            error("%s[%r]: %s\n%s" % (obj, i, msg, format_exc()))
            value = None
        if hasattr(value, "__getitem__"):
            try:
                value = value[:]
            except Exception:
                # BUG FIX: previously referenced undefined ``property_name``
                # and possibly-unbound ``msg`` (NameError on this path).
                warn("%s[%r][:]: %s" % (obj, i, format_exc()))
        return value

    @staticmethod
    def setitem(obj, i, value):
        """Fault-tolerant item assignment: log failures instead of raising."""
        debug("setitem(%r,%r,%r)" % (obj, i, value))
        try:
            obj[i] = value
        except Exception as msg:
            error("%s[%r] = %r: %s\n%s" % (obj, i, value, msg, format_exc()))
# Module-level singleton instance used by importers of this module.
configuration_server = Configuration_Server()

if __name__ == '__main__':  # for testing
    from pdb import pm  # for debugging
    from time import time  # for performance testing
    import logging
    # Remove any handlers installed earlier so basicConfig takes effect.
    for h in logging.root.handlers[:]:
        logging.root.removeHandler(h)
    logging.basicConfig(level=logging.DEBUG,
        format="%(asctime)s %(levelname)s %(module)s.%(funcName)s: %(message)s",
    )
    # Convenience alias for interactive sessions.
    self = configuration_server
    # The following are suggested interactive commands; they are printed,
    # not executed, so nothing talks to hardware on import.
    print("from configuration_driver import configuration")
    print("conf = configuration.configurations[0]")
    print("self.getattr(conf,'descriptions')")
    print("value = self.getattr(conf,'descriptions')")
    print("self.setattr(conf,'descriptions',value)")
    print("self.getitem(self.getattr(conf,'current_positions'),0)")
    print("self.getitem(self.getattr(conf,'positions'),0)")
    print("")
    print("configuration_server.update()")
    print("t=time(); configuration_server.update(); time()-t")
    print("configuration_server.run()")
    ##print("")
    ##from CAServer import casget
    ##print("casget(configuration_server.configuration_PV_name(conf,'descriptions'))")
|
<filename>numpyro/nn/block_neural_arn.py<gh_stars>1000+
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import jax
from jax import random
from numpyro.util import _versiontuple
if _versiontuple(jax.__version__) >= (0, 2, 25):
from jax.example_libraries import stax
else:
from jax.experimental import stax
from jax.nn import sigmoid, softplus
from jax.nn.initializers import glorot_uniform, normal, uniform
import jax.numpy as jnp
from numpyro.distributions.util import logmatmulexp, vec_to_tril_matrix
def BlockMaskedDense(
    num_blocks, in_factor, out_factor, bias=True, W_init=glorot_uniform()
):
    """
    Module that implements a linear layer with block matrices with positive diagonal blocks.
    Moreover, it uses Weight Normalization (https://arxiv.org/abs/1602.07868) for stability.
    :param int num_blocks: Number of block matrices.
    :param int in_factor: number of rows in each block.
    :param int out_factor: number of columns in each block.
    :param bool bias: whether to add a learnable bias term to the output.
    :param W_init: initialization method for the weights.
    :return: an (`init_fn`, `update_fn`) pair.
    """
    input_dim, out_dim = num_blocks * in_factor, num_blocks * out_factor
    # construct mask_d, mask_o for formula (8) of Ref [1]
    # Diagonal block mask
    mask_d = np.identity(num_blocks)[..., None]
    mask_d = np.tile(mask_d, (1, in_factor, out_factor)).reshape(input_dim, out_dim)
    # Off-diagonal block mask for upper triangular weight matrix
    mask_o = vec_to_tril_matrix(
        jnp.ones(num_blocks * (num_blocks - 1) // 2), diagonal=-1
    ).T[..., None]
    mask_o = jnp.tile(mask_o, (1, in_factor, out_factor)).reshape(input_dim, out_dim)

    def init_fun(rng, input_shape):
        """Initialize (W, ws[, b]); returns (output_shape, params)."""
        assert input_dim == input_shape[-1]
        # One RNG key per column block, plus keys for the scale and bias.
        *k1, k2, k3 = random.split(rng, num_blocks + 2)
        # Initialize each column block using W_init
        W = jnp.zeros((input_dim, out_dim))
        for i in range(num_blocks):
            # Only the lower-triangular (autoregressive) portion of each
            # column block is initialized; the rest stays zero.
            W = W.at[: (i + 1) * in_factor, i * out_factor : (i + 1) * out_factor].set(
                W_init(k1[i], ((i + 1) * in_factor, out_factor))
            )
        # initialize weight scale (log-space, so exp(ws) > 0)
        ws = jnp.log(uniform(1.0)(k2, (out_dim,)))
        if bias:
            b = (uniform(1.0)(k3, (out_dim,)) - 0.5) * (2 / jnp.sqrt(out_dim))
            params = (W, ws, b)
        else:
            params = (W, ws)
        return input_shape[:-1] + (out_dim,), params

    def apply_fun(params, inputs, **kwargs):
        """Apply the layer to (x, logdet); returns (out, updated logdet)."""
        x, logdet = inputs
        if bias:
            W, ws, b = params
        else:
            W, ws = params
        # Form block weight matrix, making sure it's positive on diagonal!
        w = jnp.exp(W) * mask_d + W * mask_o
        # Compute norm of each column (i.e. each output features)
        w_norm = jnp.linalg.norm(w, axis=-2, keepdims=True)
        # Normalize weight and rescale (weight normalization)
        w = jnp.exp(ws) * w / w_norm
        out = jnp.dot(x, w)
        if bias:
            out = out + b
        # Log-Jacobian contribution of this layer, in log space.
        dense_logdet = ws + W - jnp.log(w_norm)
        # logdet of block diagonal
        dense_logdet = dense_logdet[mask_d.astype(bool)].reshape(
            num_blocks, in_factor, out_factor
        )
        if logdet is None:
            # First layer: broadcast to include the batch dimensions of x.
            logdet = jnp.broadcast_to(dense_logdet, x.shape[:-1] + dense_logdet.shape)
        else:
            # Chain rule in log space: matrix product via logmatmulexp.
            logdet = logmatmulexp(logdet, dense_logdet)
        return out, logdet

    return init_fun, apply_fun
def Tanh():
    """
    Tanh nonlinearity with its log jacobian.
    :return: an (`init_fn`, `update_fn`) pair.
    """

    def init_fun(rng, input_shape):
        # Parameter-free, shape-preserving layer.
        return input_shape, ()

    def apply_fun(params, inputs, **kwargs):
        x, logdet = inputs
        y = jnp.tanh(x)
        # log|d tanh(x)/dx| = log(1 - tanh(x)^2), computed in the numerically
        # stable form -2 * (x + softplus(-2x) - log 2).
        jac = -2 * (x + softplus(-2 * x) - jnp.log(2.0))
        # logdet has shape batch + (num_blocks, in_factor, out_factor) while
        # jac has shape batch + (num_blocks * out_factor,); reshape jac to
        # batch + (num_blocks, 1, out_factor) so the addition broadcasts.
        jac = jac.reshape(logdet.shape[:-2] + (1, logdet.shape[-1]))
        return y, logdet + jac

    return init_fun, apply_fun
def FanInResidualNormal():
    """
    Similar to stax.FanInSum but also keeps track of log determinant of Jacobian.
    It is required that the second fan-in branch is identity.
    The combined diagonal Jacobian is ``f'(x) + 1``, which in log space is
    ``softplus(logdet)``.
    :return: an (`init_fn`, `update_fn`) pair.
    """

    def init_fun(rng, input_shape):
        # Both branches share the first branch's shape; no parameters.
        return input_shape[0], ()

    def apply_fun(params, inputs, **kwargs):
        # Residual combination: f(x) + x.
        (fx, logdet), (x, _) = inputs
        out = fx + x
        return out, softplus(logdet)

    return init_fun, apply_fun
def FanInResidualGated(gate_init=normal(1.0)):
    """
    Similar to FanInNormal uses a learnable parameter `gate` to interpolate two fan-in branches.
    It is required that the second fan-in branch is identity.
    :param gate_init: initialization method for the gate.
    :return: an (`init_fn`, `update_fn`) pair.
    """

    def init_fun(rng, input_shape):
        # Single scalar gate parameter; output shape follows the first branch.
        return input_shape[0], gate_init(rng, ())

    def apply_fun(params, inputs, **kwargs):
        # Interpolate: a * f(x) + (1 - a) * x with a = sigmoid(gate).
        (fx, logdet), (x, _) = inputs
        a = sigmoid(params)
        blended = a * fx + (1 - a) * x
        # log(a * exp(logdet) + (1 - a)) expressed via softplus identities.
        new_logdet = softplus(logdet + params) - softplus(params)
        return blended, new_logdet

    return init_fun, apply_fun
def BlockNeuralAutoregressiveNN(input_dim, hidden_factors=(8, 8), residual=None):
    """
    An implementation of Block Neural Autoregressive neural network.
    **References**
    1. *Block Neural Autoregressive Flow*,
    <NAME>, <NAME>, <NAME>
    :param int input_dim: The dimensionality of the input.
    :param hidden_factors: Hidden layer i has ``hidden_factors[i]`` hidden units per
        input dimension. This corresponds to both :math:`a` and :math:`b` in reference [1].
        The elements of hidden_factors must be integers.
        (The default is now an immutable tuple; any iterable of ints works.)
    :param str residual: Type of residual connections to use. One of `None`, `"normal"`, `"gated"`.
    :return: an (`init_fn`, `update_fn`) pair.
    """
    # Fail fast on typos instead of silently falling back to "normal".
    if residual not in (None, "normal", "gated"):
        raise ValueError("residual should be one of None, 'normal', 'gated'.")
    layers = []
    in_factor = 1
    # Alternate masked-dense and Tanh layers; track the fan-in factor.
    for hidden_factor in hidden_factors:
        layers.append(BlockMaskedDense(input_dim, in_factor, hidden_factor))
        layers.append(Tanh())
        in_factor = hidden_factor
    # Final projection back to one output per input dimension.
    layers.append(BlockMaskedDense(input_dim, in_factor, 1))
    arn = stax.serial(*layers)
    if residual is not None:
        FanInResidual = (
            FanInResidualGated if residual == "gated" else FanInResidualNormal
        )
        # Wrap the network in a residual connection: arn(x) combined with x.
        arn = stax.serial(
            stax.FanOut(2), stax.parallel(arn, stax.Identity), FanInResidual()
        )

    def init_fun(rng, input_shape):
        return arn[0](rng, input_shape)

    def apply_fun(params, inputs, **kwargs):
        # The per-block logdet is collapsed back to the input's shape.
        out, logdet = arn[1](params, (inputs, None), **kwargs)
        return out, logdet.reshape(inputs.shape)

    return init_fun, apply_fun
|
"""
Pyro4 compatibility layer for RA's support module.
"""
import logging
import time
import os
import signal
import re
import Pyro4
from support.tunneling import Tunnel
from support.process import invoke, search_response, BasicProcess
from support.logs import logging_config
from pyro3_util import full_name
from pyro4_client import AutoReconnectingProxy
# Work on a module-local copy so the shared pyro3_util mapping is untouched;
# 'localhost' never needs a tunnel, so drop it if present.
full_name = full_name.copy()
full_name.pop('localhost', None)  # no-op when the key is absent

module_logger = logging.getLogger(__name__)
logging_config(logger=module_logger, loglevel=logging.DEBUG)
class Pyro4ObjectDiscoverer(object):
    """
    A class used to represent a set of ssh tunnels to a Pyro4 object located remotely.
    This is meant to be a replacement for Tom's support.pyro.get_device_server function.
    This does not just work for JPL tunnels, however. We can tunnel to arbitrary IP addresses
    and gain access to Pyro objects as well.
    Example Usage:
    If we want to get the URI of the APC Pyro object on crux, we would do the following:
    ```
    crux_tunnel = Pyro4ObjectDiscoverer("crux", remote_ns_host='localhost', remote_ns_port=50000,
                                        tunnel_username='ops', username='ops')
    apc = crux_tunnel.get_pyro_object('APC')
    ```
    To create the APC client, we would have to send the URI to TAMS_BackEnd.clients.APCClient:
    ```
    apc_client = TAMS_BackEnd.clients.APCClient(proxy=apc)
    # now we can call APC methods.
    apc_client.get_azel()
    ```
    Let's say I wanted to find an object on a remote server, but that remote server wasn't on
    the JPL network. I might do the following:
    ```
    remote_discoverer = Pyro4ObjectDiscoverer('192.168.0.2', remote_ns_host='localhost', remote_ns_port=2224,
                                              username='user', port=2222)
    basic_server = remote_discoverer.get_pyro_object('BasicServer')
    print(basic_server.name)
    >> u'BasicServer'
    ```
    Public Attributes:
        remote_server_name (str): The name or ip address of the remote server
        remote_port (int): The remote port to be used for tunneling.
            This should be a listening port on the remote server.
        remote_ns_port (int): The remote nameserver port
        remote_ns_host (str): The remote nameserver host name
        local_forwarding_port (int): The local port on which to listen;
            the local port we used for port forwarding
        tunnel_username (str): The username for the creation of a support.tunneling.Tunnel.
            This could be, for example, a login to the JPL ops gateway.
        remote_username (str): The username to use for port forwarding. On crux, this would be 'ops'
        logger (logging.getLogger): Return value of logging_util.logging_config
        processes (list): A list of the running processes. These processes might be
            subprocess.Popen instances, or BasicProcess instances.
        uris (dict): A dictionary of Pyro4 URI objects. The keys are the server names
            on the remote nameserver.
    Public Methods:
        get_pyro_object: Creates a tunnel to the remote server (if not in place already)
            and then creates tunnels to the nameserver and the requested object.
        cleanup: Kill the processes that are associated with the nameserver and the requested
            object(s).
    Private Attributes:
        local (bool): Is this a local connection (ie, on this same computer)?
    """

    def __init__(self,
                 remote_server_name='localhost',
                 remote_port=None,
                 remote_ns_port=50000,
                 remote_ns_host='localhost',
                 local_forwarding_port=None,
                 tunnel_username=None,
                 remote_username=None,
                 loglevel=logging.INFO,
                 **kwargs):
        """
        Create a Pyro4ObjectDiscoverer object.
        Args:
            remote_server_name (str): Name or ip of remote server.
        Keyword Args:
            remote_port (int): The remote listening port.
            remote_ns_port (int): The remote nameserver port
            remote_ns_host (str): The remote nameserver host name
            local_forwarding_port (int): The local port on which to listen;
                the local port we used for port forwarding (defaults to remote_ns_port)
            tunnel_username (str): The username for the creation of a support.tunneling.Tunnel.
                This could be, for example, a login to the JPL ops gateway.
            remote_username (str): The username to use for port forwarding. On crux, this would be 'ops'
            **kwargs: For logging_util.logging_config
        """
        self.remote_server_name = remote_server_name
        self.remote_ns_host = remote_ns_host
        self.remote_ns_port = remote_ns_port
        # Default the local forwarding port to the nameserver port.
        if not local_forwarding_port: local_forwarding_port = remote_ns_port
        self.local_forwarding_port = local_forwarding_port
        self.tunnel_username = tunnel_username
        self.remote_username = remote_username
        logger = logging.getLogger(module_logger.name + ".Pyro4Tunnel")
        self.logger = logging_config(logger=logger, loglevel=loglevel, **kwargs)
        self.processes = []
        # Three cases: known JPL host (use support Tunnel), localhost
        # (possibly still tunneled via remote_port), or arbitrary IP.
        if remote_server_name in full_name.keys():
            self.local = False
            self.logger.debug("Checking for existing Tunnel.")
            # NOTE(review): Tunnel presumably reuses an existing ssh tunnel
            # if one is up — confirm against support.tunneling.
            self.tunnel = Tunnel(remote_server_name, username=tunnel_username)
            self.remote_port = self.tunnel.port
            self.remote_server_ip = 'localhost'
        elif remote_server_name == 'localhost':
            self.remote_server_ip = 'localhost'
            if remote_port:
                # 'localhost' with an explicit port still means a tunneled host.
                self.local = False
                self.remote_port = remote_port
            else:
                self.local = True
        else:
            self.local = False
            self.logger.debug("Provided server name not on JPL network.")
            self.tunnel = None
            self.remote_server_ip = remote_server_name
            self.remote_port = remote_port
        if self.local:
            self.logger.debug("Local nameserver host:port: {}:{}".format(self.remote_ns_host, self.remote_ns_port))
            self.ns = Pyro4.locateNS(host=self.remote_ns_host, port=self.remote_ns_port)
        else:
            self.ns = self.find_nameserver(self.remote_server_ip,
                                           self.remote_ns_host,
                                           self.remote_ns_port,
                                           self.local_forwarding_port,
                                           self.remote_port,
                                           self.remote_username)
        self.uris = {}
        self.requested_objects = []

    def find_nameserver(self,
                        remote_server_ip,
                        remote_ns_host,
                        remote_ns_port,
                        local_forwarding_port,
                        remote_port,
                        remote_username):
        """
        Get the nameserver sitting on remote_ns_port on the remote server.
        We explicitly pass arguments instead of using attributes so we can
        use this method outside of __init__.
        Args:
            remote_server_ip (str): The IP address of remote server.
            remote_ns_host (str): The hostname of the remote nameserver
                (I don't imagine a situation in which this would change)
            remote_ns_port (int): The port of the remote nameserver
            local_forwarding_port (int): The local port to use for forwarding.
            remote_port (int): A listening port on remote
            remote_username (str): The username to use for port forwarding.
        Returns:
            Pyro4.naming.NameServer instance or
            None if can't be found.
        """
        self.logger.debug("Remote server IP: {}".format(remote_server_ip))
        # Forward local_forwarding_port to the remote nameserver port.
        proc_ns = arbitrary_tunnel(remote_server_ip, 'localhost', local_forwarding_port,
                                   remote_ns_port, username=remote_username, port=remote_port)
        self.processes.append(proc_ns)
        # Poll until the nameserver answers through the tunnel.
        if check_connection(Pyro4.locateNS, kwargs={'host': remote_ns_host, 'port': local_forwarding_port}):
            ns = Pyro4.locateNS(host=remote_ns_host, port=local_forwarding_port)
            return ns
        else:
            self.logger.error("Couldn't connect to the remote Nameserver", exc_info=True)
            return None

    def register_daemon(self, daemon):
        """
        Open a reverse tunnel so the remote side can reach a local daemon.
        Args:
            daemon (Pyro4.Daemon): daemon whose listening port will be
                reverse-forwarded to the remote server.
        Returns:
            None
        """
        if self.local:
            return None
        else:
            daemon_host, daemon_port = daemon.locationStr.split(":")
            # reverse=True: the remote end listens and forwards back to us.
            proc_daemon = arbitrary_tunnel(self.remote_server_ip, 'localhost', daemon_port,
                                           daemon_port, username=self.remote_username,
                                           port=self.remote_port, reverse=True)
            self.processes.append(proc_daemon)

    def get_pyro_object(self, remote_obj_name, use_autoconnect=False):
        """
        Say we wanted to connect to the APC server on crux, and the APC server
        was sitting on nameserver port 50000 on crux. We could do this as follows:
        Args:
            remote_obj_name (str): The name of the Pyro object.
        Keyword Args:
            use_autoconnect (bool): Wrap the proxy in AutoReconnectingProxy.
        Returns:
            Pyro4.Proxy for the requested pyro object, or
            None if connection wasn't successful.
        """
        try:
            obj_uri = self.ns.lookup(remote_obj_name)
        except AttributeError:
            # self.ns is None when find_nameserver failed.
            self.logger.error("Need to call find_nameserver.")
            return None
        self.requested_objects.append(remote_obj_name)
        if use_autoconnect:
            obj_proxy = AutoReconnectingProxy(obj_uri)
        else:
            obj_proxy = Pyro4.Proxy(obj_uri)
        if self.local:
            return obj_proxy
        elif not self.local:
            # Tunnel the object's own daemon port before using the proxy.
            obj_host, obj_port = obj_uri.location.split(":")
            proc_obj = arbitrary_tunnel(self.remote_server_ip, 'localhost', obj_port,
                                        obj_port, username=self.remote_username, port=self.remote_port)
            self.processes.append(proc_obj)
            # if check_connection(getattr, args=(obj_proxy, 'name')): # We are trying to get property, hence getattr
            if check_connection(obj_proxy._pyroBind):
                self.uris[remote_obj_name] = obj_uri
                return obj_proxy
            else:
                self.logger.error("Couldn't connect to the object", exc_info=True)
                return None

    def cleanup(self):
        """
        Kill all the existing tunnels that correspond to processes created
        Returns:
            None
        """
        # try:
        #
        #     ns = self.ns
        #     for name in self.requested_objects:
        #         ns.remove(name)
        # except AttributeError as err:
        #     self.logger.error("cleanup: Couldn't remove requested objects from the nameserver: {}".format(err))
        self.logger.debug("Cleaning up ssh connections.")
        for proc in self.processes:
            proc.kill()

    def __enter__(self):
        # Context-manager support: tunnels are cleaned up on exit.
        return self

    def __exit__(self, *args):
        self.cleanup()
def check_connection(callback, timeout=1.0, attempts=10, args=(), kwargs=None):
    """
    Check to see if a connection is viable, by running a callback.
    Args:
        callback: The callback to test the connection
    Keyword Args:
        timeout (float): The amount of time to wait before trying again
        attempts (int): The number of times to try to connect.
        args: To be passed to callback
        kwargs: To be passed to callback
    Returns:
        bool: True if the connection was successful, False if not successful.
    """
    # BUG FIX: a mutable dict default is shared across calls; use None sentinel.
    if kwargs is None:
        kwargs = {}
    for _attempt in range(attempts):
        try:
            callback(*args, **kwargs)
        except Exception as e:
            module_logger.debug("Connection failed: {}. Timing out".format(e))
            time.sleep(timeout)
        else:
            module_logger.info("Successfully connected.")
            return True
    module_logger.error("Connection failed completely.")
    return False
def arbitrary_tunnel(remote_ip, relay_ip,
                     local_port, remote_port,
                     port=22, username='', reverse=False):
    """
    Create an arbitrary ssh tunnel, after checking to see if a tunnel already exists.
    This just spawns the process that creates the tunnel, it doesn't check to see if the tunnel
    has successfully connected.
    Executes the following command:
    ```
    ssh -p {port} -l {username} -L {local_port}:{relay_ip}:{remote_port} {remote_ip}
    ```
    Args:
        remote_ip (str): The remote, or target ip address.
            For local port forwarding this can be localhost
        relay_ip (str): The relay ip address.
        local_port (int): The local port on which we listen
        remote_port (int): The remote port on which we listen
    Keyword Args:
        port (int): The -p argument for ssh
        username (str): The username to use for tunneling
        reverse (bool): Use remote (-R) instead of local (-L) forwarding
    Returns:
        subprocess.Popen: if there isn't an existing process corresponding to tunnel:
            or else BasicProcess instance, the corresponds to already running tunnel command.
    """
    #-c arcfour -o ServerAliveInterval=60 -o TCPKeepAlive=no
    # First thing is check to see if the remote_ip is ~/.ssh/config
    home_dir = os.path.expanduser("~")
    ssh_config = os.path.join(home_dir, ".ssh/config")
    with open(ssh_config, 'r') as config:
        contents = config.read()
    # NOTE(review): this pattern is case-sensitive and assumes lowercase
    # "host " stanzas in ~/.ssh/config — confirm that matches local usage.
    pattern = "host (.*)\n"
    hosts = [match for match in re.findall(pattern, contents)]
    r_option = "-L"
    if reverse:
        r_option = "-R"
    if remote_ip in hosts:
        # Host is configured in ~/.ssh/config: let ssh supply user/port.
        command = "ssh -N {0} {1}:{2}:{3} {4}"
        command = command.format(r_option, local_port, relay_ip, remote_port, remote_ip)
    else:
        command = "ssh -N -l {0} -p {1} {2} {3}:{4}:{5} {6}"
        command = command.format(username, port, r_option, local_port, relay_ip, remote_port, remote_ip)
    # The forwarding spec alone identifies an already-running tunnel.
    command_relay = "{0} {1}:{2}:{3} {4}".format(r_option, local_port, relay_ip, remote_port, remote_ip)
    # module_logger.debug(command_relay)
    ssh_proc = search_response(['ps', 'x'], ['grep', 'ssh'])
    # re_pid = re.compile("\d+")
    # re_name = re.compile("ssh.*")
    for proc in ssh_proc:
        if command_relay in proc:
            # Tunnel already up: wrap the existing ps entry instead of respawning.
            module_logger.debug("Found matching process: {}".format(proc))
            # proc_id = int(re_pid.findall(proc)[0])
            # proc_name = re_name.findall(proc)[0]
            return BasicProcess(ps_line=proc, command_name='ssh')
            # return BasicProcess(name=proc_name, pid=proc_id)
    module_logger.debug("Invoking command {}".format(command))
    p = invoke(command)
    return p
if __name__ == '__main__':
    # Ad-hoc smoke test: forward local port 2222 to nameserver port 50000
    # through ssh port 50046 as user 'ops'.
    proc = arbitrary_tunnel('localhost', 'localhost', 2222, 50000, port=50046, username='ops')
<reponame>Arlind1992/PolimiResearch
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 19 16:27:01 2019
@author: arlind
"""
import ai_analysis.join_data_different_sources as ds
import sys
import pandas as pd
from PyQt5.QtWidgets import QApplication, QMainWindow, QMenu, QVBoxLayout, QSizePolicy, QMessageBox, QWidget, QPushButton,QInputDialog,QListWidget
from PyQt5.QtWidgets import QGridLayout, QDesktopWidget
from PyQt5.QtGui import QIcon
import ai_analysis.data_loading.load_data_locally as ldl
class App(QMainWindow):
    """Qt main window for browsing market data and plotting per-product series."""

    def __init__(self):
        super().__init__()
        self.title = 'Market Parameter Tool'
        self.width = 540
        self.height = 500
        self.material = ''
        # Keep references to child windows so Qt doesn't garbage-collect them.
        self.dialogs = list()
        self.initUI()

    def divide_market_by_product(self, market):
        """Aggregate *market* rows by 'Product-Manufacturer' and store the result
        in self.divided_market (columns are date strings, values are floats)."""
        divided_market = market.drop(columns=['Pack', 'Anatomical Therapeutic Class 4', 'Molecule', 'Key'])
        divided_market.index = divided_market['Product'] + '-' + divided_market['Manufacturer']
        divided_market = divided_market.drop(columns=['Product', 'Manufacturer'])
        divided_market[divided_market.columns] = divided_market[divided_market.columns].astype(float)
        # Sum duplicate Product-Manufacturer rows (e.g. multiple packs).
        divided_market = divided_market.groupby(divided_market.index).sum()
        self.divided_market = divided_market

    def get_perimeter_for_key(self, market_data, perimeter, key):
        """Return (perimeter-filtered market, molecule-filtered market) for *key*.

        The perimeter rows are selected by 'Special Market' when present,
        otherwise by 'Mkt Molecola'.
        """
        market_data['Key'] = market_data['Product'] + ' ' + market_data['Pack']
        market_data_perimeter_filtered = market_data.merge(perimeter[perimeter['Key'] == key], how='inner', on='Key')
        market_by_molecule = perimeter[(perimeter['Mkt Molecola'] == market_data_perimeter_filtered['Mkt Molecola'].iloc[0])]
        # str(...)!='nan' is the NaN test used throughout: 'Special Market' may be missing.
        if str(market_data_perimeter_filtered['Special Market'].iloc[0]) != 'nan':
            perimeter_to_join_by = perimeter[perimeter['Special Market'] == market_data_perimeter_filtered['Special Market'].iloc[0]]
        else:
            perimeter_to_join_by = perimeter[(perimeter['Mkt Molecola'] == market_data_perimeter_filtered['Mkt Molecola'].iloc[0]) & (perimeter['Special Market'].isnull())]
        to_return = market_data.merge(perimeter_to_join_by['Key'].to_frame(), how='inner', on='Key')
        by_molecule = market_data.merge(market_by_molecule['Key'].to_frame(), how='inner', on='Key')
        return to_return, by_molecule

    def show_products(self, item):
        """Open a window listing products of the selected market and plot all series."""
        self.divide_market_by_product(self.selected_market)
        window = QMainWindow()
        list_w = QListWidget(window)
        list_w.addItems(list(self.divided_market.index))
        list_w.resize(400, 600)
        list_w.itemClicked.connect(self.plot_diagram_product)
        window.resize(400, 600)
        self.dialogs.append(window)
        # Transpose so dates become the index, then plot every product series.
        divided_market_to_plot = self.divided_market.T
        divided_market_to_plot.index = pd.to_datetime(divided_market_to_plot.index, format='%d/%m/%Y')
        divided_market_to_plot.plot()
        window.show()

    def plot_diagram_product(self, item):
        """Plot the time series of the single product clicked in the list."""
        divided_market_to_plot = self.divided_market.T
        divided_market_to_plot.index = pd.to_datetime(divided_market_to_plot.index, format='%d/%m/%Y')
        df = divided_market_to_plot[item.text().strip()].to_frame()
        df.plot()

    def plot_diagram(self, item):
        """Plot perimeter and molecule aggregates for the clicked product key."""
        to_return, bymolecule = self.get_perimeter_for_key(self.market_data, self.perimeter, item.text().strip())
        self.selected_market = to_return
        to_return = to_return.drop(columns=['Manufacturer', 'Product', 'Pack', 'Anatomical Therapeutic Class 4', 'Molecule', 'Key'])
        to_return[to_return.columns] = to_return[to_return.columns].astype(float)
        to_return = to_return.sum()
        to_return = to_return.T
        to_return.index = pd.to_datetime(to_return.index, format='%d/%m/%Y')
        to_return.sort_index().plot(title='Market by perimeter')
        bymolecule = bymolecule.drop(columns=['Manufacturer', 'Product', 'Pack', 'Anatomical Therapeutic Class 4', 'Molecule', 'Key'])
        bymolecule[bymolecule.columns] = bymolecule[bymolecule.columns].astype(float)
        bymolecule = bymolecule.sum()
        bymolecule = bymolecule.T
        # NOTE(review): the molecule series is re-indexed with to_return's dates —
        # presumably both cover the same date columns; verify against the data.
        bymolecule.index = pd.to_datetime(to_return.index, format='%d/%m/%Y')
        new_series_toprint = bymolecule.sort_index().copy()
        new_series_toprint.plot(title='Market by molecule')
        self.show_products(item)

    def initUI(self):
        """Load data, build the SANDOZ product list and wire up click handling."""
        self.setWindowTitle(self.title)
        list_w = QListWidget(self)
        self.market_data = ldl.load_market_data()
        self.perimeter = ldl.load_market_perimeter_doc()
        # Keep only SANDOZ rows with non-zero sales in Sep-Nov 2018.
        market_data_sandoz = self.market_data[(self.market_data['Manufacturer'] == 'SANDOZ') & (self.market_data['01/9/2018'] != 0) & (self.market_data['01/10/2018'] != 0) & (self.market_data['01/11/2018'] != 0)]
        to_add_items = (market_data_sandoz['Product'] + ' ' + market_data_sandoz['Pack'])
        list_w.addItems(list(to_add_items.sort_values()))
        list_w.itemClicked.connect(self.plot_diagram)
        list_w.resize(400, 600)
        self.resize(400, 600)
        self.show()
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the window,
    # and hand control to the event loop.
    app = QApplication(sys.argv)
    ex = App()
    sys.exit(app.exec_())
#!/usr/bin/env python
import sys
from collections import defaultdict
from networkx import shortest_path
import json
iii = None
def pathFromRoot(graph, cand, node, root):
    """Return a dotted path of edge labels from *root* to *node*.

    The path starts with *root*, then the label of each traversed edge
    (excluding the final hop to the leaf), and ends with the relation part
    of the leaf node name (everything after its last '.').  Unlabelled
    edges contribute "missing".
    """
    hops = shortest_path(graph, root, node)
    components = [root]
    interior = hops[0:-1]
    for pair in zip(interior, interior[1:]):
        components.append(graph.labelInGraph(pair) or "missing")
    # The terminus is a leaf node named class.relation; keep only the relation.
    components.append(node.split('.')[-1])
    return ".".join(components)
class Outline(object):
    """Build an intermediate representation of a keyword query interpreted
    against a schema graph: required constraints ("must"), free-text terms
    ("should"), and mentioned classes/relations."""

    def __init__(self, graph, subgraph, query, root, verbose=False, explain=False, **kwargs):
        # graph: full schema graph; subgraph: query-relevant portion;
        # query: keyword query with ngram candidates; root: path anchor node.
        self.graph = graph
        self.subgraph = subgraph
        self.query = query
        self.root = root
        self.verbose = verbose
        self.explain = explain

    def intermediate(self):
        """Return the intermediate representation dict (also stored in the
        module-global ``iii`` for interactive inspection)."""
        global iii
        relationsMentioned = []
        classesMentioned = []
        must = []
        should = []
        i = defaultdict(list)
        i["root"] = self.root
        # to begin with, no terms are covered
        touches = defaultdict(list)
        for a in self.query.ngrams.values():
            for cand in a["candidates"]:
                if cand.referentType == 'node':
                    node = cand.referent
                    if self.graph.isLeaf(node):
                        # Leaf node corresponds to an equality/fuzzy relation constraint
                        m = {"path": pathFromRoot(self.graph, cand, node, self.root),
                             "matchType": "direct" if cand.candidateType == "direct" else "inferred",
                             # "operands": [cand.referent, cand.content],
                             "className": cand.referent.split('.')[0],
                             "relationName": cand.referent.split('.')[1],
                             "value": cand.content}
                        if self.explain:
                            m["_explanation"] = cand.explain(self.explain)
                        must.append(m)
                    else:
                        # Other node corresponds to mention of a class (e.g., the word 'seller' is mentioned)
                        m = {"className": self.graph.labelInGraph(node)}
                        if self.explain:
                            m["_explanation"] = cand.explain(self.explain)
                        classesMentioned.append(m)
                    # Record (possibly partial) coverage of query terms
                    for w in a["words"]:
                        t = {"term": w,
                             "foundIn": "node"}
                        if self.explain:
                            t["_explanation"] = cand.explain(self.explain)
                        touches[w].append(t)
                elif cand.referentType == 'edge':
                    edge = cand.referent
                    # Edge match corresponds to mention of an edge
                    # May or may not correspond to relation constraint on that edge
                    # In future, this might mean we want result to include its class
                    m = {"className": self.graph.labelInGraph(edge[0]),
                         "relationName": self.graph.labelInGraph(edge)}
                    if self.explain:
                        m["_explanation"] = cand.explain(self.explain)
                    relationsMentioned.append(m)
                    # Record (possibly partial) coverage of query terms
                    for w in a["words"]:
                        t = {"term": w,
                             "foundIn": "edge"}
                        if self.explain:
                            t["_explanation"] = cand.explain(self.explain)
                        touches[w].append(t)
        # Any terms never covered are now free-text matches
        for term in self.query.terms:
            if not touches[term]:
                s = {"matchType": "free",
                     "operands": [term]}
                if self.explain:
                    s["_explanation"] = "{} uninterpretable".format(term)
                should.append(s)
        i["touches"] = touches
        i["relationsMentioned"] = relationsMentioned
        i["classesMentioned"] = classesMentioned
        i["must"] = must
        i["should"] = should
        # Stash in the module-global for debugging/inspection.
        iii = i
        return i

    def detail(self, file=sys.stdout):
        """Print a human-readable dump of inputs and the intermediate repn."""
        # print (root,g,q,s,m,wg,sg)
        print("", file=file)
        if self.verbose:
            print("\nRoot {}".format(self.root), file=file)
        print("\nDetail of outline {}".format(self), file=file)
        print("Input Graph: {}".format(self.graph), file=file)
        print("Input Keywords: {}".format(self.query.terms), file=file)
        print("Input Keyword Coloring: \n{}".format(self.query.dumpToString(indent=2)), file=file)
        print("Relevant Subgraph: {}".format(self.subgraph), file=file)
        print("Intermediate Repn:", file=file)
        print(json.dumps(self.intermediate(), sort_keys=True, indent=4), file=file)
|
<reponame>itavero/revprox
#!/usr/bin/env python3
import argparse
from git import Repo
from pathlib import Path
import sys
from blessings import Terminal
import os
import yaml
import pprint
import sewer
import traceback
from datetime import datetime, timedelta
from OpenSSL import crypto
import nginx
from shutil import which
def create_dir(directory_path):
    """Ensure *directory_path* exists as a writable directory.

    Exits the program with an error message when the path exists but is
    not a directory, exists but is not writable, or cannot be created.
    Returns None on success.
    """
    p = Path(directory_path)
    if p.exists():
        if not p.is_dir():
            sys.exit('Path exists but is not a directory: ' + directory_path)
        if not os.access(str(p), os.W_OK):
            # Message fixed: the check above tests write access (W_OK),
            # so the original "not readable" wording was wrong.
            sys.exit('Path is not writable: ' + directory_path)
        return
    try:
        p.mkdir(parents=True, exist_ok=True)
    except OSError:
        # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # are no longer swallowed; mkdir failures raise OSError.
        sys.exit('Failed to create storage path.\n' + traceback.format_exc())
def all_subclasses(cls):
    """Return every direct and indirect subclass of *cls* as a set."""
    found = set()
    for sub in cls.__subclasses__():
        found.add(sub)
        found |= all_subclasses(sub)
    return found
def all_available_dns_types():
    """Map DNS provider names to their sewer provider classes.

    Keys are each subclass's ``dns_provider_name`` when present and
    truthy, otherwise the class name itself.
    """
    providers = {}
    for provider_cls in all_subclasses(sewer.BaseDns):
        name = provider_cls.__name__
        try:
            if provider_cls.dns_provider_name:
                name = provider_cls.dns_provider_name
        except AttributeError:
            # Class ships without a provider name; fall back to class name.
            print('{t.normal}No DNS provider name in class {t.bold}{name}'.format(t=Terminal(), name=name))
        providers[name] = provider_cls
    return providers
def should_renew_cert(cert_file):
    """Return True when *cert_file* exists and expires within two weeks.

    A missing certificate file yields False (a fresh request, not a
    renewal, is the caller's job in that case).
    """
    if not cert_file.exists():
        return False
    with open(cert_file, 'r') as stream:
        pem_data = stream.read()
    existing_cert = crypto.load_certificate(crypto.FILETYPE_PEM, pem_data)
    not_after = datetime.strptime(
        existing_cert.get_notAfter().decode("utf-8"), "%Y%m%d%H%M%SZ")
    # Renew when less than two weeks of validity remain.
    return not_after < datetime.now() + timedelta(weeks=2)
def get_certs(domain, cert_dir, dns_class, email):
    """Obtain or renew a Let's Encrypt certificate for *domain* via sewer.

    Writes certificate.crt, certificate.key and account.key into *cert_dir*.
    Returns True when a valid certificate is in place afterwards (including
    the case where the existing one is still fresh), False on any failure.

    NOTE(review): the *email* parameter is currently unused — presumably it
    was meant to be passed to sewer.Client as the ACME contact; TODO confirm.
    """
    try:
        create_dir(cert_dir)
        cert_file = cert_dir / 'certificate.crt'
        cert_key_file = cert_dir / 'certificate.key'
        account_key_file = cert_dir / 'account.key'
        renew = False
        account_key = None
        # Only when all three files exist do we consider a renewal; if the
        # existing certificate is still valid long enough, nothing to do.
        if cert_file.exists() and cert_key_file.exists() and account_key_file.exists():
            renew = should_renew_cert(cert_file)
            if not renew:
                return True
            with open(account_key_file, 'r') as stream:
                account_key = stream.read()
        # account_key is None for a brand-new request; sewer then creates one.
        client = sewer.Client(domain_name=domain, dns_class=dns_class, account_key=account_key)
        certificate = None
        if renew:
            print('{t.normal}Renewing certificate for {t.magenta}{t.bold}{domain}{t.normal}...'.format(
                t=Terminal(), domain=domain))
            certificate = client.renew()
        else:
            print('{t.normal}Requesting new certificate for {t.magenta}{t.bold}{domain}{t.normal}...'.format(
                t=Terminal(), domain=domain))
            certificate = client.cert()
        certificate_key = client.certificate_key
        # Persist certificate and private key.
        with open(cert_file, 'w') as f:
            f.write(certificate)
        with open(cert_key_file, 'w') as f:
            f.write(certificate_key)
        # Save the ACME account key the first time so later runs can renew.
        if account_key is None:
            account_key = client.account_key
            with open(account_key_file, 'w') as f:
                f.write(account_key)
        return True
    except Exception:
        # Deliberate best-effort: report the failure and let the caller
        # continue with the remaining domains.
        print('{t.normal}{t.bold}{t.red}Failed to get certificate for domain {domain}, due to error: {e}{t.normal}'.format(
            t=Terminal(), e=traceback.format_exc(), domain=domain))
        return False
def generation_comment(what, subject):
    """Return a standard "generated by revprox" banner line with a timestamp."""
    timestamp = datetime.now().strftime("%H:%M on %B %d, %Y")
    return '{w} for {s}, generated by revprox at {t}'.format(w=what, s=subject, t=timestamp)
def create_nginx_config_for_domain(domain, subdomains, subdomain_dir, forward_others, use_ssl, cert_dir):
    """Build the per-domain NGINX config.

    Includes each subdomain's config file and, when *forward_others* is
    set, appends a catch-all server that redirects any remaining
    (sub)domains there.
    """
    conf = nginx.Conf()
    conf.add(nginx.Comment(generation_comment('NGINX config', domain)))
    for name in subdomains:
        conf.add(nginx.Key('include', str(subdomain_dir / '{}.cfg'.format(name))))
    if forward_others is None:
        return conf
    # Catch-all server for every (sub)domain not explicitly configured.
    catch_all = nginx.Server()
    catch_all.add(
        nginx.Comment('Forward remaining (sub)domains to ' + forward_others),
        nginx.Key('server_name', '{domain} *.{domain}'.format(domain=domain)),
        nginx.Key('return', '302 {}$request_uri'.format(forward_others)),
        nginx.Key('listen', '80')
    )
    if use_ssl:
        catch_all.add(
            nginx.Comment('use_ssl = True'),
            nginx.Key('listen', '443 ssl'),
            nginx.Key('ssl', 'on'),
            nginx.Key('ssl_certificate', str(cert_dir / 'certificate.crt')),
            nginx.Key('ssl_certificate_key', str(cert_dir / 'certificate.key'))
        )
    conf.add(catch_all)
    return conf
def create_nginx_config_for_subdomain(domain, subdomain, destination, use_ssl, force_ssl, cert_dir):
    """Build the NGINX server block(s) that reverse-proxy one subdomain.

    When SSL is enforced, an extra plain-HTTP server is emitted that
    permanently redirects to HTTPS; otherwise the proxy server itself
    listens on port 80 (and additionally on 443 when SSL is enabled).
    """
    full_domain = '{sub}.{main}'.format(main=domain, sub=subdomain)
    conf = nginx.Conf()
    conf.add(nginx.Comment(generation_comment('NGINX config', full_domain)))
    if use_ssl and force_ssl:
        # Plain-HTTP listener that only redirects to HTTPS.
        redirect_server = nginx.Server()
        redirect_server.add(
            nginx.Comment('force_ssl = True'),
            nginx.Key('listen', '80'),
            nginx.Key('server_name', full_domain),
            nginx.Key('return', '301 https://$host$request_uri')
        )
        conf.add(redirect_server)
    proxy_server = nginx.Server()
    if not force_ssl:
        proxy_server.add(
            nginx.Comment('force_ssl = False'),
            nginx.Key('listen', '80')
        )
    scheme = 'https' if use_ssl else 'http'
    if use_ssl:
        proxy_server.add(
            nginx.Comment('use_ssl = True'),
            nginx.Key('listen', '443 ssl'),
            nginx.Key('ssl', 'on'),
            nginx.Key('ssl_certificate', str(cert_dir / 'certificate.crt')),
            nginx.Key('ssl_certificate_key', str(cert_dir / 'certificate.key'))
        )
    proxy_server.add(
        nginx.Key('server_name', full_domain),
        nginx.Location('/',
                       nginx.Key('proxy_set_header', 'Host $host'),
                       nginx.Key('proxy_set_header', 'X-Real-IP $remote_addr'),
                       nginx.Key('proxy_set_header', 'X-Forwarded-For $proxy_add_x_forwarded_for'),
                       nginx.Key('proxy_set_header', 'X-Forwarded-Proto $scheme'),
                       nginx.Key('proxy_set_header', 'Upgrade $http_upgrade'),
                       nginx.Key('proxy_set_header', 'Connection $connection_upgrade'),
                       nginx.Key('proxy_pass', destination),
                       nginx.Key('proxy_read_timeout', '90'),
                       nginx.Key('proxy_redirect',
                                 '{dst} {proto}://{full}'.format(dst=destination, full=full_domain, proto=scheme))
                       )
    )
    conf.add(proxy_server)
    return conf
# Command-line interface: refresh generated files from the config repository.
parser = argparse.ArgumentParser(
    description='Check if a new config is available from Git and update all files accordingly, if an update is available')
parser.add_argument('-f', '--force', dest='forced', action='store_true',
                    help='Force refresh of generated files.')
parser.add_argument('storage', help='Storage directory')
parser.set_defaults(forced=False)
args = parser.parse_args()
storage = Path(args.storage)
# The storage directory must pre-exist and be both readable and writable.
if not storage.exists() or not storage.is_dir() or not os.access(str(storage), os.R_OK) or not os.access(str(storage), os.W_OK):
    # Message fix: "does not exists" -> "does not exist".
    sys.exit('Storage directory does not exist or insufficient access.')
repo_path = storage / 'config'   # git checkout of the configuration repository
cert_path = storage / 'certs'    # issued certificates, one subdirectory per domain
nginx_path = storage / 'nginx'   # generated NGINX configuration files
# Check if an update is available
# Hard-reset the local checkout to the remote tracking branch; local edits
# to the config repository are intentionally discarded.
repo = Repo(str(repo_path))
old_hash = repo.head.object.hexsha
repo.remotes.origin.fetch()
repo.git.reset('--hard', repo.active_branch.tracking_branch().name)
new_hash = repo.head.object.hexsha
# Regenerate configs when forced via --force or when the repo advanced.
generate_config = args.forced
if old_hash != new_hash:
    generate_config = True
    print('{t.normal}Detected change on {t.bold}{t.yellow}{branch}{t.normal}. Updated from {t.bold}{t.magenta}{old}{t.normal} to {t.bold}{t.magenta}{new}{t.normal}.'.format(
        t=Terminal(), branch=repo.active_branch.name, old=old_hash, new=new_hash))
renew_certificates = False
if not generate_config:
    # Quick scan for certificates that should be renewed
    for cert in cert_path.glob('**/*.crt'):
        if should_renew_cert(cert):
            renew_certificates = True
            break
    if not renew_certificates:
        # No need to continue
        sys.exit()
# Read config file
config_file = repo_path / 'config.yml'
if not config_file.exists():
    sys.exit('File config.yml not found in repository. Please try again.')
config = None
with open(config_file, 'r') as stream:
    try:
        # safe_load: never execute arbitrary YAML tags from the repository.
        config = yaml.safe_load(stream)
    except yaml.YAMLError as exc:
        sys.exit('{t.normal}{t.bold}{t.red}Failed to load config, due to error: {e}{t.normal}'.format(
            t=Terminal(), e=exc))
if config is None:
    sys.exit('{t.normal}{t.bold}{t.red}Failed to load config.{t.normal}'.format(t=Terminal()))
# Uncomment the following line for development/debugging purposes
# pprint.pprint(repr(config))
# Process DNS providers: instantiate a sewer DNS class per configured provider.
dns_types = all_available_dns_types()
dns_providers = {}
for (provider, cfg) in config['dns'].items():
    try:
        dns_type = cfg['type']
        if dns_type not in dns_types:
            sys.exit('{t.normal}{t.bold}{t.red}Unknown DNS provider type: {type}. Available types: "{avail}".{t.normal}'.format(
                t=Terminal(), type=dns_type, avail='", "'.join(dns_types.keys())))
        dns_providers[provider] = dns_types[dns_type](**cfg['config'])
    except Exception:
        # Fix: the original bare "except:" also caught the SystemExit raised
        # by sys.exit() above, so the script kept running after a fatal
        # config error. "except Exception" keeps the best-effort behavior
        # for provider init failures while letting SystemExit propagate.
        print('{t.normal}Init DNS provider failed for {t.bold}{t.magenta}{provider}{t.normal}.\n{t.red}{error}{t.normal}'.format(
            t=Terminal(), provider=provider, error=traceback.format_exc()))
default_dns = 'default'
if default_dns not in dns_providers:
    if dns_providers:
        # Fix: dict views are not indexable in Python 3
        # (dns_providers.keys()[0] raised TypeError); take the first key
        # in insertion order instead.
        default_dns = next(iter(dns_providers))
    else:
        sys.exit(
            '{t.normal}{t.bold}{t.red}No valid DNS provider configuration!{t.normal}'.format(t=Terminal()))
print('{t.normal}Using DNS provider {t.bold}{t.magenta}{provider}{t.normal} as the default provider.'.format(
    t=Terminal(), provider=default_dns))
# Process domain configuration: per domain, prepare directories, obtain or
# renew certificates when SSL is enabled, and (re)generate NGINX configs.
domain_names = []
for (domain, cfg) in config['domains'].items():
    try:
        # Prepare directories
        domain_cert = cert_path / domain
        create_dir(domain_cert)
        domain_nginx = nginx_path / domain
        create_dir(domain_nginx)
        subdomain_nginx = domain_nginx / 'subdomains'
        create_dir(subdomain_nginx)
        # Create / refresh certificates
        use_ssl = False
        force_ssl = False
        if 'ssl' in cfg and 'enabled' in cfg['ssl'] and cfg['ssl']['enabled']:
            use_ssl = True
            force_ssl = ('forced' in cfg['ssl'] and cfg['ssl']['forced'])
            if 'email' not in cfg['ssl']:
                print('{t.normal}{t.red}{t.bold}If you wish to use SSL for domain {domain}, you MUST configure an "email".{t.normal}'.format(
                    t=Terminal(), domain=domain))
                continue
            ssl_email = cfg['ssl']['email']
            # A wildcard certificate covers every subdomain of this domain.
            cert_domain = '*.{domain}'.format(domain=domain)
            dns_class = dns_providers[default_dns]
            if 'dns' in cfg:
                dns_key = cfg['dns']
                if dns_key in dns_providers:
                    # Fix: was "dns_provider[dns_key]" — an undefined name
                    # (NameError) whenever a per-domain DNS provider was set.
                    dns_class = dns_providers[dns_key]
                else:
                    print('{t.normal}{t.red}{t.bold}Domain "{domain}" is configured to use DNS provider "{dns}", but it is not found or not properly configured.{t.normal}'.format(
                        t=Terminal(), domain=domain, dns=dns_key))
                    continue
            if not get_certs(cert_domain, domain_cert, dns_class, ssl_email):
                print('{t.normal}{t.red}{t.bold}Failed to get certificates for "{domain}".{t.normal}'.format(
                    t=Terminal(), domain=domain))
                continue
        if generate_config:
            # NGINX config: one file per subdomain, then a main include file.
            subdomains = []
            for (subdomain, destination) in cfg['subdomains'].items():
                sub_cfg = create_nginx_config_for_subdomain(
                    domain, subdomain, destination, use_ssl, force_ssl, domain_cert)
                nginx.dumpf(sub_cfg, str(subdomain_nginx / '{}.cfg'.format(subdomain)))
                subdomains.append(subdomain)
            # Forward others?
            forward_others = None
            if 'forward_others' in cfg and cfg['forward_others']:
                forward_others = cfg['forward_others']
            main_cfg = create_nginx_config_for_domain(
                domain, subdomains, subdomain_nginx, forward_others, use_ssl, domain_cert)
            nginx.dumpf(main_cfg, str(domain_nginx / 'main.cfg'))
        domain_names.append(domain)
    except Exception:
        # Narrowed from a bare "except:"; per-domain failures are reported
        # and the remaining domains are still processed.
        print('{t.normal}Processing failed for domain {t.bold}{t.magenta}{domain}{t.normal}.\n{t.red}{error}{t.normal}'.format(
            t=Terminal(), domain=domain, error=traceback.format_exc()))
# Generate main revprox NGINX config file
if generate_config:
    rp_config = nginx.Conf()
    # WebSocket support: map $http_upgrade onto the Connection header value.
    # (Renamed from "map", which shadowed the builtin.)
    upgrade_map = nginx.Map('$http_upgrade $connection_upgrade')
    upgrade_map.add(
        nginx.Key('default', 'upgrade'),
        nginx.Key('\'\'', 'close')
    )
    rp_config.add(
        nginx.Comment(generation_comment('Main configuration', 'NGINX')),
        nginx.Comment('This file needs to be included in your NGINX configuration.'),
        upgrade_map
    )
    for domain in domain_names:
        rp_config.add(nginx.Key('include', str(nginx_path / domain / 'main.cfg')))
    nginx.dumpf(rp_config, str(nginx_path / 'revprox.cfg'))
    # Clean up old, unused configuration files
    # TODO clean up
# Validate new configuration
nginx_exec = which('nginx')
if nginx_exec is not None:
    if os.system('{exec} -t'.format(exec=nginx_exec)) > 0:
        sys.exit('{t.normal}NGINX config {t.red}{t.bold}INVALID{t.normal} - {t.bold}Please fix this manually!{t.normal}'.format(t=Terminal()))
# Check if NGINX will use configuration
# TODO create check
# Restart NGINX with new configuration
if generate_config or renew_certificates:
    is_restarted = False
    # - FreeBSD (and possibly others)
    service_manager = which('service')
    if service_manager is not None:
        exit_code = os.system('{program} nginx restart'.format(program=service_manager))
        is_restarted = exit_code == 0
    if is_restarted:
        print('{t.normal}Restart NGINX: {t.green}{t.bold}SUCCESS{t.normal}'.format(t=Terminal()))
    else:
        print('{t.normal}Restart NGINX: {t.red}{t.bold}FAILED{t.normal} - {t.bold}Please restart NGINX manually!{t.normal}'.format(t=Terminal()))
|
import io
import zipfile
from datetime import date
from unittest.mock import patch
import pytest
from applications.enums import ApplicationStatus, BenefitType
from applications.services.ahjo_integration import (
export_application_batch,
generate_composed_files,
generate_single_approved_file,
generate_single_declined_file,
)
from applications.tests.factories import ApplicationFactory, DecidedApplicationFactory
from calculator.models import Calculation
from calculator.tests.factories import PaySubsidyFactory
from companies.tests.factories import CompanyFactory
from helsinkibenefit.tests.conftest import * # noqa
def _assert_html_content(html, include_keys=(), excluded_keys=()):
for k in include_keys:
assert k in html
for k in excluded_keys:
assert k not in html
@pytest.mark.parametrize(
    "company_type, de_minimis_aid",
    [
        ("ry", False),
        ("oy", False),
        ("oy", True),
    ],
)
@patch("applications.services.ahjo_integration.pdfkit.from_string")
def test_generate_single_approved_template_html(
    mock_pdf_convert, company_type, de_minimis_aid
):
    """The approved-decisions HTML lists every accepted application.

    Parametrized over company form and de minimis aid; PDF conversion is
    mocked out so only the generated HTML is inspected.
    """
    mock_pdf_convert.return_value = {}
    company = CompanyFactory(company_form=company_type)
    apps = DecidedApplicationFactory.create_batch(
        3,
        company=company,
        de_minimis_aid=de_minimis_aid,
        status=ApplicationStatus.ACCEPTED,
    )
    # Give each application a concrete benefit amount so it renders.
    for app in apps:
        app.calculation.calculated_benefit_amount = 1000
        app.calculation.save()
    # Only assert html content for easier comparison
    _, _, html = generate_single_approved_file(apps[0].company, apps)
    for app in apps:
        _assert_html_content(
            html,
            (
                app.ahjo_application_number,
                app.employee.first_name,
                app.employee.last_name,
            ),
        )
@patch("applications.services.ahjo_integration.pdfkit.from_string")
def test_generate_single_declined_template_html(mock_pdf_convert):
    """The declined-decisions HTML lists every rejected application.

    PDF conversion is mocked out so only the generated HTML is inspected.
    """
    mock_pdf_convert.return_value = {}
    company = CompanyFactory()
    apps = ApplicationFactory.create_batch(
        3, company=company, status=ApplicationStatus.REJECTED
    )
    # Only assert html content for easier comparison
    _, _, html = generate_single_declined_file(apps[0].company, apps)
    for app in apps:
        _assert_html_content(
            html,
            (
                app.ahjo_application_number,
                app.employee.first_name,
                app.employee.last_name,
            ),
        )
@patch("applications.services.ahjo_integration.pdfkit.from_string")
def test_generate_composed_template_html(mock_pdf_convert):
    """Composed files separate accepted/rejected apps and public/private views.

    Expects three files: a public accepted list (no employee names), a
    private accepted list, and a private rejected list — each containing
    only its own applications' numbers.
    """
    mock_pdf_convert.return_value = {}
    accepted_app_1 = DecidedApplicationFactory(
        status=ApplicationStatus.ACCEPTED,
        start_date=date.today(),
    )
    accepted_app_1.calculation.calculated_benefit_amount = 1000
    accepted_app_1.calculation.save()
    accepted_app_2 = DecidedApplicationFactory(
        status=ApplicationStatus.ACCEPTED,
        start_date=date.today(),
    )
    accepted_app_2.calculation.calculated_benefit_amount = 1000
    accepted_app_2.calculation.save()
    rejected_app_1 = DecidedApplicationFactory(
        status=ApplicationStatus.REJECTED, start_date=date.today()
    )
    rejected_app_2 = DecidedApplicationFactory(
        status=ApplicationStatus.REJECTED, start_date=date.today()
    )
    # Only assert html content for easier comparison
    files = generate_composed_files(
        [accepted_app_1, accepted_app_2], [rejected_app_1, rejected_app_2]
    )
    assert len(files) == 3
    # files[0]: Public accepted composed files
    # files[1]: Private accepted composed files
    # files[2]: Private rejected composed files
    # Public accepted file: application numbers only, no employee names.
    _assert_html_content(
        files[0][2],
        (
            accepted_app_1.ahjo_application_number,
            accepted_app_2.ahjo_application_number,
        ),
        (
            rejected_app_1.ahjo_application_number,
            rejected_app_2.ahjo_application_number,
            accepted_app_1.employee.first_name,
            accepted_app_2.employee.first_name,
        ),
    )
    _assert_html_content(
        files[1][2],
        (
            accepted_app_1.ahjo_application_number,
            accepted_app_2.ahjo_application_number,
        ),
        (rejected_app_1.ahjo_application_number,),
    )
    _assert_html_content(
        files[2][2],
        (
            rejected_app_1.ahjo_application_number,
            rejected_app_2.ahjo_application_number,
        ),
        (
            accepted_app_1.ahjo_application_number,
            accepted_app_2.ahjo_application_number,
        ),
    )
def test_export_application_batch(application_batch):
    """The exported ZIP has one entry per non-cancelled application plus
    the composed files.

    The "+ 3" matches the three composed files produced alongside the
    per-application files (cancelled applications are excluded entirely).
    """
    application_batch.applications.add(
        DecidedApplicationFactory.create(
            status=ApplicationStatus.ACCEPTED,
            calculation__calculated_benefit_amount=1000,
        )
    )
    application_batch.applications.add(
        DecidedApplicationFactory.create(status=ApplicationStatus.REJECTED)
    )
    application_batch.applications.add(
        DecidedApplicationFactory.create(status=ApplicationStatus.CANCELLED)
    )
    zip_file = export_application_batch(application_batch)
    file_like_object = io.BytesIO(zip_file)
    archive = zipfile.ZipFile(file_like_object)
    assert (
        len(archive.infolist())
        == application_batch.applications.exclude(
            status=ApplicationStatus.CANCELLED
        ).count()
        + 3
    )
@patch("applications.services.ahjo_integration.pdfkit.from_string")
def test_multiple_benefit_per_application(mock_pdf_convert):
    """An application whose pay subsidy covers only part of the benefit
    period produces two rows in the approved-decisions report.

    Expected figures ("691", "340", "1600", "800") come from the
    calculator's reference spreadsheet noted below.
    """
    mock_pdf_convert.return_value = {}
    # Test case data and expected results collected from
    # calculator/tests/Helsinki-lisä laskurin testitapaukset.xlsx/ Sheet Palkan Helsinki-lisä / Column E
    application = ApplicationFactory(
        association_has_business_activities=True,
        company__company_form="ry",
        start_date=date(2021, 7, 10),
        end_date=date(2021, 11, 10),
        status=ApplicationStatus.RECEIVED,
        benefit_type=BenefitType.SALARY_BENEFIT,
    )
    application.calculation = Calculation(
        application=application,
        monthly_pay=3200,
        vacation_money=0,
        other_expenses=200,
        start_date=application.start_date,
        end_date=application.end_date,
        state_aid_max_percentage=50,
        calculated_benefit_amount=0,
        override_benefit_amount=None,
    )
    # Pay subsidy ends before the application period does, splitting the
    # benefit into two differently-priced spans.
    pay_subsidy = PaySubsidyFactory(
        pay_subsidy_percent=40, start_date=date(2021, 7, 10), end_date=date(2021, 9, 10)
    )
    application.pay_subsidies.add(pay_subsidy)
    application.save()
    application.calculation.save()
    application.refresh_from_db()
    application.calculation.init_calculator()
    application.calculation.calculate()
    _, _, html = generate_single_approved_file(application.company, [application])
    assert (
        html.count(application.ahjo_application_number) == 2
    )  # Make sure there are two rows in the report
    _assert_html_content(
        html,
        (
            application.ahjo_application_number,
            application.employee.first_name,
            application.employee.last_name,
            "691",
            "340",
            "1600",
            "800",
        ),
    )
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 12 12:09:37 2017
@author: alxgr
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import RectBivariateSpline as interpol2D
from scipy.interpolate import griddata
from scipy.interpolate import LinearNDInterpolator
'''
################################################################################
'''
class ValueIteration_2D:
    """Dynamic programming (value iteration) for a 2D continuous dynamic
    system with one continuous input u, discretized on a grid.
    """

    ############################
    def __init__(self, dDS, cost_function):
        """Store the discretized system *dDS* and the cost function."""
        # Dynamic system
        self.dDS = dDS    # Discretized Dynamic system class
        self.DS = dDS.DS  # Base Dynamic system class
        # Cost function (provides g(x,u), terminal h(x) and the INF sentinel)
        self.CF = cost_function
        # Options
        self.uselookuptable = True

    ##############################
    def initialize(self):
        """Allocate cost-to-go / policy grids and seed J with the final cost h(x)."""
        self.J = np.zeros(self.dDS.xgriddim, dtype=float)
        self.action_policy = np.zeros(self.dDS.xgriddim, dtype=int)
        self.Jnew = self.J.copy()
        self.Jplot = self.J.copy()
        # Initial evaluation: terminal cost at every state node
        for node in range(self.dDS.nodes_n):
            x = self.dDS.nodes_state[node, :]
            i = self.dDS.nodes_index[node, 0]
            j = self.dDS.nodes_index[node, 1]
            # Final Cost
            self.J[i, j] = self.CF.h(x)

    ###############################
    def compute_step(self):
        """Perform one Bellman backup over all state nodes and actions."""
        # Linear interpolation of the current cost-to-go over the state grid
        J_interpol = interpol2D(self.dDS.xd[0], self.dDS.xd[1], self.J,
                                bbox=[None, None, None, None], kx=1, ky=1,)
        # For all state nodes
        for node in range(self.dDS.nodes_n):
            x = self.dDS.nodes_state[node, :]
            i = self.dDS.nodes_index[node, 0]
            j = self.dDS.nodes_index[node, 1]
            # One-step costs - Q values
            Q = np.zeros(self.dDS.actions_n)
            # For all control actions
            for action in range(self.dDS.actions_n):
                u = self.dDS.actions_input[action, :]
                # Compute next state and validity of the action
                if self.uselookuptable:
                    x_next = self.dDS.x_next[node, action, :]
                    action_isok = self.dDS.action_isok[node, action]
                else:
                    # NOTE(review): self.dt is never assigned on this class;
                    # this branch (uselookuptable=False) would raise
                    # AttributeError — presumably the time step should come
                    # from the discretized system. TODO confirm.
                    x_next = self.DS.fc(x, u) * self.dt + x
                    x_ok = self.DS.isavalidstate(x_next)
                    u_ok = self.DS.isavalidinput(x, u)
                    action_isok = (u_ok & x_ok)
                # If the current option is allowable
                if action_isok:
                    J_next = J_interpol(x_next[0], x_next[1])
                    # Cost-to-go of a given action
                    Q[action] = self.CF.g(x, u) + J_next[0, 0]
                else:
                    # Not allowable states or inputs/states combinations
                    Q[action] = self.CF.INF
            self.Jnew[i, j] = Q.min()
            self.action_policy[i, j] = Q.argmin()
            # Impossible situation (unacceptable for any control action)
            if self.Jnew[i, j] > (self.CF.INF - 1):
                self.action_policy[i, j] = -1
        # Convergence check
        delta = self.J - self.Jnew
        j_max = self.Jnew.max()
        delta_max = delta.max()
        delta_min = delta.min()
        print('Max:', j_max, 'Delta max:', delta_max, 'Delta min:', delta_min)
        self.J = self.Jnew.copy()

    ################################
    def compute_steps(self, l=50, plot=False):
        """Run *l* value-iteration steps (``plot`` is accepted but unused)."""
        for i in range(l):
            print('Step:', i)
            self.compute_step()

    ################################
    def plot_J(self):
        """Plot the current cost-to-go surface as a colored mesh."""
        xname = self.DS.state_label[0] + ' ' + self.DS.state_units[0]
        yname = self.DS.state_label[1] + ' ' + self.DS.state_units[1]
        self.Jplot = self.J.copy()
        ###################
        fs = 10
        self.fig1 = plt.figure(figsize=(4, 4), dpi=300, frameon=True)
        self.fig1.canvas.set_window_title('Cost-to-go')
        self.ax1 = self.fig1.add_subplot(1, 1, 1)
        plt.ylabel(yname, fontsize=fs)
        plt.xlabel(xname, fontsize=fs)
        self.im1 = plt.pcolormesh(self.dDS.xd[0], self.dDS.xd[1], self.Jplot.T)
        plt.axis([self.DS.x_lb[0], self.DS.x_ub[0], self.DS.x_lb[1], self.DS.x_ub[1]])
        plt.colorbar()
        plt.grid(True)
        plt.tight_layout()

    ################################
    def plot_policy(self, i=0):
        """Plot the optimal-policy grid for input component *i*.

        Requires assign_interpol_controller() to have built u_policy_grid.
        """
        xname = self.DS.state_label[0] + ' ' + self.DS.state_units[0]
        yname = self.DS.state_label[1] + ' ' + self.DS.state_units[1]
        policy_plot = self.u_policy_grid[i].copy()
        ###################
        fs = 10
        self.fig1 = plt.figure(figsize=(4, 4), dpi=300, frameon=True)
        self.fig1.canvas.set_window_title('Policy for u[%i]' % i)
        self.ax1 = self.fig1.add_subplot(1, 1, 1)
        plt.ylabel(yname, fontsize=fs)
        plt.xlabel(xname, fontsize=fs)
        self.im1 = plt.pcolormesh(self.dDS.xd[0], self.dDS.xd[1], policy_plot.T)
        plt.axis([self.DS.x_lb[0], self.DS.x_ub[0], self.DS.x_lb[1], self.DS.x_ub[1]])
        plt.colorbar()
        plt.grid(True)
        plt.tight_layout()

    ################################
    def assign_interpol_controller(self):
        """Build an interpolated feedback controller from the optimal actions
        and install it on the dynamic system (self.DS.ctl)."""
        # Compute grid of u
        self.u_policy_grid = [None]
        self.u_policy_grid[0] = np.zeros(self.dDS.xgriddim, dtype=float)
        # For all state nodes
        for node in range(self.dDS.nodes_n):
            i = self.dDS.nodes_index[node, 0]
            j = self.dDS.nodes_index[node, 1]
            if (self.action_policy[i, j] == -1):
                # No feasible action at this node: command zero input
                self.u_policy_grid[0][i, j] = 0
            else:
                self.u_policy_grid[0][i, j] = self.dDS.actions_input[self.action_policy[i, j], 0]
        # Compute interpolation function over the policy grid
        self.x2u0 = interpol2D(self.dDS.xd[0], self.dDS.xd[1], self.u_policy_grid[0],
                               bbox=[None, None, None, None], kx=1, ky=1,)
        # Assign controller
        self.DS.ctl = self.ctl_interpol

    ################################
    def ctl_interpol(self, x, t=0):
        """Feedback law u(x) from the interpolated optimal policy."""
        u = np.zeros(self.DS.m)
        u[0] = self.x2u0(x[0], x[1])
        return u

    ################################
    def load_data(self, name='DP_data'):
        """Load a previously saved cost-to-go and policy (best effort)."""
        try:
            self.J = np.load(name + '_J' + '.npy')
            self.action_policy = np.load(name + '_a' + '.npy').astype(int)
        except (OSError, ValueError):
            # Narrowed from a bare "except:"; np.load raises OSError for a
            # missing/unreadable file and ValueError for a bad file format.
            print('Failed to load DP data ')

    ################################
    def save_data(self, name='DP_data'):
        """Save the cost-to-go and policy arrays to .npy files."""
        np.save(name + '_J', self.J)
        np.save(name + '_a', self.action_policy.astype(int))
'''
################################################################################
'''
class ValueIteration_3D:
    """Dynamic programming (value iteration) for a 3D continuous dynamic
    system with 2 continuous inputs u, discretized on a grid.
    """

    ############################
    def __init__(self, dDS, cost_function):
        """Store the discretized system *dDS* and the cost function."""
        # Dynamic system
        self.dDS = dDS    # Discretized Dynamic system class
        self.DS = dDS.DS  # Base Dynamic system class
        # Cost function (provides g(x,u), terminal h(x) and the INF sentinel)
        self.CF = cost_function
        # Options
        self.uselookuptable = False

    ##############################
    def initialize(self):
        """Allocate cost-to-go / policy grids and seed J with the final cost h(x)."""
        self.J = np.zeros(self.dDS.xgriddim, dtype=float)
        self.J_1D = np.zeros(self.dDS.nodes_n, dtype=float)
        self.action_policy = np.zeros(self.dDS.xgriddim, dtype=int)
        self.Jnew = self.J.copy()
        self.J_1D_new = self.J_1D.copy()
        self.Jplot = self.J.copy()
        # Initial evaluation
        # For all state nodes
        for node in range(self.dDS.nodes_n):
            x = self.dDS.nodes_state[node, :]
            i = self.dDS.nodes_index[node, 0]
            j = self.dDS.nodes_index[node, 1]
            k = self.dDS.nodes_index[node, 2]
            # Final cost. Fix: the original assigned h(x) to "j", clobbering
            # the grid index computed above and then indexing J with a float.
            cost = self.CF.h(x)
            self.J[i, j, k] = cost
            self.J_1D[node] = cost

    ###############################
    def compute_step(self):
        """Perform one Bellman backup over all state nodes and actions."""
        # Scattered linear interpolation of the current cost-to-go
        cartcoord = self.dDS.nodes_state
        values = self.J_1D
        J_interpol = LinearNDInterpolator(cartcoord, values, fill_value=0)
        # For all state nodes
        for node in range(self.dDS.nodes_n):
            x = self.dDS.nodes_state[node, :]
            i = self.dDS.nodes_index[node, 0]
            j = self.dDS.nodes_index[node, 1]
            # Fix: third grid index is column 2, not 3 (nodes_index has
            # exactly three columns for a 3D state).
            k = self.dDS.nodes_index[node, 2]
            # One-step costs - Q values
            Q = np.zeros(self.dDS.actions_n)
            # For all control actions
            for action in range(self.dDS.actions_n):
                u = self.dDS.actions_input[action, :]
                # Compute next state and validity of the action.
                # NOTE(review): self.dt is never assigned on this class;
                # presumably the time step should come from the discretized
                # system. TODO confirm before running compute_step.
                x_next = self.DS.fc(x, u) * self.dt + x
                x_ok = self.DS.isavalidstate(x_next)
                u_ok = self.DS.isavalidinput(x, u)
                action_isok = (u_ok & x_ok)
                # If the current option is allowable
                if action_isok:
                    # NOTE(review): indexing J_next with [0,0] mirrors the 2D
                    # class; verify against LinearNDInterpolator's return
                    # shape for a single query point.
                    J_next = J_interpol(x_next)
                    # Cost-to-go of a given action
                    Q[action] = self.CF.g(x, u) + J_next[0, 0]
                else:
                    # Not allowable states or inputs/states combinations
                    Q[action] = self.CF.INF
            self.Jnew[i, j, k] = Q.min()
            self.J_1D_new[node] = self.Jnew[i, j, k]
            self.action_policy[i, j, k] = Q.argmin()
            # Impossible situation (unacceptable for any control action)
            if self.Jnew[i, j, k] > (self.CF.INF - 1):
                self.action_policy[i, j, k] = -1
        # Convergence check
        delta = self.J - self.Jnew
        j_max = self.Jnew.max()
        delta_max = delta.max()
        delta_min = delta.min()
        print('Max:', j_max, 'Delta max:', delta_max, 'Delta min:', delta_min)
        self.J = self.Jnew.copy()
        self.J_1D = self.J_1D_new.copy()

    ################################
    def compute_steps(self, l=50, plot=False):
        """Run *l* value-iteration steps (``plot`` is accepted but unused)."""
        for i in range(l):
            print('Step:', i)
            self.compute_step()

    ################################
    def plot_J_ij(self, k):
        """Plot the cost-to-go slice J[:, :, k] as a colored mesh."""
        xname = self.DS.state_label[0] + ' ' + self.DS.state_units[0]
        yname = self.DS.state_label[1] + ' ' + self.DS.state_units[1]
        # Fix: the original sliced with an undefined name "i"; the slice
        # index is this method's parameter k.
        self.Jplot = self.J[:, :, k].copy()
        ###################
        fs = 10
        self.fig1 = plt.figure(figsize=(4, 4), dpi=300, frameon=True)
        self.fig1.canvas.set_window_title('Cost-to-go')
        self.ax1 = self.fig1.add_subplot(1, 1, 1)
        plt.ylabel(yname, fontsize=fs)
        plt.xlabel(xname, fontsize=fs)
        self.im1 = plt.pcolormesh(self.dDS.xd[0], self.dDS.xd[1], self.Jplot.T)
        plt.axis([self.DS.x_lb[0], self.DS.x_ub[0], self.DS.x_lb[1], self.DS.x_ub[1]])
        plt.colorbar()
        plt.grid(True)
        plt.tight_layout()

    ################################
    def plot_policy_ij(self, k, i=0):
        """Plot the policy slice for input component *i* at grid layer *k*."""
        xname = self.DS.state_label[0] + ' ' + self.DS.state_units[0]
        yname = self.DS.state_label[1] + ' ' + self.DS.state_units[1]
        policy_plot = self.u_policy_grid[i][:, :, k].copy()
        ###################
        fs = 10
        self.fig1 = plt.figure(figsize=(4, 4), dpi=300, frameon=True)
        self.fig1.canvas.set_window_title('Policy for u[%i]' % i)
        self.ax1 = self.fig1.add_subplot(1, 1, 1)
        plt.ylabel(yname, fontsize=fs)
        plt.xlabel(xname, fontsize=fs)
        self.im1 = plt.pcolormesh(self.dDS.xd[0], self.dDS.xd[1], policy_plot.T)
        plt.axis([self.DS.x_lb[0], self.DS.x_ub[0], self.DS.x_lb[1], self.DS.x_ub[1]])
        plt.colorbar()
        plt.grid(True)
        plt.tight_layout()

    ################################
    def assign_interpol_controller(self):
        """Build an interpolated feedback controller from the optimal actions.

        NOTE(review): this method looks copied from the 2D class — it indexes
        the 3D action_policy with only [i, j] and builds a 2D interpolation,
        which does not match the 3D grids above. TODO rework for 3D before use.
        """
        # Compute grid of u
        self.u_policy_grid = [None]
        self.u_policy_grid[0] = np.zeros(self.dDS.xgriddim, dtype=float)
        # For all state nodes
        for node in range(self.dDS.nodes_n):
            i = self.dDS.nodes_index[node, 0]
            j = self.dDS.nodes_index[node, 1]
            if (self.action_policy[i, j] == -1):
                self.u_policy_grid[0][i, j] = 0
            else:
                self.u_policy_grid[0][i, j] = self.dDS.actions_input[self.action_policy[i, j], 0]
        # Compute interpolation function
        self.x2u0 = interpol2D(self.dDS.xd[0], self.dDS.xd[1], self.u_policy_grid[0],
                               bbox=[None, None, None, None], kx=1, ky=1,)
        # Assign controller
        self.DS.ctl = self.ctl_interpol

    ################################
    def ctl_interpol(self, x, t=0):
        """Feedback law u(x) from the interpolated optimal policy."""
        u = np.zeros(self.DS.m)
        u[0] = self.x2u0(x[0], x[1])
        return u

    ################################
    def load_data(self, name='DP_data'):
        """Load a previously saved cost-to-go and policy (best effort)."""
        try:
            self.J = np.load(name + '_J' + '.npy')
            self.action_policy = np.load(name + '_a' + '.npy').astype(int)
        except (OSError, ValueError):
            # Narrowed from a bare "except:"; np.load raises OSError for a
            # missing/unreadable file and ValueError for a bad file format.
            print('Failed to load DP data ')

    ################################
    def save_data(self, name='DP_data'):
        """Save the cost-to-go and policy arrays to .npy files."""
        np.save(name + '_J', self.J)
        np.save(name + '_a', self.action_policy.astype(int))
|
<filename>DirectoryCreate/DirectoryCreateEngine.py
"""
AyxPlugin (required) has-a IncomingInterface (optional).
Although defining IncomingInterface is optional, the interface methods are needed if an upstream tool exists.
"""
import os, sys
os.environ['PATH'] = r'C:\program files\Alteryx\bin;' + os.environ['PATH']
sys.path.insert(1, r'C:\program files\Alteryx\bin\plugins')
import AlteryxPythonSDK as Sdk
import xml.etree.ElementTree as Et
import os
class AyxPlugin:
    """
    Implements the Alteryx engine plugin interface ("pi" methods).

    The Alteryx engine instantiates this class once per tool instance and
    drives it through the five pi_* interface methods below.
    """

    def __init__(self, n_tool_id: int, alteryx_engine: object, output_anchor_mgr: object):
        """
        Called whenever the Alteryx engine instantiates this plugin.

        :param n_tool_id: Unique identifier for this tool instance; saved so
            later engine calls can reference this tool.
        :param alteryx_engine: Interface for communicating with the Alteryx engine.
        :param output_anchor_mgr: Wraps the tool's outgoing connections; anchors
            are looked up by the names declared in the tool's Config.xml.
        """
        # Default properties
        self.n_tool_id = n_tool_id
        self.alteryx_engine = alteryx_engine
        self.output_anchor_mgr = output_anchor_mgr
        # Custom properties
        self.is_initialized = True
        self.single_input = None
        self.rootFolderField = None    # name of the field holding the root folder path
        self.targetFolderField = None  # name of the field holding the sub-folder to create
        self.record_info_inbound = None
        self.record_info_outbound = None

    def pi_init(self, str_xml: str):
        """
        Receives the tool configuration from the GUI. Required method.

        Called when the tool is first initialized and any time the
        configuration changes.

        :param str_xml: The raw configuration XML from the GUI.
        """
        # Parse once instead of re-parsing the XML for every lookup.
        config = Et.fromstring(str_xml)
        root_field_node = config.find('rootFolderFieldSelect')
        if root_field_node is not None:
            self.rootFolderField = root_field_node.text
        target_field_node = config.find('targetFolderFieldSelect')
        if target_field_node is not None:
            self.targetFolderField = target_field_node.text
        # Let the user know of the necessary selections, if they are missing.
        if self.rootFolderField is None:
            self.alteryx_engine.output_message(self.n_tool_id, Sdk.EngineMessageType.error, 'Please select the root folder field')
        if self.targetFolderField is None:
            self.alteryx_engine.output_message(self.n_tool_id, Sdk.EngineMessageType.error, 'Please select the target folder field')
        # Anchor names must match those declared in the Config.xml file.
        self.success_output_anchor = self.output_anchor_mgr.get_output_anchor('Success')
        self.error_output_anchor = self.output_anchor_mgr.get_output_anchor('Error')
        self.alteryx_engine.output_message(self.n_tool_id, Sdk.EngineMessageType.info, 'Completed mapping the anchors and inputs')

    def pi_close(self, b_has_errors: bool):
        """
        Called after all records have been processed. Required method.

        :param b_has_errors: True when the engine wants final processing skipped.
        """
        self.alteryx_engine.output_message(self.n_tool_id, Sdk.EngineMessageType.info, self.xmsg('Method: pi_close: starting'))
        # Verify that both outgoing connections were properly closed.
        self.success_output_anchor.assert_close()
        self.error_output_anchor.assert_close()
        self.alteryx_engine.output_message(self.n_tool_id, Sdk.EngineMessageType.info, self.xmsg('Method: pi_close: ending'))

    def pi_add_incoming_connection(self, str_type: str, str_name: str) -> object:
        """
        Called when the Alteryx engine attaches an incoming data connection.

        :param str_type: Name of the input anchor, defined in the Config.xml file.
        :param str_name: Name of the wire, defined by the workflow author.
        :return: The IncomingInterface object handling this connection.
        """
        self.alteryx_engine.output_message(self.n_tool_id, Sdk.EngineMessageType.info, self.xmsg('Method: pi_add_incoming_connection: starting'))
        # Keep a reference so other pi_* methods can reach the interface later.
        self.single_input = IncomingInterface(self)
        self.alteryx_engine.output_message(self.n_tool_id, Sdk.EngineMessageType.info, self.xmsg('Method: pi_add_incoming_connection: ending'))
        return self.single_input

    def pi_add_outgoing_connection(self, str_name: str) -> bool:
        """
        Called when the Alteryx engine attaches an outgoing data connection.

        :param str_name: Name of the output anchor, defined in the Config.xml file.
        :return: True to signify that the connection is accepted.
        """
        return True

    def pi_push_all_records(self, n_record_limit: int) -> bool:
        """
        Called when the tool has no incoming data connection.

        :param n_record_limit: <0 for no limit, 0 for no records, >0 for a
            specific record count.
        :return: True for success, False for failure.
        """
        if not self.is_initialized:
            return False
        # This tool is input-driven, so there is nothing to generate here.
        # Bug fix: the original returned the undefined name `true` (a NameError
        # at runtime) and pointlessly reassigned n_record_limit first.
        return True

    def xmsg(self, msg_string: str) -> str:
        """
        Non-operational placeholder for eventual localization of
        user-facing strings.

        :param msg_string: The user-facing string.
        :return: msg_string unchanged.
        """
        return msg_string
class IncomingInterface:
    """
    Returned by pi_add_incoming_connection; implements the incoming ("ii")
    interface methods the Alteryx engine calls while processing an incoming
    connection. For each inbound record it attempts to create the requested
    folder and routes the record to the Success or Error output anchor.
    """

    def __init__(self, parent: object):
        """
        Constructor for IncomingInterface.

        :param parent: The owning AyxPlugin instance.
        """
        # Default properties
        self.parent = parent
        # Custom properties
        self.record_info_inbound = None   # RecordInfo for the inbound data stream
        self.record_info_out = None       # RecordInfo for the outbound data stream
        self.record_copier = None         # copies unchanged inbound fields to the output
        self.record_creator = None        # builds each outbound record
        self.fldCreationResult = None     # output field: "True"/"False" creation status
        self.fldCreationDescr = None      # output field: detailed status / error message

    def ii_init(self, record_info_in: object) -> bool:
        """
        Reports the incoming connection's record metadata and prepares the
        outgoing metadata: the inbound layout plus two status fields.

        :param record_info_in: RecordInfo for the incoming connection's fields.
        :return: True for success, otherwise False.
        """
        self.parent.record_info_inbound = record_info_in
        self.record_info_inbound = record_info_in
        # Outbound structure = inbound structure + 2 new status fields.
        self.record_info_out = self.record_info_inbound.clone()
        self.fldCreationResult = self.record_info_out.add_field(
            field_name= 'FolderCreationResult',
            field_type=Sdk.FieldType.v_wstring,
            size=200,
            source='DirectoryCreate Tool - ID: ' + str(self.parent.n_tool_id),
            description='Result of folder creation')
        self.fldCreationDescr = self.record_info_out.add_field(
            field_name= 'FolderCreationDescription',
            field_type=Sdk.FieldType.v_wstring,
            size=200,
            source='DirectoryCreate Tool - ID: ' + str(self.parent.n_tool_id),
            description='Detailed status / error message for folder creation')
        # Field handles for the user-selected root/target folder columns.
        self.fldRootFolder = self.record_info_out[self.record_info_out.get_field_num(self.parent.rootFolderField)]
        self.fldTargetFolder = self.record_info_out[self.record_info_out.get_field_num(self.parent.targetFolderField)]
        # Tell downstream tools what the outgoing record metadata looks like.
        self.parent.success_output_anchor.init(self.record_info_out)
        self.parent.error_output_anchor.init(self.record_info_out)
        # Create a record creator from the new RecordInfo structure.
        self.record_creator = self.record_info_out.construct_record_creator()
        # Then create a record copier and map each inbound field straight
        # through; the 2 additional fields are filled per-record later.
        self.record_copier = Sdk.RecordCopier(self.record_info_out, self.record_info_inbound)
        for index in range(self.record_info_inbound.num_fields):
            self.record_copier.add(index, index)
        self.record_copier.done_adding()
        return True

    def ii_push_record(self, in_record: object) -> bool:
        """
        Called for each incoming record: copies it, attempts the folder
        creation, fills the two status fields and pushes the record to the
        Success or Error anchor accordingly.

        :param in_record: The data for the incoming record.
        :return: True so the upstream tool keeps sending records.
        """
        # Reset the creator, then copy across the unchanged inbound fields.
        self.record_creator.reset()
        self.record_copier.copy(self.record_creator, in_record)
        strThisRowRootFolder = self.fldRootFolder.get_as_string(in_record)
        strThisRowTargetFolder = self.fldTargetFolder.get_as_string(in_record)
        result,message = self.createFolder(strThisRowRootFolder,strThisRowTargetFolder)
        self.fldCreationResult.set_from_string(self.record_creator, "True" if result else "False")
        self.fldCreationDescr.set_from_string(self.record_creator,message)
        out_record = self.record_creator.finalize_record()
        # Route to Success if the directory exists/was created, else to Error.
        if result:
            self.parent.success_output_anchor.push_record(out_record)
            self.parent.success_output_anchor.output_record_count(False)
        else:
            self.parent.error_output_anchor.push_record(out_record)
            self.parent.error_output_anchor.output_record_count(False)
        return True

    def ii_update_progress(self, d_percent: float):
        """
        Called by the upstream tool to report progress.

        :param d_percent: Value between 0.0 and 1.0.
        """
        self.parent.alteryx_engine.output_tool_progress(self.parent.n_tool_id, d_percent)  # Inform the engine.
        self.parent.success_output_anchor.update_progress(d_percent)  # Inform downstream tools.
        self.parent.error_output_anchor.update_progress(d_percent)

    def ii_close(self):
        """
        Called when the incoming connection has finished passing all records;
        reports final counts and closes both outgoing connections.
        """
        self.parent.success_output_anchor.output_record_count(True)  # True: all records have been sent.
        self.parent.success_output_anchor.close()
        self.parent.error_output_anchor.output_record_count(True)
        self.parent.error_output_anchor.close()

    def createFolder(self, rootFolder: str, targetFolder: str):
        """
        Create targetFolder underneath rootFolder.

        :param rootFolder: Base directory; must already exist.
        :param targetFolder: Sub-directory (possibly nested) to create.
        :return: (result, message) tuple; result is True on success or when
            the folder already exists, message describes the outcome.
        """
        # Validate that the root folder exists before attempting anything.
        if not os.path.isdir(rootFolder):
            return False, "Root folder {} does not exist".format(rootFolder)
        # Bug fix: build the path with os.path.join instead of a hard-coded
        # '\\' separator so non-Windows paths also work.
        fullFolder = os.path.join(rootFolder, targetFolder)
        if os.path.isdir(fullFolder):
            return True, 'Folder already existed'
        try:
            os.makedirs(fullFolder)
        except Exception as exc:
            # Bug fix: the original bare `except:` discarded the reason;
            # include it so the Error output explains what went wrong.
            return False, 'Error while creating folder: {}'.format(exc)
        return True, 'Created Successfully'
|
<filename>streamlabswater/stream.py
from urllib.parse import urljoin, urlencode
import requests
import os
class Stream(object):
    """Thin client for the StreamLabs water-monitoring REST API.

    Every method issues one HTTP request and returns the decoded JSON
    payload as a dict, except delete_subscription which returns None.
    """

    # Default host; replaced by an instance attribute in __init__.
    __STREAMLABSWATER_API_HOST = ""

    def __init__(self, api_key: str = None, api_host:str = None):
        """Create a client.

        :param api_key: API key; falls back to the STREAMLABSWATER_API_KEY
            environment variable.
        :param api_host: API base URL; falls back to an environment variable,
            then to the dev host.
        :raises ValueError: if no api_key could be resolved.
        """
        api_key= api_key or os.environ.get('STREAMLABSWATER_API_KEY')
        if api_key is None:
            raise ValueError('api_key is required')
        # NOTE(review): the environment variable is looked up under the
        # dunder name '__STREAMLABSWATER_API_HOST'; confirm this is intended
        # rather than 'STREAMLABSWATER_API_HOST'.
        self.__STREAMLABSWATER_API_HOST = (api_host or os.environ.get('__STREAMLABSWATER_API_HOST', 'https://dev-api.streamlabswater.com'))
        self.__headers = {
            "Authorization" : "Bearer {}".format(api_key),
            "Content-Type" : "application/json"
        }

    def get_locations(self) -> dict:
        """Retrieves information for all locations

        :return: dictionary containing information for all locations
        """
        url = urljoin(self.__STREAMLABSWATER_API_HOST,'v1/locations')
        return requests.get(url, headers=self.__headers).json()

    def get_location(self, location_id: str) -> dict:
        """Retrieves information for a specific location

        :param location_id: id of location to retrieve
        :return: dictionary containing information for the specified location
        :raises ValueError: if location_id is None
        """
        if location_id is None:
            raise ValueError('location_id is required')
        url = urljoin(self.__STREAMLABSWATER_API_HOST, 'v1/locations/{}'.format(location_id))
        return requests.get(url, headers=self.__headers).json()

    def update_location(self, location_id: str, home_away: str) -> dict:
        """Sets the home/away mode of location

        :param location_id: id of location to update
        :param home_away: desired mode; must be exactly 'home' or 'away'
        :return: dictionary containing updated location information
        :raises ValueError: if location_id is None or home_away is invalid
        """
        if location_id is None:
            raise ValueError('location_id is required')
        if home_away not in ['home', 'away']:
            raise ValueError("Invalid homeAway setting")
        url = urljoin(self.__STREAMLABSWATER_API_HOST, 'v1/locations/{}'.format(location_id))
        return requests.put(url, json={"homeAway": home_away}, headers=self.__headers).json()

    def subscribe_to_location_alerts(self, location_id: str, endpoint: str) -> dict:
        """Subscribes to a locations alerts

        :param location_id: id of location to subscribe to
        :param endpoint: a url to send the alerts to
        :return: dictionary containing subscription information
        :raises ValueError: if location_id or endpoint is None
        """
        if location_id is None:
            raise ValueError('location_id is required')
        if endpoint is None:
            raise ValueError("endpoint is required")
        url = urljoin(self.__STREAMLABSWATER_API_HOST, 'v1/locations/{}/subscriptions'.format(location_id))
        return requests.post(url, json={"endpoint": endpoint}, headers=self.__headers).json()

    def confirm_subscription(self, subscription_id: str, confirmation_token: str) -> dict:
        """Confirm a pending subscription

        :param subscription_id: id of subscription to confirm
        :param confirmation_token: token provided via the subscription endpoint
        :return: dictionary containing subscription information
        :raises ValueError: if subscription_id or confirmation_token is None
        """
        if subscription_id is None:
            raise ValueError('subscription_id is required')
        if confirmation_token is None:
            raise ValueError("confirmation_token is required")
        url = urljoin(self.__STREAMLABSWATER_API_HOST,
                      'v1/subscriptions/{}/confirm/?confirmationToken={}'.format(subscription_id, confirmation_token))
        return requests.get(url, headers=self.__headers).json()

    def get_location_subscriptions(self, location_id: str) -> dict:
        """Retrieves all subscriptions for a specific location

        :param location_id: id of location to retrieve subscriptions for
        :return: dictionary containing all the subscriptions for the location
        :raises ValueError: if location_id is None
        """
        if location_id is None:
            raise ValueError('location_id is required')
        url = urljoin(self.__STREAMLABSWATER_API_HOST, 'v1/locations/{}/subscriptions'.format(location_id))
        return requests.get(url, headers=self.__headers).json()

    def get_subscriptions(self) -> dict:
        """Retrieves information about all subscriptions

        :return: dictionary containing all the subscriptions
        """
        url = urljoin(self.__STREAMLABSWATER_API_HOST,'v1/subscriptions')
        return requests.get(url, headers=self.__headers).json()

    def get_subscription(self, subscription_id: str) -> dict:
        """Retrieves information for a specific subscription

        :param subscription_id: id of subscription to retrieve
        :return: dictionary containing information for the subscription
        :raises ValueError: if subscription_id is None
        """
        if subscription_id is None:
            raise ValueError('subscription_id is required')
        url = urljoin(self.__STREAMLABSWATER_API_HOST,
                      'v1/subscriptions/{}'.format(subscription_id))
        return requests.get(url, headers=self.__headers).json()

    def delete_subscription(self, subscription_id: str) -> None:
        """Delete a subscription

        :param subscription_id: id of subscription to delete
        :return: None
        :raises ValueError: if subscription_id is None

        Bug fix: the return annotation previously claimed ``dict`` although
        the method has always returned None.
        """
        if subscription_id is None:
            raise ValueError('subscription_id is required')
        url = urljoin(self.__STREAMLABSWATER_API_HOST,
                      'v1/subscriptions/{}'.format(subscription_id))
        requests.delete(url, headers=self.__headers)
        return

    def get_location_water_usage_summary(self, location_id: str) -> dict:
        """Retrieves water usage summary for the location

        :param location_id: id of location to retrieve usage for
        :return: dictionary with usage in gallons for the current day, month and year
        :raises ValueError: if location_id is None
        """
        if location_id is None:
            raise ValueError('location_id is required')
        url = urljoin(self.__STREAMLABSWATER_API_HOST, 'v1/locations/{}/readings/water-usage/summary'.format(location_id))
        return requests.get(url, headers=self.__headers).json()

    def get_location_water_usage(self, location_id: str, params: dict) -> dict:
        """Retrieves water usage readings for the location

        :param location_id: id of location to retrieve usage for
        :param params: dictionary containing startTime, endTime, groupBy, page
            and perPage options. Only startTime is required.
        :return: dictionary of usage readings
        :raises ValueError: if location_id is None or startTime is missing
        """
        if location_id is None:
            raise ValueError('location_id is required')
        if 'startTime' not in params:
            raise ValueError("startTime in params is required")
        # Keep ':' ',' '+' unescaped so ISO-8601 timestamps survive encoding.
        queryparams = urlencode(params, safe=":,+")
        url = urljoin(self.__STREAMLABSWATER_API_HOST, 'v1/locations/{}/readings/water-usage?{}'
                      .format(location_id, queryparams))
        return requests.get(url, headers=self.__headers).json()
|
<filename>cloneAlexNet.py
import os
import csv
import cv2
import numpy as np
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.normalization import BatchNormalization
# Behavioural-cloning training script: read the simulator driving log, load
# each centre-camera frame with its steering angle, and train an
# AlexNet-style regression network to predict the steering angle.

# Read the driving log; every row pairs camera image paths with telemetry.
with open(os.path.join('..', 'data', 'driving_log.csv')) as log_file:
    log_reader = csv.reader(log_file)
    next(log_reader)  # skip header
    log_rows = [row for row in log_reader]

frames = []
steering_angles = []
for row in log_rows:
    # The log was recorded on Linux, so split on '/' to get the file name,
    # then rebuild the path relative to the local data directory.
    frame_name = row[0].split('/')[-1]
    frames.append(cv2.imread(os.path.join('..', 'data', 'IMG', frame_name)))
    steering_angles.append(float(row[3]))

X_train = np.array(frames)
y_train = np.array(steering_angles)


def _add_conv_block(net, n_filters, kernel, pool):
    """Append one conv block: ReLU convolution, optional 2x2 max-pool, batch norm."""
    net.add(Conv2D(filters=n_filters, kernel_size=kernel, strides=(1,1), padding='valid', activation='relu'))
    if pool:
        net.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid'))
    net.add(BatchNormalization())


def _add_dense_block(net, units):
    """Append one dense block: ReLU layer, dropout against overfitting, batch norm."""
    net.add(Dense(units, activation='relu'))
    net.add(Dropout(0.4))
    net.add(BatchNormalization())


model = Sequential()
# Crop away sky and bonnet pixels, then normalise pixel values to [-0.5, 0.5].
model.add(Cropping2D(cropping=((50,20), (0,0)), input_shape=(160,320, 3)))
model.add(Lambda(lambda x: x / 255.0 - 0.5))
# Five AlexNet-style convolutional blocks.
_add_conv_block(model, 96, (11,11), pool=True)
_add_conv_block(model, 256, (7,7), pool=True)
_add_conv_block(model, 384, (3,3), pool=False)
_add_conv_block(model, 384, (3,3), pool=False)
_add_conv_block(model, 256, (3,3), pool=True)
# Three fully connected blocks feeding the single steering-angle output.
model.add(Flatten())
for units in (4096, 2048, 1000):
    _add_dense_block(model, units)
model.add(Dense(1))

model.compile(loss='mse', optimizer='adam')
model.summary()
model.fit(X_train, y_train, validation_split=0.2, shuffle=True, epochs=4)
model.save('modelAlexNet.h5')
|
import pytest
from mendola import graph
from nose.tools import assert_raises, assert_equal
def test_graph():
    """Exercise Graph edge insertion, weights, degrees and neighbour removal.

    Modernised: the unmaintained nose helpers (assert_raises / assert_equal)
    are replaced with pytest.raises and plain asserts.
    """
    net = graph.Graph()
    edges = [
        (0, 1, 5), (0, 5, 2), (1, 2, 3), (2, 3, 4), (3, 4, 5),
        (3, 5, 6), (4, 0, 7), (5, 4, 8), (5, 2, 9),
    ]
    for src, dst, wgt in edges:
        net.add_edge(src, dst, weight=wgt)
    # Invalid arguments must be rejected with the documented exceptions.
    with pytest.raises(TypeError):
        net.add_edge(None, 1)
    with pytest.raises(KeyError):
        net.add_edge(5, 2, "1")
    with pytest.raises(TypeError):
        net.add_edge(5, None)
    # Every inserted edge must be recorded with its weight.
    for src, dst, wgt in edges:
        assert net.nodes[src].adj_weights[net.nodes[dst].key] == wgt
    # Expected in-degree per node.
    for key, incoming in [(0, 1), (1, 1), (2, 2), (3, 1), (4, 2), (5, 2)]:
        assert net.nodes[key].incoming_edges == incoming
    # Expected out-degree (number of adjacent nodes) per node.
    for key, degree in [(0, 2), (1, 1), (2, 1), (3, 2), (4, 1), (5, 2)]:
        assert len(net.nodes[key].adj_nodes) == degree
    # Removing a neighbour must decrement the target's in-degree.
    net.nodes[0].remove_neighbor(net.nodes[1])
    assert net.nodes[1].incoming_edges == 0
    net.nodes[3].remove_neighbor(net.nodes[4])
    assert net.nodes[4].incoming_edges == 1
    # Nodes compare by their ordering protocol.
    assert (net.nodes[0] < net.nodes[1]) is True
def test_shortest_path():
    """Verify the shortest path result, its total weight and error handling.

    Modernised: the unmaintained nose helpers (assert_raises / assert_equal)
    are replaced with pytest.raises and plain asserts.
    """
    net = graph.Graph()
    weighted_edges = [
        ('a', 'b', 5), ('a', 'c', 3), ('a', 'e', 2), ('b', 'd', 2),
        ('c', 'b', 1), ('c', 'd', 1), ('d', 'a', 1), ('d', 'g', 2),
        ('d', 'h', 1), ('e', 'a', 1), ('e', 'h', 4), ('e', 'i', 7),
        ('f', 'b', 3), ('f', 'g', 1), ('g', 'c', 3), ('g', 'i', 2),
        ('h', 'c', 2), ('h', 'f', 2), ('h', 'g', 2),
    ]
    for src, dst, wgt in weighted_edges:
        net.add_edge(src, dst, weight=wgt)
    shortest_path = graph.ShortestPath(net)
    # Known optimal route a -> i with total weight 8.
    assert shortest_path.find_shortest_path('a', 'i') == ['a', 'c', 'd', 'g', 'i']
    assert shortest_path.path_weight['i'] == 8
    # Invalid inputs must raise the documented exceptions.
    with pytest.raises(TypeError):
        shortest_path.find_shortest_path(None, None)
    with pytest.raises(ValueError):
        shortest_path.find_shortest_path('j', 'i')
# Allow running this module directly: delegate to pytest's own runner.
if __name__ == '__main__':
    pytest.main([__file__])
|
<reponame>kyokyos/bioinform<filename>physical_period.py
# 运行程序,只需输入某人生日,计算机就可智能列出此人生理周期最值(max,min),并可以图形化展示,抽取部分数据验证,程序计算准确。但仍有一些小瑕疵,以后有空改进。
#time transformation
#common year, month accumulation of days
#python 2.7 import do not support """ """ mark format
import leap_common_year
def total_day_calculate(year,month,day):
    # Convert a (year, English month name, day-of-month) date to its 1-based
    # day-of-year number, honouring leap years via leap_common_year.
    # The legacy misspelling "feburary" used throughout this codebase is
    # still accepted, alongside the correct "february".
    # Raises ValueError for an unrecognised month name (the original fell
    # through and crashed with a TypeError on string + int).
    which_year=leap_common_year.leap_common_year(year)
    # Cumulative number of days *before* each month starts.
    # Bug fix: the original table mapped every month to the start of the
    # FOLLOWING month (e.g. "march" -> 90 instead of 59, "feburary" -> 59
    # instead of 31), so every date from February onwards was shifted by a
    # whole month's worth of days.
    if which_year[0].lower()=='l':
        days_before_month={
            "january":0,"feburary":31,"february":31,"march":60,
            "april":91,"may":121,"june":152,"july":182,
            "august":213,"september":244,"october":274,
            "november":305,"december":335}
    else:
        days_before_month={
            "january":0,"feburary":31,"february":31,"march":59,
            "april":90,"may":120,"june":151,"july":181,
            "august":212,"september":243,"october":273,
            "november":304,"december":334}
    month_key=month.lower()
    if month_key not in days_before_month:
        raise ValueError("unknown month name: {}".format(month))
    return days_before_month[month_key]+day
def number_to_time(year,number):
    # Inverse of total_day_calculate: convert a 1-based day-of-year number
    # back to a (month-name, day-of-month) tuple for the given year.
    # Returns None when number is out of range (0 or > year length), matching
    # the original's implicit None for unmatched input.
    which_year=leap_common_year.leap_common_year(year)
    # Days before each month starts, plus the year's total length so that
    # December is resolvable.
    # Bug fix: the original table was shifted one month forward AND December
    # itself was unreachable (day 365/366 fell off the end and returned
    # None); month-boundary days (e.g. Jan 31 -> day 31) also resolved to
    # the wrong month with day 0.
    if which_year[0].lower()=='l':
        boundaries=[
            ("january",0),("feburary",31),("march",60),("april",91),
            ("may",121),("june",152),("july",182),("august",213),
            ("september",244),("october",274),("november",305),
            ("december",335)]
        year_length=366
    else:
        boundaries=[
            ("january",0),("feburary",31),("march",59),("april",90),
            ("may",120),("june",151),("july",181),("august",212),
            ("september",243),("october",273),("november",304),
            ("december",334)]
        year_length=365
    # The month m satisfies: days-before(m) < number <= days-before(m+1).
    for index,(month,start) in enumerate(boundaries):
        end=boundaries[index+1][1] if index+1<len(boundaries) else year_length
        if start<number<=end:
            return (month,number-start)
    return None
#input birth day and period to plot your emotional,intelligent,physical sine curve
#y=sin(Bx+C),
import leap_common_year, day_transformation,math,pylab,numpy
def plot_sine_all(year,month,day):
    # Plot the three biorhythm sine curves for the given birth date over a
    # full year: emotional (red, 23-day cycle), physical (green, 28-day)
    # and intellectual (blue, 33-day). Each curve is y = sin(B*x + C) where
    # B is the angular frequency and C anchors the phase at the birthday's
    # offset within that cycle.
    birth_number=day_transformation.total_day_calculate(year,month,day)
    # (colour, cycle length in days) for each curve, in plot order.
    curves=[('r',23),('g',28),('b',33)]
    sample_days=numpy.arange(0,366,0.001)
    for colour,period in curves:
        angular_freq=(2*math.pi)/period
        phase=(-angular_freq)*(birth_number%period)
        pylab.plot(sample_days,numpy.sin(sample_days*angular_freq+phase),colour)
    pylab.xlabel('x axis')
    pylab.ylabel('y axis')
    pylab.title("red=emotion,green=physical,blue=intelligence")
    pylab.grid(True)
    pylab.show()
def plot_sine_single(year,month,day,period):
    # Plot one biorhythm sine curve of the given cycle length for a birth
    # date: y = sin(B*x + C), with the phase C anchoring the curve at the
    # birthday's offset within its cycle.
    angular_freq=(2*math.pi)/period
    birth_number=day_transformation.total_day_calculate(year,month,day)
    phase=-angular_freq*(birth_number%period)
    sample_points=numpy.arange(0,math.pi*20,0.1)
    pylab.plot(sample_points,numpy.sin(sample_points*angular_freq+phase))
    pylab.xlabel('x axis')
    pylab.ylabel('y axis')
    pylab.title("sine curve")
    pylab.grid(True)
    pylab.show()
# Calculate the combined physiologic (biorhythm) index for every day of a
# year and print the ten lowest and ten highest days.
# NOTE: this is a Python 2 module (print statements below).
import leap_common_year,day_transformation,math,pylab,numpy
# Module-level accumulator state.
# NOTE(review): dict_physiologic_index is a module global that is never
# cleared, so repeated calls with different birthdays mix their results in
# the same dict — confirm whether this accumulation is intended.
physiologic_index=0
dict_physiologic_index={}
def calculate_physiologic_index(year,month,day):  # the year/month/day given is the birthday
    # Combined index per day = mean of the emotional (23-day), physical
    # (28-day) and intellectual (33-day) sine values; phase C anchors each
    # curve at the birthday's offset within its cycle.
    B_emotion=(2*math.pi)/23
    B_physical=(2*math.pi)/28
    B_intelligence=(2*math.pi)/33
    birthday_number=day_transformation.total_day_calculate(year,month,day)
    initial_number_emotion=birthday_number%23  # birthday offset within each cycle
    initial_number_physical=birthday_number%28
    initial_number_intelligence=birthday_number%33
    C_emotion=(-B_emotion)*initial_number_emotion
    C_physical=(-B_physical)*initial_number_physical
    C_intelligence=(-B_intelligence)*initial_number_intelligence
    x_values=numpy.arange(0,366,1)
    # NOTE(review): these three vectorised arrays are immediately shadowed by
    # the scalar loop below and never used — apparently dead code.
    y_values_emotion=numpy.sin(x_values*B_emotion+C_emotion)
    y_values_physical=numpy.sin(x_values*B_physical+C_physical)
    y_values_intelligence=numpy.sin(x_values*B_intelligence+C_intelligence)
    # Compute the combined index for each day of the year.
    for i in x_values:
        y_values_emotion=numpy.sin(i*B_emotion+C_emotion)
        y_values_physical=numpy.sin(i*B_physical+C_physical)
        y_values_intelligence=numpy.sin(i*B_intelligence+C_intelligence)
        physiologic_index=(y_values_emotion+y_values_physical+y_values_intelligence)/3
        dict_physiologic_index[i]=physiologic_index
    # Sort the (day, index) pairs ascending and descending by index.
    small_sorted_physiologic_index=sorted(dict_physiologic_index.items(),key=lambda d:d[1],reverse=False)
    big_sorted_physiologic_index=sorted(dict_physiologic_index.items(),key=lambda d:d[1],reverse=True)
    # Print the ten lowest-index days as (month, day) tuples.
    print "day of small values are:"
    i=0
    while i<10:
        print day_transformation.number_to_time(year,small_sorted_physiologic_index[i][0])
        i+=1
    # Print the ten highest-index days as (month, day) tuples.
    print "day of big values are:"
    i=0
    while i<10:
        print day_transformation.number_to_time(year,big_sorted_physiologic_index[i][0])
        i+=1
# Gregorian calendar leap-year classification.
def leap_common_year(year):
    # Century years are leap only when divisible by 400; otherwise any year
    # divisible by 4 is leap.
    if year%100==0:
        return "leap year" if year%400==0 else "common year"
    return "leap year" if year%4==0 else "common year"
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `EVRClient`."""
import unittest
from pyevr import EVRClient
from pyevr.openapi_client.models import ForestNotice, ForestNoticeAllOf
class TestEVRClient(unittest.TestCase):
    """Smoke tests for EVRClient construction and its API group attributes."""

    api_key = 'asd123'
    host = 'https://api.evr.test'

    def setUp(self) -> None:
        super().setUp()
        self.client = EVRClient(self.api_key, self.host)

    def test_initial_api_key(self):
        # Constructing without an API key must fail.
        self.assertRaises(TypeError, EVRClient)
        self.assertDictEqual(self.client.openapi_client.configuration.api_key, {'EVR-APIKEY': self.api_key})

    def test_initial_host(self):
        self.assertEqual(self.client.openapi_client.configuration.host, self.host)
        # Omitting the host must fall back to the production URL.
        default_client = EVRClient(self.api_key)
        self.assertEqual(default_client.openapi_client.configuration.host, 'https://evr.veoseleht.ee')

    def test_api_groups(self):
        from pyevr import apis
        from pyevr.openapi_client import api
        # Each client attribute must be exactly the wrapper type AND an
        # instance of the corresponding generated OpenAPI api class.
        expectations = [
            ('assortments', apis.AssortmentsAPI, api.AssortmentsApi),
            ('certificates', apis.CertificatesAPI, api.CertificatesApi),
            ('measurements', apis.MeasurementsAPI, api.MeasurementsApi),
            ('measurement_units', apis.MeasurementUnitsAPI, api.MeasurementUnitsApi),
            ('organizations', apis.OrganizationsAPI, api.OrganizationsApi),
            ('place_of_deliveries', apis.PlaceOfDeliveriesAPI, api.PlaceOfDeliveriesApi),
            ('waybills', apis.WaybillsAPI, api.WaybillsApi),
        ]
        for attr_name, wrapper_type, generated_type in expectations:
            group = getattr(self.client, attr_name)
            self.assertEqual(type(group), wrapper_type)
            self.assertTrue(isinstance(group, generated_type))
class TestExtendedApiClient(unittest.TestCase):
    """Serialization tests for the extended OpenAPI client."""

    api_key = 'asd123'
    host = 'https://api.evr.test'

    def setUp(self) -> None:
        super().setUp()
        self.client = EVRClient(self.api_key, self.host)

    def test_sanitize_for_serialization(self):
        # Both the composed model and its *AllOf* part must serialize to the
        # same camelCase layout, including the 'type' discriminator.
        cases = [
            (
                ForestNotice(cadaster='1', compartment='c',
                             forest_allocation_number='f', number='n'),
                {
                    'type': 'ForestNotice',
                    'cadaster': '1',
                    'compartment': 'c',
                    'forestAllocationNumber': 'f',
                    'number': 'n',
                },
            ),
            (
                ForestNoticeAllOf(cadaster='12', compartment='c2',
                                  forest_allocation_number='f2', number='n2'),
                {
                    'type': 'ForestNotice',
                    'cadaster': '12',
                    'compartment': 'c2',
                    'forestAllocationNumber': 'f2',
                    'number': 'n2',
                },
            ),
        ]
        for model, expected in cases:
            serialized = self.client.openapi_client.sanitize_for_serialization(model)
            self.assertDictEqual(serialized, expected)
# Allow the test module to be executed directly (python test_client.py).
if __name__ == '__main__':
    unittest.main()
|
from yann.special.gan import gan
from yann.network import network
from yann.utils.graph import draw_network
from theano import tensor as T
import numpy
import theano
import cPickle  # NOTE: cPickle (and the bare print statement below) implies Python 2.
# Module-level RNG alias, used when initializing new classifier weights.
rng = numpy.random
class igan (object):
"""
This class creates and train two networks a GAN and a MLP.
"""
def __init__ (self, init_dataset, root = '.', temperature = 3, verbose = 1):
    """
    Initialize the incremental-GAN driver from a base dataset.

    Args:
        init_dataset: Path to the base dataset directory; must contain a
            ``data_params.pkl`` file with a ``'splits'`` entry.
        root: Output root directory (unused here; kept for interface
            compatibility).
        temperature: Temperature for the distillation softmax layer.
        verbose: Verbosity level (unused here).
    """
    self.base_dataset = init_dataset
    # Context manager ensures the file handle is closed even if
    # unpickling raises (the previous open/close pair leaked on error).
    with open(self.base_dataset + '/data_params.pkl', 'rb') as f:
        data_params = cPickle.load(f)
    self.data_splits = data_params ['splits']
    self.temperature = temperature
    # With no held-out split ('p' == 0) only the base classes exist;
    # otherwise the few-shot classes are counted in as well.
    if self.data_splits['p'] == 0:
        self.base_num_classes = len( self.data_splits ['base'] )
    else:
        self.base_num_classes = len( self.data_splits ['shot'] + self.data_splits ['base'] )
def setup_gan ( self,
                dataset = None,
                params = None,
                cook = True,
                root = '.', verbose = 1 ):
    """
    This function is a demo example of a generative adversarial network.
    This is an example code. You should study this code rather than merely run it.

    Args:
        dataset: Supply a dataset. Defaults to ``self.base_dataset``.
        root: location to save down stuff.
        params: Initialize network with parameters (dict keyed by layer id).
        cook: <True> If False, won't cook.
        verbose: Similar to the rest of the dataset.

    Returns:
        net: A Network object (stored as ``self.gan_net``).

    Notes:
        This is not setup properly therefore does not learn at the moment. This network here
        mimics Ian Goodfellow's original code and implementation for MNIST adapted from his
        source code: https://github.com/goodfeli/adversarial/blob/master/mnist.yaml . It might
        not be a perfect replication, but I tried as best as I could.
    """
    if dataset is None:
        dataset = self.base_dataset
    if verbose >=2:
        print (".. Creating a GAN network")
    input_params = None
    # NOTE(review): duplicated verbosity message — the same print already ran
    # just above.
    if verbose >=2:
        print (".. Creating a GAN network")
    # Optimizer settings are not registered as a module here; they are handed
    # straight to cook() at the bottom of this method.
    optimizer_params = {
        "momentum_type" : 'false',
        "momentum_params" : (0.51, 0.95, 40),
        "regularization" : (0.00001, 0.00001),
        "optimizer_type" : 'adam',
        "id" : "main"
    }
    dataset_params = {
        "dataset" : dataset,
        "type" : 'xy',
        "id" : 'data'
    }
    visualizer_params = {
        "root" : root + '/visualizer/gan',
        "frequency" : 1,
        "sample_size": 225,
        "rgb_filters": True,
        "debug_functions" : False,
        "debug_layers": False,
        "id" : 'main'
    }
    resultor_params = {
        "root" : root + "/resultor/gan",
        "id" : "resultor"
    }
    # Shared hyper-parameters for all the layers built below.
    regularize = True
    batch_norm = True
    dropout_rate = 0.5
    # intitialize the network
    self.gan_net = gan ( borrow = True,
                         verbose = verbose )
    self.gan_net.add_module ( type = 'datastream',
                              params = dataset_params,
                              verbose = verbose )
    self.gan_net.add_module ( type = 'visualizer',
                              params = visualizer_params,
                              verbose = verbose
                            )
    self.gan_net.add_module ( type = 'resultor',
                              params = resultor_params,
                              verbose = verbose
                            )
    self.mini_batch_size = self.gan_net.datastream['data'].mini_batch_size
    #z - latent space created by random layer
    self.gan_net.add_layer(type = 'random',
                           id = 'z',
                           num_neurons = (self.mini_batch_size,128),
                           distribution = 'normal',
                           mu = 0,
                           sigma = 1,
                           limits = (0,1),
                           verbose = verbose)
    # Generator layers: z -> G1 -> G2 -> (unflatten) -> G3 -> G4 -> G(z).
    # Each layer optionally loads pretrained weights from `params`.
    if not params is None:
        input_params = params['G1']
    self.gan_net.add_layer ( type = "dot_product",
                             origin = "z",
                             id = "G1",
                             num_neurons = 1200,
                             activation = 'relu',
                             regularize = regularize,
                             batch_norm = batch_norm,
                             input_params = input_params,
                             verbose = verbose
                           )
    if not params is None:
        input_params = params['G2']
    self.gan_net.add_layer ( type = "dot_product",
                             origin = "G1",
                             id = "G2",
                             num_neurons = 5408,
                             activation = 'relu',
                             regularize = regularize,
                             batch_norm = batch_norm,
                             input_params = input_params,
                             verbose = verbose
                           )
    # 5408 = 13 * 13 * 32, reshaped below into a feature map for the deconvs.
    self.gan_net.add_layer ( type = "unflatten",
                             origin = "G2",
                             id = "G2-unflatten",
                             shape = (13, 13, 32),
                             batch_norm = batch_norm,
                             verbose = verbose
                           )
    if not params is None:
        input_params = params['G3']
    self.gan_net.add_layer ( type = "deconv",
                             origin = "G2-unflatten",
                             id = "G3",
                             num_neurons = 32,
                             filter_size = (3,3),
                             output_shape = (28,28,32),
                             activation = 'relu',
                             regularize = regularize,
                             batch_norm = batch_norm,
                             input_params = input_params,
                             stride = (2,2),
                             verbose = verbose
                           )
    if not params is None:
        input_params = params['G4']
    self.gan_net.add_layer ( type = "deconv",
                             origin = "G3",
                             id = "G4",
                             num_neurons = 32,
                             filter_size = (3,3),
                             output_shape = (30,30,64),
                             activation = 'relu',
                             regularize = regularize,
                             batch_norm = batch_norm,
                             input_params = input_params,
                             stride = (1,1),
                             verbose = verbose
                           )
    if not params is None:
        input_params = params['G(z)']
    # Final generator output: a 32x32x3 image in tanh range.
    self.gan_net.add_layer ( type = "deconv",
                             origin = "G4",
                             id = "G(z)",
                             num_neurons = 64,
                             filter_size = (3,3),
                             output_shape = (32,32,3),
                             activation = 'tanh',
                             regularize = regularize,
                             stride = (1,1),
                             input_params = input_params,
                             verbose = verbose
                           )
    #x - inputs come from dataset 1 X 3072
    self.gan_net.add_layer ( type = "input",
                             id = "x",
                             verbose = verbose,
                             datastream_origin = 'data', # if you didnt add a dataset module, now is
                                                         # the time.
                             mean_subtract = False )
    #D(x) - Contains params theta_d creates features 1 X 800.
    # Discriminator Layers
    # The "-x" tower reads real data; the "-z" tower reads G(z) and SHARES the
    # "-x" tower's parameters (input_params = ...dropout_layers[...-x].params).
    # add first convolutional layer
    if not params is None:
        input_params = params['D1-x']
    self.gan_net.add_layer ( type = "conv_pool",
                             origin = "x",
                             id = "D1-x",
                             num_neurons = 20,
                             filter_size = (5,5),
                             pool_size = (2,2),
                             activation = 'relu',
                             regularize = regularize,
                             batch_norm = batch_norm,
                             input_params = input_params,
                             verbose = verbose
                           )
    self.gan_net.add_layer ( type = "conv_pool",
                             origin = "G(z)",
                             id = "D1-z",
                             num_neurons = 20,
                             filter_size = (5,5),
                             pool_size = (2,2),
                             activation = 'relu',
                             regularize = regularize,
                             batch_norm = batch_norm,
                             input_params = self.gan_net.dropout_layers["D1-x"].params,
                             verbose = verbose
                           )
    if not params is None:
        input_params = params['D2-x']
    self.gan_net.add_layer ( type = "conv_pool",
                             origin = "D1-x",
                             id = "D2-x",
                             num_neurons = 50,
                             filter_size = (3,3),
                             pool_size = (2,2),
                             activation = 'relu',
                             regularize = regularize,
                             batch_norm = batch_norm,
                             input_params = input_params,
                             verbose = verbose
                           )
    self.gan_net.add_layer ( type = "conv_pool",
                             origin = "D1-z",
                             # origin = "G(z)",
                             id = "D2-z",
                             num_neurons = 50,
                             filter_size = (3,3),
                             pool_size = (2,2),
                             activation = 'relu',
                             regularize = regularize,
                             batch_norm = batch_norm,
                             input_params = self.gan_net.dropout_layers["D2-x"].params,
                             verbose = verbose
                           )
    if not params is None:
        input_params = params['D3-x']
    self.gan_net.add_layer ( type = "dot_product",
                             id = "D3-x",
                             origin = "D2-x",
                             num_neurons = 1200,
                             activation = 'relu',
                             regularize = regularize,
                             batch_norm = batch_norm,
                             dropout_rate = dropout_rate,
                             input_params = input_params,
                             verbose = verbose
                           )
    self.gan_net.add_layer ( type = "dot_product",
                             id = "D3-z",
                             origin = "D2-z",
                             input_params = self.gan_net.dropout_layers["D3-x"].params,
                             num_neurons = 1200,
                             activation = 'relu',
                             regularize = regularize,
                             batch_norm = batch_norm,
                             dropout_rate = dropout_rate,
                             verbose = verbose
                           )
    if not params is None:
        input_params = params['D4-x']
    self.gan_net.add_layer ( type = "dot_product",
                             id = "D4-x",
                             origin = "D3-x",
                             num_neurons = 1200,
                             activation = 'relu',
                             regularize = regularize,
                             batch_norm = batch_norm,
                             dropout_rate = dropout_rate,
                             input_params = input_params,
                             verbose = verbose
                           )
    self.gan_net.add_layer ( type = "dot_product",
                             id = "D4-z",
                             origin = "D3-z",
                             input_params = self.gan_net.dropout_layers["D4-x"].params,
                             num_neurons = 1200,
                             activation = 'relu',
                             regularize = regularize,
                             dropout_rate = dropout_rate,
                             batch_norm = batch_norm,
                             verbose = verbose
                           )
    #C(D(x)) - This is the opposite of C(D(G(z))), real
    if not params is None:
        input_params = params['D(x)']
    self.gan_net.add_layer ( type = "dot_product",
                             id = "D(x)",
                             origin = "D4-x",
                             num_neurons = 1,
                             activation = 'sigmoid',
                             regularize = regularize,
                             input_params = input_params,
                             verbose = verbose
                           )
    #C(D(G(z))) fake - the classifier for fake/real that always predicts fake
    self.gan_net.add_layer ( type = "dot_product",
                             id = "D(G(z))",
                             origin = "D4-z",
                             num_neurons = 1,
                             activation = 'sigmoid',
                             regularize = regularize,
                             input_params = self.gan_net.dropout_layers["D(x)"].params,
                             verbose = verbose
                           )
    #C(D(x)) - This is the opposite of C(D(G(z))), real
    if not params is None:
        input_params = params['softmax']
    self.gan_net.add_layer ( type = "classifier",
                             id = "softmax",
                             origin = "D4-x",
                             num_classes = 10,
                             regularize = regularize,
                             input_params = input_params,
                             activation = 'softmax',
                             verbose = verbose
                           )
    # objective layers
    # discriminator objective: standard GAN cross-entropy on real vs fake.
    self.gan_net.add_layer (type = "tensor",
                            input = - 0.5 * T.mean(T.log(self.gan_net.layers['D(x)'].output)) - \
                                      0.5 * T.mean(T.log(1-self.gan_net.layers['D(G(z))'].output)),
                            input_shape = (1,),
                            id = "discriminator_task"
                           )
    self.gan_net.add_layer ( type = "objective",
                             id = "discriminator_obj",
                             origin = "discriminator_task",
                             layer_type = 'value',
                             objective = self.gan_net.dropout_layers['discriminator_task'].output,
                             datastream_origin = 'data',
                             verbose = verbose
                           )
    #generator objective (non-saturating form on D(G(z))).
    self.gan_net.add_layer (type = "tensor",
                            input = - 0.5 * T.mean(T.log(self.gan_net.layers['D(G(z))'].output)),
                            input_shape = (1,),
                            id = "objective_task"
                           )
    self.gan_net.add_layer ( type = "objective",
                             id = "generator_obj",
                             layer_type = 'value',
                             origin = "objective_task",
                             objective = self.gan_net.dropout_layers['objective_task'].output,
                             datastream_origin = 'data',
                             verbose = verbose
                           )
    #softmax objective.
    self.gan_net.add_layer ( type = "objective",
                             id = "classifier_obj",
                             origin = "softmax",
                             objective = "nll",
                             layer_type = 'discriminator',
                             datastream_origin = 'data',
                             verbose = verbose
                           )
    # from yann.utils.graph import draw_network
    # draw_network(net.graph, filename = 'gan.png')
    # self.gan_net.pretty_print()
    if cook is True:
        self.gan_net.cook ( objective_layers = ["classifier_obj", "discriminator_obj", "generator_obj"],
                            optimizer_params = optimizer_params,
                            discriminator_layers = ["D1-x", "D2-x","D3-x","D4-x"],
                            generator_layers = ["G1","G2","G3", "G4", "G(z)"],
                            classifier_layers = ["D1-x", "D2-x","D3-x","D4-x","softmax"],
                            softmax_layer = "softmax",
                            game_layers = ("D(x)", "D(G(z))"),
                            verbose = verbose )
def train_init_gan (self, lr = (0.04, 0.001), save_after_epochs = 1, epochs= (15,), verbose = 2):
    """
    This method will train the initial GAN on base dataset.

    Args:
        lr : learning rates to train with. Default is (0.04, 0.001)
        epochs: Epochs to train with. Default is (15,).
            The previous default ``(15)`` was a plain int — parentheses
            alone do not make a tuple — whereas the sibling train methods
            (and the matching length-of-lr convention) pass epoch tuples.
        save_after_epochs: Saves the network down after so many epochs.
        verbose : As usual.
    """
    if verbose >=2 :
        print ( ".. Training GAN ")
    self.gan_net.train( epochs = epochs,
                        k = 1,
                        learning_rates = lr,
                        pre_train_discriminator = 2,
                        validate_after_epochs = 10,
                        visualize_after_epochs = 1,
                        save_after_epochs = save_after_epochs,
                        training_accuracy = True,
                        show_progress = True,
                        early_terminate = True,
                        verbose = verbose)
def setup_base_mlp (self,
                    dataset = None,
                    root = '.',
                    params = None,
                    cook = True,
                    verbose = 1 ):
    """
    Build the base convolutional network: input -> c1..c6 -> fc1 -> fc2 -> softmax.

    Args:
        root: save location for data
        params: Initialize network with params (dict keyed by layer id).
        cook: <True> If False, won't cook.
        dataset: an already created dataset. Defaults to ``self.base_dataset``.
        verbose: As usual.
    """
    if verbose >=2:
        print (".. Creating the MLP network")
    if dataset is None:
        dataset = self.base_dataset
    input_params = None
    optimizer_params = {
        "momentum_type" : 'false',
        "momentum_params" : (0.65, 0.9, 30),
        "regularization" : (0.0001, 0.0001),
        "optimizer_type" : 'adam',
        "id" : "optim-base"
    }
    dataset_params = {
        "dataset" : dataset,
        "svm" : False,
        "n_classes" : self.base_num_classes,
        "id" : 'data-base'
    }
    visualizer_params = {
        "root" : root + '/visualizer/base-network',
        "frequency" : 1,
        "sample_size": 225,
        "rgb_filters": True,
        "debug_functions" : False,
        "debug_layers": False,
        "id" : 'visualizer-base'
    }
    resultor_params = {
        "root" : root + "/resultor/base-network",
        "id" : "resultor-base"
    }
    self.base = network( borrow = True,
                         verbose = verbose )
    self.base.add_module ( type = 'optimizer',
                           params = optimizer_params,
                           verbose = verbose )
    self.base.add_module ( type = 'datastream',
                           params = dataset_params,
                           verbose = verbose )
    self.base.add_module ( type = 'visualizer',
                           params = visualizer_params,
                           verbose = verbose
                         )
    self.base.add_module ( type = 'resultor',
                           params = resultor_params,
                           verbose = verbose
                         )
    self.base.add_layer ( type = "input",
                          id = "input",
                          verbose = verbose,
                          datastream_origin = 'data-base')
    # Six conv_pool layers; each optionally loads pretrained weights.
    # NOTE(review): c1 uses the kwarg 'filter_size' while c2..c6 use
    # 'filter_shape' — confirm against yann's conv_pool API which is intended.
    if not params is None:
        input_params = params ['c1']
    self.base.add_layer ( type = "conv_pool",
                          id = "c1",
                          origin = "input",
                          num_neurons = 20,
                          filter_size = (5,5),
                          pool_size = (1,1),
                          activation = 'relu',
                          regularize = True,
                          batch_norm= True,
                          input_params = input_params,
                          verbose = verbose
                        )
    if not params is None:
        input_params = params ['c2']
    self.base.add_layer ( type = "conv_pool",
                          id = "c2",
                          origin = "c1",
                          num_neurons = 50,
                          filter_shape = (3,3),
                          pool_size = (1,1),
                          batch_norm= True,
                          regularize = True,
                          activation = 'relu',
                          input_params = input_params,
                          verbose = verbose
                        )
    if not params is None:
        input_params = params ['c3']
    self.base.add_layer ( type = "conv_pool",
                          id = "c3",
                          origin = "c2",
                          num_neurons = 50,
                          filter_shape = (3,3),
                          pool_size = (2,2),
                          batch_norm= True,
                          regularize = True,
                          activation = 'relu',
                          input_params = input_params,
                          verbose = verbose
                        )
    if not params is None:
        input_params = params ['c4']
    self.base.add_layer ( type = "conv_pool",
                          id = "c4",
                          origin = "c3",
                          num_neurons = 100,
                          filter_shape = (3,3),
                          pool_size = (1,1),
                          batch_norm= True,
                          regularize = True,
                          activation = 'relu',
                          input_params = input_params,
                          verbose = verbose
                        )
    if not params is None:
        input_params = params ['c5']
    self.base.add_layer ( type = "conv_pool",
                          id = "c5",
                          origin = "c4",
                          num_neurons = 100,
                          filter_shape = (3,3),
                          pool_size = (1,1),
                          batch_norm= True,
                          regularize = True,
                          activation = 'relu',
                          input_params = input_params,
                          verbose = verbose
                        )
    if not params is None:
        input_params = params ['c6']
    self.base.add_layer ( type = "conv_pool",
                          id = "c6",
                          origin = "c5",
                          num_neurons = 250,
                          filter_shape = (3,3),
                          pool_size = (2,2),
                          batch_norm= True,
                          regularize = True,
                          activation = 'relu',
                          input_params = input_params,
                          verbose = verbose
                        )
    # Two fully-connected layers with dropout.
    if not params is None:
        input_params = params ['fc1']
    self.base.add_layer ( type = "dot_product",
                          origin = "c6",
                          id = "fc1",
                          num_neurons = 1024,
                          activation = 'relu',
                          batch_norm= True,
                          regularize = True,
                          dropout_rate = 0.7,
                          input_params = input_params,
                          verbose = verbose
                        )
    if not params is None:
        input_params = params ['fc2']
    self.base.add_layer ( type = "dot_product",
                          origin = "fc1",
                          id = "fc2",
                          num_neurons = 1024,
                          activation = 'relu',
                          batch_norm= True,
                          dropout_rate = 0.7,
                          regularize = True,
                          input_params = input_params,
                          verbose = verbose
                        )
    if not params is None:
        input_params = params ['softmax']
    self.base.add_layer ( type = "classifier",
                          id = "softmax",
                          origin = "fc2",
                          num_classes = self.base_num_classes,
                          activation = 'softmax',
                          regularize = True,
                          input_params = input_params,
                          verbose = verbose
                        )
    self.base.add_layer ( type = "objective",
                          id = "obj-base",
                          origin = "softmax",
                          verbose = verbose
                        )
    self.base.pretty_print()
    # draw_network(self.gan_net.graph, filename = 'base.png')
    if cook is True:
        # FIX: the classifier layer created above has id 'softmax'; the
        # previous code passed 'softmax-base', an id never added to this
        # network.
        self.base.cook( optimizer = 'optim-base',
                        objective_layers = ['obj-base'],
                        datastream = 'data-base',
                        classifier = 'softmax',
                        verbose = verbose
                      )
def train_base_mlp (self,
                    lr = (0.05, 0.01, 0.001),
                    epochs = (20, 20),
                    save_after_epochs = 1,
                    early_terminate = False,
                    verbose = 2):
    """
    Train the base MLP on the base dataset, then run its test pass.

    Args:
        lr : learning rates to train with. Default is (0.05, 0.01, 0.001)
        save_after_epochs: Saves the network down after so many epochs.
        epochs: Epochs to train with. Default is (20, 20)
        early_terminate: Simply passed to the train method.
        verbose : As usual.
    """
    if verbose >=2 :
        print ( ".. Training Base MLP ")
    # Collect the training options once, then hand them to yann's train().
    train_options = {
        'epochs'                : epochs,
        'validate_after_epochs' : 1,
        'visualize_after_epochs': 1,
        'save_after_epochs'     : save_after_epochs,
        'training_accuracy'     : True,
        'show_progress'         : True,
        'early_terminate'       : early_terminate,
        'learning_rates'        : lr,
        'verbose'               : verbose,
    }
    self.base.train(**train_options)
    self.base.test(verbose = verbose)
def setup_baseline_inc(self, dataset, root = '.', verbose= 2):
    """
    This method updates the increment the mlp on the increment batch.

    Args:
        root: location to save outputs.
        dataset: Increment dataset.

    Notes:
        This network does not share parameters, from the base_mlp, but creates a new copy
        of all the parameters with a new network.
    """
    if verbose >=2:
        print (".. Creating the increment network")
    # NOTE(review): bare open/close — the handle leaks if cPickle.load raises.
    f = open(dataset + '/data_params.pkl', 'rb')
    data_params = cPickle.load(f)
    f.close()
    self.data_splits = data_params ['splits']
    # The incremental classifier covers both the few-shot and the base classes.
    self.inc_num_classes = len(self.data_splits ['shot']) + len( self.data_splits ['base'] )
    optimizer_params = {
        "momentum_type" : 'false',
        "momentum_params" : (0.65, 0.9, 30),
        "regularization" : (0.0001, 0.0001),
        "optimizer_type" : 'adam',
        "id" : "optim-inc-baseline"
    }
    dataset_params = {
        "dataset" : dataset,
        "svm" : False,
        "n_classes" : self.inc_num_classes,
        "id" : 'data-inc-baseline'
    }
    self.baseline = network ()
    visualizer_params = {
        "root" : root + '/visualizer/baseline-inc',
        "frequency" : 1,
        "sample_size": 225,
        "rgb_filters": True,
        "debug_functions" : False,
        "debug_layers": False,
        "id" : 'visualizer-inc-baseline'
    }
    resultor_params = {
        "root" : root + "/resultor/baseline-inc",
        "id" : "resultor-inc-baseline"
    }
    self.baseline.add_module ( type = 'datastream',
                               params = dataset_params,
                               verbose = verbose )
    self.baseline.add_module ( type = 'optimizer',
                               params = optimizer_params,
                               verbose = verbose )
    self.baseline.add_module ( type = 'visualizer',
                               params = visualizer_params,
                               verbose = verbose
                             )
    self.baseline.add_module ( type = 'resultor',
                               params = resultor_params,
                               verbose = verbose
                             )
    self.baseline.add_layer ( type = "input",
                              id = "input",
                              verbose = verbose,
                              datastream_origin = 'data-inc-baseline')
    # Copy (not share) the base network's parameters into fresh shared
    # variables, then rebuild the same c1..c6 / fc1 / fc2 stack with them.
    base_params = self.base.get_params(verbose = verbose)
    from yann.utils.pickle import shared_params
    base_params = shared_params (base_params)
    self.baseline.add_layer ( type = "conv_pool",
                              id = "c1",
                              origin = "input",
                              num_neurons = 20,
                              filter_size = (5,5),
                              pool_size = (1,1),
                              activation = 'relu',
                              regularize = True,
                              batch_norm= True,
                              input_params = base_params ['c1'],
                              verbose = verbose
                            )
    self.baseline.add_layer ( type = "conv_pool",
                              id = "c2",
                              origin = "c1",
                              num_neurons = 50,
                              filter_shape = (3,3),
                              pool_size = (1,1),
                              batch_norm= True,
                              regularize = True,
                              activation = 'relu',
                              input_params = base_params ['c2'],
                              verbose = verbose
                            )
    self.baseline.add_layer ( type = "conv_pool",
                              id = "c3",
                              origin = "c2",
                              num_neurons = 50,
                              filter_shape = (3,3),
                              pool_size = (2,2),
                              batch_norm= True,
                              regularize = True,
                              activation = 'relu',
                              input_params = base_params['c3'],
                              verbose = verbose
                            )
    self.baseline.add_layer ( type = "conv_pool",
                              id = "c4",
                              origin = "c3",
                              num_neurons = 100,
                              filter_shape = (3,3),
                              pool_size = (1,1),
                              batch_norm= True,
                              regularize = True,
                              activation = 'relu',
                              input_params = base_params['c4'],
                              verbose = verbose
                            )
    self.baseline.add_layer ( type = "conv_pool",
                              id = "c5",
                              origin = "c4",
                              num_neurons = 100,
                              filter_shape = (3,3),
                              pool_size = (1,1),
                              batch_norm= True,
                              regularize = True,
                              activation = 'relu',
                              input_params = base_params['c5'],
                              verbose = verbose
                            )
    self.baseline.add_layer ( type = "conv_pool",
                              id = "c6",
                              origin = "c5",
                              num_neurons = 250,
                              filter_shape = (3,3),
                              pool_size = (2,2),
                              batch_norm= True,
                              regularize = True,
                              activation = 'relu',
                              input_params = base_params['c6'],
                              verbose = verbose
                            )
    self.baseline.add_layer ( type = "dot_product",
                              origin = "c6",
                              id = "fc1",
                              num_neurons = 1024,
                              activation = 'relu',
                              input_params = base_params ['fc1'],
                              batch_norm= True,
                              dropout_rate = 0.7,
                              regularize = True,
                              verbose = verbose
                            )
    self.baseline.add_layer ( type = "dot_product",
                              origin = "fc1",
                              id = "fc2",
                              num_neurons = 1024,
                              activation = 'relu',
                              input_params = base_params ['fc2'],
                              batch_norm= True,
                              dropout_rate = 0.7,
                              regularize = True,
                              verbose = verbose
                            )
    # For classifier layer, recreating...
    # Widen the old softmax: keep the trained columns and append freshly
    # initialized columns/biases for the newly introduced classes.
    old_w = self.base.dropout_layers['softmax'].w.get_value(borrow = True)
    old_b = self.base.dropout_layers['softmax'].b.get_value(borrow = True)
    # NOTE(review): the number of appended columns is len(data_splits['base']);
    # intuitively the *new* classes would be the 'shot' split — this only
    # matches inc_num_classes when the two splits are the same size. Verify.
    new_w = numpy.asarray(0.01 * rng.standard_normal( size=(old_w.shape[0],
                                                            len( self.data_splits ['base'])
                                                           )
                                                    ),
                          dtype=theano.config.floatX)
    new_w_values = numpy.concatenate((old_w,new_w), axis = 1)
    new_b = numpy.asarray(0.01 * rng.standard_normal( size = (len( self.data_splits ['base']))),
                          dtype=theano.config.floatX)
    new_b_values = numpy.concatenate((old_b,new_b), axis = 0)
    new_w = theano.shared(value= new_w_values, name='inc-weights', borrow = True)
    new_b = theano.shared(value= new_b_values, name='inc-bias', borrow = True)
    # Build the widened classifier from the concatenated weight/bias values.
    self.baseline.add_layer ( type = "classifier",
                              id = "softmax-inc-baseline",
                              origin = "fc2",
                              num_classes = self.inc_num_classes,
                              activation = 'softmax',
                              regularize = True,
                              input_params = [new_w, new_b],
                              verbose = verbose
                            )
    self.baseline.add_layer ( type = "objective",
                              id = "obj-inc-baseline",
                              origin = "softmax-inc-baseline",
                              verbose = verbose
                            )
    # self.baseline.pretty_print()
    # draw_network(self.baseline.graph, filename = 'baseline.png')
    self.baseline.cook( optimizer = 'optim-inc-baseline',
                        objective_layers = ['obj-inc-baseline'],
                        datastream = 'data-inc-baseline',
                        classifier_layer = 'softmax-inc-baseline',
                        verbose = verbose
                      )
def train_baseline_inc (self,
                        save_after_epochs = 1,
                        lr = (0.05, 0.01, 0.001),
                        epochs = (20, 20),
                        verbose = 2):
    """
    Train the incremental baseline MLP on the incremental dataset, then test it.

    Args:
        lr : learning rates to train with. Default is (0.05, 0.01, 0.001)
        epochs: Epochs to train with. Default is (20, 20)
        save_after_epochs: Saves the network down after so many epochs.
        verbose : As usual.
    """
    if verbose >= 2:
        # Call form for consistency with every other print in this module;
        # the old bare `print "..."` statement was Python-2-only syntax and
        # prints identically under Python 2.
        print (".. Training baseline network")
    self.baseline.train( epochs = epochs,
                         validate_after_epochs = 1,
                         visualize_after_epochs = 10,
                         save_after_epochs = save_after_epochs,
                         training_accuracy = True,
                         show_progress = True,
                         early_terminate = False,
                         learning_rates = lr,
                         verbose = verbose)
    self.baseline.test(verbose = verbose)
def setup_mentor(self, temperature = None, verbose= 2):
    """
    This method sets up the mentor network, which is basically the same network
    as the base one, but it takes the GAN's generator output as input and
    produces softmaxes (one plain, one temperature-scaled for distillation).

    Args:
        temperature: If given, overrides ``self.temperature`` for the
            temperature-scaled softmax layer.
        verbose: As usual.
    """
    if verbose >=2:
        print (".. Creating the mentor network n")
    self.mentor = network ()
    if not temperature is None:
        self.temperature = temperature
    # The mentor's input is wired directly to the generator output G(z),
    # flattened to (mini_batch_size, 3072) and unflattened back to 32x32x3.
    self.mentor.add_layer ( type = "tensor",
                            id = "input",
                            input = self.gan_net.dropout_layers['G(z)'].output,
                            input_shape = (self.mini_batch_size,3072),
                            verbose = verbose )
    self.mentor.add_layer ( type = "unflatten",
                            id = "input-unflattened",
                            origin ="input",
                            shape = (32,32,3),
                            verbose = verbose
                          )
    # All layers below SHARE the base network's parameters directly
    # (input_params = self.base.dropout_layers[...].params) — unlike the
    # baseline network, which copies them.
    self.mentor.add_layer ( type = "conv_pool",
                            id = "c1",
                            origin = "input-unflattened",
                            num_neurons = 20,
                            filter_size = (5,5),
                            pool_size = (1,1),
                            activation = 'relu',
                            regularize = True,
                            batch_norm= True,
                            input_params = self.base.dropout_layers['c1'].params,
                            verbose = verbose
                          )
    self.mentor.add_layer ( type = "conv_pool",
                            id = "c2",
                            origin = "c1",
                            num_neurons = 50,
                            filter_shape = (3,3),
                            pool_size = (1,1),
                            batch_norm= True,
                            regularize = True,
                            activation = 'relu',
                            input_params = self.base.dropout_layers['c2'].params,
                            verbose = verbose
                          )
    self.mentor.add_layer ( type = "conv_pool",
                            id = "c3",
                            origin = "c2",
                            num_neurons = 50,
                            filter_shape = (3,3),
                            pool_size = (2,2),
                            batch_norm= True,
                            regularize = True,
                            activation = 'relu',
                            input_params = self.base.dropout_layers['c3'].params,
                            verbose = verbose
                          )
    self.mentor.add_layer ( type = "conv_pool",
                            id = "c4",
                            origin = "c3",
                            num_neurons = 100,
                            filter_shape = (3,3),
                            pool_size = (1,1),
                            batch_norm= True,
                            regularize = True,
                            activation = 'relu',
                            input_params = self.base.dropout_layers['c4'].params,
                            verbose = verbose
                          )
    self.mentor.add_layer ( type = "conv_pool",
                            id = "c5",
                            origin = "c4",
                            num_neurons = 100,
                            filter_shape = (3,3),
                            pool_size = (1,1),
                            batch_norm= True,
                            regularize = True,
                            activation = 'relu',
                            input_params = self.base.dropout_layers['c5'].params,
                            verbose = verbose
                          )
    self.mentor.add_layer ( type = "conv_pool",
                            id = "c6",
                            origin = "c5",
                            num_neurons = 250,
                            filter_shape = (3,3),
                            pool_size = (2,2),
                            batch_norm= True,
                            regularize = True,
                            activation = 'relu',
                            input_params = self.base.dropout_layers['c6'].params,
                            verbose = verbose
                          )
    self.mentor.add_layer ( type = "dot_product",
                            origin = "c6",
                            id = "fc1",
                            num_neurons = 1024,
                            activation = 'relu',
                            batch_norm= True,
                            dropout_rate = 0.7,
                            regularize = True,
                            input_params = self.base.dropout_layers['fc1'].params,
                            verbose = verbose
                          )
    self.mentor.add_layer ( type = "dot_product",
                            origin = "fc1",
                            id = "fc2",
                            num_neurons = 1024,
                            activation = 'relu',
                            input_params = self.base.dropout_layers['fc2'].params,
                            batch_norm= True,
                            dropout_rate = 0.7,
                            regularize = True,
                            verbose = verbose
                          )
    # Two heads over the same shared softmax parameters: a plain softmax and
    # a temperature-scaled one (for distillation targets).
    self.mentor.add_layer ( type = "classifier",
                            id = "softmax",
                            origin = "fc2",
                            num_classes = self.base.dropout_layers['softmax'].output_shape[1],
                            activation = 'softmax',
                            input_params = self.base.dropout_layers['softmax'].params,
                            regularize = True,
                            verbose = verbose
                          )
    self.mentor.add_layer ( type = "classifier",
                            id = "softmax-base-temperature",
                            origin = "fc2",
                            num_classes = self.base.dropout_layers['softmax'].output_shape[1],
                            activation = ('softmax', self.temperature),
                            input_params = self.base.dropout_layers['softmax'].params,
                            regularize = True,
                            verbose = verbose
                          )
    # self.mentor.pretty_print()
    # draw_network(self.mentor.graph, filename = 'mentor.png')
def setup_hallucinated_inc(self, dataset, root = '.', verbose= 2):
"""
This method setup the increment the mlp on the increment net.
Args:
root: location to save outputs.
dataset: Increment dataset.
Notes:
This method creates two networks with shared parameters. One network is used to update
the parameters using the dataset and the other network is used to update the parameters
for the mentoring via GAN.
The parameters are not shared with the mentor network they are newly created copies, but
the two networks created in this method do share parameters.
"""
if verbose >=2:
print (".. Creating the increment network with mentoring")
f = open(dataset + '/data_params.pkl', 'rb')
data_params = cPickle.load(f)
f.close()
self.data_splits = data_params ['splits']
optimizer_params = {
"momentum_type" : 'false',
"momentum_params" : (0.65, 0.9, 30),
"regularization" : (0.0001, 0.0001),
"optimizer_type" : 'adam',
"id" : "optim-inc-hallucinated"
}
dataset_params = {
"dataset" : dataset,
"svm" : False,
"n_classes" : len (self.data_splits['base']) ,
"id" : 'data-inc-hallucinated'
}
visualizer_params = {
"root" : root + '/visualizer/hallucinated-inc',
"frequency" : 1,
"sample_size": 225,
"rgb_filters": True,
"debug_functions" : False,
"debug_layers": False,
"id" : 'hallucinated'
}
resultor_params = {
"root" : root + "/resultor/hallucianted-inc",
"id" : "hallucinated"
}
self.hallucinated = network()
self.hallucinated.add_module ( type = 'datastream',
params = dataset_params,
verbose = verbose )
self.hallucinated.add_module ( type = 'optimizer',
params = optimizer_params,
verbose = verbose )
self.hallucinated.add_module ( type = 'visualizer',
params = visualizer_params,
verbose = verbose
)
self.hallucinated.add_module ( type = 'resultor',
params = resultor_params,
verbose = verbose
)
# Collecting parameters as copies from the base mentor network.
base_params = self.base.get_params(verbose = verbose)
from yann.utils.pickle import shared_params
base_params = shared_params (base_params)
##########
# Network from dataset just the incremental network inference.
##########
self.hallucinated.add_layer ( type = "input",
id = "data",
verbose = verbose,
datastream_origin = 'data-inc-hallucinated')
self.hallucinated.add_layer ( type = "conv_pool",
id = "c1-data",
origin = "data",
num_neurons = 20,
filter_size = (5,5),
pool_size = (1,1),
activation = 'relu',
regularize = True,
batch_norm= True,
input_params = base_params ['c1'],
verbose = verbose
)
self.hallucinated.add_layer ( type = "conv_pool",
id = "c2-data",
origin = "c1-data",
num_neurons = 50,
filter_shape = (3,3),
pool_size = (1,1),
batch_norm= True,
regularize = True,
activation = 'relu',
input_params = base_params ['c2'],
verbose = verbose
)
self.hallucinated.add_layer ( type = "conv_pool",
id = "c3-data",
origin = "c2-data",
num_neurons = 50,
filter_shape = (3,3),
pool_size = (2,2),
batch_norm= True,
regularize = True,
activation = 'relu',
input_params = base_params ['c3'],
verbose = verbose
)
self.hallucinated.add_layer ( type = "conv_pool",
id = "c4-data",
origin = "c3-data",
num_neurons = 100,
filter_shape = (3,3),
pool_size = (1,1),
batch_norm= True,
regularize = True,
activation = 'relu',
input_params = base_params ['c4'],
verbose = verbose
)
self.hallucinated.add_layer ( type = "conv_pool",
id = "c5-data",
origin = "c4-data",
num_neurons = 100,
filter_shape = (3,3),
pool_size = (1,1),
batch_norm= True,
regularize = True,
activation = 'relu',
input_params = base_params['c5'],
verbose = verbose
)
self.hallucinated.add_layer ( type = "conv_pool",
id = "c6-data",
origin = "c5-data",
num_neurons = 250,
filter_shape = (3,3),
pool_size = (2,2),
batch_norm= True,
regularize = True,
activation = 'relu',
input_params = base_params['c6'],
verbose = verbose
)
self.hallucinated.add_layer ( type = "dot_product",
origin = "c6-data",
id = "fc1-data",
num_neurons = 1024,
activation = 'relu',
input_params = base_params ['fc1'],
batch_norm= True,
dropout_rate = 0.7,
regularize = True,
verbose = verbose )
self.hallucinated.add_layer ( type = "dot_product",
origin = "fc1-data",
id = "fc2-data",
num_neurons = 1024,
activation = 'relu',
input_params = base_params ['fc2'],
batch_norm= True,
dropout_rate = 0.7,
regularize = True,
verbose = verbose )
# For classifier layer, recreating...
old_w = base_params ['softmax'][0].eval()
old_b = base_params ['softmax'][1].eval()
if self.inc_num_classes > old_w.shape[1]:
assert len(self.data_splits ['shot']) == old_w.shape[1]
new_w = numpy.asarray(0.01 * rng.standard_normal( size=(old_w.shape[0],
len( self.data_splits ['base'])
)
),
dtype=theano.config.floatX)
new_w_values = numpy.concatenate((old_w,new_w), axis = 1)
new_b = numpy.asarray(0.01 * rng.standard_normal( size = (len(
self.data_splits ['base']))),
dtype=theano.config.floatX)
new_b_values = numpy.concatenate((old_b,new_b), axis = 0)
else:
assert self.inc_num_classes == old_w.shape[1]
new_w_values = old_w
new_b_values = old_b
new_w = theano.shared(value= new_w_values, name='inc-weights', borrow = True)
new_b = theano.shared(value= new_b_values, name='inc-bias', borrow = True)
# This removes the last two parameters added (Which should be the softmax)
# This works on the labels from the dataset.
self.hallucinated.add_layer ( type = "classifier",
id = "softmax-inc-hallucinated-data",
origin = "fc2-data",
num_classes = self.inc_num_classes,
activation = 'softmax',
input_params = [new_w, new_b],
regularize = True,
verbose = verbose )
##########
# Softmax temperature of incremental network from GAN inputs.
##########
self.hallucinated.add_layer ( type = "tensor",
id = "gan-input",
input = self.gan_net.inference_layers [ 'G(z)'].output,
input_shape = self.gan_net.dropout_layers ['G(z)'].output_shape,
verbose = verbose )
self.hallucinated.add_layer ( type = "unflatten",
id = "gan-input-unflattened",
origin ="gan-input",
shape = (32,32,3),
verbose = verbose
)
self.hallucinated.add_layer ( type = "conv_pool",
id = "c1-gan",
origin = "gan-input-unflattened",
num_neurons = 20,
filter_size = (5,5),
pool_size = (1,1),
activation = 'relu',
regularize = True,
batch_norm= True,
input_params = self.hallucinated.dropout_layers ['c1-data'].params,
verbose = verbose
)
self.hallucinated.add_layer ( type = "conv_pool",
id = "c2-gan",
origin = "c1-gan",
num_neurons = 50,
filter_shape = (3,3),
pool_size = (1,1),
batch_norm= True,
regularize = True,
activation = 'relu',
input_params = self.hallucinated.dropout_layers ['c2-data'].params,
verbose = verbose
)
self.hallucinated.add_layer ( type = "conv_pool",
id = "c3-gan",
origin = "c2-gan",
num_neurons = 50,
filter_shape = (3,3),
pool_size = (2,2),
batch_norm= True,
regularize = True,
activation = 'relu',
input_params = self.hallucinated.dropout_layers ['c3-data'].params,
verbose = verbose
)
self.hallucinated.add_layer ( type = "conv_pool",
id = "c4-gan",
origin = "c3-gan",
num_neurons = 100,
filter_shape = (3,3),
pool_size = (1,1),
batch_norm= True,
regularize = True,
activation = 'relu',
input_params = self.hallucinated.dropout_layers ['c4-data'].params,
verbose = verbose
)
self.hallucinated.add_layer ( type = "conv_pool",
id = "c5-gan",
origin = "c4-gan",
num_neurons = 100,
filter_shape = (3,3),
pool_size = (1,1),
batch_norm= True,
regularize = True,
activation = 'relu',
input_params = self.hallucinated.dropout_layers ['c5-data'].params,
verbose = verbose
)
self.hallucinated.add_layer ( type = "conv_pool",
id = "c6-gan",
origin = "c5-gan",
num_neurons = 250,
filter_shape = (3,3),
pool_size = (2,2),
batch_norm= True,
regularize = True,
activation = 'relu',
input_params = self.hallucinated.dropout_layers ['c6-data'].params,
verbose = verbose
)
self.hallucinated.add_layer ( type = "dot_product",
origin = "c6-gan",
id = "fc1-gan",
num_neurons = 1024,
activation = 'relu',
batch_norm= True,
dropout_rate = 0.7,
regularize = True,
input_params = self.hallucinated.dropout_layers['fc1-data'].params,
verbose = verbose )
self.hallucinated.add_layer ( type = "dot_product",
origin = "fc1-gan",
id = "fc2-gan",
num_neurons = 1024,
activation = 'relu',
batch_norm= True,
dropout_rate = 0.7,
regularize = True,
input_params = self.hallucinated.dropout_layers['fc2-data'].params,
verbose = verbose )
self.hallucinated.add_layer ( type = "classifier",
id = "softmax-inc-hallucinated-gan",
origin = "fc2-gan",
num_classes = self.inc_num_classes,
activation = ('softmax', self.temperature),
input_params = self.hallucinated.dropout_layers \
['softmax-inc-hallucinated-data'].params,
regularize = True,
verbose = verbose )
##########
# This will make the mentor values available to the current network so that we
# can caluclate errors
##########
if self.inc_num_classes > old_w.shape[1]:
# Needed only if mentor and this has different number of classes.
self.hallucinated.add_layer(type = "random",
id = 'zero-targets',
num_neurons = (self.mini_batch_size, \
self.inc_num_classes - \
old_w.shape[1] ),
distribution = 'binomial',
p = 0,
verbose = verbose)
input_shape = [self.mentor.layers['softmax-base-temperature'].output_shape,
self.hallucinated.layers['zero-targets'].output_shape]
# importing a layer from the mentor network.
self.hallucinated.add_layer (type = "tensor",
id = 'merge-import',
input = self.mentor.inference_layers \
['softmax-base-temperature'].output,
input_shape = self.mentor.inference_layers \
['softmax-base-temperature'].output_shape,
verbose = verbose )
if self.inc_num_classes > old_w.shape[1]:
# This layer is a 10 node output of the softmax temperature. Sets up the mentor targets.
self.hallucinated.add_layer (type = "merge",
layer_type = "concatenate",
id = "mentor-target",
origin = ( 'merge-import', 'zero-targets' ),
verbose = verbose
)
mentor_target = 'mentor-target'
else:
mentor_target = 'merge-import'
##########
# objective layers
##########
# This is the regular classifier objective for the incremental net.
self.hallucinated.add_layer ( type = "objective",
id = "obj-inc",
origin = "softmax-inc-hallucinated-data",
verbose = verbose
)
# This is error between the temperature softmax layer and the mentor target.
# This provides the incremental update.
self.hallucinated.add_layer (type = "merge",
id = "obj-temperature",
layer_type = "error",
error = "rmse",
origin = ("softmax-inc-hallucinated-gan", mentor_target),
)
# self.hallucinated.pretty_print()
# draw_network(self.hallucinated.graph, filename = 'hallucinated.png')
self.hallucinated.cook( optimizer = 'optim-inc-hallucinated',
objective_layers = ['obj-inc','obj-temperature'],
objective_weights = [1, 1],
datastream = 'data-inc-hallucinated',
classifier_layer = 'softmax-inc-hallucinated-data',
verbose = verbose
)
def train_hallucinated_inc (self,
save_after_epochs = 1,
lr = (0.05, 0.01, 0.001),
epochs = (20, 20),
verbose = 2):
"""
This method will train the incremental MLP on incremental dataset.
Args:
lr : leanring rates to train with. Default is (0.05, 0.01, 0.001)
epochs: Epochs to train with. Default is (20, 20)
verbose : As usual.
"""
if verbose >=2 :
print (".. Training hallucinated network")
self.hallucinated.train( epochs = epochs,
validate_after_epochs = 1,
visualize_after_epochs = 10,
save_after_epochs = save_after_epochs,
training_accuracy = True,
show_progress = True,
early_terminate = False,
learning_rates = lr,
verbose = verbose)
self.hallucinated.test(verbose = verbose)
if __name__ == '__main__':
pass |
<reponame>gitrymt/qgm<filename>qgm/evaluation.py
import copy
import pandas as pd
import numpy as np
import scipy as sp
from numba import jit
import PIL
from PIL import Image
from . import image
from . import function
def psf_evaluation(image: image, coordinates):
    """Evaluate the point-spread function at the given coordinates.

    Not implemented yet; currently a no-op placeholder.
    # NOTE(review): the annotation shadows the module-level `image` import.
    """
    pass
    # return img_psf
def lattice_geometry_1d_define(Nsite=2*6+1):
    """Build a 1-D lattice-geometry model function for curve fitting.

    Args:
        Nsite: number of lattice sites along the axis (default 13).

    Returns:
        A function ``f(x, *p)`` with ``p = (A, sigmax, a1, x0, C)`` that
        evaluates the sum of ``Nsite`` Gaussians of amplitude ``A`` and
        width ``sigmax``, spaced ``a1`` apart around ``x0``, plus a constant
        background ``C``.
    """
    def lattice_geometory_1d(x, *p):
        A, sigmax, a1, x0, C = p
        # Site indices centred on zero: -(Nsite-1)/2 .. +(Nsite-1)/2.
        offsets = np.array([i - (Nsite - 1) / 2 for i in range(Nsite)])
        # Flatten to a 1-D array of site centres (the loop variable no
        # longer shadows the offsets array, and this mirrors the np.ravel
        # used by the 2-D counterparts).
        xs = np.ravel(offsets * a1 + x0)
        y = np.zeros(x.shape)
        for xc in xs:
            # Zero-offset Gaussian per site; the background is added once.
            y += function.gaussian_1d(x, A, xc, sigmax, 0)
        return y + C
    return lattice_geometory_1d
def lattice_geometory_1d_evaluation(xedges, Iy, range, step=0.1):
    """Evaluate a 1-D lattice geometry fit over a binned profile.

    Not implemented yet; currently a no-op placeholder.
    # NOTE(review): `range` shadows the builtin; kept for caller compatibility.
    """
    pass
def lattice_geometry_2d_define(Nsite=2*6+1):
    """Build a 2-D lattice-geometry model function for surface fitting.

    Args:
        Nsite: number of sites per lattice axis (default 13).

    Returns:
        A function ``f(xy_mesh, *p)`` with
        ``p = (A, sigmax, sigmay, a1, a2, theta1, theta2, x0, y0, C)``:
        a sum of Gaussians placed on an ``Nsite x Nsite`` grid spanned by
        two lattice vectors (lengths ``a1``/``a2``, angles ``theta1``/
        ``theta2``) offset by ``(x0, y0)``, on a constant background ``C``.
    """
    def lattice_geometory_2d(xy_mesh, *p):
        x, y = xy_mesh
        A, sigmax, sigmay, a1, a2, theta1, theta2, x0, y0, C = p
        # Site indices centred on zero, meshed over both lattice axes.
        idx = [[i - (Nsite - 1) / 2] for i in range(Nsite)]
        n1, n2 = np.meshgrid(idx, idx)
        xs = np.ravel(n1 * a1 * np.cos(theta1) + n2 * a2 * np.cos(theta2) + x0)
        ys = np.ravel(n1 * a1 * np.sin(theta1) + n2 * a2 * np.sin(theta2) + y0)
        z = np.zeros(x.shape)
        for xc, yc in zip(xs, ys):
            # One zero-offset Gaussian per site; background added once.
            z += function.gaussian_2d(xy_mesh, A, xc, sigmax, yc, sigmay, 0)
        return z + C
    return lattice_geometory_2d
def lattice_geometry_2d_define_tmp(Nsite=2*6+1):
    """Simplified variant of :func:`lattice_geometry_2d_define`.

    The returned model uses a single isotropic width, no centre offset and
    no constant background; its parameters are
    ``p = (A, sigma, a1, a2, theta1, theta2)``.
    """
    def lattice_geometory_2d(xy_mesh, *p):
        x, y = xy_mesh
        A, sigma, a1, a2, theta1, theta2 = p
        # Site indices centred on zero, meshed over both lattice axes.
        idx = [[i - (Nsite - 1) / 2] for i in range(Nsite)]
        n1, n2 = np.meshgrid(idx, idx)
        xs = np.ravel(n1 * a1 * np.cos(theta1) + n2 * a2 * np.cos(theta2))
        ys = np.ravel(n1 * a1 * np.sin(theta1) + n2 * a2 * np.sin(theta2))
        z = np.zeros(x.shape)
        for xc, yc in zip(xs, ys):
            z += function.gaussian_2d(xy_mesh, A, xc, sigma, yc, sigma, 0)
        return z
    return lattice_geometory_2d
def site_occupation_evaluation(image, threshold):
    """Render an image of the occupied lattice sites.

    Site amplitudes are binarised against ``threshold`` (``>= threshold``
    becomes 1, below becomes 0) and the per-site point-spread-function
    stack is used to project the occupation vector back into image space.

    Args:
        image: object exposing
            ``system.lattice['Lattice sites']['Amplitude']`` (per-site
            amplitudes), ``image_ROI`` (2-D array giving the output shape)
            and ``psfm`` (PSF stack whose last axis indexes sites).
        threshold: amplitude cut deciding whether a site counts as occupied.

    Returns:
        2-D numpy array of shape ``image_ROI.shape`` with one rendered PSF
        per occupied site.
    """
    # Binarise a private copy of the amplitudes as a (n_sites, 1) column.
    # np.matrix is deprecated, so use a plain ndarray and `@` instead.
    amplitudes = np.array(image.system.lattice['Lattice sites']['Amplitude'],
                          dtype=float).reshape(-1, 1)
    occupation = (amplitudes >= threshold).astype(float)
    (im_width, im_height) = image.image_ROI.shape
    psfm = np.copy(image.psfm)
    # Flatten each per-site PSF into one column, then project the
    # occupation vector: occupied sites contribute their PSF, empty don't.
    psfm_flat = np.reshape(psfm, [im_width * im_height, psfm.shape[2]])
    return np.array(np.reshape(psfm_flat @ occupation, [im_width, im_height]))
def fidelity_evaluation(image_1st, image_2nd, threshold):
    """Compare site occupations of two images of the same lattice.

    Each site of the first image is matched to its nearest-neighbour site in
    the second image; pairs further apart than a third of the mean lattice
    constant are discarded. Occupation flags (amplitude thresholded to 0/1)
    are then compared to count atom loss and hopping events.

    Args:
        image_1st: first image; must expose
            ``system.lattice['Lattice sites']`` (with 'X Center', 'Y Center',
            'Amplitude') and ``system.lattice['Lattice 1'/'Lattice 2'].info``.
        image_2nd: second image with the same lattice-site structure.
        threshold: amplitude cut deciding site occupation.

    Returns:
        (dsite, fidelity): per-site DataFrame of matched positions/flags and
        a one-row summary DataFrame with atom counts and loss/hopping rates.
    """
    site1 = image_1st.system.lattice['Lattice sites']
    site2 = image_2nd.system.lattice['Lattice sites']
    x1s = np.array(site1['X Center'])
    y1s = np.array(site1['Y Center'])
    # Binarise occupation in-place on the copies made by np.array.
    flag1s = np.array(site1['Amplitude'])
    flag1s[flag1s < threshold] = 0
    flag1s[flag1s >= threshold] = 1
    x2s_tmp = np.array(site2['X Center'])
    y2s_tmp = np.array(site2['Y Center'])
    flag2s_tmp = np.array(site2['Amplitude'])
    flag2s_tmp[flag2s_tmp < threshold] = 0
    flag2s_tmp[flag2s_tmp >= threshold] = 1
    x2s = []
    y2s = []
    flag2s = []
    diffs = []
    # Match each first-image site to its nearest second-image site.
    # (`n` from enumerate is unused; kept for compatibility.)
    for n, (x1, y1, flag1) in enumerate(zip(x1s, y1s, flag1s)):
        dr = np.sqrt((x2s_tmp - x1)**2 + (y2s_tmp - y1)**2)
        id_site = np.argmin(dr)
        x2s += [x2s_tmp[id_site]]
        y2s += [y2s_tmp[id_site]]
        flag2s += [flag2s_tmp[id_site]]
        # +1: atom present in image 1 but not in its matched site (loss);
        # -1: atom appeared at the matched site (hopping into it).
        diffs += [flag1 - flag2s_tmp[id_site]]
    drs = np.sqrt((x1s-x2s)**2+(y1s-y2s)**2)
    # Mean lattice constant (um) of the two lattice axes.
    a_lat = (image_1st.system.lattice['Lattice 1'].info['Constant (um)'] + image_1st.system.lattice['Lattice 2'].info['Constant (um)'])/2
    # Keep only matches closer than a third of a lattice constant.
    mask = drs < a_lat/3
    x1s = x1s[mask]; y1s = y1s[mask]
    x2s = np.array(x2s)[mask]
    y2s = np.array(y2s)[mask]
    flag1s = np.array(flag1s)[mask]
    flag2s = np.array(flag2s)[mask]
    diffs = np.array(diffs)
    diffs = diffs[mask]
    drs = drs[mask]
    dsite = pd.DataFrame({'x1': x1s, 'y1': y1s, 'x2': x2s, 'y2': y2s, 'dr': drs,
                          'flag1': flag1s, 'flag2': flag2s, 'difference': diffs})
    N1 = np.sum(flag1s)
    N2 = np.sum(flag2s)
    Nloss = N1 - N2
    # Negative differences mark atoms that appeared at a matched site.
    Nhopping = np.sum(diffs<0)
    fidelity = pd.DataFrame({'N1': [N1], 'N2': [N2],
                             'Nloss': [Nloss],
                             'Nhopping': [Nhopping],
                             'Rloss': [Nloss/N1],
                             'Rhopping': [Nhopping/N1],
                             })
    return dsite, fidelity
# Fetch fund trend data from Eastmoney (dfcf) and analyse it.
import requests
import json
import time
# Linear regression
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from sklearn import linear_model
# Render minus signs correctly with a non-default font.
plt.rcParams["axes.unicode_minus"] = False
# Use SimHei so Chinese (CJK) labels render correctly.
plt.rcParams["font.sans-serif"] = ["SimHei"]
# Period codes accepted by the API:
#   m = 1 month, q = 3 months, hy = 6 months, y = 1 year,
#   try = 3 years, fiy = 5 years, sy = year-to-date, se = maximum
#
# Common benchmark index codes:
#   000300 CSI 300        000905 CSI 500
#   000001 SSE Composite  399001 SZSE Component
#   399005 SME Board      399006 ChiNext
# Endpoint returning cumulative-return trend data.
address = "http://api.fund.eastmoney.com/pinzhong/LJSYLZS?fundCode={}&indexcode={}&type={}"
headers = {
    'Host': 'api.fund.eastmoney.com',
    'Referer': 'http://fund.eastmoney.com/'
}
# Fetch fund trend data
def query_fund_trend_data(fund_code="011615", idx_code="000300", type="hy"):
    """Fetch cumulative-return trend data for a fund versus a benchmark.

    Args:
        fund_code: Eastmoney fund code.
        idx_code: benchmark index code (e.g. 000300 = CSI 300).
        type: period code ('m', 'q', 'hy', 'y', ...). Shadows the builtin;
            name kept for caller compatibility.

    Returns:
        The ``Data`` payload of the JSON response, or ``[]`` when absent.
    """
    req_url = address.format(fund_code, idx_code, type)
    # NOTE(review): timeout is in seconds, so 6000 is 100 minutes - confirm
    # whether milliseconds were intended.
    resp = requests.get(req_url, headers=headers, timeout=6000)
    resp_body = json.loads(resp.text)
    print(resp_body)
    return resp_body["Data"] if "Data" in resp_body else []
# CJK-capable font (macOS PingFang) for Chinese chart labels, size 10.
font = FontProperties(fname=r"/System/Library/Fonts/PingFang.ttc", size=10)
def matrix(arr):
    """Wrap each element of *arr* in its own single-element list.

    scikit-learn expects 2-D feature arrays, so ``[1, 2]`` becomes
    ``[[1], [2]]``.

    Args:
        arr: iterable of values.

    Returns:
        A list of one-element lists, one per input element.
    """
    # A comprehension replaces the manual append loop.
    return [[node] for node in arr]
def run_plt(plt, size=None):
    """Prepare a figure for the sequence-vs-return chart.

    Args:
        plt: the pyplot module (or a compatible object).
        size: optional (width, height) figure size in inches.

    Returns:
        The same ``plt`` object with title, axis labels and grid configured.
    """
    plt.figure(figsize=size)
    # Chinese labels need the CJK-capable module-level `font`.
    for setter, text in ((plt.title, '序列与涨幅数据'),
                         (plt.xlabel, '序列'),
                         (plt.ylabel, '涨幅')):
        setter(text, fontproperties=font)
    # Axis limits (x start/end, y start/end) are set later by the caller.
    plt.grid(True)
    return plt
# Analyse one fund's trend data
def analyze_fund_info(fundCode):
    """Fetch one fund's trend data, fit a linear model to its cumulative
    return series, plot fund vs. fitted line, and return a signed score of
    the fund against its benchmark index (see ``evaluate_score``).
    """
    data = query_fund_trend_data(fundCode)
    if len(data) == 0:
        return
    # Series: fund returns, category-average returns, benchmark-index returns.
    fund_list, kind_list, idx_list = data[0]["data"], data[1]["data"], data[2]["data"]
    # Latest cumulative return of each series. (kind_rate currently unused.)
    fund_rate = fund_list[-1][1]
    kind_rate = kind_list[-1][1]
    idx_rate = idx_list[-1][1]
    cnt, x_arr, y_arr, time_arr = 0, [], [], []
    for node in fund_list:
        # Timestamps arrive in milliseconds since epoch.
        timeArray = time.localtime(node[0] / 1000)
        # otherStyleTime = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
        other_time = time.strftime("%Y-%m-%d", timeArray)
        x_arr.append(cnt)
        y_arr.append(node[1])
        time_arr.append(other_time)
        cnt += 1
    # Column-vector form for scikit-learn.
    mat_x = matrix(x_arr)
    mat_y = matrix(y_arr)
    model = linear_model.LinearRegression()
    model.fit(mat_x, mat_y)
    b = model.intercept_[0]
    # Slope of the fitted line.
    k = model.coef_[0][0]
    # Coefficient of determination (R^2) of the fit.
    score = model.score(mat_x, mat_y)
    print("k {} b {}".format(k, b))
    print('score: %.3f' % score)
    text = "k={:.6f}\nscore={:.6f}".format(k, score)
    # Model predictions over the observed x values.
    model_x = mat_x
    model_y = model.predict(model_x)
    # Chart bounds with a small margin around the data.
    x_min, x_max, y_min, y_max = min(x_arr) - 2, max(x_arr) + 2, min(y_arr) - 2, max(y_arr) + 2
    # Apply the axis limits.
    plt.axis([x_min, x_max, y_min, y_max])
    # Plot the fund series.
    plt.plot(mat_x, mat_y, color="darkblue", linewidth=1, linestyle='--', marker='+', label="fund")
    plt.plot(model_x, model_y, 'g-', label="linear")
    # Annotate slope and R^2 inside the chart.
    plt.text(abs(x_max * 0.4), abs(y_max * 0.9), text)
    # Add the legend.
    plt.legend()
    plt.show()
    return evaluate_score(fund_rate, idx_rate)
# Fund scoring rule
def evaluate_score(rate, level):
    """Score a fund's performance against its benchmark.

    The magnitude grows with ``|rate - level|``::

        |diff| <= 1         -> 0   (tracks the benchmark)
        1  < |diff| <= 5    -> 2
        5  < |diff| <= 10   -> 3
        10 < |diff| <  20   -> 4
        20 <= |diff| < 40   -> 5
        |diff| >= 40        -> 6

    The sign of the result follows the sign of the difference; a
    non-positive difference yields the negated magnitude.

    Args:
        rate: the fund's return.
        level: the benchmark's return.

    Returns:
        Signed integer score.
    """
    diff = rate - level
    gap = abs(diff)
    if gap >= 40:
        magnitude = 6
    elif gap >= 20:
        magnitude = 5
    elif gap > 10:
        magnitude = 4
    elif gap > 5:
        magnitude = 3
    elif gap > 1:
        magnitude = 2
    else:
        magnitude = 0
    return magnitude if diff > 0 else -magnitude
if __name__ == '__main__':
    # Other candidate fund codes: 008647, 515030
    fund_code_list = ["008647"]
    for node in fund_code_list:
        analyze_fund_info(node)
|
<gh_stars>100-1000
"""Proof pruning library.
The purpose of this library is to optimize proofs. Currently we
minimize the number of tactic application parameters in oder to generate
better training data (with minimum number of tactic parameters).
"""
from __future__ import absolute_import
from __future__ import division
# Import Type Annotations
from __future__ import print_function
import time
import tensorflow as tf
from typing import List, Text
from deepmath.deephol.public import proof_assistant
from deepmath.deephol import deephol_pb2
from deepmath.deephol import prover_util
from deepmath.proof_assistant import proof_assistant_pb2
from deepmath.public import error
# Bounds on the number of hard-negative theorem parameters recorded per
# pruned tactic application (see ParameterPruning.prune_tactic_application).
MIN_HARD_NEGATIVES = 5
MAX_HARD_NEGATIVES = 10
def _create_request(goal: proof_assistant_pb2.Theorem, tactic: Text,
                    params: List[proof_assistant_pb2.Theorem]
                   ) -> proof_assistant_pb2.ApplyTacticRequest:
  """Build an ApplyTacticRequest for `tactic` applied with theorem `params`.

  The parameter list is rendered as '[ THM <fp> ; THM <fp> ... ]'.
  # NOTE(review): the .replace(' ', ' ') below is a no-op as written; it was
  # presumably meant to collapse the double space produced when params is
  # empty ('tac [  ]') - confirm against the repository source.
  """
  tactic = ('%s [ %s ]' % (tactic, ' ; '.join(
      ['THM %d' % thm.fingerprint for thm in params]))).replace(' ', ' ')
  return proof_assistant_pb2.ApplyTacticRequest(
      goal=prover_util.theorem_to_goal_proto(goal), tactic=tactic)
def _matches_subgoal(goal: proof_assistant_pb2.Theorem,
                     thm: proof_assistant_pb2.Theorem):
  """True when goal and thm have the same hypothesis set and conclusion."""
  same_hypotheses = set(goal.hypotheses) == set(thm.hypotheses)
  return same_hypotheses and goal.conclusion == thm.conclusion
class ParameterPruning(object):
  """Class to do parameter pruning on proof nodes."""

  def __init__(self,
               theorem_db: proof_assistant_pb2.TheoremDatabase,
               hol_wrapper=None):
    """Set up the prover connection used for re-checking tactics.

    Args:
      theorem_db: Theorem database to register with a newly created proof
        assistant; ignored when hol_wrapper is given.
      hol_wrapper: Optional pre-built prover wrapper to reuse.
    """
    if hol_wrapper and theorem_db:
      tf.logging.warning(
          'theorem_db provided will be ignored as hol_wrapper provided.')
    self.hol_wrapper = hol_wrapper
    if not self.hol_wrapper:
      # No wrapper supplied: start a proof assistant and load the theorems.
      self.hol_wrapper = proof_assistant.ProofAssistant()
      for theorem in theorem_db.theorems:
        self.hol_wrapper.RegisterTheorem(
            proof_assistant_pb2.RegisterTheoremRequest(theorem=theorem))
    # Set once prover communication fails; disables all further pruning.
    self.communication_failed = False

  def prune_tactic_application(self, goal: proof_assistant_pb2.Theorem,
                               tapp: deephol_pb2.TacticApplication):
    """Parameter pruning for a single tactic application.

    Each theorem parameter is tentatively removed (from last to first) and
    the tactic is re-applied; a parameter is kept only if dropping it
    changes the resulting subgoals. Removable parameters found after at
    least one necessary one are recorded as hard negatives.

    Args:
      goal: Goal of the ProofNode to which the tactic application belongs.
      tapp: The tactic application to be pruned.
    """
    if self.communication_failed:
      tf.logging.error('Communication with prover failed. Not pruning...')
      return
    tactic = tapp.tactic
    parameters = tapp.parameters
    if not parameters:
      return
    # Only single THEOREM_LIST parameters are supported.
    assert len(parameters) == 1
    param = parameters[0]
    if param.parameter_type != deephol_pb2.Tactic.THEOREM_LIST:
      return
    thms = list(param.theorems)
    if not thms:
      return
    # Walk the theorem list backwards, testing each theorem's necessity.
    index = len(thms) - 1
    tactic = tapp.tactic
    time_spent = tapp.time_spent
    false_positives = []
    other_negatives = []
    found_true_positive = False
    while index >= 0:
      # Tentatively remove the theorem at `index` and re-run the tactic.
      thm = thms.pop(index)
      request = _create_request(goal, str(tactic), thms)
      start_time = time.time()
      response = proof_assistant_pb2.ApplyTacticResponse()
      try:
        response = self.hol_wrapper.ApplyTactic(request)
        elapsed_msecs = int((time.time() - start_time) * 1000.0 + 0.5)
        time_spent = elapsed_msecs
      except error.StatusNotOk as exception:
        tf.logging.error(exception)
        elapsed_msecs = int((time.time() - start_time) * 1000.0 + 0.5)
        # A communication breakdown is unrecoverable: stop pruning entirely.
        if exception.message.startswith(
            'Communication') and exception.message.endswith('failed.'):
          tf.logging.error('Communication with prover failed. Not pruning...')
          self.communication_failed = True
          return
      if response.HasField('error'):
        # Tactic fails without this theorem: it is necessary - put it back.
        thms.insert(index, thm)
        found_true_positive = True
        index -= 1
        continue
      assert response.HasField('goals'), 'response: %s' % response
      # Compare the new subgoals with the originally recorded ones.
      new_subgoals = list(response.goals.goals)
      no_match = False
      if len(new_subgoals) == len(tapp.subgoals):
        for i, sg in enumerate(new_subgoals):
          if not _matches_subgoal(sg, tapp.subgoals[i]):
            no_match = True
            break
      else:
        no_match = True
      if no_match:
        # Different outcome without this theorem: keep it.
        thms.insert(index, thm)
        found_true_positive = True
      else:
        # Same outcome: the theorem is unnecessary. Theorems dropped after a
        # necessary one was seen make the best "hard" negatives.
        if found_true_positive:
          false_positives.append(thm)
        else:
          other_negatives.append(thm)
        time_spent = elapsed_msecs
      index -= 1
    # Rewrite the parameter list with only the necessary theorems.
    del tapp.parameters[0].theorems[:]
    tapp.parameters[0].theorems.extend(thms)
    tapp.parameters[0].hard_negative_theorems.extend(
        false_positives[:MAX_HARD_NEGATIVES])
    # Top up with other negatives (most recently dropped first) if needed.
    if len(false_positives) < MIN_HARD_NEGATIVES:
      other_negatives.reverse()
      tapp.parameters[0].hard_negative_theorems.extend(
          other_negatives[:(MIN_HARD_NEGATIVES - len(false_positives))])
    tapp.time_spent = time_spent

  def prune_tactic_applications(self, proof_node: deephol_pb2.ProofNode):
    """Prune every successful tactic application of `proof_node`."""
    for proof in proof_node.proofs:
      if proof.result == deephol_pb2.TacticApplication.SUCCESS:
        self.prune_tactic_application(proof_node.goal, proof)

  def prune_closed_tactic_applications(self, proof_node: deephol_pb2.ProofNode):
    """Prune only the closed (proof-completing) tactic applications."""
    for proof in proof_node.proofs:
      if proof.closed:
        # Closed applications must have succeeded.
        assert proof.result == deephol_pb2.TacticApplication.SUCCESS
        self.prune_tactic_application(proof_node.goal, proof)
|
import datetime
from django.shortcuts import render, get_object_or_404, redirect
from markdownx.utils import markdownify
from .forms import EventForm
from .forms import MemberForm
from .forms import RegionForm
from .forms import StoryForm
from .forms import TrackForm
from .models import Event
from .models import Member
from .models import Region
from .models import Story
from .models import Track
# Index page
def index(request):
    """Landing page: all tracks/regions, nearest past and future events,
    and the five most recent stories."""
    # NOTE(review): naive local time - confirm against the project's USE_TZ.
    now = datetime.datetime.now()
    context = {
        'regions': Region.objects.all(),
        'tracks': Track.objects.all(),
        'last_event': Event.objects.filter(end_date__lt=now).order_by('-end_date').first(),
        'upcoming_event': Event.objects.filter(start_date__gt=now).order_by('start_date').first(),
        'last_stories': Story.objects.all().order_by('-create_date')[:5],
    }
    return render(request, 'ggit_platform/index.html', context)
# Admin index page
def admin_index(request):
    """Admin landing page: forward straight to the region list."""
    return redirect('region_list')
def track_detail(request, id):
    """Public track page with the long description rendered from Markdown."""
    obj = get_object_or_404(Track, id=id)
    obj.markdown = markdownify(obj.long_description)
    return render(request, 'track/detail.html', {'track': obj})
# Track views
def track_list(request):
    """Admin list of all tracks."""
    return render(request, 'track/admin-list.html',
                  {'tracks': Track.objects.all()})
def track_new(request):
    """Create a track from the admin form; redirect to the list on success."""
    form = None
    if request.method == 'GET':
        form = TrackForm()
    elif request.method == 'POST':
        form = TrackForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('track_list')
    return render(request, 'track/admin-edit.html', {'form': form})
def track_edit(request, id):
    """Edit an existing track; save on valid POST."""
    track = get_object_or_404(Track, id=id)
    form = None
    if request.method == 'GET':
        form = TrackForm(instance=track)
    elif request.method == 'POST':
        form = TrackForm(request.POST, instance=track)
        if form.is_valid():
            form.save()
            return redirect('track_list')
    return render(request, 'track/admin-edit.html', {'form': form})
def track_delete(request, id):
    """Delete the track on POST; always return to the admin list."""
    doomed = get_object_or_404(Track, id=id)
    if request.method == 'POST':
        doomed.delete()
    return redirect('track_list')
# Region views
def region_list(request):
    """Admin list of all regions."""
    return render(request, 'region/admin-list.html',
                  {'regions': Region.objects.all()})
def region_detail(request, id):
    """Public region page with its latest events, stories and members."""
    region = get_object_or_404(Region, id=id)
    context = {
        'region': region,
        'events': Event.objects.filter(region=region).order_by('-start_date')[:3],
        'stories': Story.objects.filter(region=region).order_by('-create_date')[:3],
        'members': Member.objects.filter(region=region),
    }
    return render(request, 'region/detail.html', context)
def region_admin_detail(request, id):
    """Admin detail page for a single region."""
    return render(request, 'region/admin-detail.html',
                  {'region': get_object_or_404(Region, id=id)})
def region_new(request):
    """Create a region from the admin form; redirect to the list on success."""
    form = None
    if request.method == 'GET':
        form = RegionForm()
    elif request.method == 'POST':
        form = RegionForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('region_list')
    return render(request, 'region/admin-edit.html', {'form': form})
def region_edit(request, id):
    """Edit an existing region; save on valid POST."""
    region = get_object_or_404(Region, id=id)
    form = None
    if request.method == 'GET':
        form = RegionForm(instance=region)
    elif request.method == 'POST':
        form = RegionForm(request.POST, instance=region)
        if form.is_valid():
            form.save()
            return redirect('region_list')
    # Unlike the other edit views, the template also needs the region itself.
    return render(request, 'region/admin-edit.html',
                  {'form': form, 'region': region})
def region_delete(request, id):
    """Delete the region on POST; always return to the admin list."""
    doomed = get_object_or_404(Region, id=id)
    if request.method == 'POST':
        doomed.delete()
    return redirect('region_list')
# Member views
def member_list(request):
    """Admin list of all members."""
    return render(request, 'member/admin-list.html',
                  {'members': Member.objects.all()})
def member_new(request):
    """Create a member from the admin form; redirect to the list on success."""
    form = None
    if request.method == 'GET':
        form = MemberForm()
    elif request.method == 'POST':
        form = MemberForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('member_list')
    return render(request, 'member/admin-edit.html', {'form': form})
def member_edit(request, id):
    """Edit an existing member; save on valid POST."""
    member = get_object_or_404(Member, id=id)
    form = None
    if request.method == 'GET':
        form = MemberForm(instance=member)
    elif request.method == 'POST':
        form = MemberForm(request.POST, instance=member)
        if form.is_valid():
            form.save()
            return redirect('member_list')
    return render(request, 'member/admin-edit.html', {'form': form})
def member_delete(request, id):
    """Delete the member on POST; always return to the admin list."""
    doomed = get_object_or_404(Member, id=id)
    if request.method == 'POST':
        doomed.delete()
    return redirect('member_list')
# Event views
def event_detail(request, id):
    """Public event page with the long description rendered from Markdown."""
    obj = get_object_or_404(Event, id=id)
    obj.markdown = markdownify(obj.long_description)
    return render(request, 'event/detail.html', {'event': obj})
def admin_event_list(request):
    """Admin list of all events."""
    return render(request, 'event/admin-list.html',
                  {'events': Event.objects.all()})
def admin_event_new(request):
    """Create an event from the admin form; redirect to the list on success."""
    form = None
    if request.method == 'GET':
        form = EventForm()
    elif request.method == 'POST':
        form = EventForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('event_list')
    return render(request, 'event/admin-edit.html', {'form': form})
def event_edit(request, id):
    """Edit an existing event; save on valid POST."""
    event = get_object_or_404(Event, id=id)
    form = None
    if request.method == 'GET':
        form = EventForm(instance=event)
    elif request.method == 'POST':
        form = EventForm(request.POST, instance=event)
        if form.is_valid():
            form.save()
            return redirect('event_list')
    return render(request, 'event/admin-edit.html', {'form': form})
def event_delete(request, id):
    """Delete the event on POST; always return to the event list."""
    doomed = get_object_or_404(Event, id=id)
    if request.method == 'POST':
        doomed.delete()
    return redirect('event_list')
def event_list(request):
    """Public list of all events."""
    return render(request, 'event/list.html',
                  {'events': Event.objects.all()})
# Story views
def story_list(request):
    """Admin list of all stories."""
    return render(request, 'story/admin-list.html',
                  {'stories': Story.objects.all()})
def story_new(request):
    """Create a story from the admin form; redirect to the list on success."""
    form = None
    if request.method == 'GET':
        form = StoryForm()
    elif request.method == 'POST':
        form = StoryForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('story_list')
    return render(request, 'story/admin-edit.html', {'form': form})
def story_edit(request, id):
    """Edit an existing story; save on valid POST."""
    story = get_object_or_404(Story, id=id)
    form = None
    if request.method == 'GET':
        form = StoryForm(instance=story)
    elif request.method == 'POST':
        form = StoryForm(request.POST, instance=story)
        if form.is_valid():
            form.save()
            return redirect('story_list')
    return render(request, 'story/admin-edit.html', {'form': form})
def story_delete(request, id):
    """Delete the story on POST; always return to the admin list."""
    doomed = get_object_or_404(Story, id=id)
    if request.method == 'POST':
        doomed.delete()
    return redirect('story_list')
def story_detail(request, id):
    """Public story page with the long description rendered from Markdown."""
    obj = get_object_or_404(Story, id=id)
    obj.markdown = markdownify(obj.long_description)
    return render(request, 'story/detail.html', {'story': obj})
|
import json
import requests
from tqdm import tqdm
import os
def get_file_from_modac(fname, origin):
    """ Downloads a file from the "Model and Data Clearning House" (MoDAC)
    repository. Users should already have a MoDAC account to download the data.
    Accounts can be created on modac.cancer.gov

    Parameters
    ----------
    fname : string
        path on disk to save the file
    origin : string
        original MoDAC URL of the file

    Returns
    ----------
    string
        Path to the downloaded file

    Raises
    ------
    Exception
        On a non-200 response or an incomplete download.
    """
    print('Downloading data from modac.cancer.gov, make sure you have an account first.')
    total_size_in_bytes = get_dataObject_modac_filesize(origin)
    # Only the token is needed here; the username is ignored.
    _, modac_token = authenticate_modac()
    headers = {}
    headers["Content-Type"] = "application/json"
    headers["Authorization"] = "Bearer {0}".format(modac_token)
    post_url = origin + '/download'
    print("Downloading: " + post_url + " ...")
    # The endpoint expects a JSON POST body, even if empty.
    response = requests.post(post_url, data=json.dumps({}), headers=headers, stream=True)
    if response.status_code != 200:
        print("Error downloading from modac.cancer.gov")
        raise Exception("Response code: {0}, Response message: {1}".format(response.status_code, response.text))
    block_size = 1024  # 1 Kibibyte
    progress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True)
    with open(fname, 'wb') as file:
        # `chunk` replaces the original loop variable `data`, which shadowed
        # the request payload above.
        for chunk in response.iter_content(block_size):
            progress_bar.update(len(chunk))
            file.write(chunk)
    progress_bar.close()
    # tqdm's counter doubles as a byte count for the completeness check.
    if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes:
        raise Exception("ERROR, something went wrong while downloading ", post_url)
    print('Saved file to: ' + fname)
    return fname
def register_file_to_modac(file_path, metadata, destination_path):
    """ Register a file in the "Model and Data Clearning House" (MoDAC)

    Parameters
    ----------
    file_path : string
        path on disk for the file to be uploaded
    metadata: dictionary
        dictionary of attribute/value pairs of metadata to associate with
        the file in MoDaC
    destination_path : string
        The path on MoDaC in form of collection/filename

    Returns
    ----------
    integer
        The returned code from the PUT request

    Raises
    ------
    Exception
        On a non-200 response from the registration endpoint.
    """
    print('Registering the file {0} at MoDaC location:{1}'.format(file_path, destination_path))
    register_url = "https://modac.cancer.gov/api/v2/dataObject/" + destination_path
    # MoDaC expects metadata as a list of {"attribute": ..., "value": ...}.
    formated_metadata = [{"attribute": attribute, "value": value}
                         for attribute, value in metadata.items()]
    metadata_dict = {"metadataEntries": formated_metadata}
    modac_user, modac_token = authenticate_modac()
    headers = {}
    headers["Authorization"] = "Bearer {0}".format(modac_token)
    # Based on: https://www.tutorialspoint.com/requests/requests_file_upload.htm
    # Open the upload stream in a context manager so the handle is always
    # closed (the original leaked the open file object).
    with open(file_path, 'rb') as data_object:
        files = {
            'dataObjectRegistration': ('attributes', json.dumps(metadata_dict), "application/json"),
            'dataObject': (file_path, data_object),
        }
        response = requests.put(register_url, headers=headers, files=files)
    if response.status_code != 200:
        print(response.headers)
        print(response.text)
        print("Error registering file to modac.cancer.gov")
        raise Exception("Response code: {0}, Response message: {1}".format(response.status_code, response.text))
    print(response.text, response.status_code)
    return response.status_code
def authenticate_modac(generate_token=False):
    """
    Authenticate a user on modac.cancer.gov.

    Credentials are cached in ~/.nci-modac/credentials.json; unless
    generate_token is True (or no cache exists yet), the cached
    user/token pair is returned without contacting the server.

    Parameters
    ----------
    generate_token : Bool
        Either generate a new token, or read saved token if it exists.

    Returns
    ----------
    tuple(string,string)
        Tuple with the modac credentials (user, token).

    Raises
    ------
    Exception
        If the authentication request is rejected by the server.
    """
    from os.path import expanduser
    home = expanduser("~")
    modac_token_dir = os.path.abspath(os.path.join(home, ".nci-modac"))
    modac_token_file = "credentials.json"
    user_attr = "modac_user"
    token_attr = "modac_token"
    modac_token_path = os.path.join(modac_token_dir, modac_token_file)
    credentials_dic = {}
    if not generate_token and os.path.exists(modac_token_path):
        # Reuse the cached credentials.
        # NOTE(review): a malformed/partial cache file will still raise
        # KeyError at the return below — delete ~/.nci-modac to recover.
        with open(modac_token_path) as f:
            credentials_dic = json.load(f)
    else:
        # Get credentials interactively (password is never echoed or stored).
        modac_user = input("MoDaC Username: ")
        import getpass
        modac_pass = getpass.getpass("MoDaC Password: ")
        # Generate token
        auth = (modac_user, modac_pass)
        auth_url = 'https://modac.cancer.gov/api/authenticate'
        print("Authenticating " + modac_user + " ...")
        response = requests.get(auth_url, auth=auth, stream=True)
        if response.status_code != 200:
            # BUG FIX: the username was passed to print() as a second
            # argument instead of being formatted into the message.
            print("Error authenticating modac user:{0}".format(modac_user))
            raise Exception("Response code: {0}, Response message: {1}".format(response.status_code, response.text))
        else:
            token = response.text
            if not os.path.exists(modac_token_path):
                save_question = "Save MoDaC token in {0}".format(modac_token_path)
                save_token = query_yes_no(save_question)
            else:
                save_token = True
            # BUG FIX: record the credentials for this session regardless of
            # persistence; previously, declining to save caused a KeyError
            # at the return statement below.
            credentials_dic[user_attr] = modac_user
            credentials_dic[token_attr] = token
            if save_token:
                if not os.path.isdir(modac_token_dir):
                    os.mkdir(modac_token_dir)
                with open(modac_token_path, "w") as outfile:
                    json.dump(credentials_dic, outfile, indent=4)
    return (credentials_dic[user_attr], credentials_dic[token_attr])
def query_yes_no(question, default="yes"):
    """
    Ask a yes/no question via input() and return the answer.

    Parameters
    ----------
    question: string
        String that is presented to the user.
    default: string or None
        The presumed answer if the user just hits <Enter>: "yes", "no",
        or None (meaning an explicit answer is required).
        (Docstring fix: this was previously mislabeled as a boolean, and
        the summary referred to Python 2's raw_input().)

    Returns
    ----------
    boolean
        True for "yes" or False for "no".

    Raises
    ------
    ValueError
        If default is not "yes", "no", or None.
    """
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    # Capitalize the defaulted option in the prompt.
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    # Re-prompt until a recognizable answer (or empty + default) is given.
    while True:
        print(question + prompt)
        choice = input().lower()
        if default is not None and choice == '':
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            print("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
def get_dataObject_modac_filesize(data_object_path):
    """
    Return the file size in bytes for a modac file.

    Parameters
    ----------
    data_object_path : string
        The path of the file on MoDAC

    Returns
    ----------
    integer or None
        File size in bytes, or None when the metadata lacks the field.
    """
    meta = get_dataObject_modac_meta(data_object_path)
    size = meta.get("source_file_size")
    return None if size is None else int(size)
def get_dataObject_modac_md5sum(data_object_path):
    """
    Return the md5sum for a modac file.

    Parameters
    ----------
    data_object_path : string
        The path of the file on MoDAC

    Returns
    ----------
    string or None
        The md5sum of the file, or None when the metadata lacks the field.
    """
    meta = get_dataObject_modac_meta(data_object_path)
    return meta.get("checksum")
def get_dataObject_modac_meta(data_object_path):
    """
    Return the self metadata values for a file (data_object)
    Parameters
    ----------
    data_object_path : string
        The path of the file on MoDAC
    Returns
    ----------
    dictionary
        Dictonary of all metadata for the file in MoDAC
    """
    # data_object_path = encode_path(data_object_path)
    modac_user, modac_token = authenticate_modac()
    headers = {}
    headers["Authorization"] = "Bearer {0}".format(modac_token)
    # NOTE(review): the GET is issued against data_object_path verbatim, so
    # callers presumably must pass a full https://modac.cancer.gov/... URL,
    # not just the collection path — confirm against call sites.
    get_response = requests.get(data_object_path, headers=headers)
    if get_response.status_code != 200:
        print("Error downloading from modac.cancer.gov", data_object_path)
        raise Exception("Response code: {0}, Response message: {1}".format(get_response.status_code, get_response.text))
    # Flatten the system metadata entries into a plain attribute -> value dict.
    metadata_dic = json.loads(get_response.text)
    self_metadata = metadata_dic['metadataEntries']['selfMetadataEntries']['systemMetadataEntries']
    self_dic = {}
    for pair in self_metadata:
        self_dic[pair['attribute']] = pair['value']
    return self_dic
if __name__ == '__main__':
    # CLI entry point: `-a/--authenticate` forces generation of a fresh MoDaC token.
    import argparse

    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-a', '--authenticate', action='store_true',
                            help='Authenticate MoDaC user and create token')
    parsed = arg_parser.parse_args()
    if parsed.authenticate:
        authenticate_modac(generate_token=True)
|
<gh_stars>0
# -*- coding: utf-8 -*-
import sys
import numpy as np
from numpy import pi, sqrt, exp, sin, cos, tan, log, log10
import scipy as sp
import scipy.integrate
import h5py
from aux import *
##
## Command line options
##
if len(sys.argv) == 5:
    raw_alpha, raw_beta, raw_t, raw_a = sys.argv[1:5]
    ## Alpha parameter
    alpha = float(raw_alpha)
    print("alpha = %g" % alpha)
    ## Turbulent viscosity power index
    beta = float(raw_beta)
    print("beta = %g" % beta)
    ## Initial time, converted from years to seconds
    t_ini = float(raw_t) * const.yr
    print("t_ini = %g [yr]" % (t_ini/const.yr))
    ## Initial orbit, converted from AU to cm
    a_ini = float(raw_a) * const.AU
    print("a_ini = %g [AU]" % (a_ini/const.AU))
else:
    ## Wrong argument count: show usage and bail out.
    print("Use\n\t%s <alpha> <beta> <t_ini [yr]> <a_ini [AU]>" % sys.argv[0])
    print("Example:\n\t%s 1e-3 1.5 1e9 0.5" % sys.argv[0])
    sys.exit(1)
##
## Parameters connected to the star and planet
## (t_age, a_ref, M_s, T, Omega_K, dotM, cs, const come from `aux`
##  — NOTE(review): confirm their definitions/units against aux.py)
##
## Final age
t_fin = t_age
## Final orbit
#a_fin = 0.01*const.AU
a_fin = a_ref
#a_fin = 0.1*const.AU
## Initial planetary mass
M_p_ini = const.M_jup
##
## Parameters connected to the gas
##
## Inner radius of the disk
## (the (M_p/3M_s)^(1/3) factor is presumably the Hill-radius offset — confirm)
r_0_ini = ( 1 + (M_p_ini/(3*M_s))**(1/3) ) * a_ini
print("ini: r_0 = %.2e [cm] = %.2e [AU] = %.2e a" % (r_0_ini, r_0_ini/const.AU, r_0_ini/a_ini))
## Keplerian orbital frequency of the planet
Omega_0_ini = Omega_K(a_ini)
print("ini: Omega = %.2e [rad/s] = 2pi/(%.2e [yr])" % (Omega_0_ini, 2*pi/Omega_0_ini/const.yr))
## Mass loss rate at the given orbit
dotM_ini = dotM(t_ini, a_ini)
print("ini: dotM = %.2e [g/s] = %.2e M_sol/[yr]" % (dotM_ini, dotM_ini/(const.M_sol/const.yr)))
## Surface-density normalization at the inner radius
Sigma_0_ini = dotM_ini/(2*pi*r_0_ini**2*Omega_0_ini)
print("ini: Sigma_0 = %.2e [g/cm^2]" % Sigma_0_ini)
## Disk semi-thickness at its inner radius
H_0_ini = cs(T)/Omega_0_ini
print("ini: H_0 = %.2e [cm] = %.2e [AU] = %.2e a" % (H_0_ini, H_0_ini/const.AU, H_0_ini/a_ini))
## Relative disk semi-thickness
h_ini = H_0_ini/r_0_ini
print("ini: h = %g" % h_ini)
##
## The model
##
## Mass flux
def flux(q, mul, h, xi):
    """Mass flux at every interface of the radial grid.

    Parameters: q -- state vector (M_p, a, sigma[...]); mul -- torque
    amplitude factor; h -- relative disk semi-thickness; xi -- a/r_0.
    Relies on the module-level grid arrays x_, x, dx_ and on alpha, beta.
    Returns an array of interface fluxes: f[0] == 1 is the inner boundary
    condition, and the last entry is overwritten with the outer boundary
    condition below (the [None] placeholder is never used as a value).
    """
    sigma = q[2:]
    ## Torque (evaluated on the full interface grid x_, then sliced)
    tau = mul * xi*x_**0.5 / (x_ - xi)**2 * (x_**1.5 - xi**1.5) / (x_**0.5 - xi**0.5)**3
    ## Diffusion coefficient, power-law in radius
    n = alpha*h**2 * np.asarray(x)**beta
    ## The flux: diffusion term (finite difference of sqrt(x)*n*sigma)
    ## plus advection by the torque at each interior interface
    f = np.concatenate(([1], \
        - 3 * ( sqrt(x[1:])*n[1:]*sigma[1:] - sqrt(x[:-1])*n[:-1]*sigma[:-1] ) \
            / (sqrt(x_[1:-1])*dx_[1:-1])
        + tau[1:-1] * 0.5*(sigma[1:] + sigma[:-1]) , \
        [None]))
    ## Right boundary condition
    f[-1] = x_[-2]*f[-2]/x_[-1]
    return f
## R.h.s.
def dotq(t, q):
    """Right-hand side for solve_ivp: time derivative of (M_p, a, sigma[...]).

    t is measured from t_ini (the dotM/dlogdotM calls shift by t_ini).
    Relies on the module-level grid (x, x_, dx) and the aux helpers
    Omega_K, dotM, dlogdotM, cs, M_s, T.
    NOTE(review): sp.integrate.simps is deprecated/removed in modern SciPy
    (use scipy.integrate.simpson) — confirm the pinned SciPy version.
    """
    M_p = q[0]
    a = q[1]
    sigma = q[2:]
    ## Inner disk radius for the current planet mass and orbit
    r_0 = ( 1 + (M_p/(3*M_s))**(1/3) ) * a
    xi = a/r_0
    Omega_0 = Omega_K(r_0)
    ## Orbital torque factor (see 1980ApJ...241..425G and 2006RPPh...69..119P)
    C_0 = 2.82
    ##
    ## Auxes
    ## Planet loses the mass that feeds the disk
    dotM_p = - dotM(t_ini + t, a)
    ## Orbital migration rate: integral of the torque density over the disk
    dota_int = x / (x - xi)**2 * (x**1.5 - xi**1.5) / (x**0.5 - xi**0.5)**3 * sigma
    dota = sp.integrate.simps(dota_int, x, even='avg')
    dota *= 2*a * C_0/pi * M_p*dotM_p/M_s**2 * sqrt(xi)
    ##
    ## Mass transfer
    mul = 2*C_0/pi * (M_p/M_s)**2
    h = cs(T)/Omega_0 / r_0
    f = flux(q, mul, h, xi)
    ## Source term from the time/orbit dependence of the normalization
    dlogdotM_p = dlogdotM(t_ini + t, a)
    Q = dlogdotM_p[0] + (dlogdotM_p[1] - 0.5/a) * dota
    ## Conservative update of sigma from the interface fluxes
    dotsigma = - Omega_0 * (x_[1:]*f[1:] - x_[:-1]*f[:-1]) / (x*dx) - Q*sigma
    return np.concatenate(([dotM_p, dota], dotsigma))
## Event marker
def stop_marker(t, q):
    """Zero-crossing event: fires when the orbit q[1] has shrunk to a_fin."""
    current_orbit = q[1]
    return current_orbit - a_fin
## Halt the integration at the event, and only on a decreasing crossing.
stop_marker.terminal = True
stop_marker.direction = -1
## Positions of the nodes (log-spaced interfaces, in units of r_0_ini)
#x_ = np.logspace(0, log10(10000*const.AU/r_0_ini), 1601)
x_ = np.logspace(0, log10(1e4*const.AU/r_0_ini), 1001)
print("%g <= x_ <= %g" % (x_[0], x_[-1]))
## Positions of the centers
x = 0.5*(x_[1:] + x_[:-1])
## Grid steps
dx = x_[1:] - x_[:-1]
## Centered steps; the boundary entries are never used, hence the None sentinels
dx_ = np.concatenate(([None], 0.5*(x_[2:] - x_[:-2]), [None]))
## Jacobian sparsity matrix for the implicit (BDF) solver;
## the state vector is (M_p, a, sigma[0..n-1]).
## 'import scipy.integrate' alone does not guarantee that the sparse
## subpackage is loaded, so import it explicitly before using sp.sparse.
import scipy.sparse
jac_sparsity = np.zeros((2+x.size, 2+x.size))
## dM/da
jac_sparsity[0,1] = 1
## da/dsigma
jac_sparsity[1,2:] = 1
## dsigma/dM
jac_sparsity[2:,0] = 1
## dsigma/da
jac_sparsity[2:,1] = 1
## dsigma/dsigma: tridiagonal coupling between neighboring cells
jac_sparsity[2:,2:] = sp.sparse.spdiags(np.ones((3, x.size)), [-1, 0, 1], x.size, x.size).toarray()
## Computational time grid (it's expandable)
_t = np.empty(1)
## Field's grid (it's expandable)
q = np.empty((1, 2+x.size))
## Times to snapshot
tmp = const.yr * np.array([1e7, 1e8, 1e9, 3e9, 4.1e9])
t_ss = np.concatenate([tmp[tmp > t_ini], [t_fin]])
## Indexes in the 't' array corresponding to the snapshot times
## BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
## the builtin int is the documented replacement.
j_ss = np.zeros_like(t_ss, dtype=int)
## Initial state
q[0,0] = M_p_ini
q[0,1] = a_ini
q[0,2:] = np.zeros_like(x)
## Initial time step
dt_ini = 1/Omega_0_ini
print("ini: dt_ini = %.2e [yr]" % (dt_ini/const.yr))
## Logarithmic time step
dlogt = 0.025
## Solve the model: advance with logarithmically growing steps,
## snapshotting at the times in t_ss and stopping at a_fin or t_fin.
print("Compute mass transfer...")
## Initial time point
_t[0] = 0
## Index of the current time point
j = 0
## Index of the current snapshot time point
jt_ss = 0
## Flag for saving
to_save = False
while True:
    print("%d: t_ini + %e [yr] = %e [yr]" % (j, _t[j]/const.yr, (t_ini + _t[j])/const.yr))
    ## Get the end time point for the current time step
    _t_next = dt_ini * 10**(dlogt*j)
    ## If it is the time to save? (clamp the step to the snapshot time)
    if t_ini + _t_next >= t_ss[jt_ss]:
        _t_next = t_ss[jt_ss] - t_ini
        to_save = True
    ## Expand the time grid to the next time point
    _t = np.append(_t, [_t_next])
    ## Expand the field grid to the next time point
    q = np.append(q, [np.empty_like(q[0])], axis=0)
    ## Advance to the next time point (implicit BDF, using the sparsity
    ## pattern built above; stop_marker terminates when a reaches a_fin)
    sol = sp.integrate.solve_ivp(dotq, (_t[j], _t[j+1]), q[j], t_eval=[_t[j], _t[j+1]],
                                 method='BDF', events=stop_marker, dense_output=True,
                                 jac_sparsity=jac_sparsity,
                                 atol=1e-6, rtol=1e-3)
    if sol.status == -1: ## Error occured
        print("\tERROR: sol.status=%d, '%s'" % (sol.status, sol.message))
        break
    if sol.status == 1: ## Termination event occured
        ## Set current time to an event time and evaluate the dense solution there
        _t[j+1] = sol.t_events[0][0]
        q[j+1] = sol.sol(_t[j+1])
        print("\tEvent: t_ini + %e [yr] = %e [yr]" % (_t[j+1]/const.yr, (t_ini + _t[j+1])/const.yr))
        print("\t\ta = %g [AU]" % (q[j+1,1]/const.AU))
        ## Snapshot this state
        j_ss[jt_ss] = j+1
        jt_ss += 1
        break
    ## Normal step: keep the state at the second t_eval point
    q[j+1] = sol.y[:,1]
    print("\ta = %g [AU]" % (q[j+1,1]/const.AU))
    if to_save:
        print("\tSave: t_ss[%d] = %e [yr]" % (jt_ss, t_ss[jt_ss]/const.yr))
        j_ss[jt_ss] = j+1
        jt_ss += 1
        to_save = False
    ## If we finished?
    if t_ini + _t[j+1] >= t_fin:
        print("Finished!")
        break
    ## Prepare to the next time step
    j += 1
## Absolute time grid
t = t_ini + _t
print("... done.")
##
## Finalize
##
## Unpack the state history: columns of q are (M_p, a, sigma[...])
M_p = q[:,0]
a = q[:,1]
sigma = q[:,2:]
## Inner disk radius at every time (same formula as in dotq;
## the (M_p/3M_s)^(1/3) factor is presumably the Hill radius — confirm)
r_0 = ( 1 + (M_p/(3*M_s))**(1/3) ) * a
Omega_0 = Omega_K(r_0)
## Surface-density normalization at every time
Sigma_0 = dotM(t, a) / (2*pi*r_0**2*Omega_0)
## Physical surface density: scale each row of the dimensionless sigma
Sigma = np.multiply(Sigma_0, sigma.T).T
## Physical radii of the cell centers at every time
r = np.array([ r_0_*x for r_0_ in r_0 ])
## Gap width at the semi-height level
Sigma_max = Sigma.max(axis=1)
H_gap = np.zeros_like(t)
for j in range(t.size):
    ## Last cell before Sigma first rises to half its maximum...
    i = np.where(Sigma[j] >= 0.5*Sigma_max[j])[0][0] - 1
    if i >= 1:
        ## ...then linearly interpolate the half-maximum radius, measured from r_0
        H_gap[j] = (r[j,i+1] - r[j,i]) / (Sigma[j,i+1] - Sigma[j,i]) * (0.5*Sigma_max[j] - Sigma[j,i]) + r[j,i] - r_0[j]
    #print("\t", j, i, Sigma_max[j], H_gap[j]/a[j])
##
## Plot
## 2x2 diagnostic figure: final Sigma(x), final Sigma(r),
## orbit evolution a(t), and relative gap width H_gap(t)/a.
##
import matplotlib as mpl
import matplotlib.pyplot as plt
fig, ax = plt.subplots(nrows=2, ncols=2)
## Line styles/colors reserved for multi-snapshot plots (see commented loop)
dashes = [[2,3], [6,3,2,3], [7,3], []]
color = ['#2a608d', '#e77300', '#4daf4a']
ax_ = ax[0,0]
#for j in range(len(t_ss)):
#    ax_.loglog(x, Sigma[j_ss[j],:], dashes=dashes[j])
ax_.semilogx(x, Sigma[-1,:], dashes=dashes[-1])
ax_.set_xlabel(r"$x$")
## NOTE(review): the xmax= keyword of set_xlim is removed in Matplotlib 3.5+
## (use right=) — confirm the pinned Matplotlib version.
ax_.set_xlim(xmax=1e3)
ax_.set_ylabel(r"$\Sigma$ [g$/$cm$^2$]")
#ax_.set_ylim(1e-6, 1e-1)
ax_ = ax[1,0]
ax_.semilogx(r[-1]/const.AU, Sigma[-1,:], dashes=dashes[-1])
#ax_.loglog(r[-1]/const.AU, Sigma[-1,:], dashes=dashes[-1])
ax_.set_xlabel(r"$r$ [AU]")
#ax_.set_xlim(xmax=1e3)
ax_.set_ylabel(r"$\Sigma$ [g$/$cm$^2$]")
#ax_.set_ylim(1e-6, 1e-1)
ax_ = ax[0,1]
## Mark the target orbit a_fin for reference
ax_.axhline(a_fin/const.AU, c='k', ls=':')
ax_.plot(t/const.yr, a/const.AU)
ax_.set_xlabel(r"$t$ [yr]")
ax_.set_ylabel(r"$a$ [AU]")
ax_ = ax[1,1]
#ax_.semilogx(t/const.yr, r_0/a)
#ax_.set_xlabel(r"$t$ [yr]")
#ax_.set_ylabel(r"$r_0/a$")
ax_.semilogx(t/const.yr, H_gap/a)
ax_.set_xlabel(r"$t$ [yr]")
ax_.set_ylabel(r"$H_\mathrm{gap}/a$")
plt.tight_layout()
plt.show()
#sys.exit(0)
##
## Write
## Persist the run to an HDF5 file named after the input parameters.
##
with h5py.File('migration_%g_%g_%g_%.4f.h5' % (alpha, beta, t_ini/const.yr, a_ini/const.AU), 'w') as f:
    f.create_dataset('M_s', data=M_s) .attrs['comment'] = "Stellar mass [g]"
    f.create_dataset('T', data=T) .attrs['comment'] = "Gas temperature [K]"
    f.create_dataset('cs', data=cs(T)) .attrs['comment'] = "Sound velocity [cm s-1]"
    f.create_dataset('alpha', data=alpha) .attrs['comment'] = "Alpha-parameter"
    f.create_dataset('beta', data=beta) .attrs['comment'] = "Power index for radial dependence of the viscosity coefficient"
    f.create_dataset('x', data=x) .attrs['comment'] = "Radial coordinate grid, in the units of 'r_0'."
    f.create_dataset('t', data=t) .attrs['comment'] = "Time grid [s]"
    f.create_dataset('M_p', data=M_p) .attrs['comment'] = "Planet mass [g]"
    f.create_dataset('a', data=a) .attrs['comment'] = "Planetary orbit radius [cm]"
    f.create_dataset('r_0', data=r_0) .attrs['comment'] = "Internal radius of the disk [cm]"
    f.create_dataset('H_gap', data=H_gap) .attrs['comment'] = "Gap width at the semi-height level [cm]"
    # NOTE(review): 'Sigma' stores the full time history (2-D), but the attrs
    # text below says "at the final time" — confirm and update the label.
    f.create_dataset('Sigma', data=Sigma) .attrs['comment'] = "Surface density at the final time [g cm-2]"
    f.create_dataset('j_ss', data=j_ss) .attrs['comment'] = "Indexes in the 't' array corresponding to the snapshot times"
|
import glob
import sys

import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
from moviepy.editor import VideoFileClip

from camera import Camera
from laneimageprocessor import LaneImageProcessor
from line import Line
# Global switch for the extra diagnostic plots below.
debugMode = True

if __name__ == "__main__":
    # create camera object to handle the dash cam
    camera = Camera()
    # create lane image processor object
    imageProcessor = LaneImageProcessor(camera)
    # calibrate the camera from the chessboard calibration set
    images = glob.glob('camera_cal/calibration*.jpg')
    #calibrated = camera.calibrate(images, debugMode)
    calibrated = camera.calibrate(images)
    if not calibrated:
        # BUG FIX: 'sys' was previously never imported, so this exit path
        # raised a NameError instead of terminating cleanly.
        print("Camera calibration not successful")
        sys.exit()
    if debugMode:
        # test calibration by showing an undistortion side by side
        distortedImage = mpimg.imread(images[0])
        undistortedImage = camera.undistort(distortedImage)
        f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
        ax1.imshow(distortedImage)
        ax1.set_title("Distorted image")
        ax2.imshow(undistortedImage)
        ax2.set_title("Undistorted image")
        plt.show()
    # start by using a static test image to implement pipeline
    testimage = mpimg.imread("test_images/straight_lines1.jpg")
    #testimage = mpimg.imread("test_images/test2.jpg")
    testimages = glob.glob('test_images/*.jpg')
    # undistort one of the given testimages
    if debugMode:
        test2 = mpimg.imread('test_images/test2.jpg')
        test2undist = camera.undistort(test2)
        f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
        ax1.imshow(test2)
        ax1.set_title("Distorted image")
        ax2.imshow(test2undist)
        ax2.set_title("Undistorted image")
        plt.show()
    # The triple-quoted sections below are deliberately disabled alternative
    # workflows (single-image sampling, debug-frame replay, extra videos).
    """
    # sample images
    for curImage in testimages:
        print(curImage)
        testimage = mpimg.imread(curImage)
        debug_image = imageProcessor.process(testimage, debugMode, True, True)
        plt.imshow(debug_image)
        plt.show()
        # imageProcessor.process(testimage, debugMode, True, debugFOV=True)
    """
    """
    debugimages = glob.glob('output_images/debug/*.png')
    for debug in debugimages:
        debImg = mpimg.imread(debug)
        debImg = debImg[:,:,:3]
        plt.imshow(debImg)
        plt.show()
        debug_image = imageProcessor.process(debImg, debugMode, True, True)
        plt.imshow(debug_image)
        plt.show()
    """
    # Process the main project video through the lane pipeline.
    imageProcessor.reset(camera)
    test_output1 = 'output_videos/project_video_output.mp4'
    clip1 = VideoFileClip('project_video.mp4')
    test_clip1 = clip1.fl_image(imageProcessor.process)
    test_clip1.write_videofile(test_output1, audio=False)
    """
    imageProcessor.reset(camera)
    test_output2 = 'output_videos/challenge_video_output.mp4'
    clip2 = VideoFileClip('challenge_video.mp4')
    test_clip2 = clip2.fl_image(imageProcessor.process)
    test_clip2.write_videofile(test_output2, audio=False)
    """
    """
    imageProcessor.reset(camera)
    test_output3 = 'output_videos/harder_challenge_video_output.mp4'
    clip3 = VideoFileClip('harder_challenge_video.mp4')
    test_clip3 = clip3.fl_image(imageProcessor.process)
    test_clip3.write_videofile(test_output3, audio=False)
    """
|
<reponame>jld23/python-dlpy<gh_stars>1-10
from dlpy.speech import *
import unittest
import swat
import swat.utils.testing as tm
import tempfile
import os
class TestSpeechUtils(unittest.TestCase):
    """Unit tests for the standalone audio helpers in dlpy.speech
    (read_audio, the check_*/convert_* functions, and the
    segment_audio/clean_audio utilities).

    Every test needs the optional stdlib modules 'wave' (and usually
    'audioop') plus local sample data pointed to by DLPY_DATA_DIR_LOCAL;
    otherwise the test is skipped.  The previous version repeated the
    try/import + skip boilerplate in every test; it is factored into the
    _require/_sample helpers with identical skip messages and semantics.
    """

    def setUp(self):
        # Local directory holding the sample .wav/.sashdat/.csv fixtures.
        self.data_dir_local = None
        if "DLPY_DATA_DIR_LOCAL" in os.environ:
            self.data_dir_local = os.environ.get("DLPY_DATA_DIR_LOCAL")

    def _require(self, *module_names):
        """Skip the running test unless every named module is importable
        and DLPY_DATA_DIR_LOCAL is configured (import checks first, then
        the directory check, matching the original order)."""
        import importlib
        for name in module_names:
            try:
                importlib.import_module(name)
            except ImportError:
                self.skipTest("%s is not found in the libraries." % name)
        if self.data_dir_local is None:
            self.skipTest("DLPY_DATA_DIR_LOCAL is not set in the environment variables.")

    def _sample(self, basename):
        """Full local path of a sample data file."""
        return os.path.join(self.data_dir_local, basename)

    def test_read_audio_1(self):
        # Non-wav inputs must be rejected by the wave reader.
        self._require("wave")
        import wave
        with self.assertRaises(wave.Error):
            read_audio(self._sample("sample_acoustic_model.sashdat"))
        with self.assertRaises(wave.Error):
            read_audio(self._sample("sample_language_model.csv"))

    def test_read_audio_2(self):
        # A missing file must raise FileNotFoundError.
        self._require("wave")
        with self.assertRaises(FileNotFoundError):
            read_audio(self._sample("nonexistent.wav"))

    def test_read_audio_3(self):
        # A valid wav yields a (Wave_read, params-tuple) pair.
        self._require("wave")
        import wave
        wave_reader, wave_params = read_audio(self._sample("sample_16bit_16khz.wav"))
        self.assertIsInstance(wave_reader, wave.Wave_read)
        self.assertIsInstance(wave_params, tuple)
        self.assertIsNotNone(wave_reader)
        self.assertIsNotNone(wave_params)
        wave_reader.close()

    def test_check_framerate(self):
        self._require("wave")
        wave_reader, wave_params = read_audio(self._sample("sample_16bit_8khz.wav"))
        self.assertFalse(check_framerate(wave_params, 16000))
        wave_reader.close()
        wave_reader, wave_params = read_audio(self._sample("sample_16bit_16khz.wav"))
        self.assertTrue(check_framerate(wave_params, 16000))
        wave_reader.close()

    def test_check_sampwidth(self):
        self._require("wave")
        wave_reader, wave_params = read_audio(self._sample("sample_8bit_16khz.wav"))
        self.assertFalse(check_sampwidth(wave_params, 2))
        wave_reader.close()
        wave_reader, wave_params = read_audio(self._sample("sample_16bit_16khz.wav"))
        self.assertTrue(check_sampwidth(wave_params, 2))
        wave_reader.close()

    def test_check_stereo(self):
        self._require("wave")
        wave_reader, wave_params = read_audio(self._sample("recording_1.wav"))
        self.assertFalse(check_stereo(wave_params))
        wave_reader.close()
        wave_reader, wave_params = read_audio(self._sample("sample_16bit_16khz.wav"))
        self.assertTrue(check_stereo(wave_params))
        wave_reader.close()

    def test_convert_framerate_1(self):
        self._require("wave", "audioop")
        wave_reader, wave_params = read_audio(self._sample("sample_16bit_16khz.wav"))
        fragment = wave_reader.readframes(1000)
        # Identity conversion (16k -> 16k) must return the data unchanged.
        new_fragment = convert_framerate(fragment, wave_params.sampwidth, wave_params.nchannels,
                                         wave_params.framerate, wave_params.framerate)
        self.assertEqual(fragment, new_fragment)
        wave_reader.close()

    def test_convert_framerate_2(self):
        self._require("wave", "audioop")
        wave_reader, wave_params = read_audio(self._sample("sample_16bit_16khz.wav"))
        fragment = wave_reader.readframes(1000)
        # Downsampling 16k -> 8k halves the payload.
        new_fragment = convert_framerate(fragment, wave_params.sampwidth, wave_params.nchannels,
                                         wave_params.framerate, wave_params.framerate // 2)
        self.assertEqual(len(fragment) / 2, len(new_fragment))
        # Upsampling 16k -> 32k roughly doubles it.
        new_fragment = convert_framerate(fragment, wave_params.sampwidth, wave_params.nchannels,
                                         wave_params.framerate, wave_params.framerate * 2)
        self.assertAlmostEqual(len(fragment) / len(new_fragment), 0.5, 2)
        wave_reader.close()

    def test_convert_sampwidth_1(self):
        self._require("wave", "audioop")
        wave_reader, wave_params = read_audio(self._sample("sample_16bit_16khz.wav"))
        fragment = wave_reader.readframes(1000)
        # Identity conversion (16 bit -> 16 bit) must return the data unchanged.
        new_fragment = convert_sampwidth(fragment, wave_params.sampwidth, wave_params.sampwidth)
        self.assertEqual(fragment, new_fragment)
        wave_reader.close()

    def test_convert_sampwidth_2(self):
        self._require("wave", "audioop")
        wave_reader, wave_params = read_audio(self._sample("sample_8bit_16khz.wav"))
        fragment = wave_reader.readframes(1000)
        # Widening 8 bit -> 16 bit doubles the byte count.
        new_fragment = convert_sampwidth(fragment, wave_params.sampwidth, wave_params.sampwidth * 2)
        self.assertEqual(len(fragment), 0.5 * len(new_fragment))
        wave_reader.close()

    def test_convert_sampwidth_3(self):
        self._require("wave", "audioop")
        wave_reader, wave_params = read_audio(self._sample("sample_16bit_16khz.wav"))
        fragment = wave_reader.readframes(1000)
        # Narrowing 16 bit -> 8 bit halves the byte count.
        new_fragment = convert_sampwidth(fragment, wave_params.sampwidth, wave_params.sampwidth // 2)
        self.assertEqual(len(fragment), len(new_fragment) * 2)
        wave_reader.close()

    def test_convert_stereo_to_mono(self):
        self._require("wave", "audioop")
        wave_reader, wave_params = read_audio(self._sample("sample_16bit_16khz.wav"))
        self.assertTrue(check_stereo(wave_params))
        fragment = wave_reader.readframes(1000)
        # Downmixing stereo to mono halves the byte count.
        new_fragment = convert_stereo_to_mono(fragment, wave_params.sampwidth)
        self.assertEqual(len(fragment), len(new_fragment) * 2)
        wave_reader.close()

    def test_segment_audio_1(self):
        self._require("wave", "audioop")
        with tempfile.TemporaryDirectory() as temp_dir:
            data_path_after_caslib = "test/"
            # A 40-second minimum segment length exceeds the sample length.
            with self.assertRaises(DLPyError):
                segment_audio(self._sample("sample_16bit_16khz.wav"),
                              temp_dir, data_path_after_caslib, 40, 16000, 2)

    def test_segment_audio_2(self):
        self._require("wave", "audioop")
        with tempfile.TemporaryDirectory() as temp_dir:
            data_path_after_caslib = "test/"
            # A 20-second maximum yields a single segment...
            listing_path_after_caslib, listing_path_local, segment_path_after_caslib_list, segment_path_local_list = \
                segment_audio(self._sample("sample_16bit_16khz.wav"),
                              temp_dir, data_path_after_caslib, 20, 16000, 2)
            self.assertTrue(os.path.exists(listing_path_local))
            with open(listing_path_local, "r") as listing_file:
                lines = listing_file.readlines()
                self.assertEqual(len(lines), 1)
            self.assertEqual(len(segment_path_after_caslib_list), 1)
            self.assertEqual(len(segment_path_local_list), 1)
            clean_audio(listing_path_local, segment_path_local_list)
            # ...while a 2-second maximum yields four segments.
            listing_path_after_caslib, listing_path_local, segment_path_after_caslib_list, segment_path_local_list = \
                segment_audio(self._sample("sample_16bit_16khz.wav"),
                              temp_dir, data_path_after_caslib, 2, 16000, 2)
            self.assertTrue(os.path.exists(listing_path_local))
            with open(listing_path_local, "r") as listing_file:
                lines = listing_file.readlines()
                self.assertEqual(len(lines), 4)
            self.assertEqual(len(segment_path_after_caslib_list), 4)
            self.assertEqual(len(segment_path_local_list), 4)
            clean_audio(listing_path_local, segment_path_local_list)

    def test_clean_audio(self):
        self._require("wave", "audioop")
        with tempfile.TemporaryDirectory() as temp_dir:
            data_path_after_caslib = "test/"
            listing_path_after_caslib, listing_path_local, segment_path_after_caslib_list, segment_path_local_list = \
                segment_audio(self._sample("sample_16bit_16khz.wav"),
                              temp_dir, data_path_after_caslib, 2, 16000, 2)
            self.assertTrue(os.path.exists(listing_path_local))
            for segment_path_local in segment_path_local_list:
                self.assertTrue(os.path.exists(segment_path_local))
            # clean_audio must remove every segment file it was given.
            clean_audio(listing_path_local, segment_path_local_list)
            for segment_path_local in segment_path_local_list:
                self.assertFalse(os.path.exists(segment_path_local))
class TestSpeechToTextInit(unittest.TestCase):
    """Tests for constructing the dlpy Speech object and loading its
    acoustic/language models against a live CAS server.

    Requires a reachable CAS server plus DLPY_DATA_DIR (server-side) and
    DLPY_DATA_DIR_LOCAL (client-side); tests skip when either directory
    is unset.  The previously copy-pasted skip checks are factored into
    the _require_data_dirs helper with identical messages and semantics.
    """

    conn = None          # CAS connection, created per test in setUp()
    server_type = None   # CAS host OS type string
    server_sep = None    # path separator on the CAS host
    data_dir = None      # server-side data directory (with trailing separator)
    local_dir = None     # client-side data directory

    def setUp(self):
        swat.reset_option()
        swat.options.cas.print_messages = False
        swat.options.interactive_mode = False
        self.conn = swat.CAS()
        self.server_type = tm.get_cas_host_type(self.conn)
        # Windows-style separator unless the server reports Linux/OSX.
        self.server_sep = "\\"
        if self.server_type.startswith("lin") or self.server_type.startswith("osx"):
            self.server_sep = "/"
        if "DLPY_DATA_DIR" in os.environ:
            self.data_dir = os.environ.get("DLPY_DATA_DIR")
            # Normalize to exactly one trailing separator.
            if self.data_dir.endswith(self.server_sep):
                self.data_dir = self.data_dir[:-1]
            self.data_dir += self.server_sep
        if "DLPY_DATA_DIR_LOCAL" in os.environ:
            self.local_dir = os.environ.get("DLPY_DATA_DIR_LOCAL")

    def _require_data_dirs(self):
        """Skip the running test unless both data directories are configured."""
        if self.data_dir is None:
            self.skipTest("DLPY_DATA_DIR is not set in the environment variables.")
        if self.local_dir is None:
            self.skipTest("DLPY_DATA_DIR_LOCAL is not set in the environment variables.")

    def test_init_1(self):
        self._require_data_dirs()
        speech = Speech(self.conn, self.data_dir, self.local_dir)
        # Constructing Speech must load the required CAS action sets...
        action_set_list = self.conn.actionSetInfo().setinfo["actionset"].tolist()
        self.assertTrue("audio" in action_set_list)
        self.assertTrue("deepLearn" in action_set_list)
        self.assertTrue("langModel" in action_set_list)
        # ...but no models yet, since none were passed in.
        self.assertIsNone(speech.acoustic_model)
        self.assertIsNone(speech.language_model_caslib)

    def test_init_2(self):
        self._require_data_dirs()
        # A nonexistent local directory must be rejected.
        with self.assertRaises(DLPyError):
            Speech(self.conn, self.data_dir, os.path.join(self.local_dir, "nonexistent"))

    def test_init_3(self):
        self._require_data_dirs()
        # Passing model paths at construction loads both models.
        speech = Speech(self.conn, self.data_dir, self.local_dir,
                        self.data_dir + "sample_acoustic_model.sashdat",
                        self.data_dir + "sample_language_model.csv")
        self.assertIsNotNone(speech.acoustic_model)
        self.assertIsNotNone(speech.language_model_caslib)
        table_list = self.conn.tableInfo(caslib=speech.language_model_caslib).TableInfo["Name"].tolist()
        self.assertTrue(speech.language_model_name.upper() in table_list)

    def test_load_acoustic_model(self):
        self._require_data_dirs()
        speech = Speech(self.conn, self.data_dir, self.local_dir)
        speech.load_acoustic_model(self.data_dir + "sample_acoustic_model.sashdat")
        self.assertIsNotNone(speech.acoustic_model)
        self.assertIsNotNone(speech.acoustic_model.model_name)
        self.assertIsNotNone(speech.acoustic_model.model_table)
        self.assertIsNotNone(speech.acoustic_model.model_weights)

    def test_load_language_model_1(self):
        self._require_data_dirs()
        speech = Speech(self.conn, self.data_dir, self.local_dir)
        self.assertIsNone(speech.language_model_caslib)
        speech.load_language_model(self.data_dir + "sample_language_model.csv")
        self.assertIsNotNone(speech.language_model_caslib)
        table_list = self.conn.tableInfo(caslib=speech.language_model_caslib).TableInfo["Name"].tolist()
        self.assertTrue(speech.language_model_name.upper() in table_list)

    def test_load_language_model_2(self):
        self._require_data_dirs()
        speech = Speech(self.conn, self.data_dir, self.local_dir)
        self.assertIsNone(speech.language_model_caslib)
        # Loading a nonexistent language model must fail and leave state unset.
        with self.assertRaises(DLPyError):
            speech.load_language_model(self.data_dir + "language_model_nonexistent.csv")
        self.assertIsNone(speech.language_model_caslib)

    def tearDown(self):
        # Best-effort teardown: the session may already be dead.
        try:
            self.conn.terminate()
        except swat.SWATError:
            pass
        del self.conn
        swat.reset_option()
class TestSpeechToText(unittest.TestCase):
    """End-to-end transcription tests against a shared Speech pipeline.

    The three public tests only differ in the audio file they feed in, so the
    common skip/transcribe/verify logic lives in _transcribe_file.
    """

    conn = None
    server_type = None
    server_sep = None
    data_dir = None
    local_dir = None
    speech = None

    @classmethod
    def setUpClass(cls):
        swat.reset_option()
        swat.options.cas.print_messages = False
        swat.options.interactive_mode = False
        cls.conn = swat.CAS()
        cls.server_type = tm.get_cas_host_type(cls.conn)
        # Path separator depends on the CAS host OS, not on the client.
        cls.server_sep = "\\"
        if cls.server_type.startswith("lin") or cls.server_type.startswith("osx"):
            cls.server_sep = "/"
        if "DLPY_DATA_DIR" in os.environ:
            cls.data_dir = os.environ.get("DLPY_DATA_DIR")
            # Normalize to exactly one trailing separator.
            if cls.data_dir.endswith(cls.server_sep):
                cls.data_dir = cls.data_dir[:-1]
            cls.data_dir += cls.server_sep
        if "DLPY_DATA_DIR_LOCAL" in os.environ:
            cls.local_dir = os.environ.get("DLPY_DATA_DIR_LOCAL")
        if cls.data_dir is not None:
            cls.speech = Speech(cls.conn, cls.data_dir, cls.local_dir,
                                cls.data_dir + "sample_acoustic_model.sashdat",
                                cls.data_dir + "sample_language_model.csv")

    def _transcribe_file(self, audio_file_name):
        """Shared test body: skip when a prerequisite is missing, otherwise
        transcribe *audio_file_name* from the local data dir and check the result."""
        try:
            import wave
        except ImportError:
            self.skipTest("wave is not found in the libraries.")
        try:
            import audioop
        except ImportError:
            self.skipTest("audioop is not found in the libraries.")
        if self.data_dir is None:
            self.skipTest("DLPY_DATA_DIR is not set in the environment variables.")
        if self.local_dir is None:
            self.skipTest("DLPY_DATA_DIR_LOCAL is not set in the environment variables.")
        try:
            result = self.speech.transcribe(os.path.join(self.local_dir, audio_file_name))
            self.assertIsInstance(result, str)
        except DLPyError as err:
            # Server-side audio loading may legitimately fail on some setups;
            # only that specific failure is tolerated.
            self.assertTrue("Cannot load the audio files." in str(err))

    def test_transcribe_1(self):
        self._transcribe_file("sample_16bit_16khz.wav")

    def test_transcribe_2(self):
        self._transcribe_file("sample_8bit_16khz.wav")

    def test_transcribe_3(self):
        self._transcribe_file("sample_16bit_44khz.wav")

    @classmethod
    def tearDownClass(cls):
        try:
            cls.conn.terminate()
        except swat.SWATError:
            pass
        del cls.conn
        swat.reset_option()
|
# -----------------------------------------------------.
# 線形状をポリゴンメッシュのエッジに変換.
#
# @title \en Convert line shape to polygon mesh edges \enden
# @title \ja 線形状をポリゴンメッシュのエッジに変換 \endja
# -----------------------------------------------------.
import numpy
import math
# Shade3D scripting entry point: the currently open scene.
scene = xshade.scene()
#---------------------------------------.
# Zero check within a small tolerance.
#---------------------------------------.
def isZero (v):
    """Return True when v lies strictly within +/-1e-5 of zero."""
    return abs(v) < 1e-5
#---------------------------------------.
# Evaluate a position on a cubic Bezier segment.
# @param[in] v1Pos  start anchor point (x, y, z).
# @param[in] v1Out  out-handle of the start anchor.
# @param[in] v2In   in-handle of the end anchor.
# @param[in] v2Pos  end anchor point (x, y, z).
# @param[in] fPos   parameter, clamped to [0, 1].
# @return [x, y, z] position on the curve.
#---------------------------------------.
def getBezierPoint (v1Pos, v1Out, v2In, v2Pos, fPos):
    # (Removed unused 'fMin' and the redundant rPos pre-initialization.)
    eps = 1e-5
    cPos = [
        [v1Pos[0], v1Pos[1], v1Pos[2]],
        [v1Out[0], v1Out[1], v1Out[2]],
        [v2In[0], v2In[1], v2In[2]],
        [v2Pos[0], v2Pos[1], v2Pos[2]],
    ]
    t = float(fPos)
    t = max(0.0, t)
    t = min(1.0, t)
    # Return the exact endpoints near t = 0 / 1 to avoid float error there.
    if t < eps:
        return cPos[0]
    if t > 1.0 - eps:
        return cPos[3]
    # Cubic Bernstein basis.
    t2 = 1.0 - t
    t2d = t2 * t2
    td = t * t
    b1 = t2d * t2
    b2 = 3.0 * t * t2d
    b3 = 3.0 * td * t2
    b4 = t * td
    return [b1 * cPos[0][i] + b2 * cPos[1][i] + b3 * cPos[2][i] + b4 * cPos[3][i]
            for i in range(3)]
#---------------------------------------.
# Decompose a line shape into a polyline of sampled points.
# @param[in] shape      target shape (must be a line shape, type 4).
# @param[in] lineDivCou total number of divisions for the whole line.
# @return list of [x, y, z] points (empty if not a line shape).
#---------------------------------------.
def getLinePoints (shape, lineDivCou):
    vCou = shape.total_number_of_control_points
    vList = []
    if shape.type != 4 or vCou < 2:  # Not a line shape.
        return vList
    # Integer division: under Python 3, '/' yields a float and would break
    # range(divCou + 1) below; '//' is identical for ints under Python 2.
    divCou = lineDivCou // vCou
    if divCou < 4:
        divCou = 4
    divD = 1.0 / float(divCou)
    # Sample each Bezier segment between consecutive control points.
    for i in range(vCou):
        if not shape.closed and i + 1 >= vCou:
            break  # open line: no segment wrapping back to the first point
        p1 = shape.control_point(i)
        p2 = shape.control_point((i + 1) % vCou)
        dPos = 0.0
        for j in range(divCou + 1):
            p = getBezierPoint(p1.position, p1.out_handle, p2.in_handle, p2.position, dPos)
            # Skip the first sample of every segment after the first one,
            # since it duplicates the previous segment's last sample.
            if (i == 0) or (j > 0):
                vList.append(p)
            dPos += divD
    return vList
#---------------------------------------.
# Resample a point-only polyline so the points become evenly spaced.
# @param[in] vList  list of [x, y, z] points.
# @param[in] divCou number of points on the new line.
# @return list of [x, y, z] points.
#---------------------------------------.
def recalcLinePoints (vList, divCou):
    # Re-pack the input points as numpy arrays.
    vListLen = len(vList)
    if vListLen < 2:
        return []
    posA = []
    for i in range(vListLen):
        posA.append(numpy.array([vList[i][0], vList[i][1], vList[i][2]]))
    # Total polyline length and the length of each segment.
    allLen = 0.0
    lenList = []
    for i in range(vListLen - 1):
        vLen = numpy.linalg.norm(posA[i + 1] - posA[i])
        lenList.append(vLen)
        allLen += vLen
    # Target arc-length spacing between consecutive resampled points.
    dLen = allLen / (divCou - 1.0)
    newPosA = []
    newPosA.append([posA[0][0], posA[0][1], posA[0][2]])
    dPos = 0.0  # arc length carried over from previous (too short) segments
    for i in range(vListLen - 1):
        len1 = lenList[i]
        if dPos + len1 < dLen:
            # Segment too short to reach the next sample; carry its length over.
            dPos += len1
            continue
        dPos2 = 0.0  # arc length already consumed within the current segment
        while dPos2 < len1:
            # Parametric position of the next sample inside this segment.
            dd = (dPos2 + (dLen - dPos)) / len1
            p = (posA[i + 1] - posA[i]) * dd + posA[i]
            newPosA.append([p[0], p[1], p[2]])
            if len(newPosA) >= divCou - 1:
                break  # all interior samples emitted
            dd2 = dLen - dPos
            if dPos2 + dd2 + dLen > len1:
                # The next sample falls past this segment; remember the overshoot.
                dPos = len1 - (dPos2 + dd2)
                break
            dPos2 += dd2
            dPos = 0.0
        if len(newPosA) >= divCou - 1:
            break
    # Always end exactly on the last input point.
    newPosA.append([posA[-1][0], posA[-1][1], posA[-1][2]])
    return newPosA
# ---------------------------------------------.
# Main: convert the selected line shape into polygon-mesh edges.
shape = scene.active_shape()
if shape.type != 4 or shape.total_number_of_control_points < 2:
    # Requires a line shape (type 4) with at least 2 control points.
    xshade.show_message_box('ポイント数が2以上の線形状を選択してください。', False)
else:
    # Build and show the dialog box.
    dlg = xshade.create_dialog_with_uuid('05783960-e90b-4a70-9488-daefa220447d')
    div_id = dlg.append_int('分割数')
    dlg.set_value(div_id, 10)
    dlg.set_default_value(div_id, 10)
    dlg.append_default_button()
    if dlg.ask("線形状をポリゴンメッシュのエッジに変換"):
        # N divisions -> N+1 vertices along the line; at least 2 vertices.
        rDivCou = dlg.get_value(div_id) + 1
        if rDivCou < 2:
            rDivCou = 2
        # Oversample the curve first, then resample to evenly spaced points.
        divCou = min(40, rDivCou * 4)
        vList = getLinePoints(shape, divCou)
        vList2 = recalcLinePoints(vList, rDivCou)
        if shape.closed:
            # Closed curve: the last sample duplicates the first; drop it.
            vList2.pop()
        # Emit the result as a polygon mesh made of vertices and edges only.
        scene.begin_creating()
        scene.begin_polygon_mesh(None)
        vCou = len(vList2)
        for p in vList2:
            scene.append_polygon_mesh_vertex(p)
        vCou2 = vCou
        if shape.closed == False:
            vCou2 = vCou2 - 1  # open line: no wrap-around edge
        for i in range(vCou2):
            i0 = i
            i1 = (i + 1) % vCou
            scene.append_polygon_mesh_edge(i0, i1)
        scene.end_polygon_mesh()
        scene.end_creating()
|
import json
from typing import Union
from cryptojwt.jwk.jwk import key_from_jwk_dict
from django.core.exceptions import ValidationError
from django.utils.translation import gettext as _
from spid_cie_oidc.provider.settings import (
OIDCFED_DEFAULT_PROVIDER_PROFILE,
OIDCFED_PROVIDER_PROFILES
)
from spid_cie_oidc.relying_party.settings import (
RP_DEFAULT_PROVIDER_PROFILES,
RP_PROVIDER_PROFILES
)
from .jwks import serialize_rsa_key
from .settings import (
ENCRYPTION_ALG_VALUES_SUPPORTED,
ENCRYPTION_ENC_SUPPORTED,
ENTITY_TYPES,
SIGNING_ALG_VALUES_SUPPORTED
)
def validate_public_jwks(values: Union[dict, list]):
    """Validate that every JWK in *values* is a public key.

    Accepts a single JWK dict or a list of them.

    Raises:
        ValidationError: if a JWK carries private key material or cannot be parsed.
    """
    if isinstance(values, dict):
        values = [values]
    try:
        for jwk_dict in values:
            _k = key_from_jwk_dict(jwk_dict)
            if _k.private_key():
                # Show the caller what the public form of this key looks like.
                _pub = serialize_rsa_key(_k.public_key())
                # Fixed "is is" typo in the original message.
                raise ValidationError(
                    f"This JWK is private {json.dumps(jwk_dict)}. "
                    f"It MUST be public instead, like this: {json.dumps([_pub])}."
                )
    except ValidationError:
        # Don't re-wrap our own, already descriptive, error in "Not valid: ...".
        raise
    except Exception as e:
        raise ValidationError(f"Not valid: {e}")
def validate_metadata_algs(metadata: dict):
    """Check that every alg/enc list declared in the openid_provider metadata
    only contains values supported by this deployment.

    Raises:
        ValidationError: on the first unsupported value found.
    """
    supported = {
        "id_token_signing_alg_values_supported": SIGNING_ALG_VALUES_SUPPORTED,
        "id_token_encryption_alg_values_supported": ENCRYPTION_ALG_VALUES_SUPPORTED,
        "id_token_encryption_enc_values_supported": ENCRYPTION_ENC_SUPPORTED,
        "token_endpoint_auth_signing_alg_values_supported": SIGNING_ALG_VALUES_SUPPORTED,
        "userinfo_encryption_alg_values_supported": ENCRYPTION_ALG_VALUES_SUPPORTED,
        "userinfo_encryption_enc_values_supported": ENCRYPTION_ENC_SUPPORTED,
        "userinfo_signing_alg_values_supported": SIGNING_ALG_VALUES_SUPPORTED,
        "request_object_encryption_alg_values_supported": ENCRYPTION_ALG_VALUES_SUPPORTED,
        "request_object_encryption_enc_values_supported": ENCRYPTION_ENC_SUPPORTED,
        "request_object_signing_alg_values_supported": SIGNING_ALG_VALUES_SUPPORTED,
    }
    op_metadata = metadata.get("openid_provider", None)
    if not op_metadata:
        return
    for claim, allowed in supported.items():
        for alg in op_metadata.get(claim, []):
            if alg not in allowed:
                raise ValidationError(
                    f"{claim} has an unsupported value {alg}. "
                    f"Supported algs are {allowed}"
                )
def validate_entity_metadata(value):
    """Ensure *value* declares at least one known entity type and that any
    openid_provider / openid_relying_party metadata validates against the
    configured profile schema.

    Raises:
        ValidationError: when no known entity type is present or a metadata
            schema rejects its section.
    """
    # any() replaces the original manual status-flag loop.
    if not any(entity_type in value for entity_type in ENTITY_TYPES):
        # NOTE(review): the f-string is evaluated before gettext sees it, so
        # this message cannot be looked up in translation catalogs — confirm
        # whether translation is actually intended here.
        raise ValidationError(
            _(f'Need to specify one of {", ".join(ENTITY_TYPES)}')
        )
    if "openid_provider" in value:
        schema = OIDCFED_PROVIDER_PROFILES[OIDCFED_DEFAULT_PROVIDER_PROFILE]
        try:
            schema["op_metadata"](**value["openid_provider"])
        except Exception as e:
            raise ValidationError(
                f"OP metadata fail {e}. "
            )
    if "openid_relying_party" in value:
        schema = RP_PROVIDER_PROFILES[RP_DEFAULT_PROVIDER_PROFILES]
        try:
            schema["rp_metadata"](**value["openid_relying_party"])
        except Exception as e:
            raise ValidationError(
                f"RP metadata fail {e}. "
            )
def validate_private_jwks(values: Union[dict, list]):
    """Validate that every JWK in *values* carries private key material.

    Accepts a single JWK dict or a list of them.

    Raises:
        ValidationError: if a JWK is public-only or cannot be parsed.
    """
    if isinstance(values, dict):
        values = [values]
    try:
        for jwk_dict in values:
            _k = key_from_jwk_dict(jwk_dict)
            if not _k.private_key():
                raise ValidationError(f"Can't extract a private JWK from {jwk_dict}")
    except ValidationError:
        # Don't re-wrap our own, already descriptive, error in "Not valid: ...".
        raise
    except Exception as e:
        raise ValidationError(f"Not valid: {e}")
|
import asyncio
from datetime import datetime
import uuid
from urllib.parse import *
from .comms import *
from . import logger
from .modules.echo import EchoModule
from .modules.socks5 import Socks5Module
from .tcp_proxy import *
from .fakehttpserver import *
import websockets
class CommsAgentClient:
    """One connected agent: spawns job modules on server command and shuttles
    commands/replies between the server queues and the job modules."""

    def __init__(self, client_uuid, in_queue, out_queue):
        self.client_uuid = client_uuid
        self.connected_at = datetime.utcnow()
        self.last_seen_at = None
        self.in_queue = in_queue    # commands arriving from the server
        self.out_queue = out_queue  # replies going back to the server
        self.modules = {}  # job_id -> job in_queue
        self.modules_cmd_queue = asyncio.Queue()  # replies from all modules
        self.modules_ctr = Counter()  # job id allocator
        self.name = '[CommsAgentClient]'

    async def create_job(self, module_name):
        """Spawn the job module named *module_name* and announce it upstream.

        Unknown module names are logged and ignored.
        """
        logger.debug('%s Creating job %s' % (self.name, module_name))
        try:
            # Dispatch table replaces the original if/if/else, where the second
            # 'if ... else' also logged "Unknown job" for 'echo' requests.
            if module_name == 'echo':
                module_cls = EchoModule
            elif module_name == 'socks5':
                module_cls = Socks5Module
            else:
                logger.warning('%s Unknown job to create! %s' % (self.name, module_name))
                return
            job_id = self.modules_ctr.get_next()
            in_queue = asyncio.Queue()
            em = module_cls(job_id, in_queue, self.modules_cmd_queue)
            asyncio.ensure_future(em.run())
            self.modules[job_id] = in_queue
            rply = CreateJobRply()
            rply.job_name = module_name
            rply.job_id = job_id
            await self.modules_cmd_queue.put(rply)
        except Exception as e:
            # Fixed: the original "'...' %s (...)" raised a NameError here.
            logger.exception('%s create_job' % (self.name,))

    async def listen_server_cmds(self):
        """Dispatch commands arriving from the server to the right job module."""
        try:
            while True:
                cmd = await self.in_queue.get()
                if isinstance(cmd, JobCmd):
                    if cmd.job_id in self.modules:
                        await self.modules[cmd.job_id].put(cmd.job_data)
                    else:
                        # Fixed format string: the original had a single %s
                        # for two arguments and raised a TypeError.
                        logger.warning('%s Reply to an unknown job id! %s' % (self.name, cmd.job_id))
                elif isinstance(cmd, CreateJobCmd):
                    logger.debug('Got command!')
                    await self.create_job(cmd.job_name)
                elif isinstance(cmd, StopJobCmd):
                    pass  # TODO: job termination is not implemented yet
        except Exception as e:
            logger.exception('%s listen_server_cmds' % (self.name,))

    async def listen_module_rplys(self):
        """Tag module replies with this client's UUID and forward them upstream."""
        try:
            while True:
                rply = await self.modules_cmd_queue.get()
                rply.client_uuid = self.client_uuid
                await self.out_queue.put(rply)
        except Exception as e:
            logger.exception('%s listen_module_rplys' % (self.name,))

    async def run(self):
        """Run both pumps; returns when the reply pump stops."""
        try:
            asyncio.ensure_future(self.listen_server_cmds())
            await asyncio.wait_for(self.listen_module_rplys(), timeout = None)
            logger.info('%s Client exiting' % (self.name,))
        except Exception as e:
            logger.exception('run')
class FakeHTTPProxy:
    """Local TCP listener that tunnels each incoming connection through an
    upstream HTTP proxy (via CONNECT) to a fixed destination."""

    def __init__(self, proxy_url, destination, listen_ip = '127.0.0.1', listen_port = 10001):
        self.listen_ip = listen_ip
        self.listen_port = listen_port
        self.proxy_url = proxy_url      # e.g. http://user:pass@host:port
        self.destination = destination  # host:port reached through the proxy
        p = urlparse(self.proxy_url)
        self.username = p.username
        self.password = p.password
        # Use hostname/port instead of splitting netloc: netloc still contains
        # the "user:pass@" part, so the original split crashed whenever the
        # proxy URL carried credentials.
        self.proxy_ip = p.hostname
        self.proxy_port = int(p.port)

    async def open_proxy_connection(self, reader, writer):
        """Issue an HTTP CONNECT for self.destination on an open proxy socket.

        Returns 'OK' when the tunnel is established, 'NO' on any failure.
        """
        try:
            import base64  # local import: not provided by the module's imports
            connect_hdr = [
                'CONNECT %s HTTP/1.1' % self.destination,
                'User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0',
                'Proxy-Connection: keep-alive',
                'Connection: keep-alive',
                'Host: %s' % self.destination,
            ]
            if self.username and self.password:
                adata = '%s:%s' % (self.username, self.password)
                # .decode(): the header must carry the base64 text itself, not
                # the "b'...'" repr of a bytes object.
                adata = base64.b64encode(adata.encode()).decode()
                connect_hdr.append('Proxy-Authorization: basic %s' % adata)
            connect_cmd = '\r\n'.join(connect_hdr) + '\r\n\r\n'
            logger.debug('Connect cmd: %s' % repr(connect_cmd))
            writer.write(connect_cmd.encode())
            await writer.drain()
            line = await reader.readuntil(b'\r\n\r\n')
            response = line.decode()
            logger.debug('Response from proxy server: %s' % response)
            status_code = response.split('\r\n')[0].split(' ')[1]
            if status_code != '200':
                # Fixed 'warining' typo that raised an AttributeError here.
                logger.warning('Proxy server doesnt like us! Status code: %s' % status_code)
                return 'NO'
            logger.debug('HTTP proxy channel is open!')
            return 'OK'
        except Exception as e:
            logger.exception('open_proxy_connection')
            return 'NO'

    async def handle_client(self, reader, writer):
        """Accept a local client, open the upstream tunnel, then pump bytes
        both ways until either side closes."""
        proxy_reader, proxy_writer = await asyncio.wait_for(asyncio.open_connection(host=self.proxy_ip, port = self.proxy_port), timeout=10)
        status = await self.open_proxy_connection(proxy_reader, proxy_writer)
        if status == 'NO':
            writer.close()
            return
        tcp_proxy = AioTCPProxy(proxy_reader, proxy_writer, reader, writer, '[FakeHTTPProxy Proxy]', logger, timeout = None)
        asyncio.ensure_future(tcp_proxy.run())

    async def run(self):
        """Start the local listener; serving continues in the background."""
        server = await asyncio.start_server(self.handle_client, self.listen_ip, self.listen_port)
        # python3.7 has 'async with server: await server.serve_forever()'
        # but in 3.6 this functionality is missing :(
        asyncio.ensure_future(server.serve_forever())
class CommsAgentServerListening:
    """Websocket listener side of the comms agent: accepts peer connections,
    performs the registration handshake and shuttles commands/replies between
    the websocket and the per-client CommsAgentClient object."""

    def __init__(self, listen_ip = '127.0.0.1', listen_port = 8443):
        self.listen_ip = listen_ip
        self.listen_port = listen_port
        self.uuid = None
        self.name = '[CommsAgentServerListening]'
        self.client_timeout = 30        # seconds to wait for a pong reply
        self.client_ping_interval = 30  # seconds between pings

    async def keepalive(self, ws, client):
        """Ping the peer every client_ping_interval seconds; drop the client
        when no pong arrives within client_timeout."""
        logger.debug('keepalive starting')
        await asyncio.sleep(5)
        while True:
            try:
                pong_waiter = await ws.ping()
                await asyncio.wait_for(pong_waiter, timeout=self.client_timeout)
                logger.debug('Server still alive!')
                await asyncio.sleep(self.client_ping_interval)
            except asyncio.TimeoutError:
                logger.info('Server timed out, dropping client!')
                await client.in_queue.put('kill')
                return
            except Exception as e:
                logger.exception('Keepalive died!')
                return

    async def register(self, ws):
        """Run the registration handshake.

        Returns a new CommsAgentClient, or None on failure.
        """
        try:
            msg = await ws.recv()
            cc = ClientCmd.from_msg(msg)
            logger.debug('CMD recieved! %s' % str(type(cc)))
            client_uuid = cc.cmd.client_uuid
            rply = RegisterRply()
            rply.client_uuid = client_uuid
            msg = ClientRply()
            msg.uuid = cc.uuid
            msg.rply = rply
            data = msg.to_msg()
            await ws.send(data)
            client_in_queue = asyncio.Queue()
            client_out_queue = asyncio.Queue()
            logger.debug('%s Registration succseeded! Got UUID: %s' % (self.name, client_uuid))
            return CommsAgentClient(client_uuid, client_in_queue, client_out_queue)
        except Exception as e:
            # logger.exception() requires a message argument; the original bare
            # call raised a TypeError and masked the real error.
            logger.exception('%s register' % (self.name,))
            return

    async def handle_client_out(self, ws, client):
        """Forward replies from the client object to the websocket peer."""
        while True:
            try:
                rply = await client.out_queue.get()
                msg = ClientRply()
                msg.uuid = str(uuid.uuid4())
                msg.rply = rply
                data = msg.to_msg()
                logger.debug('%s Sending data to server: %s' % (self.name, data))
                await ws.send(data)
            except Exception as e:
                logger.exception(self.name)
                return

    async def handle_client_in(self, ws, client):
        """Forward commands from the websocket peer into the client's queue."""
        while True:
            try:
                msg = await ws.recv()
                logger.debug('%s Got command from server: %s' % (self.name, msg))
                cr = ClientCmd.from_msg(msg)
                await client.in_queue.put(cr.cmd)
            except Exception as e:
                logger.exception(self.name)
                return

    async def handle_client(self, ws, path):
        """Per-connection entry point: register, then run keepalive and pumps."""
        logger.debug('JS proxy connected from %s:%d' % ws.remote_address)
        try:
            cc = await self.register(ws)
            asyncio.ensure_future(self.keepalive(ws, cc))
            asyncio.ensure_future(self.handle_client_in(ws, cc))
            asyncio.ensure_future(self.handle_client_out(ws, cc))
            await cc.run()
        except Exception as e:
            logger.exception(self.name)
            return

    async def run(self):
        """Start the fake HTTP server and the websocket listener."""
        logger.debug('Starting listening agent!')
        try:
            fh = FakeHTTPServer(logger = logger)
            asyncio.ensure_future(fh.run())
            ws_server = await websockets.serve(self.handle_client, self.listen_ip, self.listen_port)
            return ws_server
        except Exception as e:
            logger.exception('Failed to start server!')
            return
class CommsAgentServer:
    """Websocket-connecting side: dials out to the server (optionally through a
    local FakeHTTPProxy CONNECT tunnel) and runs the comms agent loop."""

    def __init__(self, url, proxy = None, proxy_listen_ip = None, proxy_listen_port = None):
        self.url = url
        self.uuid = None
        self.proxy = proxy
        self.proxy_listen_ip = proxy_listen_ip
        self.proxy_listen_port = proxy_listen_port
        self.proxy_server = None
        self.name = '[CommsAgentServer]'
        if self.proxy:
            dest_url = urlparse(self.url)
            destination = dest_url.netloc
            # Rewrite the target URL so the websocket connects to the local
            # proxy listener, which tunnels on to the original destination.
            url = list(urlsplit(self.url))
            url[1] = '%s:%d' % (self.proxy_listen_ip, self.proxy_listen_port)
            self.url = urlunsplit(url)
            logger.debug('Original destination rewritten to connect to proxy! Final url: %s' % self.url)
            self.proxy_server = FakeHTTPProxy(self.proxy, destination, self.proxy_listen_ip, self.proxy_listen_port)

    async def register(self, ws):
        """Registration handshake with the server; returns a CommsAgentClient."""
        msg = await ws.recv()
        cc = ClientCmd.from_msg(msg)
        logger.debug('CMD recieved! %s' % str(type(cc)))
        client_uuid = cc.cmd.client_uuid
        rply = RegisterRply()
        rply.client_uuid = client_uuid
        msg = ClientRply()
        msg.uuid = cc.uuid
        msg.rply = rply
        data = msg.to_msg()
        await ws.send(data)
        client_in_queue = asyncio.Queue()
        client_out_queue = asyncio.Queue()
        # Log before returning: the original placed this after 'return',
        # making it unreachable.
        logger.debug('%s Registration succseeded! Got UUID: %s' % (self.name, client_uuid))
        return CommsAgentClient(client_uuid, client_in_queue, client_out_queue)

    async def handle_client_out(self, ws, client):
        """Forward replies from the client object up the websocket."""
        while True:
            rply = await client.out_queue.get()
            msg = ClientRply()
            msg.uuid = str(uuid.uuid4())
            msg.rply = rply
            data = msg.to_msg()
            logger.debug('%s Sending data to server: %s' % (self.name, data))
            await ws.send(data)

    async def handle_client_in(self, ws, client):
        """Forward commands from the websocket into the client's queue."""
        while True:
            msg = await ws.recv()
            logger.debug('%s Got command from server: %s' % (self.name, msg))
            cr = ClientCmd.from_msg(msg)
            await client.in_queue.put(cr.cmd)

    async def run(self):
        """Connect (optionally via the local proxy tunnel), register, run pumps."""
        try:
            if self.proxy_server:
                asyncio.ensure_future(self.proxy_server.run())
            async with websockets.connect(self.url) as ws:
                client = await self.register(ws)
                asyncio.ensure_future(self.handle_client_in(ws, client))
                asyncio.ensure_future(self.handle_client_out(ws, client))
                await client.run()
        except Exception as e:
            logger.exception('Error in main loop!')
            return
import os
import logging
import time
from dataclasses import dataclass
from typing import List, Tuple, Optional, TypeVar, Type
from medcat.cdb import CDB
from medcat.utils.decorators import check_positive
T = TypeVar("T", bound="Checkpoint")
class Checkpoint(object):
    r""" The base class of checkpoint objects

    Args:
        dir_path (str):
            The path to the parent directory of checkpoint files
        steps (int):
            The number of processed sentences/documents before a checkpoint is saved
            (N.B.: A small number could result in error "no space left on device")
        max_to_keep (int):
            The maximum number of checkpoints to keep
            (N.B.: A large number could result in error "no space left on device")
    """

    DEFAULT_STEP = 1000
    DEFAULT_MAX_TO_KEEP = 1

    # Shared logger for all checkpoint instances.
    log = logging.getLogger(__package__)

    @check_positive  # rejects non-positive 'steps' / 'max_to_keep'
    def __init__(self, dir_path: str, *, steps: int = DEFAULT_STEP, max_to_keep: int = DEFAULT_MAX_TO_KEEP) -> None:
        self._dir_path = os.path.abspath(dir_path)
        self._steps = steps
        self._max_to_keep = max_to_keep
        self._file_paths: List[str] = []  # retained checkpoint files, oldest first
        self._count = 0  # steps finished at the latest checkpoint
        os.makedirs(self._dir_path, exist_ok=True)

    @property
    def steps(self) -> int:
        return self._steps

    @steps.setter
    def steps(self, value: int) -> None:
        # Reuse the decorator to validate the new value.
        check_positive(lambda _: ...)(value)  # [https://github.com/python/mypy/issues/1362]
        self._steps = value

    @property
    def max_to_keep(self) -> int:
        return self._max_to_keep

    @max_to_keep.setter
    def max_to_keep(self, value: int) -> None:
        check_positive(lambda _: ...)(value)  # [https://github.com/python/mypy/issues/1362]
        self._max_to_keep = value

    @property
    def count(self) -> int:
        return self._count

    @property
    def dir_path(self) -> str:
        return self._dir_path

    @classmethod
    def from_latest(cls: Type[T], dir_path: str) -> T:
        r'''
        Retrieve the latest checkpoint from the parent directory.

        Args:
            dir_path (string):
                The path to the directory containing checkpoint files

        Returns:
            A new checkpoint object

        Raises:
            Exception: when the directory or the checkpoint files do not exist.
        '''
        if not os.path.isdir(dir_path):
            raise Exception("Checkpoints not found. You need to train from scratch.")
        ckpt_file_paths = cls._get_ckpt_file_paths(dir_path)
        if not ckpt_file_paths:
            raise Exception("Checkpoints not found. You need to train from scratch.")
        # Paths are sorted by finished-step count, so the last one is latest.
        latest_ckpt = ckpt_file_paths[-1]
        steps, count = cls._get_steps_and_count(latest_ckpt)
        checkpoint = cls(dir_path, steps=steps)
        checkpoint._file_paths = ckpt_file_paths
        checkpoint._count = count
        cls.log.info(f"Checkpoint loaded from {latest_ckpt}")
        return checkpoint

    def save(self, cdb: CDB, count: int) -> None:
        r'''
        Save the CDB as the latest checkpoint.

        Args:
            cdb (medcat.CDB):
                The MedCAT CDB object to be checkpointed
            count (int):
                The number of the finished steps
        '''
        ckpt_file_path = os.path.join(os.path.abspath(self._dir_path), "checkpoint-%s-%s" % (self.steps, count))
        # Evict the oldest checkpoints first so disk space is freed before
        # the new (potentially large) checkpoint is written.
        while len(self._file_paths) >= self._max_to_keep:
            to_remove = self._file_paths.pop(0)
            os.remove(to_remove)
        cdb.save(ckpt_file_path)
        self.log.debug("Checkpoint saved: %s", ckpt_file_path)
        self._file_paths.append(ckpt_file_path)
        self._count = count

    def restore_latest_cdb(self) -> CDB:
        r'''
        Restore the CDB from the latest checkpoint.

        Returns:
            cdb (medcat.CDB):
                The MedCAT CDB object

        Raises:
            Exception: when the directory or the checkpoint files do not exist.
        '''
        if not os.path.isdir(self._dir_path):
            raise Exception("Checkpoints not found. You need to train from scratch.")
        ckpt_file_paths = self._get_ckpt_file_paths(self._dir_path)
        if not ckpt_file_paths:
            raise Exception("Checkpoints not found. You need to train from scratch.")
        latest_ckpt = ckpt_file_paths[-1]
        _, count = self._get_steps_and_count(latest_ckpt)
        self._file_paths = ckpt_file_paths
        self._count = count
        return CDB.load(self._file_paths[-1])

    @staticmethod
    def _get_ckpt_file_paths(dir_path: str) -> List[str]:
        # Collect all "checkpoint-<steps>-<count>" files and sort them by the
        # finished-step count so the latest checkpoint comes last.
        ckpt_file_paths = [os.path.abspath(os.path.join(dir_path, f)) for f in os.listdir(dir_path)]
        ckpt_file_paths = [f for f in ckpt_file_paths if os.path.isfile(f) and "checkpoint-" in f]
        if ckpt_file_paths:
            ckpt_file_paths.sort(key=lambda f: Checkpoint._get_steps_and_count(f)[1])
        return ckpt_file_paths

    @staticmethod
    def _get_steps_and_count(file_path: str) -> Tuple[int, int]:
        # File name format: checkpoint-<steps>-<count>
        file_name_parts = os.path.basename(file_path).split('-')
        return int(file_name_parts[1]), int(file_name_parts[2])
@dataclass
class CheckpointConfig(object):
    """Configuration bundle consumed by CheckpointManager."""
    # Base directory (relative to the CWD) under which checkpoints are stored.
    output_dir: str = "checkpoints"
    # Number of processed documents between checkpoint saves.
    steps: int = Checkpoint.DEFAULT_STEP
    # Maximum number of checkpoint files kept per training.
    max_to_keep: int = Checkpoint.DEFAULT_MAX_TO_KEEP
class CheckpointManager(object):
    r"""
    The class for managing checkpoints of specific training type and their configuration

    Args:
        name (str):
            The name of the checkpoint manager (also used as the checkpoint base directory name).
        checkpoint_config (medcat.utils.checkpoint.CheckpointConfig):
            The checkpoint config object.
    """

    def __init__(self, name: str, checkpoint_config: CheckpointConfig) -> None:
        self.name = name
        self.checkpoint_config = checkpoint_config

    def create_checkpoint(self, dir_path: Optional[str] = None) -> "Checkpoint":
        r'''
        Create a new checkpoint inside the checkpoint base directory.

        Args:
            dir_path (str):
                The path to the checkpoint directory

        Returns:
            A checkpoint object
        '''
        if dir_path is None:
            # Default: <cwd>/<output_dir>/<name>/<unix-timestamp>
            dir_path = os.path.join(
                os.path.abspath(os.getcwd()),
                self.checkpoint_config.output_dir,
                self.name,
                str(int(time.time())),
            )
        cfg = self.checkpoint_config
        return Checkpoint(dir_path, steps=cfg.steps, max_to_keep=cfg.max_to_keep)

    def get_latest_checkpoint(self, base_dir_path: Optional[str] = None) -> "Checkpoint":
        r'''
        Retrieve the latest checkpoint from the checkpoint base directory.

        Args:
            base_dir_path (string):
                The path to the directory containing checkpoint files

        Returns:
            A checkpoint object
        '''
        if base_dir_path is None:
            base_dir_path = os.path.join(
                os.path.abspath(os.getcwd()),
                self.checkpoint_config.output_dir,
                self.name,
            )
        latest_training = self.get_latest_training_dir(base_dir_path=base_dir_path)
        checkpoint = Checkpoint.from_latest(dir_path=latest_training)
        # Override the persisted settings with the configured ones.
        checkpoint.steps = self.checkpoint_config.steps
        checkpoint.max_to_keep = self.checkpoint_config.max_to_keep
        return checkpoint

    @classmethod
    def get_latest_training_dir(cls, base_dir_path: str) -> str:
        r'''
        Retrieve the latest training directory containing all checkpoints.

        Args:
            base_dir_path (string):
                The path to the directory containing all checkpointed trainings

        Returns:
            The path to the latest training directory containing all checkpoints.
        '''
        if not os.path.isdir(base_dir_path):
            raise ValueError(f"Checkpoint folder passed in does not exist: {base_dir_path}")
        # Training dirs are named by creation timestamp, so the
        # lexicographically greatest entry is the most recent one.
        training_dirs = sorted(os.listdir(base_dir_path))
        if not training_dirs:
            raise ValueError("No existing training found")
        return os.path.abspath(os.path.join(base_dir_path, training_dirs[-1]))
|
from .fdc import FDC
from .classify import CLF
import numpy as np
import pickle
from scipy.cluster.hierarchy import dendrogram as scipydendroed
from scipy.cluster.hierarchy import to_tree
from .hierarchy import compute_linkage_matrix
import copy
from collections import OrderedDict as OD
from collections import Counter
class TREENODE:
    """A single node of the cluster-hierarchy tree.

    Holds an integer id, the merge scale at which the node was created, a
    reference to its parent and an ordered list of child TREENODE objects.
    """

    def __init__(self, id_ = -1, parent = None, child = None, scale = -1):
        # Never share a default list between instances.
        self.child = [] if child is None else child  # list of TREENODE
        self.scale = scale
        self.parent = parent
        self.id_ = id_

    def __repr__(self):
        return "Node: [%s] @ s = %.3f" % (self.id_, self.scale)

    def is_leaf(self):
        """True when this node has no children."""
        return not self.child

    def get_child(self, id_ = None):
        """Return all children, or the single child with the given id."""
        if id_ is None:
            return self.child
        for node in self.child:
            if node.get_id() == id_:
                return node

    def get_child_id(self):
        """Ids of all direct children (empty list for a leaf)."""
        return [node.id_ for node in self.child]

    def get_scale(self):
        return self.scale

    def get_id(self):
        return self.id_

    def add_child(self, treenode):
        self.child.append(treenode)

    def remove_child(self, treenode):
        self.child.remove(treenode)

    def get_rev_child(self):
        """Children in reverse order (a new list; self.child is untouched)."""
        return list(reversed(self.child))
class TREE:
""" Contains all the hierachy and information concerning the clustering """
    def __init__(self, n_average = 50, cv_score = 0., min_size = 50, test_size_ratio = 0.5, ignore_root = False):
        """Tree model to deal with hierarchical clustering stored in FDC().hierarchy

        Parameters
        -----------
        n_average: int, optional (default = 50)
            number of classifiers that will be trained on different random partitions. the score of each
            node of the tree corresponds to the average score. the prediction is obtained using a majority vote.

        cv_score: float, optional (default = 0.0)
            if the average cross-validation score for a node is above this value, it is called a robust node.
            should be starting with cv_score = 0, then sweeping up, using bottom-up clustering

        min_size: int, optional (default = 50)
            minimal size of a cluster

        test_size_ratio: float (default = 0.5)
            size of the test set to be used when cross-validating

        ignore_root: bool (default = False)
            whether or not to ignore the root score when checking for robust nodes.
            the root is special since in many density hierarchies, the top-most clusters will never be merged together;
            thus the root as a node will split into multiple (more than 2) high-level clusters, whereas all other nodes will typically
            split into 2 clusters.

        Return
        ------------
        self: TREE object
        """
        self.node_dict = None
        self.mergers = None
        self.new_cluster_label = None
        self.robust_terminal_node = None  # list of the terminal robust nodes
        self.robust_clf_node = None  # full information about classification is recorded here, keys of dict are the classifying nodes id
        self.all_robust_node = None  # list of all nodes in the robust tree (classifying nodes and leaf nodes)
        self.cluster_to_node_id = None  # dictionary mapping cluster labels (displayed on plot) with node id
        self.new_idx_centers = None
        self.tree_constructed = False
        # ------------------> Classifier information
        self.ignore_root = ignore_root
        self.n_average = n_average
        self.cv_score = cv_score
        self.min_size = min_size
        self.test_size_ratio = test_size_ratio
    def build_tree(self, model):
        """Given hierarchy, builds a tree of the clusterings. The nodes are objects of the TREENODE class.

        Parameters
        ---------
        model : object from the FDC class
            contains the fitted hierarchy produced via the coarse_graining() method

        Returns
        ---------
        None. Populates self.root, self.node_dict (TREENODE objects keyed by
        merger id) and self.mergers (list of merges with their scales).
        """
        if self.tree_constructed is True:
            return  # idempotent: build only once
        # NOTE(review): find_mergers is defined elsewhere in this module;
        # from the usage below it appears to yield (member_ids, merged_id,
        # scale) triples ordered bottom-up — confirm against its definition.
        mergers = find_mergers(model.hierarchy, model.noise_range)
        mergers.reverse()  # process top-most merge first
        m = mergers[0]
        self.node_dict = OD()
        # The top-most merger's target is the root of the tree.
        self.root = TREENODE(id_ = m[1], scale = m[2])
        self.node_dict[self.root.get_id()] = self.root
        for m in mergers:
            for mc in m[0]:
                # Attach each merged member as a child of the merge target.
                c_node = TREENODE(id_ = mc, parent = self.node_dict[m[1]], child = [], scale = -1)
                self.node_dict[m[1]].add_child(c_node)
                self.node_dict[c_node.get_id()] = c_node
            self.node_dict[m[1]].scale = m[2]
        self.mergers = mergers
        self.tree_constructed = True
def merge_nodes(self, node_list, target_node):
for node in node_list:
parent = node.parent
target_node.add_child(node) # transfering node
parent.remove_child(node)
def node_items(self): # breath-first ordering
""" Returns the full list of nodes below the root
"""
stack = [self.root]
list_nodes = []
while stack:
current_node = stack[0]
list_nodes.append(current_node)
for c in current_node.child:
stack.append(c)
stack = stack[1:]
return list_nodes
def identify_robust_merge(self, model, X):
"""Starting from the root, goes down the tree and evaluates which clustering nodes are robust.
Each node in the tree corresponds to a partitioning of the a subset of the data. For each
node one computes a cross-validation score on a downsampled dataset in order to compare nodes.
The nodes that are robust and are terminal (meaning no robust nodes exist below them) are
stored in two attributes:
self.robust_terminal_node (list) # list of the terminal node indices (which can then be accessed by self.node_dict)
self.robust_clf_node (dict) # dictionary of classifiers (CLF objects from classify.py) - keys are node indices ;
"""
self.build_tree(model) # Extracts all the information from model and outputs a tree
root, node_dict, mergers = self.root, self.node_dict, self.mergers
print("[tree.py] : Printing two top-most layers")
print("[tree.py] : Root :", root)
print("[tree.py] : Root's childs :", root.get_child())
self.compute_robust_node(model, X)
# Listing all nodes in the robust tree ...
all_robust_node = set([])
for k, _ in self.robust_clf_node.items():
all_robust_node.add(k)
current_node = node_dict[k]
for c in current_node.child:
all_robust_node.add(c.get_id())
self.all_robust_node = list(all_robust_node)
    def compute_robust_node(self, model, X):
        """ Start from the root, computes the classification score at every branch in the tree
        and stops if classication score is below a certain threshold.

        Results are stored in:
            self.robust_clf_node : dict of node id -> fitted classifier (CLF) for that partition
            self.robust_terminal_node : list of terminal node ids, whose parents are robust classifiers.

        On the first call both attributes are built top-down from scratch; on
        subsequent calls (attributes already set) only bottom-up merges are
        performed against the current self.cv_score threshold.
        """
        if self.robust_clf_node is None:
            # First call: initialise the containers; the top-down pass below fills them.
            self.robust_terminal_node = [] #list of the terminal robust nodes
            self.robust_clf_node = OD() # dictionary of the nodes where a partition is made (non-leaf nodes)
        else:
            ######## once the tree has been fully fitted, will just perform bottom-up merges based on desired score
            while True:
                found_merge = False
                for node_id in self.robust_terminal_node:
                    p = self.node_dict[node_id].parent
                    if p is None: # root is reached !
                        break
                    clf = self.robust_clf_node[p.get_id()]
                    # Effective score = mean CV score minus one std; below threshold -> merge.
                    if clf.cv_score - clf.cv_score_std < self.cv_score: # remove that node
                        # 2 cases here:
                        # 1. node is merged with another leaf node
                        # 2. node is merged with a subtree (which can create big instabilities !)
                        case = 1
                        for c in p.get_child_id():
                            if c not in self.robust_terminal_node:
                                case = 2
                                break
                        if case == 1:
                            self.robust_clf_node.pop(p.get_id())
                        #print('popped')
                        # Clear the whole subtree below p: none of its nodes remain in the robust tree.
                        sub_node_list = breath_first_search(p)[1:]
                        for n in sub_node_list: # need to clear out the full subtree
                            if n in self.robust_clf_node.keys():
                                self.robust_clf_node.pop(n)
                            if n in self.robust_terminal_node:
                                self.robust_terminal_node.remove(n)
                        found_merge = True
                        self.robust_terminal_node.append(p.get_id()) # parent now becomes the terminal node
                        break
                if found_merge is False:
                    break
            return
        # ---- initial top-down pass: score the root first ----
        if self.root.get_id() in self.robust_clf_node.keys():
            clf = self.robust_clf_node[self.root.get_id()]
        else:
            clf = self.classify_node(self.root.get_child(), model, X)
        min_cv_score = self.cv_score
        clf_score = clf.cv_score
        std_score = clf.cv_score_std
        if self.ignore_root is True:
            # ignore_root forces acceptance of the root regardless of its score
            print("[tree.py] : root is ignored, # %i \t score = %.4f"%(self.root.get_id(), clf_score))
            self.robust_clf_node[self.root.get_id()] = clf
        else:
            if clf_score-std_score > min_cv_score: # --- search stops if the node is not statistically signicant (threshold)
                node_info(self.root, clf_score, std_score, min_cv_score)
                self.robust_clf_node[self.root.get_id()] = clf
            else:
                print("[tree.py] : root not robust # %i \t score = %.4f"%(self.root.get_id(),clf_score))
        # ---- breadth-first over the remaining nodes: only descend below robust parents ----
        for current_node in self.node_items()[1:]:
            if current_node.parent.get_id() in self.robust_clf_node.keys():
                if not current_node.is_leaf():
                    if current_node.get_id() in self.robust_clf_node.keys():
                        clf = self.robust_clf_node[current_node.get_id()]  # already fitted on an earlier call
                    else:
                        clf = self.classify_node(current_node.get_child(), model, X)
                    clf_score = clf.cv_score
                    std_score = clf.cv_score_std
                    if clf_score-std_score > min_cv_score: # --- search stops if the node is not statistically signicant (threshold)
                        node_info(current_node, clf_score, std_score, min_cv_score)
                        self.robust_clf_node[current_node.get_id()] = clf
                    else:
                        node_info(current_node, clf_score, std_score, min_cv_score)
                        self.robust_terminal_node.append(current_node.get_id())
                else: # implies it's parent was robust, and is a leaf node
                    self.robust_terminal_node.append(current_node.get_id())
def fit(self, model, X, cv_score = None):
""" Finds the merges that are statistically significant (i.e. greater than the cv_score)
and relabels the data accordingly
Trick here: first use a low threshold (will compute the tree down to it's lowest components)
Then one can just iterate quickly over score threshold ...
Parameters
------
model : FDC object
Contains the coarse graining information
X : array, shape = (n_sample, n_marker)
Contains the data in the original space
n_average : int
Number of folds in the cross validation
cv_score : float
Classification score threshold
Returns
---------
self : TREE() object
"""
n_average = self.n_average
if cv_score is None:
cv_score = self.cv_score
else:
self.cv_score = cv_score
if cv_score > 1.0 or cv_score < 0.0:
assert False, "** cv_score must be between 0.0 and 1.0 **"
print('[tree.py] : fitting with cv_score = %.4f'%self.cv_score)
self.identify_robust_merge(model, X) # fitting all the classifiers
robust_terminal_node = self.robust_terminal_node # this is a list
root = self.root
node_dict = self.node_dict
mergers = self.mergers
##### Below : relabelling data to output final labels according to classifiers
cluster_n = len(robust_terminal_node)
n_sample = len(model.X)
y_robust = -1*np.ones(n_sample,dtype=np.int)
y_original = model.hierarchy[0]['cluster_labels']
cluster_to_node_id = OD()
# here all terminal nodes are given a label, in the same order they are stored.
y_node = classification_labels([node_dict[i] for i in robust_terminal_node], model)
assert np.count_nonzero(y_node == -1) == 0, "Wrong labelling or ROOT is not robust ... !"
for i, node_id in enumerate(robust_terminal_node):
pos = (y_node == i)
y_robust[pos] = i
cluster_to_node_id[i] = node_id
#print(cluster_n)
#print(Counter(y_node))
if len(robust_terminal_node) == 0:
y_robust *= 0 # only one coloring
new_idx_centers = []
all_idx = np.arange(0, model.X.shape[0], dtype=int)
for i in range(cluster_n):
pos_i = (y_robust == i)
max_rho = np.argmax(model.rho[y_robust == i])
idx_i = all_idx[pos_i][max_rho]
new_idx_centers.append(idx_i)
self.new_cluster_label = y_robust
self.new_idx_centers = np.array(new_idx_centers,dtype=int)
self.cluster_to_node_id = cluster_to_node_id
self.node_to_cluster_id = OD({v: k for k, v in self.cluster_to_node_id.items()})
print("\n")
print("[tree.py] : -----------> VALIDATION SCORING INFORMATION < -----------------")
print("[tree.py] : ", "{0:<15s}{1:<15s}{2:<15s}{3:<15s}{4:<15s}{5:15s}".format("Terminal node", "Parent node", "Displayed node", "Cv score", "Cv +-","Effective CV"))
for n in robust_terminal_node:
p = self.node_dict[n].parent
if p is not None:
p_id = p.get_id()
cv_score = self.robust_clf_node[p_id].cv_score
cv_score_std = self.robust_clf_node[p_id].cv_score_std
print("[tree.py] : ", "{0:<15d}{1:<15d}{2:<15d}{3:<15.4f}{4:<15.5f}{5:<15.4f}".format(n,p_id,self.node_to_cluster_id[n],
cv_score, cv_score_std, cv_score - cv_score_std))
else:
print("[tree.py] : root reached -> one cluster remaining")
return self
def predict(self, X):
""" Uses the root classifiers to perform a hierarchical classification of the nodes !
need to do recursive classification ...
"""
terminal_nodes = set(self.robust_terminal_node)
node_to_cluster = self.node_to_cluster_id
y_pred = -1*np.ones(len(X))
for i, x in enumerate(X):
if i% 1000 == 0:
print(i)
current_clf_node = self.root # recursively go down the tree, starting from root
current_id = current_clf_node.get_id()
while True:
if current_clf_node.get_id() in terminal_nodes:
y_pred[i] = node_to_cluster[current_id] # reached the leaf node
break
else:
y_branch = self.robust_clf_node[current_id].predict([x])[0]
child_list = current_clf_node.child
current_clf_node = child_list[y_branch] # go down one layer
current_id = current_clf_node.get_id()
return y_pred
def save(self, name=None):
""" Saves current model to specified path 'name' """
if name is None:
name = self.make_file_name()
fopen = open(name,'wb')
pickle.dump(self,fopen)
fopen.close()
def load(self, name=None):
if name is None:
name = self.make_file_name()
self.__dict__.update(pickle.load(open(name,'rb')).__dict__)
return self
def make_file_name(self):
t_name = "clf_tree.pkl"
return t_name
    def write_result_mathematica(self, model, marker) : # graph should be a dict of list
        """
        -> Saves results in .txt files, which are easily read with a Mathematica
        script for ez plotting ...

        Writes four files via the write_*_mathematica helpers: the tree edges,
        the per-node classification scores, the discriminative gates and the
        node-id -> cluster-label mapping.
        """
        if self.robust_clf_node is None :
            assert False, "Model not yet fitted !"
        # NOTE(review): find_full_gate is not defined in this file -- confirm it exists on this class
        self.gate_dict = self.find_full_gate(model)
        my_graph = OD()
        my_graph_score = OD()
        for e,v in self.robust_clf_node.items():
            my_graph[e] = []
            # NOTE(review): elsewhere in this file classifier results are read as
            # attributes (v.cv_score); subscripting v['mean_score'] looks like a
            # stale API -- confirm against the CLF class in classify.py
            my_graph_score[e] = v['mean_score']
            for c in self.node_dict[e].child:
                my_graph[e].append(c.get_id())
        self.graph = my_graph
        self.graph_score = my_graph_score
        self.write_graph_mathematica()
        self.write_graph_score_mathematica()
        self.write_gate_mathematica(self.gate_dict, marker)
        self.write_cluster_label_mathematica()
def write_graph_mathematica(self, out_file = "graph.txt"):
""" Writes graph in mathematica readable format """
f = open(out_file,'w')
my_string_list = []
for node_id, node_childs in self.graph.items(): # v is a list
for child in node_childs :
my_string_list.append("%i -> %i"%(node_id, child))
f.write(",".join(my_string_list))
f.close()
def write_graph_score_mathematica(self, out_file = "graph_score.txt"):
""" Writes scores of classification for every division node """
f = open(out_file, 'w')
string_list = []
for k, v in self.graph_score.items():
string_list.append('%i -> % .5f'%(k,v))
f.write(','.join(string_list))
f.close()
def write_gate_mathematica(self, gate_dict, marker, out_file = "gate.txt"):
""" Writes most important gates for discriminating data in a classification """
f = open(out_file, 'w')
string_list = []
for k, g in gate_dict.items():
string_list.append("{%i -> %i, \"%s\"}"%(k[0],k[1],str_gate(marker[g[0][0]],g[1][0])))
f.write("{")
f.write(','.join(string_list))
f.write("}")
f.close()
def write_cluster_label_mathematica(self, out_file = "n_to_c.txt"): # cton is a dictionary of clusters to node id
""" Node id to cluster labels """
f = open(out_file, 'w')
string_list = []
for k, v in self.cluster_to_node_id.items():
string_list.append("{%i -> %i}"%(v,k))
f.write("<|")
f.write(','.join(string_list))
f.write("|>")
f.close()
def print_mapping(self):
print("Mapping of terminal nodes to plotted labels:")
[print(k, " -> ", v) for k,v in OD(self.node_to_cluster_id).items()]
def describe_clusters(self, X_standard, cluster_label = None, marker = None, perc = 0.05):
""" Checks the composition of each clusters in terms of outliers (define by top and bottom perc)
Parameters
--------------
X_standard : array, shape = (n_sample, n_marker)
Data array with raw marker expression
cluster_label : optional, array, shape = n_sample
Cluster labels for each data point. If none, just uses the labels infered by the Tree
marker : optional, list of str, len(list) = n_marker
Marker labels. If not specified will use marker_0, marker_1, etc.
perc : optional, float
The percentage of most and least expressed data points for a marker that you consider outliers
Return
-------------
df_pos, df_neg : tuple of pandas.DataFrame
dataframes with row index as markers and columns as cluster labels. An additional row also
indicates the size of each cluster as a fraction of the total sample.
"""
if cluster_label is None:
cluster_label = self.new_cluster_label
label_to_idx = OD() # cluster label to data index
unique_label = np.unique(cluster_label)
n_sample, n_marker = X_standard.shape
if marker is None:
marker = ['marker_%i'%i for i in range(n_marker)]
assert n_sample == len(X_standard)
n_perc = int(round(0.05*n_sample))
for ul in unique_label:
label_to_idx[ul] = np.where(cluster_label == ul)[0]
idx_top = []
idx_bot = []
for m in range(n_marker):
asort = np.argsort(X_standard[:,m])
idx_bot.append(asort[:n_perc]) # botoom most expressed markers
idx_top.append(asort[-n_perc:]) # top most expressed markers
cluster_positive_composition = OD()
cluster_negative_composition = OD()
for label, idx in label_to_idx.items():
# count percentage of saturated markers in a given cluster ...
# compare that to randomly distributed (size_of_cluster/n_sample)*n_perc
cluster_positive_composition[label] = []
cluster_negative_composition[label] = []
for m in range(n_marker):
ratio_pos = len(set(idx_top[m]).intersection(set(idx)))/len(idx_top[m])
ratio_neg = len(set(idx_bot[m]).intersection(set(idx)))/len(idx_bot[m])
cluster_positive_composition[label].append(ratio_pos)
cluster_negative_composition[label].append(ratio_neg)
df_pos = pd.DataFrame(cluster_positive_composition, index = marker)
df_neg = pd.DataFrame(cluster_negative_composition, index = marker)
cluster_ratio_size = np.array([len(label_to_idx[ul])/n_sample for ul in unique_label])
df_cluster_ratio_size = pd.DataFrame(cluster_ratio_size.reshape(1,-1), index = ['Cluster_ratio'], columns = label_to_idx.keys())
# data frame, shape = (n_marker + 1, n_cluster) with index labels [cluster_ratio, marker_1, marker_2 ...]
df_pos_new = df_cluster_ratio_size.append(df_pos)
df_neg_new = df_cluster_ratio_size.append(df_neg)
return df_pos_new, df_neg_new
##############################################
###############################################
def classify_node(self, node_list, model, X, C=1.0):
""" Trains a classifier on the childs of "root" and returns a classifier for these types.
Important attributes are (for CLF object):
self.scaler_list -> [mu, std]
self.cv_score -> mean cv score
self.mean_train_score -> mean train score
self.clf_list -> list of sklearn classifiers (for taking majority vote)
Returns
---------
CLF object (from classify.py). Object has similar syntax to sklearn's classifier syntax
"""
## ok need to down sample somewhere here
min_size = self.min_size
test_size_ratio = self.test_size_ratio
n_average = self.n_average
y = classification_labels(node_list, model)
if len(np.unique(y)) == 1:
return CLF(clf_type='trivial')
pos_subset = (y != -1)
Xsubset = X[pos_subset] # original space coordinates
ysubset = y[pos_subset] # labels
count = Counter(ysubset)
for v in count.values():
if v < min_size: # cluster should be merged, it is considered too small
fake_clf = CLF()
fake_clf.cv_score = -1.
fake_clf.cv_score_std = -1.
return fake_clf
return CLF(clf_type='svm', n_average=n_average).fit(Xsubset, ysubset)
def classification_labels(node_list, model):
    """ Returns a list of labels for the original data according to the classification
    given at root. root is a TreeNode object which contains childrens. Each children (and the data it contains)
    is assigned an arbitrary integer label. Data points not contained in that node are labelled as -1.

    Parameters
    -------
    node_list : list of nodes, these should be the child of a parent node for instance.
    model : FDC object

    Returns
    --------
    1D array of labels
    """
    n_sample = len(model.X)
    # np.int was removed in NumPy 1.24 -- the builtin int is the correct dtype here
    y = -1*np.ones(n_sample, dtype=int)
    y_init = model.hierarchy[0]['cluster_labels'] # full set of labels at smallest scale ...
    for i, node in enumerate(node_list): # all data points contained in a node take label i
        init_c = find_idx_cluster_in_root(model, node)
        for ic in init_c:
            # relabelling here according to merger
            y[y_init == ic] = i
    return y
def find_mergers(hierarchy, noise_range):
    """ Determines the list of merges that are made during the coarse-graining

    Parameters
    ----------
    hierarchy : list of dict
        One entry per noise scale; each entry carries 'idx_centers' and 'cluster_labels'.
    noise_range : list
        Noise scales corresponding to the hierarchy entries.

    Returns
    -------
    merger_record : list of [merged_ids, new_id, scale]
        merged_ids are the node ids combined into node new_id at the given scale.
        A final top entry merges whatever ids remain unmerged.
    """
    n_depth = len(noise_range)  # NOTE(review): unused -- kept for reference
    n_initial_cluster = len(hierarchy[0]['idx_centers'])
    initial_labels = hierarchy[0]['cluster_labels']
    n_pre_cluster = n_initial_cluster
    # new merged nodes get fresh ids starting just above the initial cluster ids
    current_merge_idx = n_initial_cluster
    n_merge = 0
    # merging_dict maps node id -> id it was merged into (-1 = not merged yet)
    merging_dict = OD()
    merger_record = []
    for i in range(n_initial_cluster):
        merging_dict[i] = -1
    for i, d in enumerate(noise_range[1:]):
        n_cluster = len(hierarchy[i+1]['idx_centers'])
        if n_pre_cluster != n_cluster: # merger(s) have occured
            for j in range(n_cluster):
                elements_mask = (hierarchy[i+1]['cluster_labels'] == j)
                content = hierarchy[i]['cluster_labels'][elements_mask]
                tmp = np.unique(content)
                if len(tmp) > 1 :
                    # cluster j at this scale contains several clusters of the previous scale
                    tmp_u = np.unique(initial_labels[elements_mask])
                    mapped_u = []
                    for k in tmp_u:
                        # follow each initial label up the merger chain to its current node id
                        mapped_k = apply_map(merging_dict, k)
                        mapped_u.append(mapped_k)
                    mapped_u = np.unique(mapped_u)
                    for e in mapped_u:
                        merging_dict[e] = current_merge_idx
                    merger_record.append([list(mapped_u), current_merge_idx, d])
                    merging_dict[current_merge_idx] = -1
                    current_merge_idx +=1
    # merge remaining ----
    mapped_u = []
    for k, v in merging_dict.items():
        if v == -1:
            mapped_u.append(k)
    # adding top row !!
    if len(merger_record) == 0:
        merger_record.append([mapped_u, current_merge_idx, 1.0])
    elif len(mapped_u) > 1:
        # synthetic top-level scale: 1.5x the largest recorded merging scale
        merger_record.append([mapped_u, current_merge_idx, 1.5*(merger_record[-1][2])])
    return merger_record
########################################################################################
########################################################################################
################################# UTILITY FUNCTIONS ####################################
########################################################################################
########################################################################################
def node_info(node, cv_score, std_score, min_score):
    """Print a one-line robust/reject report for a classifying node."""
    verdict = "robust node #" if cv_score > min_score else "reject node #"
    print("[tree.py] : {0:<15s}{1:<10d}{2:<10s}{3:<7.4f}{4:5s}{5:6.5f}".format(verdict, node.get_id(), "score =", cv_score, "\t+-", std_score))
def str_gate(marker, sign):
    """Append '-' for a negative gate sign, '+' otherwise."""
    suffix = "-" if sign < 0. else "+"
    return marker + suffix
def apply_map(mapdict, k):
    """Follow the merger chain starting at k until the sentinel -1 is reached;
    return the last node id on the chain."""
    idx = k
    nxt = mapdict[idx]
    while nxt != -1:
        idx = nxt
        nxt = mapdict[idx]
    return idx
def float_equal(a, b, eps = 1e-6):
    """Return True when a and b differ by less than *eps*.

    Bug fix: the original compared against the hard-coded literal 1e-6,
    silently ignoring the eps argument.
    """
    return abs(a - b) < eps
def get_scale(Z, c_1, c_2):
    """Return the merging scale of the pair (c_1, c_2) in linkage rows Z, or -1 if absent."""
    for row in Z:
        pair = (row[0], row[1])
        if pair in ((c_1, c_2), (c_2, c_1)):
            return row[2]
    return -1
def breath_first_search(root):
    """
    Breadth-first traversal of the subtree rooted at *root*.

    Returns
    -------
    node_list : list of node ids contained in root (root's own id first)
    """
    # Index-cursor queue: the original popped via `stack = stack[1:]`, which
    # copies the remaining queue on every step (O(n^2) overall). Walking an
    # index over a growing list keeps the same visit order in O(n).
    queue = [root]
    node_list = []
    head = 0
    while head < len(queue):
        current_node = queue[head]
        head += 1
        node_list.append(current_node.get_id())
        if not current_node.is_leaf():
            queue.extend(current_node.get_child())
    return node_list
def find_idx_cluster_in_root(model, node):
    """ Finds the original (noise_threshold = init) clusters contained in *node*.

    Returns the sorted ids of the initial-scale clusters in the subtree rooted at node.
    """
    ids = np.array(breath_first_search(node))  # every node id in the subtree
    n_init = len(model.hierarchy[0]['idx_centers'])
    # initial clusters carry ids below n_init (scipy dendrogram labelling convention)
    return np.sort(ids[ids < n_init])
expected_output = {
"bgp-information": {
"bgp-peer": [
{
"bgp-option-information": {
"bgp-options": "Preference LocalAddress HoldTime LogUpDown Cluster PeerAS Refresh Confed",
"bgp-options-extended": "GracefulShutdownRcv",
"bgp-options2": True,
"export-policy": "v4_pyats_NO-DEFAULT",
"gshut-recv-local-preference": "0",
"holdtime": "720",
"import-policy": "11",
"local-address": "10.189.5.252",
"preference": "170",
},
"description": "v4_pyats",
"flap-count": "0",
"last-error": "None",
"last-event": "Start",
"last-state": "Idle",
"local-address": "10.189.5.252",
"local-as": "65171",
"peer-address": "10.49.216.179",
"peer-as": "65171",
"peer-cfg-rti": "master",
"peer-flags": "",
"peer-fwd-rti": "master",
"peer-group": "v4_pyats",
"peer-state": "Active",
"peer-type": "Internal",
"route-reflector-client": True,
},
{
"bgp-bfd": {
"bfd-configuration-state": "disabled",
"bfd-operational-state": "down",
},
"bgp-error": [
{
"name": "Hold Timer Expired Error",
"receive-count": "17",
"send-count": "156",
},
{"name": "Cease", "receive-count": "6", "send-count": "0"},
],
"bgp-option-information": {
"address-families": "inet-unicast inet-labeled-unicast",
"authentication-configured": True,
"bgp-options": "Multihop Preference LocalAddress HoldTime AuthKey Ttl LogUpDown AddressFamily PeerAS Refresh Confed",
"bgp-options-extended": "GracefulShutdownRcv",
"bgp-options2": True,
"export-policy": "((LABELSTACK_O2B || HKG-EC_out) && (NEXT-HOP-SELF && HKG-EC_AddMED))",
"gshut-recv-local-preference": "0",
"holdtime": "30",
"local-address": "10.189.5.252",
"preference": "170",
},
"bgp-output-queue": [
{
"count": "0",
"number": "1",
"rib-adv-nlri": "inet-unicast",
"table-name": "inet.0",
},
{
"count": "0",
"number": "2",
"rib-adv-nlri": "inet-labeled-unicast",
"table-name": "inet.3",
},
],
"bgp-peer-iosession": {
"iosession-state": "Enabled",
"iosession-thread-name": "bgpio-0",
},
"bgp-rib": [
{
"accepted-prefix-count": "684",
"active-prefix-count": "682",
"advertised-prefix-count": "0",
"bgp-rib-state": "BGP restart is complete",
"name": "inet.0",
"received-prefix-count": "684",
"rib-bit": "20000",
"send-state": "in sync",
"suppressed-prefix-count": "0",
},
{
"accepted-prefix-count": "2",
"active-prefix-count": "2",
"advertised-prefix-count": "0",
"bgp-rib-state": "BGP restart is complete",
"name": "inet.3",
"received-prefix-count": "2",
"rib-bit": "30000",
"send-state": "in sync",
"suppressed-prefix-count": "0",
},
],
"active-holdtime": "30",
"peer-id": "10.169.14.240",
"local-id": "10.189.5.252",
"description": "sjkGDS221-EC11",
"entropy-label": "No",
"entropy-label-capability": "Yes",
"entropy-label-no-next-hop-validation": "Yes",
"entropy-label-stitching-capability": "Yes",
"flap-count": "127",
"group-index": "10",
"input-messages": "280022",
"input-octets": "7137084",
"input-refreshes": "0",
"input-updates": "61419",
"keepalive-interval": "10",
"last-checked": "1999164",
"last-error": "Hold Timer Expired Error",
"last-event": "RecvKeepAlive",
"last-flap-event": "HoldTime",
"last-received": "3",
"last-sent": "3",
"last-state": "OpenConfirm",
"local-address": "10.189.5.252+179",
"local-as": "65171",
"local-ext-nh-color-nlri": "inet-unicast",
"nlri-type": "inet-labeled-unicast",
"nlri-type-peer": "inet-unicast inet-labeled-unicast",
"nlri-type-session": "inet-unicast inet-labeled-unicast",
"output-messages": "221176",
"output-octets": "4202359",
"output-refreshes": "0",
"output-updates": "0",
"peer-4byte-as-capability-advertised": "65151",
"peer-addpath-not-supported": True,
"peer-address": "10.169.14.240+60606",
"peer-as": "65151",
"peer-cfg-rti": "master",
"peer-end-of-rib-received": "inet-unicast inet-labeled-unicast",
"peer-end-of-rib-sent": "inet-unicast inet-labeled-unicast",
"peer-flags": "Sync",
"peer-fwd-rti": "master",
"peer-group": "sjkGDS221-EC11",
"peer-index": "0",
"peer-no-llgr-restarter": True,
"peer-no-restart": True,
"peer-refresh-capability": "2",
"peer-restart-flags-received": "Notification",
"peer-restart-nlri-configured": "inet-unicast inet-labeled-unicast",
"peer-restart-nlri-negotiated": "inet-unicast inet-labeled-unicast",
"peer-stale-route-time-configured": "300",
"peer-state": "Established",
"peer-type": "External",
"snmp-index": "15",
},
{
"bgp-option-information": {
"authentication-configured": True,
"bgp-options": "Multihop Preference LocalAddress HoldTime AuthKey Ttl LogUpDown PeerAS Refresh Confed",
"bgp-options-extended": "GracefulShutdownRcv",
"bgp-options2": True,
"export-policy": "(HKG-WC_out && (NEXT-HOP-SELF && HKG-WC_AddMED))",
"gshut-recv-local-preference": "0",
"holdtime": "30",
"local-address": "10.189.5.252",
"preference": "170",
},
"description": "obpGCS001-WC11",
"flap-count": "0",
"last-error": "None",
"last-event": "Start",
"last-state": "Idle",
"local-address": "10.189.5.252",
"local-as": "65171",
"peer-address": "10.169.14.249",
"peer-as": "65151",
"peer-cfg-rti": "master",
"peer-flags": "",
"peer-fwd-rti": "master",
"peer-group": "obpGCS001-WC11",
"peer-state": "Active",
"peer-type": "External",
},
{
"bgp-option-information": {
"authentication-configured": True,
"bgp-options": "Preference LocalAddress HoldTime AuthKey LogUpDown Cluster PeerAS Refresh Confed",
"bgp-options-extended": "GracefulShutdownRcv",
"bgp-options2": True,
"export-policy": "ALL_out",
"gshut-recv-local-preference": "0",
"holdtime": "60",
"import-policy": "REJ_LONG_ASPATH",
"local-address": "10.189.5.252",
"preference": "170",
},
"description": "cm-hkm003",
"flap-count": "0",
"last-error": "None",
"last-event": "ConnectRetry",
"last-state": "Active",
"local-address": "10.189.5.252",
"local-as": "65171",
"peer-address": "10.189.5.240+179",
"peer-as": "65171",
"peer-cfg-rti": "master",
"peer-flags": "",
"peer-fwd-rti": "master",
"peer-group": "v4_RRC_72_SQUARE",
"peer-state": "Connect",
"peer-type": "Internal",
"route-reflector-client": True,
},
{
"bgp-option-information": {
"authentication-configured": True,
"bgp-options": "Preference LocalAddress HoldTime AuthKey LogUpDown Cluster PeerAS Refresh Confed",
"bgp-options-extended": "GracefulShutdownRcv",
"bgp-options2": True,
"export-policy": "ALL_out",
"gshut-recv-local-preference": "0",
"holdtime": "60",
"import-policy": "REJ_LONG_ASPATH",
"local-address": "10.189.5.252",
"preference": "170",
},
"description": "cm-hkm004",
"flap-count": "0",
"last-error": "None",
"last-event": "ConnectRetry",
"last-state": "Active",
"local-address": "10.189.5.252",
"local-as": "65171",
"peer-address": "10.189.5.241+179",
"peer-as": "65171",
"peer-cfg-rti": "master",
"peer-flags": "",
"peer-fwd-rti": "master",
"peer-group": "v4_RRC_72_SQUARE",
"peer-state": "Connect",
"peer-type": "Internal",
"route-reflector-client": True,
},
{
"bgp-option-information": {
"authentication-configured": True,
"bgp-options": "Preference LocalAddress HoldTime AuthKey LogUpDown Cluster PeerAS Refresh Confed",
"bgp-options-extended": "GracefulShutdownRcv",
"bgp-options2": True,
"export-policy": "(ALL_out && v4_NEXT-HOP-SELF_pyats201) ] Import: [ REJ_LONG_ASPATH",
"gshut-recv-local-preference": "0",
"holdtime": "60",
"local-address": "10.189.5.252",
"preference": "170",
},
"description": "cm-hkt003",
"flap-count": "0",
"last-error": "None",
"last-event": "Start",
"last-state": "Idle",
"local-address": "10.189.5.252",
"local-as": "65171",
"peer-address": "10.189.5.242",
"peer-as": "65171",
"peer-cfg-rti": "master",
"peer-flags": "",
"peer-fwd-rti": "master",
"peer-group": "v4_RRC_72_TRIANGLE",
"peer-state": "Active",
"peer-type": "Internal",
"route-reflector-client": True,
},
{
"bgp-option-information": {
"authentication-configured": True,
"bgp-options": "Preference LocalAddress HoldTime AuthKey LogUpDown Cluster PeerAS Refresh Confed",
"bgp-options-extended": "GracefulShutdownRcv",
"bgp-options2": True,
"export-policy": "(ALL_out && v4_NEXT-HOP-SELF_pyats201) ] Import: [ REJ_LONG_ASPATH",
"gshut-recv-local-preference": "0",
"holdtime": "60",
"local-address": "10.189.5.252",
"preference": "170",
},
"description": "cm-hkt004",
"flap-count": "0",
"last-error": "None",
"last-event": "Start",
"last-state": "Idle",
"local-address": "10.189.5.252",
"local-as": "65171",
"peer-address": "10.189.5.243",
"peer-as": "65171",
"peer-cfg-rti": "master",
"peer-flags": "",
"peer-fwd-rti": "master",
"peer-group": "v4_RRC_72_TRIANGLE",
"peer-state": "Active",
"peer-type": "Internal",
"route-reflector-client": True,
},
{
"bgp-option-information": {
"authentication-configured": True,
"bgp-options": "Preference LocalAddress HoldTime AuthKey LogUpDown Cluster PeerAS Refresh Confed",
"bgp-options-extended": "GracefulShutdownRcv",
"bgp-options2": True,
"export-policy": "(ALL_out && v4_NEXT-HOP-SELF_pyats201) ] Import: [ REJ_LONG_ASPATH",
"gshut-recv-local-preference": "0",
"holdtime": "60",
"local-address": "10.189.5.252",
"preference": "170",
},
"description": "lg-hkt001",
"flap-count": "0",
"last-error": "None",
"last-event": "Start",
"last-state": "Idle",
"local-address": "10.189.5.252",
"local-as": "65171",
"peer-address": "10.189.5.245",
"peer-as": "65171",
"peer-cfg-rti": "master",
"peer-flags": "",
"peer-fwd-rti": "master",
"peer-group": "v4_RRC_72_TRIANGLE",
"peer-state": "Active",
"peer-type": "Internal",
"route-reflector-client": True,
},
{
"bgp-bfd": {
"bfd-configuration-state": "disabled",
"bfd-operational-state": "down",
},
"bgp-error": [
{
"name": "Hold Timer Expired Error",
"receive-count": "36",
"send-count": "18",
},
{"name": "Cease", "receive-count": "2", "send-count": "10"},
],
"bgp-option-information": {
"authentication-configured": True,
"bgp-options": "Preference LocalAddress HoldTime AuthKey LogUpDown PeerAS Refresh Confed",
"bgp-options-extended": "GracefulShutdownRcv",
"bgp-options2": True,
"export-policy": "(v4_WATARI && NEXT-HOP-SELF)",
"gshut-recv-local-preference": "0",
"holdtime": "60",
"local-address": "10.189.5.252",
"preference": "170",
},
"bgp-output-queue": [
{
"count": "0",
"number": "1",
"rib-adv-nlri": "inet-unicast",
"table-name": "inet.0",
}
],
"bgp-peer-iosession": {
"iosession-state": "Enabled",
"iosession-thread-name": "bgpio-0",
},
"bgp-rib": [
{
"accepted-prefix-count": "682",
"active-prefix-count": "0",
"advertised-prefix-count": "682",
"bgp-rib-state": "BGP restart is complete",
"name": "inet.0",
"received-prefix-count": "682",
"rib-bit": "20001",
"send-state": "in sync",
"suppressed-prefix-count": "0",
}
],
"description": "hktGCS002",
"flap-count": "44",
"group-index": "0",
"input-messages": "110633",
"input-octets": "2104771",
"input-refreshes": "0",
"input-updates": "4",
"keepalive-interval": "20",
"last-checked": "1999134",
"last-error": "Hold Timer Expired Error",
"last-event": "RecvKeepAlive",
"last-flap-event": "RecvNotify",
"last-received": "13",
"last-sent": "3",
"last-state": "OpenConfirm",
"local-address": "10.189.5.252+60144",
"local-as": "65171",
"local-ext-nh-color-nlri": "inet-unicast",
"nlri-type-peer": "inet-unicast",
"nlri-type-session": "inet-unicast",
"output-messages": "171942",
"output-octets": "5078640",
"output-refreshes": "0",
"output-updates": "61307",
"peer-4byte-as-capability-advertised": "65171",
"peer-addpath-not-supported": True,
"active-holdtime": "60",
"peer-id": "10.189.5.253",
"local-id": "10.189.5.252",
"peer-address": "10.189.5.253+179",
"peer-as": "65171",
"peer-cfg-rti": "master",
"peer-end-of-rib-received": "inet-unicast",
"peer-end-of-rib-sent": "inet-unicast",
"peer-flags": "Sync",
"peer-fwd-rti": "master",
"peer-group": "hktGCS002",
"peer-index": "0",
"peer-no-llgr-restarter": True,
"peer-no-restart": True,
"peer-refresh-capability": "2",
"peer-restart-flags-received": "Notification",
"peer-restart-nlri-configured": "inet-unicast",
"peer-restart-nlri-negotiated": "inet-unicast",
"peer-stale-route-time-configured": "300",
"peer-state": "Established",
"peer-type": "Internal",
"snmp-index": "0",
},
{
"bgp-option-information": {
"authentication-configured": True,
"bgp-options": "Multihop Preference LocalAddress HoldTime AuthKey Ttl LogUpDown PeerAS Refresh Confed",
"bgp-options-extended": "GracefulShutdownRcv",
"bgp-options2": True,
"export-policy": "(ALL_out && (NEXT-HOP-SELF && HKG-SNG_AddMED))",
"gshut-recv-local-preference": "0",
"holdtime": "30",
"local-address": "10.189.5.252",
"preference": "170",
},
"description": "sggjbb001",
"flap-count": "0",
"last-error": "None",
"last-event": "Start",
"last-state": "Idle",
"local-address": "10.189.5.252",
"local-as": "65171",
"peer-address": "10.189.6.250",
"peer-as": "65181",
"peer-cfg-rti": "master",
"peer-flags": "",
"peer-fwd-rti": "master",
"peer-group": "sggjbb001",
"peer-state": "Active",
"peer-type": "External",
},
{
"bgp-option-information": {
"bgp-options": "Preference LocalAddress HoldTime LogUpDown Cluster PeerAS Refresh Confed",
"bgp-options-extended": "GracefulShutdownRcv",
"bgp-options2": True,
"export-policy": "v6_Kentik_NO-DEFAULT",
"gshut-recv-local-preference": "0",
"holdtime": "720",
"import-policy": "11",
"local-address": "2001:db8:223c:ca45::b",
"preference": "170",
},
"description": "v6_Kentik",
"flap-count": "0",
"last-error": "None",
"last-event": "ConnectRetry",
"last-state": "Active",
"local-address": "2001:db8:223c:ca45::b",
"local-as": "65171",
"peer-address": "2001:db8:6be:89bb::1:140+179",
"peer-as": "65171",
"peer-cfg-rti": "master",
"peer-flags": "",
"peer-fwd-rti": "master",
"peer-group": "v6_Kentik",
"peer-state": "Connect",
"peer-type": "Internal",
"route-reflector-client": True,
},
{
"bgp-bfd": {
"bfd-configuration-state": "disabled",
"bfd-operational-state": "down",
},
"bgp-error": [
{
"name": "Hold Timer Expired Error",
"receive-count": "24",
"send-count": "171",
},
{"name": "Cease", "receive-count": "5", "send-count": "0"},
],
"bgp-option-information": {
"authentication-configured": True,
"bgp-options": "Multihop Preference LocalAddress HoldTime AuthKey Ttl LogUpDown PeerAS Refresh Confed",
"bgp-options-extended": "GracefulShutdownRcv",
"bgp-options2": True,
"export-policy": "(v6_HKG-EC_out && (NEXT-HOP-SELF && v6_HKG-EC_AddMED))",
"gshut-recv-local-preference": "0",
"holdtime": "30",
"local-address": "2001:db8:223c:ca45::b",
"preference": "170",
},
"bgp-output-queue": [
{
"count": "0",
"number": "3",
"rib-adv-nlri": "inet6-unicast",
"table-name": "inet6.0",
}
],
"bgp-peer-iosession": {
"iosession-state": "Enabled",
"iosession-thread-name": "bgpio-0",
},
"bgp-rib": [
{
"accepted-prefix-count": "0",
"active-prefix-count": "0",
"advertised-prefix-count": "0",
"bgp-rib-state": "BGP restart is complete",
"name": "inet6.0",
"received-prefix-count": "0",
"rib-bit": "40000",
"send-state": "in sync",
"suppressed-prefix-count": "0",
}
],
"description": "sjkGDS221-EC11",
"flap-count": "133",
"group-index": "11",
"input-messages": "218603",
"input-octets": "4153468",
"input-refreshes": "0",
"input-updates": "1",
"keepalive-interval": "10",
"last-checked": "1999159",
"last-error": "Hold Timer Expired Error",
"last-event": "RecvKeepAlive",
"last-flap-event": "HoldTime",
"last-received": "1",
"last-sent": "3",
"last-state": "OpenConfirm",
"local-address": "2001:db8:223c:ca45::b+63754",
"local-as": "65171",
"local-ext-nh-color-nlri": "inet6-unicast",
"nlri-type-peer": "inet6-unicast",
"nlri-type-session": "inet6-unicast",
"output-messages": "221174",
"output-octets": "4202317",
"output-refreshes": "0",
"output-updates": "0",
"peer-4byte-as-capability-advertised": "65151",
"peer-addpath-not-supported": True,
"active-holdtime": "30",
"peer-id": "10.169.14.240",
"local-id": "10.189.5.252",
"peer-address": "2001:db8:eb18:ca45::1+179",
"peer-as": "65151",
"peer-cfg-rti": "master",
"peer-end-of-rib-received": "inet6-unicast",
"peer-end-of-rib-sent": "inet6-unicast",
"peer-flags": "Sync",
"peer-fwd-rti": "master",
"peer-group": "v6_sjkGDS221-EC11",
"peer-index": "0",
"peer-no-llgr-restarter": True,
"peer-no-restart": True,
"peer-refresh-capability": "2",
"peer-restart-flags-received": "Notification",
"peer-restart-nlri-configured": "inet6-unicast",
"peer-restart-nlri-negotiated": "inet6-unicast",
"peer-stale-route-time-configured": "300",
"peer-state": "Established",
"peer-type": "External",
"snmp-index": "16",
},
{
"bgp-option-information": {
"authentication-configured": True,
"bgp-options": "Multihop Preference LocalAddress HoldTime AuthKey Ttl LogUpDown PeerAS Refresh Confed",
"bgp-options-extended": "GracefulShutdownRcv",
"bgp-options2": True,
"export-policy": "(v6_HKG-WC_out && (NEXT-HOP-SELF && v6_HKG-WC_AddMED))",
"gshut-recv-local-preference": "0",
"holdtime": "30",
"local-address": "2001:db8:223c:ca45::b",
"preference": "170",
},
"description": "obpGCS001-WC11",
"flap-count": "0",
"last-error": "None",
"last-event": "Start",
"last-state": "Idle",
"local-address": "2001:db8:223c:ca45::b",
"local-as": "65171",
"peer-address": "2001:db8:eb18:ca45::11",
"peer-as": "65151",
"peer-cfg-rti": "master",
"peer-flags": "",
"peer-fwd-rti": "master",
"peer-group": "v6_obpGCS001-WC11",
"peer-state": "Active",
"peer-type": "External",
},
{
"bgp-option-information": {
"authentication-configured": True,
"bgp-options": "Preference LocalAddress HoldTime AuthKey LogUpDown Cluster PeerAS Refresh Confed",
"bgp-options-extended": "GracefulShutdownRcv",
"bgp-options2": True,
"export-policy": "(ALL_out && v6_NEXT-HOP-SELF_pyats201)",
"gshut-recv-local-preference": "0",
"holdtime": "60",
"local-address": "2001:db8:223c:ca45::b",
"preference": "170",
},
"description": "cm-hkt003",
"flap-count": "0",
"last-error": "None",
"last-event": "Start",
"last-state": "Idle",
"local-address": "2001:db8:223c:ca45::b",
"local-as": "65171",
"peer-address": "2001:db8:223c:ca45::7",
"peer-as": "65171",
"peer-cfg-rti": "master",
"peer-flags": "",
"peer-fwd-rti": "master",
"peer-group": "v6_RRC_72_TRIANGLE",
"peer-state": "Active",
"peer-type": "Internal",
"route-reflector-client": True,
},
{
"bgp-option-information": {
"authentication-configured": True,
"bgp-options": "Preference LocalAddress HoldTime AuthKey LogUpDown Cluster PeerAS Refresh Confed",
"bgp-options-extended": "GracefulShutdownRcv",
"bgp-options2": True,
"export-policy": "(ALL_out && v6_NEXT-HOP-SELF_pyats201)",
"gshut-recv-local-preference": "0",
"holdtime": "60",
"local-address": "2001:db8:223c:ca45::b",
"preference": "170",
},
"description": "cm-hkt004",
"flap-count": "0",
"last-error": "None",
"last-event": "ConnectRetry",
"last-state": "Active",
"local-address": "2001:db8:223c:ca45::b",
"local-as": "65171",
"peer-address": "2001:db8:223c:ca45::8+179",
"peer-as": "65171",
"peer-cfg-rti": "master",
"peer-flags": "",
"peer-fwd-rti": "master",
"peer-group": "v6_RRC_72_TRIANGLE",
"peer-state": "Connect",
"peer-type": "Internal",
"route-reflector-client": True,
},
{
"bgp-option-information": {
"authentication-configured": True,
"bgp-options": "Preference LocalAddress HoldTime AuthKey LogUpDown Cluster PeerAS Refresh Confed",
"bgp-options-extended": "GracefulShutdownRcv",
"bgp-options2": True,
"export-policy": "ALL_out",
"gshut-recv-local-preference": "0",
"holdtime": "60",
"local-address": "2001:db8:223c:ca45::b",
"preference": "170",
},
"description": "cm-hkm003",
"flap-count": "0",
"last-error": "None",
"last-event": "Start",
"last-state": "Idle",
"local-address": "2001:db8:223c:ca45::b",
"local-as": "65171",
"peer-address": "2001:db8:223c:ca45::9",
"peer-as": "65171",
"peer-cfg-rti": "master",
"peer-flags": "",
"peer-fwd-rti": "master",
"peer-group": "v6_RRC_72_SQUARE",
"peer-state": "Active",
"peer-type": "Internal",
"route-reflector-client": True,
},
{
"bgp-option-information": {
"authentication-configured": True,
"bgp-options": "Preference LocalAddress HoldTime AuthKey LogUpDown Cluster PeerAS Refresh Confed",
"bgp-options-extended": "GracefulShutdownRcv",
"bgp-options2": True,
"export-policy": "ALL_out",
"gshut-recv-local-preference": "0",
"holdtime": "60",
"local-address": "2001:db8:223c:ca45::b",
"preference": "170",
},
"description": "cm-hkm004",
"flap-count": "0",
"last-error": "None",
"last-event": "ConnectRetry",
"last-state": "Active",
"local-address": "2001:db8:223c:ca45::b",
"local-as": "65171",
"peer-address": "2001:db8:223c:ca45::a+179",
"peer-as": "65171",
"peer-cfg-rti": "master",
"peer-flags": "",
"peer-fwd-rti": "master",
"peer-group": "v6_RRC_72_SQUARE",
"peer-state": "Connect",
"peer-type": "Internal",
"route-reflector-client": True,
},
{
"bgp-bfd": {
"bfd-configuration-state": "disabled",
"bfd-operational-state": "down",
},
"bgp-error": [
{
"name": "Hold Timer Expired Error",
"receive-count": "40",
"send-count": "27",
},
{"name": "Cease", "receive-count": "0", "send-count": "16"},
],
"bgp-option-information": {
"authentication-configured": True,
"bgp-options": "Preference LocalAddress HoldTime AuthKey LogUpDown PeerAS Refresh Confed",
"bgp-options-extended": "GracefulShutdownRcv",
"bgp-options2": True,
"export-policy": "(v6_WATARI && NEXT-HOP-SELF)",
"gshut-recv-local-preference": "0",
"holdtime": "60",
"local-address": "2001:db8:223c:ca45::b",
"preference": "170",
},
"bgp-output-queue": [
{
"count": "0",
"number": "3",
"rib-adv-nlri": "inet6-unicast",
"table-name": "inet6.0",
}
],
"bgp-peer-iosession": {
"iosession-state": "Enabled",
"iosession-thread-name": "bgpio-0",
},
"bgp-rib": [
{
"accepted-prefix-count": "0",
"active-prefix-count": "0",
"advertised-prefix-count": "0",
"bgp-rib-state": "BGP restart is complete",
"name": "inet6.0",
"received-prefix-count": "0",
"rib-bit": "40001",
"send-state": "in sync",
"suppressed-prefix-count": "0",
}
],
"description": "hktGCS002",
"flap-count": "55",
"group-index": "1",
"input-messages": "110662",
"input-octets": "2102633",
"input-refreshes": "0",
"input-updates": "1",
"keepalive-interval": "20",
"last-checked": "16510983",
"last-error": "Hold Timer Expired Error",
"last-event": "RecvKeepAlive",
"last-flap-event": "HoldTime",
"last-received": "6",
"last-sent": "5",
"last-state": "OpenConfirm",
"local-address": "2001:db8:223c:ca45::b+179",
"local-as": "65171",
"local-ext-nh-color-nlri": "inet6-unicast",
"nlri-type-peer": "inet6-unicast",
"nlri-type-session": "inet6-unicast",
"output-messages": "110664",
"output-octets": "2102627",
"output-refreshes": "0",
"output-updates": "0",
"peer-4byte-as-capability-advertised": "65171",
"peer-addpath-not-supported": True,
"active-holdtime": "60",
"peer-id": "10.189.5.253",
"local-id": "10.189.5.252",
"peer-address": "2001:db8:223c:ca45::c+60268",
"peer-as": "65171",
"peer-cfg-rti": "master",
"peer-end-of-rib-received": "inet6-unicast",
"peer-end-of-rib-sent": "inet6-unicast",
"peer-flags": "Sync",
"peer-fwd-rti": "master",
"peer-group": "v6_hktGCS002",
"peer-index": "0",
"peer-no-llgr-restarter": True,
"peer-no-restart": True,
"peer-refresh-capability": "2",
"peer-restart-flags-received": "Notification",
"peer-restart-nlri-configured": "inet6-unicast",
"peer-restart-nlri-negotiated": "inet6-unicast",
"peer-stale-route-time-configured": "300",
"peer-state": "Established",
"peer-type": "Internal",
"snmp-index": "1",
},
{
"bgp-option-information": {
"authentication-configured": True,
"bgp-options": "Multihop Preference LocalAddress HoldTime AuthKey Ttl LogUpDown PeerAS Refresh Confed",
"bgp-options-extended": "GracefulShutdownRcv",
"bgp-options2": True,
"export-policy": "(ALL_out && (NEXT-HOP-SELF && v6_HKG-SNG_AddMED))",
"gshut-recv-local-preference": "0",
"holdtime": "30",
"local-address": "2001:db8:223c:ca45::b",
"preference": "170",
},
"description": "sggjbb001",
"flap-count": "0",
"last-error": "None",
"last-event": "Start",
"last-state": "Idle",
"local-address": "2001:db8:223c:ca45::b",
"local-as": "65171",
"peer-address": "2001:db8:5961:ca45::1",
"peer-as": "65181",
"peer-cfg-rti": "master",
"peer-flags": "",
"peer-fwd-rti": "master",
"peer-group": "v6_sggjbb001",
"peer-state": "Active",
"peer-type": "External",
},
]
}
}
|
<reponame>funjack/launchcontrol
"""Launchcontrol client
The module exposes the Launchcontrol API as a Client object.
Data:
scripttypes -- list of a dictionaries containing supported script formats.
"""
import urllib2
# Script formats the Launchcontrol server understands.  Each entry maps a
# human-readable format name to the file extensions it is stored under and
# the MIME media type sent as Content-Type when uploading the script via
# Client.Play().
scripttypes = [
    {
        "name" : "funscript",
        "extensions": ["funscript"],
        "mediaType" : "application/prs.funscript+json",
    },
    {
        "name" : "raw",
        "extensions": ["launch"],
        "mediaType" : "application/prs.launchraw+json",
    },
    {
        "name" : "kiiroo",
        "extensions": ["kiiroo"],
        "mediaType" : "text/prs.kiiroo",
    },
    {
        "name" : "feelme",
        "extensions": ["meta"],
        "mediaType" : "application/prs.kiiroo+json",
    },
    {
        "name" : "realtouch",
        "extensions": ["realtouch", "ott"],
        "mediaType" : "text/prs.realtouch",
    },
    {
        "name" : "vorze",
        "extensions": ["vorze"],
        "mediaType" : "text/prs.vorze",
    },
    {
        "name" : "json",
        "extensions": ["json"],
        "mediaType" : "application/json",
    },
    {
        "name" : "text",
        "extensions": ["txt"],
        "mediaType" : "text/plain",
    },
    {
        "name" : "csv",
        "extensions": ["csv"],
        "mediaType" : "text/csv",
    },
]
class NotNowException(Exception):
    """The requested operation is not compatible with the player's current state."""
class NotSupportedException(Exception):
    """The given script type or media type is not supported by the server."""
class Client():
    """Client communicates with a Launchcontrol server.

    Args:
        url: Launchcontrol server url
        latency: Time adjustment in milliseconds
        positionmin: Lowest position in percent the Launch should move to
        positionmax: Highest position in percent the Launch should move to
        speedmin: Slowest speed in percent the Launch should move at
        speedmax: Highest speed in percent the Launch should move to
    """

    def __init__(self, url="http://127.0.0.1:6969", latency=0,
                 positionmin=0, positionmax=100, speedmin=20, speedmax=100):
        self._url = url
        # Arguments may arrive as strings (e.g. read from a config file);
        # normalize to int once so the "%d" query formatting below is safe.
        self.latency = int(latency)
        self.positionMin = int(positionmin)
        self.positionMax = int(positionmax)
        self.speedMin = int(speedmin)
        self.speedMax = int(speedmax)

    def Play(self, data, mediaType):
        """Play by sending data as specified mediatype.

        Args:
            data: Raw script data in bytes
            mediaType: Mimetype of the script in data

        Raises:
            NotSupportedException: The script and or mediaType is not
                supported.
        """
        if mediaType == "":
            # Preserved behavior: an empty media type is silently ignored.
            return
        params = ["latency=%d" % self.latency,
                  "positionmin=%d" % self.positionMin,
                  "positionmax=%d" % self.positionMax,
                  "speedmin=%d" % self.speedMin,
                  "speedmax=%d" % self.speedMax]
        req = urllib2.Request(self._url + '/v1/play?%s' % "&".join(params),
                              data=data, headers={'Content-Type': mediaType})
        try:
            urllib2.urlopen(req)
        except urllib2.HTTPError as e:
            # Unsupported Media Type (415): server can't handle this script.
            if e.code == 415:
                raise NotSupportedException("script is not supported")
            # Bare raise preserves the original traceback (unlike "raise e").
            raise

    def _player_command(self, path, conflict_message):
        """Issue a simple control request to *path*.

        A 409 Conflict response (player not in a state that allows the
        operation) is translated into NotNowException with
        *conflict_message*; any other HTTP error propagates unchanged.
        """
        req = urllib2.Request(self._url + path)
        try:
            urllib2.urlopen(req)
        except urllib2.HTTPError as e:
            if e.code == 409:
                raise NotNowException(conflict_message)
            raise

    def Stop(self):
        """Stop playback.

        Raises:
            NotNowException: Stop can not be performed now, eg because there
                is no script loaded.
        """
        self._player_command('/v1/stop', "cannot stop script now")

    def Pause(self):
        """Pause playback.

        Raises:
            NotNowException: Pause can not be performed now, eg because there
                is no script loaded.
        """
        self._player_command('/v1/pause', "cannot pause script now")

    def Resume(self):
        """Resume playback.

        Raises:
            NotNowException: Resume can not be performed now, eg because
                there is no script loaded.
        """
        self._player_command('/v1/resume', "cannot resume now")

    def Skip(self, time):
        """Skip jumps to a timecode (milliseconds).

        Raises:
            NotNowException: Skip can not be performed now, eg because there
                is no script loaded.
        """
        self._player_command('/v1/skip?p=%dms' % time, "cannot skip now")
|
# ===============================================================================
# Copyright 2013 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
import os
import yaml
from traits.api import Property, Instance, List, Either, Int, Float, HasTraits, \
Str, Bool, Button
from traitsui.api import View, Item, UItem, VGroup, HGroup, spring
from traitsui.editors.check_list_editor import CheckListEditor
from traitsui.tabular_adapter import TabularAdapter
from uncertainties import nominal_value, std_dev
from pychron.core.helpers.formatting import floatfmt
from pychron.core.helpers.traitsui_shortcuts import okcancel_view
from pychron.core.ui.tabular_editor import myTabularEditor
from pychron.envisage.icon_button_editor import icon_button_editor
from pychron.envisage.tasks.base_editor import BaseTraitsEditor
from pychron.mass_spec.mass_spec_recaller import MassSpecRecaller
from pychron.paths import paths
from pychron.pychron_constants import PLUSMINUS_ONE_SIGMA, NULL_STR
# Rows whose percent difference exceeds this threshold are flagged/enabled.
DIFF_TOLERANCE_PERCENT = 0.01

# Names of the comparable quantities, used to populate the CheckListEditor
# in the diff configuration dialog and to filter the displayed rows.
# BUG FIX: the "<iso> Total" sigma entries previously lacked the "{}"
# placeholder (u'Ar40 Total'.format(...)), making .format a no-op and
# duplicating the plain "Total" tag, so the sigma rows could never be
# selected.  'K/Ca' and 'K/Cl' were added because _set_values emits rows
# under those names (the pre-existing 'Ca/K'/'Cl/K' tags are kept for
# backward compatibility with saved configurations).
DIFF_TAGS = ('J',
             u'J {}'.format(PLUSMINUS_ONE_SIGMA),
             'Age',
             u'Age W/Jerr {}'.format(PLUSMINUS_ONE_SIGMA),
             u'Age {}'.format(PLUSMINUS_ONE_SIGMA),
             '40Ar* %',
             'Rad4039',
             u'Rad4039 {}'.format(PLUSMINUS_ONE_SIGMA),
             'Ca37/K39',
             'Ca/K',
             'K/Ca',
             'Cl38/K39',
             'Cl/K',
             'K/Cl',
             'Lambda K',
             'Lambda Ar37',
             'Lambda Ar39',
             'Lambda Cl36',
             'Ar40 Total',
             u'Ar40 Total {}'.format(PLUSMINUS_ONE_SIGMA),
             'Ar40 Bs Corrected',
             u'Ar40 Bs Corrected {}'.format(PLUSMINUS_ONE_SIGMA),
             'Ar40 Blank',
             u'Ar40 Blank {}'.format(PLUSMINUS_ONE_SIGMA),
             'Ar40 N', 'Ar40 fN', 'Ar40 Fit', 'Ar40 Filter', 'Ar40 Filter Iter', 'Ar40 Filter SD', 'Ar40 IC',
             u'Ar40 IC {}'.format(PLUSMINUS_ONE_SIGMA),
             'Ar39 Total',
             u'Ar39 Total {}'.format(PLUSMINUS_ONE_SIGMA),
             'Ar39 Bs Corrected',
             u'Ar39 Bs Corrected {}'.format(PLUSMINUS_ONE_SIGMA),
             'Ar39 Blank',
             u'Ar39 Blank {}'.format(PLUSMINUS_ONE_SIGMA),
             'Ar39 N', 'Ar39 fN', 'Ar39 Fit', 'Ar39 Filter', 'Ar39 Filter Iter', 'Ar39 Filter SD', 'Ar39 IC',
             u'Ar39 IC {}'.format(PLUSMINUS_ONE_SIGMA),
             'Ar38 Total',
             u'Ar38 Total {}'.format(PLUSMINUS_ONE_SIGMA),
             'Ar38 Bs Corrected',
             u'Ar38 Bs Corrected {}'.format(PLUSMINUS_ONE_SIGMA),
             'Ar38 Blank',
             u'Ar38 Blank {}'.format(PLUSMINUS_ONE_SIGMA),
             'Ar38 N', 'Ar38 fN', 'Ar38 Fit', 'Ar38 Filter', 'Ar38 Filter Iter', 'Ar38 Filter SD', 'Ar38 IC',
             u'Ar38 IC {}'.format(PLUSMINUS_ONE_SIGMA),
             'Ar37 Total',
             u'Ar37 Total {}'.format(PLUSMINUS_ONE_SIGMA),
             'Ar37 Bs Corrected',
             u'Ar37 Bs Corrected {}'.format(PLUSMINUS_ONE_SIGMA),
             'Ar37 Blank',
             u'Ar37 Blank {}'.format(PLUSMINUS_ONE_SIGMA),
             'Ar37 N', 'Ar37 fN', 'Ar37 Fit', 'Ar37 Filter', 'Ar37 Filter Iter', 'Ar37 Filter SD', 'Ar37 IC',
             u'Ar37 IC {}'.format(PLUSMINUS_ONE_SIGMA),
             'Ar36 Total',
             u'Ar36 Total {}'.format(PLUSMINUS_ONE_SIGMA),
             'Ar36 Bs Corrected',
             u'Ar36 Bs Corrected {}'.format(PLUSMINUS_ONE_SIGMA),
             'Ar36 Blank',
             u'Ar36 Blank {}'.format(PLUSMINUS_ONE_SIGMA),
             'Ar36 N', 'Ar36 fN', 'Ar36 Fit', 'Ar36 Filter', 'Ar36 Filter Iter', 'Ar36 Filter SD', 'Ar36 IC',
             u'Ar36 IC {}'.format(PLUSMINUS_ONE_SIGMA),
             'K4039', 'K3839', 'K3739', 'Ca3937', 'Ca3837', 'Ca3637', 'Cl3638', 'Ca_K', 'Cl_K'
             )
class ValueTabularAdapter(TabularAdapter):
    """Adapter rendering Value rows as Name/Pychron/Diff/MassSpec/%Diff columns."""

    columns = [('Name', 'name'),
               ('Pychron', 'lvalue'),
               ('Diff', 'diff'),
               ('MassSpec', 'rvalue'),
               ('% Dff', 'percent_diff')]

    lvalue_width = Int(100)
    diff_width = Int(100)
    rvalue_width = Int(100)
    name_width = Int(120)

    lvalue_text = Property
    diff_text = Property
    rvalue_text = Property
    percent_diff_text = Property
    font = '12'
    use_bg_color = Bool(True)

    def get_bg_color(self, object, trait, row, column=0):
        # Highlight rows whose values actually differ (pale red); the
        # "diffs only" view turns background coloring off entirely.
        if self.use_bg_color and self.item.enabled:
            return '#FFCCCC'
        return 'white'

    def _get_percent_diff_text(self):
        return self._get_value_text(self.item.percent_diff, n=5)

    def _get_lvalue_text(self):
        return self._get_value_text(self.item.lvalue)

    def _get_rvalue_text(self):
        return self._get_value_text(self.item.rvalue)

    def _get_value_text(self, v, n=8):
        # Floats get fixed-width scientific formatting; anything else
        # (ints, strings) is passed through unchanged.
        if isinstance(v, float):
            return floatfmt(v, n=n, s=5, use_scientific=True)
        return v

    def _get_diff_text(self):
        v = self.item.diff
        if isinstance(v, float):
            return floatfmt(v, n=8, use_scientific=True)
        if isinstance(v, bool):
            # StrValue diffs are booleans: show a marker when sides differ.
            return NULL_STR if v else ''
        return v
class Value(HasTraits):
    """A single numeric quantity compared between pychron (lvalue) and
    MassSpec (rvalue).

    ``diff``/``percent_diff`` are derived properties; ``enabled`` marks the
    row as a real discrepancy worth displaying.
    """
    name = Str
    lvalue = Either(Int, Float)
    rvalue = Either(Int, Float)

    diff = Property(depends_on='lvalue,rvalue')
    enabled = Property(depends_on='lvalue,rvalue')
    percent_diff = Property(depends_on='lvalue,rvalue')

    def _get_percent_diff(self):
        # Percent difference relative to the pychron value; the string
        # 'NaN' stands in for an undefined ratio (lvalue == 0).
        try:
            ratio = self.diff / self.lvalue
        except ZeroDivisionError:
            return 'NaN'
        return ratio * 100

    def _get_diff(self):
        return self.lvalue - self.rvalue

    def _get_enabled(self):
        # Rows where both sides are falsy or both are effectively zero are
        # never interesting.
        # NOTE(review): the 1e-20 test would also suppress rows where both
        # values are large and negative — confirm values are magnitudes.
        if not (self.lvalue or self.rvalue):
            return False
        if self.lvalue <= 1e-20 and self.rvalue <= 1e-20:
            return False

        d = self.percent_diff
        if d == 'NaN':
            return True
        return abs(d) > DIFF_TOLERANCE_PERCENT
class StrValue(Value):
    """A compared pair of strings (e.g. fit names or filter on/off flags).

    ``diff`` is a boolean "differs" flag rather than a numeric delta, and a
    percent difference is meaningless, so it is always the empty string.
    """
    lvalue = Str
    rvalue = Str

    def _get_diff(self):
        return self.lvalue != self.rvalue

    def _get_enabled(self):
        # A string row is interesting exactly when the two sides differ.
        return self.diff

    def _get_percent_diff(self):
        return ''
class DiffEditor(BaseTraitsEditor):
    """Side-by-side comparison of a pychron analysis ("left") with the same
    analysis recalled from a secondary MassSpec database ("right").

    ``setup`` locates the MassSpec counterpart, ``set_diff`` builds the list
    of Value/StrValue rows, and the view filters them by the configured
    diff tags and the "diffs only" toggle.
    """
    values = List
    # Full, unfiltered list of rows; ``values`` is the filtered view shown
    # in the table.  Declared as a trait so _diffs_only_changed is safe to
    # call before set_diff has run.
    ovalues = List
    recaller = Instance(MassSpecRecaller)
    selected_row = Int

    _right = None
    basename = Str
    diffs_only = Bool(True)
    adapter = None
    record_id = ''
    is_blank = False
    is_air = False
    diff_tags = List
    edit_configuration_button = Button
    select_all_button = Button('Select All')
    clear_all_button = Button('Clear All')

    def setup(self, left):
        """Find the MassSpec analysis matching *left*.

        Returns True when a counterpart was found (and cached on
        ``_right``); returns None otherwise.
        """
        self.record_id = left.record_id
        # blank/air analyses are identified by their record-id prefix
        self.is_blank = self.record_id.startswith('b')
        self.is_air = self.record_id.startswith('a')

        right = self._find_right(left)
        self.adapter = ValueTabularAdapter()
        if right:
            self._right = right
            return True

    def set_diff(self, left):
        """Populate the diff rows for *left* against the cached right side."""
        self.name = '{} Diff'.format(left.record_id)
        self.basename = left.record_id

        right = self._right

        isotopes = ['Ar40', 'Ar39', 'Ar38', 'Ar37', 'Ar36']
        self._set_values(left, right, isotopes)

    def _find_right(self, left):
        """
        find corresponding analysis in secondary database
        """
        recaller = self.recaller
        ln = left.labnumber
        aliquot = left.aliquot

        # blanks/airs are recalled by sentinel labnumbers (-1/-2) plus the
        # remainder of the record id as the aliquot
        if ln.startswith('b'):
            aliquot = '-'.join(left.record_id.split('-')[1:])
            ln = -1
        elif ln.startswith('a'):
            aliquot = '-'.join(left.record_id.split('-')[1:])
            ln = -2
        return recaller.find_analysis(ln, aliquot,
                                      left.step)

    def _set_values(self, left, right, isotopes):
        """Build the Value/StrValue rows comparing *left* and *right*."""
        vs = []

        def pfunc(prefix):
            # Build "<iso> <quantity>" row labels, e.g. "Ar40 Bs Corrected".
            return lambda n: u'{} {}'.format(prefix, n)

        if not self.is_blank and not self.is_air:
            # age/J/ratio summary rows only make sense for unknowns
            vs.append(Value(name='J',
                            lvalue=nominal_value(left.j or 0),
                            rvalue=nominal_value(right.j or 0)))
            vs.append(Value(name=u'J {}'.format(PLUSMINUS_ONE_SIGMA),
                            lvalue=std_dev(left.j or 0),
                            rvalue=std_dev(right.j or 0)))
            vs.append(Value(name='Age',
                            lvalue=left.age or 0,
                            rvalue=right.age or 0))
            vs.append(Value(name=u'Age W/Jerr {}'.format(PLUSMINUS_ONE_SIGMA),
                            lvalue=std_dev(left.uage_w_j_err) or 0,
                            rvalue=right.age_err or 0))
            vs.append(Value(name=u'Age {}'.format(PLUSMINUS_ONE_SIGMA),
                            lvalue=left.age_err or 0,
                            rvalue=right.age_err_wo_j or 0))
            vs.append(Value(name='40Ar* %',
                            lvalue=nominal_value(left.radiogenic_yield or 0),
                            rvalue=nominal_value(right.radiogenic_yield or 0)))
            vs.append(Value(name='Rad4039',
                            lvalue=nominal_value(left.uF),
                            rvalue=nominal_value(right.rad4039)))
            vs.append(Value(name=u'Rad4039 {}'.format(PLUSMINUS_ONE_SIGMA),
                            lvalue=std_dev(left.uF),
                            rvalue=std_dev(right.rad4039)))

            k = left.get_computed_value('k39')
            ca = left.get_non_ar_isotope('ca37')
            vs.append(Value(name='Ca37/K39', lvalue=nominal_value(ca / k),
                            rvalue=nominal_value(right.r3739)))
            vs.append(Value(name='K/Ca', lvalue=nominal_value(left.kca),
                            rvalue=nominal_value(right.kca)))

            cl = left.get_non_ar_isotope('cl38')
            vs.append(Value(name='Cl38/K39', lvalue=nominal_value(cl / k),
                            rvalue=nominal_value(right.Cl3839)))
            vs.append(Value(name='K/Cl', lvalue=nominal_value(left.kcl),
                            rvalue=nominal_value(right.kcl)))

            constants = left.arar_constants
            vv = [Value(name=n, lvalue=nominal_value(getattr(constants, k)),
                        rvalue=nominal_value(getattr(right, k)))
                  for n, k in (('Lambda K', 'lambda_k'),
                               ('Lambda Ar37', 'lambda_Ar37'),
                               ('Lambda Ar39', 'lambda_Ar39'),
                               ('Lambda Cl36', 'lambda_Cl36'))]
            vs.extend(vv)

        def filter_str(ii):
            fd = ii.filter_outliers_dict.get('filter_outliers')
            return 'yes' if fd else 'no'

        for a in isotopes:
            iso = left.isotopes[a]
            riso = right.isotopes[a]
            func = pfunc(a)

            # mass spec only has baseline corrected intercepts
            # mass spec does not propagate baseline error
            i = iso.get_baseline_corrected_value(include_baseline_error=False)
            ri = riso.baseline_corrected

            vs.append(Value(name=func('Bs Corrected'),
                            lvalue=nominal_value(i),
                            rvalue=nominal_value(ri)))
            vs.append(Value(name=func(PLUSMINUS_ONE_SIGMA), lvalue=std_dev(i), rvalue=std_dev(ri)))

            if not self.is_blank:
                if iso.decay_corrected:
                    # baseline, blank corrected, ic_corrected, decay_corrected
                    i = iso.decay_corrected
                else:
                    # baseline, blank corrected, ic_corrected
                    i = iso.get_intensity()

                ri = riso.total_value
                vs.append(Value(name=func('Total'),
                                lvalue=nominal_value(i),
                                rvalue=nominal_value(ri)))
                vs.append(
                    Value(name=func(u'Total {}'.format(PLUSMINUS_ONE_SIGMA)), lvalue=std_dev(i), rvalue=std_dev(ri)))

            vs.append(Value(name=func('N'), lvalue=iso.n, rvalue=riso.n))
            vs.append(Value(name=func('fN'), lvalue=iso.fn, rvalue=riso.fn))
            vs.append(StrValue(name=func('Fit'), lvalue=iso.fit.lower(), rvalue=riso.fit.lower()))
            vs.append(StrValue(name=func('Filter'), lvalue=filter_str(iso), rvalue=filter_str(riso)))
            vs.append(Value(name=func('Filter Iter'), lvalue=iso.filter_outliers_dict.get('iterations', 0),
                            rvalue=riso.filter_outliers_dict.get('iterations', 0)))
            vs.append(Value(name=func('Filter SD'), lvalue=iso.filter_outliers_dict.get('std_devs', 0),
                            rvalue=riso.filter_outliers_dict.get('std_devs', 0)))
            vs.append(Value(name=func('IC'), lvalue=nominal_value(iso.ic_factor),
                            rvalue=nominal_value(riso.ic_factor)))
            vs.append(Value(name=func(u'IC {}'.format(PLUSMINUS_ONE_SIGMA)), lvalue=std_dev(iso.ic_factor),
                            rvalue=std_dev(riso.ic_factor)))

        for a in isotopes:
            func = pfunc(a)
            baseline = left.isotopes[a].baseline
            rbaseline = right.isotopes[a].baseline
            vs.append(Value(name=func('Bs'), lvalue=baseline.value, rvalue=rbaseline.value))
            vs.append(Value(name=func(u'Bs {}'.format(PLUSMINUS_ONE_SIGMA)), lvalue=baseline.error,
                            rvalue=rbaseline.error))
            vs.append(Value(name=func('Bs N'), lvalue=baseline.n, rvalue=rbaseline.n))
            vs.append(Value(name=func('Bs fN'), lvalue=baseline.fn, rvalue=rbaseline.fn))
            # BUG FIX: previously this compared filter_str(iso) with itself
            # (using the stale ``iso`` from the loop above), so baseline
            # filter settings could never register as a diff.
            fv = StrValue(name=func('Bs Filter'), lvalue=filter_str(baseline), rvalue=filter_str(rbaseline))
            vs.append(fv)
            if not (fv.lvalue == 'no' and fv.rvalue == 'no'):
                vs.append(Value(name=func('Bs Filter Iter'), lvalue=baseline.filter_outliers_dict.get('iterations'),
                                rvalue=rbaseline.filter_outliers_dict.get('iterations')))
                vs.append(Value(name=func('Bs Filter SD'), lvalue=baseline.filter_outliers_dict.get('std_devs'),
                                rvalue=rbaseline.filter_outliers_dict.get('std_devs')))

        if not self.is_blank:
            for a in isotopes:
                func = pfunc(a)
                iso = left.isotopes[a]
                riso = right.isotopes[a]
                vs.append(Value(name=func('Blank'), lvalue=iso.blank.value, rvalue=riso.blank.value))
                vs.append(Value(name=func(u'Blank {}'.format(PLUSMINUS_ONE_SIGMA)), lvalue=iso.blank.error,
                                rvalue=riso.blank.error))

        rpr = right.production_ratios
        for k, v in left.production_ratios.items():
            vs.append(Value(name=k, lvalue=nominal_value(v),
                            rvalue=nominal_value(rpr.get(k, 0))))

        rifc = right.interference_corrections
        for k, v in left.interference_corrections.items():
            vs.append(Value(name=k, lvalue=nominal_value(v),
                            rvalue=nominal_value(rifc.get(k.lower(), 0))))

        self.ovalues = vs
        self._diffs_only_changed(self.diffs_only)

    def _get_configuration(self):
        """Load the persisted diff-tag selection, or None if none is saved."""
        p = paths.hidden_path('diff_config')
        if os.path.isfile(p):
            # BUG FIX: this previously called the undefined name ``yload``
            # (NameError); load the file via the module-level yaml import.
            with open(p, 'r') as rfile:
                return yaml.safe_load(rfile)

    def _dump_configuration(self):
        """Persist the current diff-tag selection to the hidden config file."""
        p = paths.hidden_path('diff_config')
        with open(p, 'w') as wfile:
            return yaml.dump(self.diff_tags, wfile)

    def _edit_configuration_button_fired(self):
        """Open the tag-selection dialog; save and re-filter on OK."""
        v = okcancel_view(VGroup(HGroup(UItem('select_all_button'),
                                        UItem('clear_all_button')),
                                 UItem('diff_tags', style='custom',
                                       editor=CheckListEditor(values=DIFF_TAGS, cols=5))),
                          title='Configure Diff')
        cfg = self._get_configuration()
        if cfg is None:
            # no saved configuration: default to everything selected
            self._select_all_button_fired()
        else:
            self.diff_tags = cfg

        info = self.edit_traits(v)
        if info.result:
            self._dump_configuration()
            self._diffs_only_changed(self.diffs_only)

    def _select_all_button_fired(self):
        self.diff_tags = list(DIFF_TAGS)

    def _clear_all_button_fired(self):
        self.diff_tags = []

    def _diffs_only_changed(self, new):
        """Re-filter the visible rows by saved tags and the diffs-only flag."""
        cfg = self._get_configuration()
        ovs = self.ovalues
        if cfg:
            ovs = [vi for vi in self.ovalues if vi.name in cfg]

        if new:
            self.values = [vi for vi in ovs if vi.enabled]
            self.adapter.use_bg_color = False
        else:
            self.adapter.use_bg_color = True
            self.values = ovs

    def traits_view(self):
        v = View(VGroup(
            HGroup(Item('diffs_only'), icon_button_editor('edit_configuration_button', 'cog'),
                   spring,
                   UItem('record_id',
                         style='readonly'),
                   spring),
            UItem('values', editor=myTabularEditor(adapter=self.adapter,
                                                   editable=False,
                                                   selected_row='selected_row'))))
        return v
# ============= EOF =============================================
|
<reponame>KamilRizatdinov/marvelowe-server
from fastapi.testclient import TestClient
from src.application import app
client = TestClient(app)
def test_get_characters_no_auth(setup_marvel_api):
characher_response = client.get("/characters")
assert characher_response.status_code == 400
def test_get_characters_wrong_auth(setup_marvel_api):
client.post("/register", data={"username": "birdi7", "password": "<PASSWORD>"})
response = client.post("/token", data={"username": "birdi7", "password": "<PASSWORD>"})
token = response.json()["access_token"]
characher_response = client.get("/characters", headers={"Authorization": f"Bearer {token}124"})
assert characher_response.status_code == 400
def test_get_characters(setup_marvel_api):
client.post("/register", data={"username": "birdi7", "password": "<PASSWORD>"})
response = client.post("/token", data={"username": "birdi7", "password": "<PASSWORD>"})
token = response.json()["access_token"]
characher_response = client.get("/characters", headers={"Authorization": f"Bearer {token}"})
assert characher_response.status_code == 200
def test_get_zero_bookmarked_characters(setup_marvel_api):
client.post("/register", data={"username": "birdi7", "password": "<PASSWORD>"})
response = client.post("/token", data={"username": "birdi7", "password": "<PASSWORD>"})
token = response.json()["access_token"]
characher_response = client.get("/characters?onlyBookmarked=true", headers={"Authorization": f"Bearer {token}"})
assert characher_response.status_code == 200
assert characher_response.json()["data"]["results"] == []
def test_get_bookmarked_characters(setup_marvel_api):
client.post("/register", data={"username": "birdi7", "password": "<PASSWORD>"})
response = client.post("/token", data={"username": "birdi7", "password": "<PASSWORD>"})
token = response.json()["access_token"]
client.post(
"/bookmark/characters/1",
data={"username": "birdi7", "password": "<PASSWORD>"},
headers={"Authorization": f"Bearer {token}"},
)
characher_response = client.get("/characters?onlyBookmarked=true", headers={"Authorization": f"Bearer {token}"})
assert characher_response.status_code == 200
assert characher_response.json()["data"]["results"] == [{"data": "testing information", "id": 1, "bookmark": True}]
def test_get_character(setup_marvel_api):
client.post("/register", data={"username": "birdi7", "password": "<PASSWORD>"})
response = client.post("/token", data={"username": "birdi7", "password": "<PASSWORD>"})
token = response.json()["access_token"]
characher_response = client.get("/characters/1", headers={"Authorization": f"Bearer {token}"})
assert characher_response.status_code == 200
def test_get_comics(setup_marvel_api):
client.post("/register", data={"username": "birdi7", "password": "<PASSWORD>"})
response = client.post("/token", data={"username": "birdi7", "password": "<PASSWORD>"})
token = response.json()["access_token"]
characher_response = client.get("/comics", headers={"Authorization": f"Bearer {token}"})
assert characher_response.status_code == 200
def test_get_comic(setup_marvel_api):
    """Fetching a single comic by id succeeds for an authenticated user."""
    creds = {"username": "birdi7", "password": "<PASSWORD>"}
    client.post("/register", data=creds)
    token = client.post("/token", data=creds).json()["access_token"]
    resp = client.get("/comics/1", headers={"Authorization": f"Bearer {token}"})
    assert resp.status_code == 200
def test_get_zero_bookmarked_comics(setup_marvel_api):
    """A freshly registered user has no comic bookmarks."""
    creds = {"username": "birdi7", "password": "<PASSWORD>"}
    client.post("/register", data=creds)
    token = client.post("/token", data=creds).json()["access_token"]
    resp = client.get("/comics?onlyBookmarked=true", headers={"Authorization": f"Bearer {token}"})
    assert resp.status_code == 200
    assert resp.json()["data"]["results"] == []
def test_get_bookmarked_comics(setup_marvel_api):
    """After bookmarking comic 1, the filtered list contains exactly that entry."""
    creds = {"username": "birdi7", "password": "<PASSWORD>"}
    client.post("/register", data=creds)
    token = client.post("/token", data=creds).json()["access_token"]
    auth = {"Authorization": f"Bearer {token}"}
    client.post(
        "/bookmark/comics/1",
        data=creds,
        headers=auth,
    )
    resp = client.get("/comics?onlyBookmarked=true", headers=auth)
    assert resp.status_code == 200
    assert resp.json()["data"]["results"] == [{"data": "testing information", "id": 1, "bookmark": True}]
|
__author__ = "<NAME>"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
""" LSF job adaptor implementation
"""
import radical.utils.which
import radical.utils.threads as sut
import saga.url as surl
import saga.utils.pty_shell
import saga.adaptors.base
import saga.adaptors.cpi.job
from saga.job.constants import *
import re
import os
import time
import threading
from cgi import parse_qs
# shortcuts for the synchronous / asynchronous CPI call decorators
# NOTE(review): relies on 'saga.adaptors.cpi.decorators' being importable
# as a side effect of the 'saga.adaptors.cpi.job' import above -- confirm.
SYNC_CALL = saga.adaptors.cpi.decorators.SYNC_CALL
ASYNC_CALL = saga.adaptors.cpi.decorators.ASYNC_CALL

SYNC_WAIT_UPDATE_INTERVAL = 1  # seconds: poll interval in _job_wait()
MONITOR_UPDATE_INTERVAL = 3  # seconds: poll interval of the monitor thread
# --------------------------------------------------------------------
#
class _job_state_monitor(threading.Thread):
    """ thread that periodically monitors job states

    One daemon thread per LSFJobService instance; it polls the backend
    for all known, non-terminal jobs and fires the SAGA state callback
    whenever a job's state changed.
    """
    def __init__(self, job_service):
        # keep a handle on the owning service so we can reach its job
        # table ('jobs') and its poller ('_job_get_info')
        self.logger = job_service._logger
        self.js = job_service
        self._stop = sut.Event()

        super(_job_state_monitor, self).__init__()
        self.setDaemon(True)

    def stop(self):
        # ask run() to exit at the next loop iteration
        self._stop.set()

    def stopped(self):
        return self._stop.isSet()

    def run(self):
        while self.stopped() is False:
            try:
                # do bulk updates here! we don't want to pull information
                # job by job. that would be too inefficient!
                jobs = self.js.jobs
                job_keys = jobs.keys()

                for job in job_keys:
                    # if the job hasn't been started, we can't update its
                    # state. we can tell if a job has been started if it
                    # has a job id
                    if jobs[job].get ('job_id', None) is not None:
                        # we only need to monitor jobs that are not in a
                        # terminal state, so we can skip the ones that are
                        # either done, failed or canceled
                        state = jobs[job]['state']
                        if (state != saga.job.DONE) and (state != saga.job.FAILED) and (state != saga.job.CANCELED):
                            job_info = self.js._job_get_info(job)
                            self.logger.info("Job monitoring thread updating Job %s (state: %s)" % (job, job_info['state']))

                            # NOTE(review): 'job' is the jobs-dict key (the
                            # adaptor-side job object, see create_job()) --
                            # assumed to expose _api(); confirm.
                            if job_info['state'] != jobs[job]['state']:
                                # fire job state callback if 'state' has changed
                                job._api()._attributes_i_set('state', job_info['state'], job._api()._UP, True)

                            # update job info
                            self.js.jobs[job] = job_info

                time.sleep(MONITOR_UPDATE_INTERVAL)
            except Exception as e:
                # swallow and keep monitoring; a transient shell error must
                # not kill the monitor thread
                self.logger.warning("Exception caught in job monitoring thread: %s" % e)
# --------------------------------------------------------------------
#
def log_error_and_raise(message, exception, logger):
    """Log *message* at error level, then raise it wrapped in *exception*.

    :param message:   text to log and to pass to the exception constructor
    :param exception: exception class to instantiate and raise
    :param logger:    logger used for the error record
    """
    logger.error(message)
    err = exception(message)
    raise err
# --------------------------------------------------------------------
#
def _lsf_to_saga_jobstate(lsfjs):
    """Translate an LSF job state token (e.g. 'RUN', 'PEND') into the
    corresponding saga.job state constant; unrecognized tokens map to
    saga.job.UNKNOWN.
    """
    state_map = {
        'RUN':     saga.job.RUNNING,
        'WAIT':    saga.job.PENDING,
        'PEND':    saga.job.PENDING,
        'DONE':    saga.job.DONE,
        'UNKNOWN': saga.job.FAILED,
        'ZOMBI':   saga.job.FAILED,
        'EXIT':    saga.job.FAILED,
        'USUSP':   saga.job.SUSPENDED,
        'SSUSP':   saga.job.SUSPENDED,
        'PSUSP':   saga.job.SUSPENDED,
    }
    return state_map.get(lsfjs, saga.job.UNKNOWN)
# --------------------------------------------------------------------
#
def _lsfcript_generator(url, logger, jd, ppn, lsf_version, queue=None, ):
    """ generates an LSF script from a SAGA job description

    :param url:         resource-manager URL (unused here)
    :param logger:      logger instance (unused here)
    :param jd:          saga.job.Description carrying the job parameters
    :param ppn:         processes-per-node hint (unused here)
    :param lsf_version: LSF tool version string (unused here)
    :param queue:       queue from the service URL; takes precedence over
                        jd.queue
    :returns: the '#BSUB' batch script as a single string, with '"' and
              '$' escaped so it survives the 'echo' staging in _job_run()
    """
    lsf_params = str()
    exec_n_args = str()

    # executable + arguments become the last line of the script
    if jd.executable is not None:
        exec_n_args += "%s " % (jd.executable)
    if jd.arguments is not None:
        for arg in jd.arguments:
            exec_n_args += "%s " % (arg)

    if jd.name is not None:
        lsf_params += "#BSUB -J %s \n" % jd.name

    if jd.environment is not None:
        # single 'export' line carrying all KEY=VALUE pairs
        env_variable_list = "export "
        for key in jd.environment.keys():
            env_variable_list += " %s=%s " % (key, jd.environment[key])
    else:
        env_variable_list = ""

    # a workaround is to do an explicit 'cd'
    if jd.working_directory is not None:
        lsf_params += "#BSUB -cwd %s \n" % jd.working_directory

    if jd.output is not None:
        # if working directory is set, we want stdout to end up in
        # the working directory as well, unless it contains a specific
        # path name.
        if jd.working_directory is not None:
            if os.path.isabs(jd.output):
                lsf_params += "#BSUB -o %s \n" % jd.output
            else:
                # user provided a relative path for STDOUT. in this case
                # we prepend the working directory path before passing
                # it on to LSF.
                lsf_params += "#BSUB -o %s/%s \n" % (jd.working_directory, jd.output)
        else:
            lsf_params += "#BSUB -o %s \n" % jd.output

    if jd.error is not None:
        # if working directory is set, we want stderr to end up in
        # the working directory as well, unless it contains a specific
        # path name.
        if jd.working_directory is not None:
            if os.path.isabs(jd.error):
                lsf_params += "#BSUB -e %s \n" % jd.error
            else:
                # user provided a relative path for STDERR. in this case
                # we prepend the working directory path before passing
                # it on to LSF.
                lsf_params += "#BSUB -e %s/%s \n" % (jd.working_directory, jd.error)
        else:
            lsf_params += "#BSUB -e %s \n" % jd.error

    if jd.wall_time_limit is not None:
        # NOTE(review): relies on Python-2 integer division; under
        # Python 3 this yields a float ('1.5:30') -- confirm interpreter.
        hours = jd.wall_time_limit / 60
        minutes = jd.wall_time_limit % 60
        lsf_params += "#BSUB -W %s:%s \n" \
            % (str(hours), str(minutes))

    # an explicit queue (from the service URL) wins over jd.queue
    if (jd.queue is not None) and (queue is not None):
        lsf_params += "#BSUB -q %s \n" % queue
    elif (jd.queue is not None) and (queue is None):
        lsf_params += "#BSUB -q %s \n" % jd.queue
    elif (jd.queue is None) and (queue is not None):
        lsf_params += "#BSUB -q %s \n" % queue

    if jd.project is not None:
        lsf_params += "#BSUB -P %s \n" % str(jd.project)
    if jd.job_contact is not None:
        lsf_params += "#BSUB -U %s \n" % str(jd.job_contact)

    # if total_cpu_count is not defined, we assume 1
    if jd.total_cpu_count is None:
        jd.total_cpu_count = 1
    lsf_params += "#BSUB -n %s \n" % str(jd.total_cpu_count)

    #tcc = int(jd.total_cpu_count)
    #tbd = float(tcc) / float(ppn)
    #if float(tbd) > int(tbd):
    #    lsf_params += "#PBS -l nodes=%s:ppn=%s \n" \
    #        % (str(int(tbd) + 1), ppn)
    #else:
    #    lsf_params += "#PBS -l nodes=%s:ppn=%s \n" \
    #        % (str(int(tbd)), ppn)

    # escape all double quotes and dollarsigns, otherwise 'echo |'
    # further down won't work
    # only escape '$' in args and exe. not in the params
    #exec_n_args = workdir_directives exec_n_args
    exec_n_args = exec_n_args.replace('$', '\\$')

    lsfscript = "\n#!/bin/bash \n%s\n%s\n%s" % (lsf_params, env_variable_list, exec_n_args)

    lsfscript = lsfscript.replace('"', '\\"')
    return lsfscript
# --------------------------------------------------------------------
# some private defs
#
_PTY_TIMEOUT = 2.0  # seconds; PTY shell interaction timeout

# --------------------------------------------------------------------
# the adaptor name
#
_ADAPTOR_NAME = "saga.adaptor.lsfjob"
_ADAPTOR_SCHEMAS = ["lsf", "lsf+ssh", "lsf+gsissh"]  # URL schemes we accept
_ADAPTOR_OPTIONS = []  # no configurable options for this adaptor
# --------------------------------------------------------------------
# the adaptor capabilities & supported attributes
#
# capabilities advertised to the SAGA engine
_ADAPTOR_CAPABILITIES = {
    # job-description attributes this adaptor understands
    "jdes_attributes": [saga.job.NAME,
                        saga.job.EXECUTABLE,
                        saga.job.ARGUMENTS,
                        saga.job.ENVIRONMENT,
                        saga.job.INPUT,
                        saga.job.OUTPUT,
                        saga.job.ERROR,
                        saga.job.QUEUE,
                        saga.job.PROJECT,
                        saga.job.WALL_TIME_LIMIT,
                        saga.job.WORKING_DIRECTORY,
                        saga.job.SPMD_VARIATION,  # TODO: 'hot'-fix for BigJob
                        saga.job.PROCESSES_PER_HOST,
                        saga.job.TOTAL_CPU_COUNT],
    # job attributes that can be queried at runtime
    "job_attributes": [saga.job.EXIT_CODE,
                       saga.job.EXECUTION_HOSTS,
                       saga.job.CREATED,
                       saga.job.STARTED,
                       saga.job.FINISHED],
    "metrics": [saga.job.STATE],
    "callbacks": [saga.job.STATE],
    # security contexts usable with this adaptor.
    # BUG FIX: the 'userpass' description had been mangled into
    # placeholder text ("<PASSWORD>/password ..."); restored.
    "contexts": {"ssh": "SSH public/private keypair",
                 "x509": "GSISSH X509 proxy context",
                 "userpass": "username/password pair (ssh)"}
}
# --------------------------------------------------------------------
# the adaptor documentation
#
# human-readable adaptor documentation, rendered by the SAGA docs tooling
_ADAPTOR_DOC = {
    "name": _ADAPTOR_NAME,
    "cfg_options": _ADAPTOR_OPTIONS,
    "capabilities": _ADAPTOR_CAPABILITIES,
    "description": """
The LSF adaptor allows to run and manage jobs on `LSF <https://en.wikipedia.org/wiki/Platform_LSF>`_
controlled HPC clusters.
""",
    "example": "examples/jobs/lsfjob.py",
    # BUG FIX: 'conenct' typo in the user-facing lsf+ssh description
    "schemas": {"lsf": "connect to a local cluster",
                "lsf+ssh": "connect to a remote cluster via SSH",
                "lsf+gsissh": "connect to a remote cluster via GSISSH"}
}
# --------------------------------------------------------------------
# the adaptor info is used to register the adaptor with SAGA
#
# registration record handed to the SAGA engine (see Adaptor.__init__)
_ADAPTOR_INFO = {
    "name"        : _ADAPTOR_NAME,
    "version"     : "v0.1",
    "schemas"     : _ADAPTOR_SCHEMAS,
    "capabilities": _ADAPTOR_CAPABILITIES,
    # CPI classes the engine instantiates for service / job objects
    "cpis": [
        {
            "type": "saga.job.Service",
            "class": "LSFJobService"
        },
        {
            "type": "saga.job.Job",
            "class": "LSFJob"
        }
    ]
}
###############################################################################
# The adaptor class
class Adaptor (saga.adaptors.base.Base):
    """Adaptor entry point loaded by the SAGA engine.

    Registers the CPI implementation classes below and provides the
    job-id parsing helper they share.
    """

    def __init__(self):
        saga.adaptors.base.Base.__init__(self, _ADAPTOR_INFO, _ADAPTOR_OPTIONS)

        # job ids have the shape '[<rm-url>]-[<backend-pid>]'
        self.id_re = re.compile(r'^\[(.*)\]-\[(.*?)\]$')
        self.opts = self.get_config(_ADAPTOR_NAME)

    def sanity_check(self):
        # FIXME: also check for gsissh
        pass

    def parse_id(self, id):
        """Split a job id '[rm]-[pid]' into its (rm, pid) parts."""
        match = self.id_re.match(id)
        if match is None or len(match.groups()) != 2:
            raise saga.BadParameter("Cannot parse job id '%s'" % id)
        return (match.group(1), match.group(2))
###############################################################################
#
class LSFJobService (saga.adaptors.cpi.job.Service):
    """ implements saga.adaptors.cpi.job.Service

    One instance per saga.job.Service: owns the PTY shell connection to
    the LSF host, the probed LSF command table and the shared job-info
    dictionary that the monitor thread keeps up to date.
    """
# ----------------------------------------------------------------
#
def __init__(self, api, adaptor):
    """CPI-level constructor; the real setup happens in init_instance()."""
    # monitor-thread placeholder (NOTE(review): init_instance() assigns
    # 'self.mt', not 'self._mt' -- confirm which one is intended)
    self._mt = None
    _cpi_base = super(LSFJobService, self)
    _cpi_base.__init__(api, adaptor)

    self._adaptor = adaptor
# ----------------------------------------------------------------
#
def __del__(self):
    # best-effort cleanup on garbage collection
    self.close()
# ----------------------------------------------------------------
#
def close(self):
    """Stop the monitor thread (if one was started) and tear down the shell.

    BUG FIX: 'self.mt' is only created in init_instance(); when the
    constructor failed early, __del__ -> close() died with an
    AttributeError.  Guard the attribute access with getattr().
    """
    mt = getattr(self, 'mt', None)
    if mt:
        mt.stop()
        mt.join(10)  # don't block forever on join()
        self._logger.info("Job monitoring thread stopped.")

    self.finalize(True)
# ----------------------------------------------------------------
#
def finalize(self, kill_shell=False):
    """Tear down the PTY shell when *kill_shell* is set and a shell exists."""
    if kill_shell and self.shell:
        self.shell.finalize(True)
# ----------------------------------------------------------------
#
@SYNC_CALL
def init_instance(self, adaptor_state, rm_url, session):
    """ service instance constructor

    Starts the job monitoring thread, opens a PTY shell to the target
    host (locally, via ssh or via gsissh depending on the URL scheme)
    and probes for the required LSF tools.
    """
    self.rm = rm_url
    self.session = session
    self.ppn = 1        # processes per node (currently never refined)
    self.queue = None
    self.shell = None
    self.jobs = dict()  # job-info table, keyed by adaptor-side job object

    # the monitoring thread - one per service instance
    self.mt = _job_state_monitor(job_service=self)
    self.mt.start()

    rm_scheme = rm_url.scheme
    pty_url = surl.Url (rm_url)

    # this adaptor supports options that can be passed via the
    # 'query' component of the job service URL.
    # NOTE(review): dict.iteritems() and 'cgi.parse_qs' are Python-2
    # only; this module predates Python 3.
    if rm_url.query is not None:
        for key, val in parse_qs(rm_url.query).iteritems():
            if key == 'queue':
                self.queue = val[0]

    # we need to extract the scheme for PTYShell. That's basically the
    # job.Service Url without the lsf+ part. We use the PTYShell to execute
    # lsf commands either locally or via gsissh or ssh.
    if rm_scheme == "lsf":
        pty_url.scheme = "fork"
    elif rm_scheme == "lsf+ssh":
        pty_url.scheme = "ssh"
    elif rm_scheme == "lsf+gsissh":
        pty_url.scheme = "gsissh"

    # these are the commands that we need in order to interact with LSF.
    # the adaptor will try to find them during initialize(self) and bail
    # out in case they are not available.
    self._commands = {'bqueues': None,
                      'bjobs': None,
                      'bsub': None,
                      'bkill': None}

    self.shell = saga.utils.pty_shell.PTYShell(pty_url, self.session)

    # self.shell.set_initialize_hook(self.initialize)
    # self.shell.set_finalize_hook(self.finalize)

    self.initialize()
    return self.get_api()
# ----------------------------------------------------------------
#
def initialize(self):
    """Probe the host for the LSF tools and record their path/version."""
    # check if all required lsf tools are available
    for cmd in self._commands.keys():
        ret, out, _ = self.shell.run_sync("which %s " % cmd)
        if ret != 0:
            message = "Couldn't find LSF tools: %s" % out
            log_error_and_raise(message, saga.NoSuccess, self._logger)
        else:
            path = out.strip()  # strip removes newline
            ret, out, _ = self.shell.run_sync("%s -V" % cmd)
            if ret != 0:
                message = "Couldn't find LSF tools: %s" % out
                log_error_and_raise(message, saga.NoSuccess, self._logger)
            else:
                # version is reported as: "version: x.y.z"
                version = out.split("\n")[0]

                # add path and version to the command dictionary
                self._commands[cmd] = {"path": path,
                                       "version": version}

    self._logger.info("Found LSF tools: %s" % self._commands)

    # see if we can get some information about the cluster, e.g.,
    # different queues, number of processes per node, etc.
    # TODO: this is quite a hack. however, it *seems* to work quite
    #       well in practice.  (left over from the PBS adaptor; disabled)
    #ret, out, _ = self.shell.run_sync('unset GREP_OPTIONS; %s -a | grep -E "(np|pcpu)"' % \
    #    self._commands['pbsnodes']['path'])
    #if ret != 0:
    #
    #    message = "Error running pbsnodes: %s" % out
    #    log_error_and_raise(message, saga.NoSuccess, self._logger)
    #else:
    #    this is black magic. we just assume that the highest occurence
    #    of a specific np is the number of processors (cores) per compute
    #    node. this equals max "PPN" for job scripts
    #    ppn_list = dict()
    #    for line in out.split('\n'):
    #        np = line.split(' = ')
    #        if len(np) == 2:
    #            np = np[1].strip()
    #            if np in ppn_list:
    #                ppn_list[np] += 1
    #            else:
    #                ppn_list[np] = 1
    #    self.ppn = max(ppn_list, key=ppn_list.get)
    #    self._logger.debug("Found the following 'ppn' configurations: %s. \
    #Using %s as default ppn."
    #        % (ppn_list, self.ppn))
# ----------------------------------------------------------------
#
def _job_run(self, job_obj):
    """ runs a job via bsub  (docstring previously said 'qsub' -- PBS leftover)

    Generates an LSF script from the job description, stages it to the
    host via 'mktemp' + 'echo', submits it with 'bsub' and records the
    returned job id in the shared job table.
    """
    # get the job description
    jd = job_obj.jd

    # normalize working directory path
    if jd.working_directory :
        jd.working_directory = os.path.normpath (jd.working_directory)

    if (self.queue is not None) and (jd.queue is not None):
        self._logger.warning("Job service was instantiated explicitly with \
'queue=%s', but job description tries to a differnt queue: '%s'. Using '%s'." %
                             (self.queue, jd.queue, self.queue))

    try:
        # create an LSF job script from SAGA job description
        script = _lsfcript_generator(url=self.rm, logger=self._logger,
                                     jd=jd, ppn=self.ppn,
                                     lsf_version=self._commands['bjobs']['version'],
                                     queue=self.queue,
                                     )

        self._logger.info("Generated LSF script: %s" % script)
    except Exception, ex:  # Python-2 except syntax (module-wide)
        log_error_and_raise(str(ex), saga.BadParameter, self._logger)

    # try to create the working directory (if defined)
    # WARNING: this assumes a shared filesystem between login node and
    #          compute nodes.
    if jd.working_directory is not None:
        self._logger.info("Creating working directory %s" % jd.working_directory)
        ret, out, _ = self.shell.run_sync("mkdir -p %s" % (jd.working_directory))
        if ret != 0:
            # something went wrong
            message = "Couldn't create working directory - %s" % (out)
            log_error_and_raise(message, saga.NoSuccess, self._logger)

    # Now we want to execute the script. This process consists of two steps:
    # (1) we create a temporary file with 'mktemp' and write the contents of
    #     the generated script into it
    # (2) we pipe it into 'bsub' to submit it to the queueing system
    cmdline = """SCRIPTFILE=`mktemp -t SAGA-Python-LSFJobScript.XXXXXX` && echo "%s" > $SCRIPTFILE && %s < $SCRIPTFILE && rm -f $SCRIPTFILE""" % (script, self._commands['bsub']['path'])
    ret, out, _ = self.shell.run_sync(cmdline)

    if ret != 0:
        # something went wrong
        message = "Error running job via 'bsub': %s. Commandline was: %s" \
            % (out, cmdline)
        log_error_and_raise(message, saga.NoSuccess, self._logger)
    else:
        # parse the job id. bsub's output looks like this:
        # Job <901545> is submitted to queue <regular>
        lines = out.split("\n")
        lines = filter(lambda lines: lines != '', lines)  # remove empty

        self._logger.info('bsub: %s' % ''.join(lines))

        lsf_job_id = None
        for line in lines:
            if re.search('Job <.+> is submitted to queue', line):
                lsf_job_id = re.findall(r'<(.*?)>', line)[0]
                break

        if not lsf_job_id:
            raise Exception("Failed to detect job id after submission.")

        job_id = "[%s]-[%s]" % (self.rm, lsf_job_id)

        self._logger.info("Submitted LSF job with id: %s" % job_id)

        # update job dictionary
        self.jobs[job_obj]['job_id'] = job_id
        self.jobs[job_obj]['submitted'] = job_id

        # set status to 'pending' and manually trigger callback
        #self.jobs[job_obj]['state'] = saga.job.PENDING
        #job_obj._api()._attributes_i_set('state', self.jobs[job_obj]['state'], job_obj._api()._UP, True)

        # return the job id
        return job_id
# ----------------------------------------------------------------
#
def _retrieve_job(self, job_id):
    """ see if we can get some info about a job that we don't
        know anything about

    Used by get_job() when reconnecting to an existing backend job:
    queries 'bjobs -noheader -o ...' and builds a fresh job-info dict.
    """
    rm, pid = self._adaptor.parse_id(job_id)

    ret, out, _ = self.shell.run_sync("%s -noheader -o 'stat exec_host exit_code submit_time start_time finish_time delimiter=\",\"' %s" % (self._commands['bjobs']['path'], pid))

    if ret != 0:
        message = "Couldn't reconnect to job '%s': %s" % (job_id, out)
        log_error_and_raise(message, saga.NoSuccess, self._logger)
    else:
        # the job seems to exist on the backend. let's gather some data
        job_info = {
            'state': saga.job.UNKNOWN,
            'exec_hosts': None,
            'returncode': None,
            'create_time': None,
            'start_time': None,
            'end_time': None,
            'gone': False
        }

        # fields arrive comma-delimited in the order requested above
        results = out.split(',')
        job_info['state'] = _lsf_to_saga_jobstate(results[0])
        job_info['exec_hosts'] = results[1]
        if results[2] != '-':  # '-' means 'no exit code reported'
            job_info['returncode'] = int(results[2])
        job_info['create_time'] = results[3]
        job_info['start_time'] = results[4]
        job_info['end_time'] = results[5]

        return job_info
# ----------------------------------------------------------------
#
def _job_get_info(self, job_obj):
    """ get job attributes via bjobs

    Returns a fresh info dict for *job_obj*; jobs that vanished from
    'bjobs' output are marked 'gone' and mapped to DONE or FAILED.
    """
    # if we don't have the job in our dictionary, we don't want it
    if job_obj not in self.jobs:
        message = "Unknown job object: %s. Can't update state." % job_obj._id
        log_error_and_raise(message, saga.NoSuccess, self._logger)

    # prev. info contains the info collect when _job_get_info
    # was called the last time
    prev_info = self.jobs[job_obj]

    # if the 'gone' flag is set, there's no need to query the job
    # state again. it's gone forever
    if prev_info['gone'] is True:
        return prev_info

    # curr. info will contain the new job info collect. it starts off
    # as a copy of prev_info (don't use deepcopy because there is an API
    # object in the dict -> recursion)
    curr_info = dict()
    curr_info['job_id' ] = prev_info.get ('job_id' )
    curr_info['state' ] = prev_info.get ('state' )
    curr_info['exec_hosts' ] = prev_info.get ('exec_hosts' )
    curr_info['returncode' ] = prev_info.get ('returncode' )
    curr_info['create_time'] = prev_info.get ('create_time')
    curr_info['start_time' ] = prev_info.get ('start_time' )
    curr_info['end_time' ] = prev_info.get ('end_time' )
    curr_info['gone' ] = prev_info.get ('gone' )

    rm, pid = self._adaptor.parse_id(job_obj._id)

    # run the LSF 'bjobs' command to get some infos about our job
    # the result of bjobs <id> looks like this:
    #
    # JOBID USER STAT QUEUE FROM_HOST EXEC_HOST JOB_NAME SUBMIT_TIME
    # 901545 oweidne DONE regular yslogin5-ib ys3833-ib *FILENAME Nov 11 12:06
    #
    # If we add the -noheader flag, the first row is omitted
    ret, out, _ = self.shell.run_sync("%s -noheader %s" % (self._commands['bjobs']['path'], pid))

    if ret != 0:
        if ("Illegal job ID" in out):
            # Let's see if the previous job state was running or pending. in
            # that case, the job is gone now, which can either mean DONE,
            # or FAILED. the only thing we can do is set it to 'DONE'
            curr_info['gone'] = True
            # we can also set the end time
            self._logger.warning("Previously running job has disappeared. This probably means that the backend doesn't store informations about finished jobs. Setting state to 'DONE'.")

            if prev_info['state'] in [saga.job.RUNNING, saga.job.PENDING]:
                curr_info['state'] = saga.job.DONE
            else:
                curr_info['state'] = saga.job.FAILED
        else:
            # something went wrong
            message = "Error retrieving job info via 'bjobs': %s" % out
            log_error_and_raise(message, saga.NoSuccess, self._logger)
    else:
        # parse the result
        # NOTE(review): assumes default bjobs column layout (STAT in
        # column 3, EXEC_HOST in column 6) and single-token fields.
        results = out.split()
        curr_info['state'] = _lsf_to_saga_jobstate(results[2])
        curr_info['exec_hosts'] = results[5]

    # return the new job info dict
    return curr_info
# ----------------------------------------------------------------
#
def _job_get_state(self, job_obj):
    """ get the job's state

    Returns the cached state; it is refreshed in the background by the
    monitor thread.
    """
    return self.jobs[job_obj]['state']
# ----------------------------------------------------------------
#
def _job_get_exit_code(self, job_obj):
    """ get the job's exit code, or None if the backend reported none
    """
    ret = self.jobs[job_obj]['returncode']

    # FIXME: 'None' should cause an exception
    # idiom fix: identity test ('is None') instead of '== None'
    if ret is None:
        return None
    return int(ret)
# ----------------------------------------------------------------
#
def _job_get_execution_hosts(self, job_obj):
    """ get the job's execution hosts
        (docstring previously said 'exit code' -- copy/paste)
    """
    return self.jobs[job_obj]['exec_hosts']
# ----------------------------------------------------------------
#
def _job_get_create_time(self, job_obj):
    """ get the job's creation time (cached backend value, may be None)
    """
    return self.jobs[job_obj]['create_time']
# ----------------------------------------------------------------
#
def _job_get_start_time(self, job_obj):
    """ get the job's start time (cached backend value, may be None)
    """
    return self.jobs[job_obj]['start_time']
# ----------------------------------------------------------------
#
def _job_get_end_time(self, job_obj):
    """ get the job's end time (cached backend value, may be None)
    """
    return self.jobs[job_obj]['end_time']
# ----------------------------------------------------------------
#
def _job_cancel(self, job_obj):
    """ cancel the job via 'bkill'

    BUG FIX: the original referenced self._commands['qdel'] (a PBS
    leftover) and mentioned 'qdel' in the error message, but only
    bqueues/bjobs/bsub/bkill are probed in init_instance(), so this
    always raised a KeyError.  'bkill' is the LSF kill command.
    """
    rm, pid = self._adaptor.parse_id(job_obj._id)

    ret, out, _ = self.shell.run_sync("%s %s\n" \
        % (self._commands['bkill']['path'], pid))

    if ret != 0:
        message = "Error canceling job via 'bkill': %s" % out
        log_error_and_raise(message, saga.NoSuccess, self._logger)

    # assume the job was succesfully canceled
    self.jobs[job_obj]['state'] = saga.job.CANCELED
# ----------------------------------------------------------------
#
def _job_wait(self, job_obj, timeout):
    """ wait for the job to finish or fail

    Polls the cached job state (refreshed by the monitor thread) every
    SYNC_WAIT_UPDATE_INTERVAL seconds.  Returns True once the job is in
    a terminal state, False if *timeout* seconds (>= 0) elapsed first.
    """
    time_start = time.time()
    time_now = time_start
    rm, pid = self._adaptor.parse_id(job_obj._id)

    while True:
        #state = self._job_get_state(job_id=job_id, job_obj=job_obj)
        state = self.jobs[job_obj]['state']  # this gets updated in the bg.

        if state == saga.job.DONE or \
           state == saga.job.FAILED or \
           state == saga.job.CANCELED:
            return True

        # avoid busy poll
        time.sleep(SYNC_WAIT_UPDATE_INTERVAL)

        # check if we hit timeout
        if timeout >= 0:
            time_now = time.time()
            if time_now - time_start > timeout:
                return False
# ----------------------------------------------------------------
#
@SYNC_CALL
def create_job(self, jd):
    """ implements saga.adaptors.cpi.job.Service.create_job()
        (docstring previously said 'get_url' -- copy/paste)
    """
    # this dict is passed on to the job adaptor class -- use it to pass any
    # state information you need there.
    adaptor_state = {"job_service": self,
                     "job_description": jd,
                     "job_schema": self.rm.schema,
                     "reconnect": False
                     }

    # create a new job object
    job_obj = saga.job.Job(_adaptor=self._adaptor,
                           _adaptor_state=adaptor_state)

    # add job to internal list of known jobs.
    # NOTE(review): keyed by the job's adaptor-side object; confirm this
    # matches the keys iterated by the monitor thread.
    self.jobs[job_obj._adaptor] = {
        'state': saga.job.NEW,
        'job_id': None,
        'exec_hosts': None,
        'returncode': None,
        'create_time': None,
        'start_time': None,
        'end_time': None,
        'gone': False,
        'submitted': False
    }

    return job_obj
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_job(self, jobid):
    """ Implements saga.adaptors.cpi.job.Service.get_job()

    Reconnects to an existing backend job identified by *jobid*.
    """
    # try to get some information about this job
    job_info = self._retrieve_job(jobid)

    # this dict is passed on to the job adaptor class -- use it to pass any
    # state information you need there.
    adaptor_state = {"job_service": self,
                     # TODO: fill job description
                     "job_description": saga.job.Description(),
                     "job_schema": self.rm.schema,
                     "reconnect": True,
                     "reconnect_jobid": jobid
                     }

    job = saga.job.Job(_adaptor=self._adaptor,
                       _adaptor_state=adaptor_state)

    # throw it into our job dictionary.
    self.jobs[job._adaptor] = job_info
    return job
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_url(self):
    """ implements saga.adaptors.cpi.job.Service.get_url()
    """
    # the resource-manager URL this service was created with
    return self.rm
# ----------------------------------------------------------------
#
@SYNC_CALL
def list(self):
    """ implements saga.adaptors.cpi.job.Service.list()

    Returns ids for all jobs reported by 'bjobs -a', formatted as
    '[<rm-url>]-[<pid>]'.
    """
    ids = []

    ret, out, _ = self.shell.run_sync("%s -a" % self._commands['bjobs']['path'])

    if ret != 0 and len(out) > 0:
        message = "failed to list jobs via 'bjobs': %s" % out
        log_error_and_raise(message, saga.NoSuccess, self._logger)
    elif ret != 0 and len(out) == 0:
        # NOTE(review): presumably bjobs exits non-zero with empty output
        # when the queue is empty; treated as 'no jobs' -- confirm.
        pass
    else:
        for line in out.split("\n"):
            # output looks like this:
            # 112059.svc.uc.futuregrid testjob oweidner 0 Q batch
            # 112061.svc.uc.futuregrid testjob oweidner 0 Q batch
            if len(line.split()) > 1:
                jobid = "[%s]-[%s]" % (self.rm, line.split()[0].split('.')[0])
                ids.append(str(jobid))

    return ids
# # ----------------------------------------------------------------
# #
# def container_run (self, jobs) :
# self._logger.debug ("container run: %s" % str(jobs))
# # TODO: this is not optimized yet
# for job in jobs:
# job.run ()
#
#
# # ----------------------------------------------------------------
# #
# def container_wait (self, jobs, mode, timeout) :
# self._logger.debug ("container wait: %s" % str(jobs))
# # TODO: this is not optimized yet
# for job in jobs:
# job.wait ()
#
#
# # ----------------------------------------------------------------
# #
# def container_cancel (self, jobs) :
# self._logger.debug ("container cancel: %s" % str(jobs))
# raise saga.NoSuccess ("Not Implemented");
###############################################################################
#
class LSFJob (saga.adaptors.cpi.job.Job):
    """ implements saga.adaptors.cpi.job.Job

    Thin CPI wrapper: every operation delegates to the owning
    LSFJobService ('self.js'), which holds the shell connection and the
    shared job-info table.  Attribute getters return None for jobs that
    were never started.
    """

    def __init__(self, api, adaptor):
        """CPI-level constructor; instance setup happens in init_instance()."""
        # initialize parent class
        _cpi_base = super(LSFJob, self)
        _cpi_base.__init__(api, adaptor)

    def _get_impl(self):
        # hand out the adaptor-side implementation object
        return self

    @SYNC_CALL
    def init_instance(self, job_info):
        """ implements saga.adaptors.cpi.job.Job.init_instance()

        Called for every new saga.job.Job object; stores the description
        and owning service, and restores the job id on reconnect.
        """
        self.jd = job_info["job_description"]
        self.js = job_info["job_service"]

        if job_info['reconnect'] is True:
            self._id = job_info['reconnect_jobid']
            self._started = True
        else:
            self._id = None
            self._started = False

        return self.get_api()

    @SYNC_CALL
    def get_state(self):
        """ implements saga.adaptors.cpi.job.Job.get_state()
        """
        return self.js._job_get_state(job_obj=self)

    @SYNC_CALL
    def wait(self, timeout):
        """ implements saga.adaptors.cpi.job.Job.wait()
        """
        if self._started is False:
            log_error_and_raise("Can't wait for job that hasn't been started",
                                saga.IncorrectState, self._logger)
        else:
            self.js._job_wait(job_obj=self, timeout=timeout)

    @SYNC_CALL
    def cancel(self, timeout):
        """ implements saga.adaptors.cpi.job.Job.cancel()
        """
        if self._started is False:
            # BUG FIX: the message used to say "Can't wait for job ..."
            # (copy/paste from wait() above), which made the error
            # misleading for cancel().
            log_error_and_raise("Can't cancel job that hasn't been started",
                                saga.IncorrectState, self._logger)
        else:
            self.js._job_cancel(self)

    @SYNC_CALL
    def run(self):
        """ implements saga.adaptors.cpi.job.Job.run()
        """
        self._id = self.js._job_run(self)
        self._started = True

    @SYNC_CALL
    def get_service_url(self):
        """ implements saga.adaptors.cpi.job.Job.get_service_url()
        """
        return self.js.rm

    @SYNC_CALL
    def get_id(self):
        """ implements saga.adaptors.cpi.job.Job.get_id()
        """
        return self._id

    @SYNC_CALL
    def get_exit_code(self):
        """ implements saga.adaptors.cpi.job.Job.get_exit_code()
        """
        if self._started is False:
            return None
        return self.js._job_get_exit_code(self)

    @SYNC_CALL
    def get_created(self):
        """ implements saga.adaptors.cpi.job.Job.get_created()
        """
        if self._started is False:
            return None
        return self.js._job_get_create_time(self)

    @SYNC_CALL
    def get_started(self):
        """ implements saga.adaptors.cpi.job.Job.get_started()
        """
        if self._started is False:
            return None
        return self.js._job_get_start_time(self)

    @SYNC_CALL
    def get_finished(self):
        """ implements saga.adaptors.cpi.job.Job.get_finished()
        """
        if self._started is False:
            return None
        return self.js._job_get_end_time(self)

    @SYNC_CALL
    def get_execution_hosts(self):
        """ implements saga.adaptors.cpi.job.Job.get_execution_hosts()
        """
        if self._started is False:
            return None
        return self.js._job_get_execution_hosts(self)
|
#!/usr/bin/env python2
#-*- coding: utf-8 -*-
from __future__ import print_function, division
import sys, os, os.path, itertools
pkg_dir = os.path.dirname(os.path.realpath(__file__)) + '/../../'
sys.path.append(pkg_dir)
from scipy.stats import poisson
from MPBNP import *
from MPBNP import BaseSampler, BasePredictor
from transforms import *
np.set_printoptions(suppress=True)
class Gibbs(BaseSampler):
    """Gibbs sampler for a transformed-IBP (tIBP) noisy-OR image model."""

    # indices into the per-(object, feature) transformation array 'r'
    V_SCALE = 0
    H_SCALE = 1
    V_TRANS = 2
    H_TRANS = 3
    NUM_TRANS = 4  # number of transformation slots per feature
def __init__(self, cl_mode = True, cl_device = None, record_best = True,
             alpha = None, lam = 0.98, theta = 0.10, epislon = 0.02, init_k = 10):
    """Initialize the class.

    :param cl_mode:     run the OpenCL kernels instead of pure numpy
    :param cl_device:   OpenCL device selector (passed to BaseSampler)
    :param record_best: keep track of the best posterior sample
    :param alpha:       IBP concentration; NOTE(review): read_csv()
                        overwrites this with N -- confirm intended
    :param lam:         efficacy of a feature ('lambda')
    :param theta:       prior probability that a pixel is on in a feature image
    :param epislon:     probability that a pixel is on by chance in an
                        actual image (name keeps the original 'epislon'
                        spelling for API compatibility)
    :param init_k:      initial number of features
    """
    BaseSampler.__init__(self, cl_mode = cl_mode, cl_device = cl_device, record_best = record_best)

    if cl_mode:
        # build the OpenCL program once per sampler instance
        program_str = open(pkg_dir + 'MPBNP/tibp/kernels/tibp_noisyor_cl.c', 'r').read()
        self.prg = cl.Program(self.ctx, program_str).build()

    self.alpha = alpha      # tendency to generate new features
    self.k = init_k         # initial number of features
    self.theta = theta      # prior probability that a pixel is on in a feature image
    self.lam = lam          # efficacy of a feature
    self.epislon = epislon  # probability that a pixel is on by chance in an actual image
    self.phi = 0.9          # prior probability that no transformation is applied
    self.samples = {'z': [], 'y': [], 'r': []}  # sample storage, to be pickled
def read_csv(self, filepath, header=True):
    """Read the data from a csv file.

    Each row starts with the image width, followed by the flattened
    binary pixel values of one image.
    """
    BaseSampler.read_csv(self, filepath, header)
    # convert the data to the appropriate formats
    self.new_obs = []
    self.img_w, self.img_h = None, None

    for row in self.obs:
        if self.img_w is None:
            # the first column of the first row carries the image width
            self.img_w = int(row[0])
            if self.img_w == 0 or (len(row)-1) % self.img_w != 0:
                raise Exception('The sampler does not understand the format of the data. Did you forget to specify image width in the data file?')
        self.new_obs.append([int(_) for _ in row])

    # drop the width column; keep only pixel data
    self.obs = np.array(self.new_obs)[:,1:]

    if self.cl_mode:
        # copy the observations to the OpenCL device once, up front
        self.d_obs = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf=self.obs.astype(np.int32))

    # self.d is the length of the flattened vectors
    self.d = self.obs.shape[1]
    self.img_h = int(self.d / self.img_w)
    # NOTE(review): this overwrites any alpha passed to __init__ with N
    # (the number of observations) -- confirm this is intended.
    self.alpha = self.N
    return
def direct_read_obs(self, obs):
"""Read the data from a numpy array.
"""
BaseSampler.direct_read_obs(self, obs)
self.d = self.obs.shape[1]
def do_inference(self, init_y = None, init_z = None, init_r = None, output_file = None):
"""Perform inference on the given observations assuming data are generated by an IBP model
with noisy-or as the likelihood function.
@param init_y: An initial feature image matrix, where values are 0 or 1
@param init_z: An initial feature ownership matrix, where values are 0 or 1
"""
BaseSampler.do_inference(self, output_file=None)
if init_y is None:
init_y = np.random.randint(0, 2, (self.k, self.d))
else:
assert(type(init_y) is np.ndarray)
assert(init_y.shape == (self.k, self.d))
if init_z is None:
init_z = np.random.randint(0, 2, (len(self.obs), self.k))
else:
assert(type(init_z) is np.ndarray)
assert(init_z.shape == (len(self.obs), self.k))
if init_r is None:
init_r = np.empty(shape = (self.N, self.k, self.NUM_TRANS), dtype=np.int32)
init_r[:,:,self.V_SCALE] = 0
init_r[:,:,self.H_SCALE] = 0
init_r[:,:,self.V_TRANS] = np.random.randint(0, 2, (self.N, self.k))
init_r[:,:,self.H_TRANS] = np.random.randint(0, 2, (self.N, self.k))
else:
assert(init_r is None)
if self.cl_mode:
timing_stats = self._cl_infer_yzr(init_y, init_z, init_r)
else:
timing_stats = self._infer_yzr(init_y, init_z, init_r)
# report the results
if output_file is sys.stdout:
if self.record_best:
final_y, final_z, final_r = self.best_sample[0]
num_of_feats = final_z.shape[1]
print('parameter,value',
'alpha,%f' % self.alpha, 'lambda,%f' % self.lam, 'theta,%f' % self.theta,
'epislon,%f' % self.epislon, 'phi,%f' % self.phi, 'inferred_K,%d' % num_of_feats,
'gpu_time,%f' % timing_stats[0], 'total_time,%f' % timing_stats[1],
file = output_file, sep = '\n')
np.savetxt(output_file, final_z, fmt="%d", comments='', delimiter=',',
header=','.join(['feature%d' % _ for _ in range(num_of_feats)]))
for k in xrange(num_of_feats):
print('Feature %d\n---------' % k, file = output_file)
np.savetxt(output_file, final_y[k].reshape(self.img_w, self.img_h),
fmt="%d", delimiter=',')
print('object', 'feature', 'v_scale', 'h_scale', 'v_translation', 'h_translation',
file=output_file, sep=',')
for n in xrange(self.N):
for k in xrange(num_of_feats):
print(n, k, *final_r[n,k], file=output_file, sep=',')
else:
if self.record_best:
final_y, final_z, final_r = self.best_sample[0]
num_of_feats = final_z.shape[1]
try: os.mkdir(output_file)
except: pass
print('parameter,value',
'alpha,%f' % self.alpha, 'lambda,%f' % self.lam, 'theta,%f' % self.theta,
'epislon,%f' % self.epislon, 'phi,%f' % self.phi, 'inferred_K,%d' % num_of_feats,
'gpu_time,%f' % timing_stats[0], 'total_time,%f' % timing_stats[1],
file = gzip.open(output_file + 'parameters.csv.gz', 'w'), sep = '\n')
np.savetxt(gzip.open(output_file + 'feature_ownership.csv.gz', 'w'), final_z,
fmt="%d", comments='', delimiter=',',
header=','.join(['feature%d' % _ for _ in range(num_of_feats)]))
for k in xrange(num_of_feats):
np.savetxt(gzip.open(output_file + 'feature_%d_image.csv.gz' % k, 'w'),
final_y[k].reshape(self.img_w, self.img_h), fmt="%d", delimiter=',')
transform_fp = gzip.open(output_file + 'transformations.csv.gz', 'w')
print('object', 'feature', 'v_scale', 'h_scale', 'v_translation', 'h_translation',
file = transform_fp, sep=',')
for n in xrange(self.N):
for k in xrange(num_of_feats):
print(n, k, *final_r[n,k], file=transform_fp, sep=',')
transform_fp.close()
else:
try: os.mkdir(output_file)
except: pass
print('parameter,value',
'alpha,%f' % self.alpha, 'lambda,%f' % self.lam, 'theta,%f' % self.theta,
'epislon,%f' % self.epislon, 'phi,%f' % self.phi,
'gpu_time,%f' % timing_stats[0], 'total_time,%f' % timing_stats[1],
file = gzip.open(output_file + 'parameters.csv.gz', 'w'), sep = '\n')
np.savez_compressed(output_file + 'feature_ownership.npz', self.samples['z'])
np.savez_compressed(output_file + 'feature_images.npz', self.samples['y'])
np.savez_compressed(output_file + 'transformations.npz', self.samples['r'])
return timing_stats
    def _infer_yzr(self, init_y, init_z, init_r):
        """Wrapper function to start the inference on y, z and r.
        This function is not supposed to directly invoked by an end user.

        @param init_y: Passed in from do_inference()
        @param init_z: Passed in from do_inference()
        @param init_r: Passed in from do_inference()
        @return: (gpu_time, total_time, None); gpu_time is unchanged on this
            pure-python code path
        """
        cur_y = init_y
        cur_z = init_z
        cur_r = init_r
        a_time = time()
        # seed the best-sample tracker with the initial state
        if self.record_best: self.auto_save_sample(sample = (cur_y, cur_z, cur_r))
        for i in xrange(self.niter):
            # one sweep: resample feature images, then ownership, then transforms
            temp_cur_y = self._infer_y(cur_y, cur_z, cur_r)
            temp_cur_y, temp_cur_z, temp_cur_r = self._infer_z(temp_cur_y, cur_z, cur_r)
            temp_cur_r = self._infer_r(temp_cur_y, temp_cur_z, temp_cur_r)
            if self.record_best:
                # only adopt the new state if it improves the best logprob so far
                if self.auto_save_sample(sample = (temp_cur_y, temp_cur_z, temp_cur_r)):
                    cur_y, cur_z, cur_r = temp_cur_y, temp_cur_z, temp_cur_r
                # stop early after 1000 iterations without improvement
                if self.no_improvement(1000):
                    break
            elif i >= self.burnin:
                # past burn-in: always adopt the new state and record it
                cur_y, cur_z, cur_r = temp_cur_y, temp_cur_z, temp_cur_r
                self.samples['z'].append(cur_z)
                self.samples['y'].append(cur_y)
                self.samples['r'].append(cur_r)
        self.total_time += time() - a_time
        return self.gpu_time, self.total_time, None
    def _infer_y(self, cur_y, cur_z, cur_r):
        """Infer feature images

        Gibbs-samples every pixel of every feature image by evaluating the
        data loglikelihood with that pixel forced on and forced off.
        @return: the newly sampled feature image matrix (same shape as cur_y)
        """
        # calculate the prior probability that a pixel is on
        y_on_log_prob = np.log(self.theta) * np.ones(cur_y.shape)
        y_off_log_prob = np.log(1. - self.theta) * np.ones(cur_y.shape)
        # calculate the likelihood
        on_loglik = np.empty(cur_y.shape)
        off_loglik = np.empty(cur_y.shape)
        for row in xrange(cur_y.shape[0]):
            # only images that own feature `row` are affected by its pixels
            affected_data_index = np.where(cur_z[:,row] == 1)
            for col in xrange(cur_y.shape[1]):
                old_value = cur_y[row, col]
                # evaluate the likelihood with the pixel on, then off
                cur_y[row, col] = 1
                on_loglik[row, col] = self._loglik_nth(cur_y, cur_z, cur_r, n = affected_data_index)
                cur_y[row, col] = 0
                off_loglik[row, col] = self._loglik_nth(cur_y, cur_z, cur_r, n = affected_data_index)
                cur_y[row, col] = old_value
        # add to the prior
        y_on_log_prob += on_loglik
        y_off_log_prob += off_loglik
        # subtract the elementwise max for numerical stability before exp
        ew_max = np.maximum(y_on_log_prob, y_off_log_prob)
        y_on_log_prob -= ew_max
        y_off_log_prob -= ew_max
        # normalize
        y_on_prob = np.exp(y_on_log_prob) / (np.exp(y_on_log_prob) + np.exp(y_off_log_prob))
        cur_y = np.random.binomial(1, y_on_prob)
        return cur_y
    def _infer_z(self, cur_y, cur_z, cur_r):
        """Infer feature ownership

        Gibbs-samples each entry of Z using the IBP prior for existing
        features, then proposes brand-new features and prunes empty /
        unowned ones, so the feature count may change.
        @return: the (possibly resized) tuple (cur_y, cur_z, cur_r)
        """
        N = float(len(self.obs))
        z_col_sum = cur_z.sum(axis = 0)
        # calculate the IBP prior on feature ownership for existing features
        # m_minus[i,k] = number of OTHER objects currently owning feature k
        m_minus = z_col_sum - cur_z
        on_prob = m_minus / N
        off_prob = 1 - m_minus / N
        # add loglikelihood of data
        for row in xrange(cur_z.shape[0]):
            for col in xrange(cur_z.shape[1]):
                old_value = cur_z[row, col]
                # weight prior by the likelihood with the entry on, then off
                cur_z[row, col] = 1
                on_prob[row, col] = on_prob[row, col] * np.exp(self._loglik_nth(cur_y, cur_z, cur_r, n = row))
                cur_z[row, col] = 0
                off_prob[row, col] = off_prob[row, col] * np.exp(self._loglik_nth(cur_y, cur_z, cur_r, n = row))
                cur_z[row, col] = old_value
        # normalize the probability
        on_prob = on_prob / (on_prob + off_prob)
        # sample the values
        cur_z = np.random.binomial(1, on_prob)
        # sample new features use importance sampling
        k_new = self._sample_k_new(cur_y, cur_z, cur_r)
        if k_new:
            cur_y, cur_z, cur_r = k_new
        # delete empty feature images (features whose image is all zeros)
        non_empty_feat_img = np.where(cur_y.sum(axis = 1) > 0)
        cur_y = cur_y[non_empty_feat_img[0],:]
        cur_z = cur_z[:,non_empty_feat_img[0]]
        cur_r = np.array([_[non_empty_feat_img[0],:] for _ in cur_r])
        # delete null features (features owned by no object)
        active_feat_col = np.where(cur_z.sum(axis = 0) > 0)
        cur_z = cur_z[:,active_feat_col[0]]
        cur_y = cur_y[active_feat_col[0],:]
        cur_r = np.array([_[active_feat_col[0],:] for _ in cur_r])
        # update self.k
        self.k = cur_z.shape[1]
        return cur_y, cur_z, cur_r
def _infer_r(self, cur_y, cur_z, cur_r):
"""Infer transformations.
"""
rand_v = np.random.randint(0, self.img_h, size=(cur_z.shape[0], cur_z.shape[1]))
rand_h = np.random.randint(0, self.img_w, size=(cur_z.shape[0], cur_z.shape[1]))
rand_v_scale = np.random.randint(-self.img_h+2, self.img_h, size=(cur_z.shape[0], cur_z.shape[1]))
rand_h_scale = np.random.randint(-self.img_w+2, self.img_w, size=(cur_z.shape[0], cur_z.shape[1]))
# iterate over each transformation and resample it
for nth_img in xrange(cur_r.shape[0]):
for kth_feature in xrange(cur_r.shape[1]):
old_loglik = self._loglik_nth(cur_y, cur_z, cur_r, n=nth_img)
# resample vertical translation
old_v_trans = cur_r[nth_img, kth_feature, self.V_TRANS]
# set a new vertical transformation
cur_r[nth_img, kth_feature, self.V_TRANS] = rand_v[nth_img, kth_feature] #np.random.randint(0, self.img_h)
old_logprior = np.log(abs((old_v_trans > 0) - self.phi))
new_logprior = np.log(abs((rand_v[nth_img, kth_feature] > 0) - self.phi))
new_loglik = self._loglik_nth(cur_y, cur_z, cur_r, n = nth_img)
move_prob = 1 / (1 + np.exp(old_loglik + old_logprior - new_loglik - new_logprior))
if random.random() > move_prob: # revert changes if move_prob too small
cur_r[nth_img, kth_feature, self.V_TRANS] = old_v_trans
else:
old_loglik = new_loglik
# resample horizontal translation
old_h_trans = cur_r[nth_img, kth_feature, self.H_TRANS]
# set a new vertical transformation
cur_r[nth_img, kth_feature, self.H_TRANS] = rand_h[nth_img, kth_feature]
old_logprior = np.log(abs((old_h_trans > 0) - self.phi))
new_logprior = np.log(abs((rand_h[nth_img, kth_feature] > 0) - self.phi))
new_loglik = self._loglik_nth(cur_y, cur_z, cur_r, n = nth_img)
move_prob = 1 / (1 + np.exp(old_loglik + old_logprior - new_loglik - new_logprior))
if random.random() > move_prob: # revert changes if move_prob too small
cur_r[nth_img, kth_feature, self.H_TRANS] = old_h_trans
else:
old_loglik = new_loglik
# resample scale percentage
old_v_scale = cur_r[nth_img, kth_feature, self.V_SCALE]
# set a new vertical scale
cur_r[nth_img, kth_feature, self.V_SCALE] = rand_v_scale[nth_img, kth_feature]
old_logprior = np.log(abs((old_v_scale > 0) - self.phi))
new_logprior = np.log(abs((rand_v_scale[nth_img, kth_feature] > 0) - self.phi))
new_loglik = self._loglik_nth(cur_y, cur_z, cur_r, n = nth_img)
move_prob = 1 / (1 + np.exp(old_loglik + old_logprior - new_loglik - new_logprior))
if random.random() > move_prob: # revert changes if move_prob too small
cur_r[nth_img, kth_feature, self.V_SCALE] = old_v_scale
else:
old_loglik = new_loglik
# resample scale percentage
old_h_scale = cur_r[nth_img, kth_feature, self.H_SCALE]
# set a new horizontal scale
cur_r[nth_img, kth_feature, self.H_SCALE] = rand_h_scale[nth_img, kth_feature]
old_logprior = np.log(abs((old_h_scale > 0) - self.phi))
new_logprior = np.log(abs((rand_h_scale[nth_img, kth_feature] > 0) - self.phi))
new_loglik = self._loglik_nth(cur_y, cur_z, cur_r, n = nth_img)
move_prob = 1 / (1 + np.exp(old_loglik + old_logprior - new_loglik - new_logprior))
if random.random() > move_prob: # revert changes if move_prob too small
cur_r[nth_img, kth_feature, self.H_SCALE] = old_h_scale
return cur_r
def _sample_k_new(self, cur_y, cur_z, cur_r):
"""Sample new features for all rows using Metropolis hastings.
(This is a heuristic strategy aiming for easy parallelization in an
equivalent GPU implementation. We here have effectively treated the
current Z as a snapshot frozen in time, and each new k is based on
this frozen snapshot of Z. In a more correct procedure, we should
go through the rows and sample k new for each row given all previously
sampled new ks.)
"""
N = float(len(self.obs))
#old_loglik = self._loglik(cur_y, cur_z, cur_r)
k_new_count = np.random.poisson(self.alpha / N)
if k_new_count == 0: return False
# modify the feature ownership matrix
cur_z_new = np.hstack((cur_z, np.random.randint(0, 2, size = (cur_z.shape[0], k_new_count))))
#cur_z_new[:, [xrange(-k_new_count,0)]] = 1
# propose feature images by sampling from the prior distribution
cur_y_new = np.vstack((cur_y, np.random.binomial(1, self.theta, (k_new_count, self.d))))
cur_r_new = np.array([np.vstack((_, np.zeros((k_new_count, self.NUM_TRANS)))) for _ in cur_r])
return cur_y_new.astype(np.int32), cur_z_new.astype(np.int32), cur_r_new.astype(np.int32)
def _sample_lam(self, cur_y, cur_z):
"""Resample the value of lambda.
"""
old_loglik = self._loglik(cur_y, cur_z)
old_lam = self.lam
# modify the feature ownership matrix
self.lam = np.random.beta(1,1)
new_loglik = self._loglik(cur_y, cur_z)
move_prob = 1 / (1 + np.exp(old_loglik - new_loglik))
if random.random() < move_prob:
pass
else:
self.lam = old_lam
def _sample_epislon(self, cur_y, cur_z):
"""Resample the value of epislon
"""
old_loglik = self._loglik(cur_y, cur_z)
old_epislon = self.epislon
# modify the feature ownership matrix
self.epislon = np.random.beta(1,1)
new_loglik = self._loglik(cur_y, cur_z)
move_prob = 1 / (1 + np.exp(old_loglik - new_loglik));
if random.random() < move_prob:
pass
else:
self.epislon = old_epislon
    def _loglik_nth(self, cur_y, cur_z, cur_r, n):
        """Calculate the loglikelihood of the nth data point
        given Y, Z and R.

        @param n: either a single int index, or a np.where()-style tuple
            whose first element is an array of indices
        @return: scalar loglikelihood summed over the selected images/pixels
        """
        assert(cur_z.shape[1] == cur_y.shape[0] == cur_r.shape[1])
        # normalize n to a sequence of indices
        if type(n) is int: n = [n]
        else: n = n[0]
        # not_on_p[i, d]: probability that pixel d of the ith selected image is OFF
        not_on_p = np.empty((len(n), cur_y.shape[1]))
        # transform the feature images to obtain the effective y
        # this needs to be done on a per object basis
        for i in xrange(len(n)):
            nth = n[i]
            nth_y = copy.deepcopy(cur_y) # the transformed cur_y with respect to nth
            kth_feat = 0
            for r_feat in cur_r[nth]: # r_feat refers to the transforms applied one feature
                nth_y[kth_feat] = scale_manual(nth_y[kth_feat], self.img_w, r_feat[self.H_SCALE], r_feat[self.V_SCALE])
                nth_y[kth_feat] = v_translate(nth_y[kth_feat], self.img_w, r_feat[self.V_TRANS])
                nth_y[kth_feat] = h_translate(nth_y[kth_feat], self.img_w, r_feat[self.H_TRANS])
                kth_feat += 1
            # noisy-OR: P(off) = (1-lam)^(#active features covering pixel) * (1-epislon)
            not_on_p[i] = np.power(1. - self.lam, np.dot(cur_z[nth], nth_y)) * (1. - self.epislon)
        # |obs - not_on_p| equals P(on) where obs == 1 and P(off) where obs == 0
        loglik = np.log(np.abs(self.obs[n] - not_on_p)).sum()
        return loglik
    def _loglik(self, cur_y, cur_z, cur_r):
        """Calculate the loglikelihood of data given Y, Z and R.

        @return: scalar loglikelihood summed over all images and pixels
        """
        assert(cur_z.shape[1] == cur_y.shape[0] == cur_r.shape[1])
        # not_on_p[n, d]: probability that pixel d of image n is OFF
        not_on_p = np.empty((self.N, self.d))
        # transform the feature images to obtain the effective y
        # this needs to be done on a per object basis
        for nth in xrange(self.N):
            nth_y = copy.deepcopy(cur_y) # the transformed cur_y with respect to nth
            kth_feat = 0
            for r_feat in cur_r[nth]: # r_feat refers to the transforms applied one feature
                nth_y[kth_feat] = scale_manual(nth_y[kth_feat], self.img_w, r_feat[self.H_SCALE], r_feat[self.V_SCALE])
                nth_y[kth_feat] = v_translate(nth_y[kth_feat], self.img_w, r_feat[self.V_TRANS])
                nth_y[kth_feat] = h_translate(nth_y[kth_feat], self.img_w, r_feat[self.H_TRANS])
                kth_feat += 1
            # noisy-OR: P(off) = (1-lam)^(#active features covering pixel) * (1-epislon)
            not_on_p[nth] = np.power(1. - self.lam, np.dot(cur_z[nth], nth_y)) * (1. - self.epislon)
        # |obs - not_on_p| equals P(on) where obs == 1 and P(off) where obs == 0
        loglik_mat = np.log(np.abs(self.obs - not_on_p))
        return loglik_mat.sum()
    def _z_by_ry(self, cur_y, cur_z, cur_r):
        """Compute, for each object, the dot product of its ownership row with
        its transformed feature images -- i.e., how many active features
        cover each pixel of that object.

        NOTE(review): unlike _loglik/_loglik_nth, scaling (scale_manual) is
        NOT applied here, only translations -- confirm whether this asymmetry
        is intentional.
        @return: an int64 matrix of shape (num objects, num pixels)
        """
        z_by_ry = np.empty(shape = (cur_z.shape[0], cur_y.shape[1]), dtype=np.int64)
        for nth in xrange(self.N):
            nth_y = copy.deepcopy(cur_y) # the transformed cur_y with respect to nth
            kth_feat = 0
            for r_feat in cur_r[nth]: # r_feat refers to the transforms applied one feature
                nth_y[kth_feat] = v_translate(nth_y[kth_feat], self.img_w, r_feat[self.V_TRANS])
                nth_y[kth_feat] = h_translate(nth_y[kth_feat], self.img_w, r_feat[self.H_TRANS])
                kth_feat += 1
            z_by_ry[nth,] = np.dot(cur_z[nth], nth_y)
        return z_by_ry
def _cl_infer_yzr(self, init_y, init_z, init_r):
"""Wrapper function to start the inference on y and z.
This function is not supposed to directly invoked by an end user.
@param init_y: Passed in from do_inference()
@param init_z: Passed in from do_inference()
@param init_r: Passed in from do_inference()
"""
total_time = time()
cur_y = init_y.astype(np.int32)
cur_z = init_z.astype(np.int32)
cur_r = init_r.astype(np.int32) # this is fine with only translations
if self.record_best: self.auto_save_sample(sample = (cur_y, cur_z, cur_r))
for i in xrange(self.niter):
a_time = time()
d_cur_z = cl.Buffer(self.ctx, self.mf.READ_WRITE | self.mf.COPY_HOST_PTR, hostbuf = cur_z.astype(np.int32))
d_cur_y = cl.Buffer(self.ctx, self.mf.READ_WRITE | self.mf.COPY_HOST_PTR, hostbuf = cur_y.astype(np.int32))
d_cur_r = cl.Buffer(self.ctx, self.mf.READ_WRITE | self.mf.COPY_HOST_PTR, hostbuf = cur_r.astype(np.int32))
self.gpu_time += time() - a_time
d_cur_y = self._cl_infer_y(cur_y, cur_z, cur_r, d_cur_y, d_cur_z, d_cur_r)
d_cur_z = self._cl_infer_z(cur_y, cur_z, cur_r, d_cur_y, d_cur_z, d_cur_r)
temp_cur_r = self._cl_infer_r(cur_y, cur_z, cur_r, d_cur_y, d_cur_z, d_cur_r)
a_time = time()
temp_cur_y = np.empty_like(cur_y)
cl.enqueue_copy(self.queue, temp_cur_y, d_cur_y)
temp_cur_z = np.empty_like(cur_z)
cl.enqueue_copy(self.queue, temp_cur_z, d_cur_z)
self.gpu_time += time() - a_time
temp_cur_y, temp_cur_z, temp_cur_r = self._cl_infer_k_new(temp_cur_y, temp_cur_z, temp_cur_r)
if self.record_best:
if self.auto_save_sample(sample = (temp_cur_y, temp_cur_z, temp_cur_r)):
print('Number of features:', cur_z.shape[1], file=sys.stderr)
cur_y, cur_z, cur_r = temp_cur_y, temp_cur_z, temp_cur_r
if self.no_improvement(1000):
break
elif i >= self.burnin:
cur_y, cur_z, cur_r = temp_cur_y, temp_cur_z, temp_cur_r
self.samples['z'].append(cur_z)
self.samples['y'].append(cur_y)
self.samples['y'].append(cur_r)
self.total_time += time() - total_time
return self.gpu_time, self.total_time, None
    def _cl_infer_y(self, cur_y, cur_z, cur_r, d_cur_y, d_cur_z, d_cur_r):
        """Infer feature images

        Runs the compute_z_by_ry kernel to obtain the per-pixel count of
        active transformed features, then the sample_y kernel to Gibbs-sample
        every pixel of every feature image in place on the device.
        @return: the device buffer holding the resampled feature images
        """
        a_time = time()
        d_z_by_ry = cl.Buffer(self.ctx, self.mf.READ_WRITE | self.mf.COPY_HOST_PTR,
                              hostbuf = np.empty(shape = self.obs.shape, dtype = np.int32))
        # uniform draws used by the kernel's accept step, one per pixel
        d_rand = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR,
                           hostbuf = np.random.random(cur_y.shape).astype(np.float32))
        # per-object scratch space for the transformed feature images
        transformed_y = np.empty(shape = (self.obs.shape[0], cur_z.shape[1], self.obs.shape[1]), dtype = np.int32)
        d_transformed_y = cl.Buffer(self.ctx, self.mf.READ_WRITE | self.mf.COPY_HOST_PTR, hostbuf = transformed_y)
        d_temp_y = cl.Buffer(self.ctx, self.mf.READ_WRITE | self.mf.COPY_HOST_PTR, hostbuf = transformed_y)
        # first transform the feature images and calculate z_by_ry
        self.prg.compute_z_by_ry(self.queue, cur_z.shape, (1, cur_z.shape[1]),
                                 d_cur_y, d_cur_z, d_cur_r, d_transformed_y, d_temp_y, d_z_by_ry,
                                 np.int32(self.obs.shape[0]), np.int32(self.obs.shape[1]), np.int32(cur_y.shape[0]),
                                 np.int32(self.img_w))
        # calculate the prior probability that a pixel is on
        self.prg.sample_y(self.queue, cur_y.shape, None,
                          d_cur_y, d_cur_z, d_z_by_ry, d_cur_r, self.d_obs, d_rand,
                          np.int32(self.N), np.int32(self.d), np.int32(cur_y.shape[0]), np.int32(self.img_w),
                          np.float32(self.lam), np.float32(self.epislon), np.float32(self.theta))
        self.gpu_time += time() - a_time
        return d_cur_y
    def _cl_infer_z(self, cur_y, cur_z, cur_r, d_cur_y, d_cur_z, d_cur_r):
        """Infer feature ownership

        Runs the compute_z_by_ry kernel, then the sample_z kernel to
        Gibbs-sample every entry of the ownership matrix in place on the
        device (using the per-feature ownership counts for the IBP prior).
        @return: the device buffer holding the resampled ownership matrix
        """
        a_time = time()
        d_z_by_ry = cl.Buffer(self.ctx, self.mf.READ_WRITE | self.mf.COPY_HOST_PTR,
                              hostbuf = np.empty(shape = self.obs.shape, dtype = np.int32))
        # per-feature ownership counts, used for the IBP prior in the kernel
        d_z_col_sum = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR,
                                hostbuf = cur_z.sum(axis = 0).astype(np.int32))
        # uniform draws used by the kernel's accept step, one per entry of Z
        d_rand = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR,
                           hostbuf = np.random.random(cur_z.shape).astype(np.float32))
        # per-object scratch space for the transformed feature images
        transformed_y = np.empty(shape = (self.obs.shape[0], cur_z.shape[1], self.obs.shape[1]), dtype = np.int32)
        d_transformed_y = cl.Buffer(self.ctx, self.mf.READ_WRITE | self.mf.COPY_HOST_PTR, hostbuf = transformed_y)
        d_temp_y = cl.Buffer(self.ctx, self.mf.READ_WRITE | self.mf.COPY_HOST_PTR, hostbuf = transformed_y)
        # first transform the feature images and calculate z_by_ry
        self.prg.compute_z_by_ry(self.queue, cur_z.shape, (1, cur_z.shape[1]),
                                 d_cur_y, d_cur_z, d_cur_r, d_transformed_y, d_temp_y, d_z_by_ry,
                                 np.int32(self.obs.shape[0]), np.int32(self.obs.shape[1]), np.int32(cur_y.shape[0]),
                                 np.int32(self.img_w))
        # calculate the prior probability that a pixel is on
        self.prg.sample_z(self.queue, cur_z.shape, None,
                          d_cur_y, d_cur_z, d_cur_r, d_z_by_ry, d_z_col_sum, self.d_obs, d_rand,
                          np.int32(self.N), np.int32(self.d), np.int32(cur_y.shape[0]), np.int32(self.img_w),
                          np.float32(self.lam), np.float32(self.epislon), np.float32(self.theta))
        self.gpu_time += time() - a_time
        return d_cur_z
    def _cl_infer_k_new(self, cur_y, cur_z, cur_r):
        """Host-side step of the opencl sampler: propose brand-new features
        and prune empty / unowned ones (mirrors the tail of _infer_z), then
        normalize array layout before the next device upload.
        @return: the (possibly resized) tuple (cur_y, cur_z, cur_r), int32
        """
        # sample new features use importance sampling
        k_new = self._sample_k_new(cur_y, cur_z, cur_r)
        if k_new:
            cur_y, cur_z, cur_r = k_new
        # delete empty feature images (features whose image is all zeros)
        non_empty_feat_img = np.where(cur_y.sum(axis = 1) > 0)
        cur_y = cur_y[non_empty_feat_img[0],:].astype(np.int32)
        cur_z = cur_z[:,non_empty_feat_img[0]].astype(np.int32)
        cur_r = np.array([_[non_empty_feat_img[0],:] for _ in cur_r]).astype(np.int32)
        # delete null features (features owned by no object)
        active_feat_col = np.where(cur_z.sum(axis = 0) > 0)
        cur_z = cur_z[:,active_feat_col[0]].astype(np.int32)
        cur_y = cur_y[active_feat_col[0],:].astype(np.int32)
        cur_r = np.array([_[active_feat_col[0],:] for _ in cur_r]).astype(np.int32)
        # update self.k
        self.k = cur_z.shape[1]
        # NOTE(review): the reshape round-trips below restore the original
        # shapes and look like no-ops except possibly forcing a contiguous
        # layout for the subsequent device upload -- confirm whether they
        # can be removed
        z_s0, z_s1 = cur_z.shape
        cur_z = cur_z.reshape((z_s0 * z_s1, 1))
        cur_z = cur_z.reshape((z_s0, z_s1))
        y_s0, y_s1 = cur_y.shape
        cur_y = cur_y.reshape((y_s0 * y_s1, 1))
        cur_y = cur_y.reshape((y_s0, y_s1))
        r_s0, r_s1, r_s2 = cur_r.shape
        cur_r = cur_r.reshape((r_s0 * r_s1 * r_s2, 1))
        cur_r = cur_r.reshape((r_s0, r_s1, r_s2))
        return cur_y, cur_z, cur_r
def _cl_infer_r(self, cur_y, cur_z, cur_r, d_cur_y, d_cur_z, d_cur_r):
"""Infer transformations using opencl.
Note: the algorithm works because resampling one value of cur_r at one time
only affects the loglikelihood of the corresponding image. Therefore, it is
possible to resample one aspect of transformation for all images at the same
time, as long as the new values are accepted / rejected independently of
each other.
"""
a_time = time()
d_z_by_ry_old = cl.array.empty(self.queue, self.obs.shape, np.int32, allocator=self.mem_pool)
d_z_by_ry_new = cl.array.empty(self.queue, self.obs.shape, np.int32, allocator=self.mem_pool)
d_replace_r = cl.array.empty(self.queue, (self.N,), np.int32, allocator=self.mem_pool)
d_rand = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR,
hostbuf=np.random.random(self.N).astype(np.float32))
transformed_y = np.empty(shape = (self.obs.shape[0], cur_z.shape[1], self.obs.shape[1]), dtype = np.int32)
d_transformed_y = cl.Buffer(self.ctx, self.mf.READ_WRITE | self.mf.COPY_HOST_PTR, hostbuf = transformed_y)
d_temp_y = cl.Buffer(self.ctx, self.mf.READ_WRITE | self.mf.COPY_HOST_PTR, hostbuf = transformed_y)
########### Dealing with vertical translations first ##########
d_cur_r = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf = cur_r.astype(np.int32))
# calculate the z_by_ry_old under old transformations
self.prg.compute_z_by_ry(self.queue, cur_z.shape, (1, cur_z.shape[1]),
d_cur_y, d_cur_z, d_cur_r, d_transformed_y, d_temp_y, d_z_by_ry_old.data,
np.int32(self.obs.shape[0]), np.int32(self.obs.shape[1]), np.int32(cur_y.shape[0]),
np.int32(self.img_w))
# calculate the z_by_ry_new under new randomly generated transformations
cur_r_new = np.copy(cur_r)
cur_r_new[:,:,self.V_TRANS] = np.random.randint(0, self.img_h, size = (cur_r_new.shape[0], cur_r_new.shape[1]))
d_cur_r_new = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf = cur_r_new.astype(np.int32))
self.prg.compute_z_by_ry(self.queue, cur_z.shape, (1, cur_z.shape[1]),
d_cur_y, d_cur_z, d_cur_r_new, d_transformed_y, d_temp_y, d_z_by_ry_new.data,
np.int32(self.obs.shape[0]), np.int32(self.obs.shape[1]), np.int32(cur_y.shape[0]),
np.int32(self.img_w))
# reject or accept newly proposed transformations on a per-object basis
d_logprior_old = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR,
hostbuf = np.log(abs((cur_r[:,:,self.V_TRANS] > 0) - self.phi)).astype(np.float32))
d_logprior_new = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR,
hostbuf = np.log(abs((cur_r_new[:,:,self.V_TRANS] > 0) - self.phi)).astype(np.float32))
self.prg.sample_r(self.queue, (self.N, ), None,
d_replace_r.data, d_z_by_ry_old.data, d_z_by_ry_new.data,
d_logprior_old, d_logprior_new, self.d_obs, d_rand,
np.int32(self.obs.shape[0]), np.int32(self.obs.shape[1]), np.int32(cur_y.shape[0]),
np.float32(self.lam), np.float32(self.epislon))
replace_r = d_replace_r.get()
cur_r[np.where(replace_r == 1)] = cur_r_new[np.where(replace_r == 1)]
########### Dealing with horizontal translations next ##########
d_cur_r = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf = cur_r.astype(np.int32))
# calculate the z_by_ry_old under old transformations
self.prg.compute_z_by_ry(self.queue, cur_z.shape, (1, cur_z.shape[1]),
d_cur_y, d_cur_z, d_cur_r, d_transformed_y, d_temp_y, d_z_by_ry_old.data,
np.int32(self.obs.shape[0]), np.int32(self.obs.shape[1]), np.int32(cur_y.shape[0]),
np.int32(self.img_w))
# calculate the z_by_ry_new under new randomly generated transformations
cur_r_new = np.copy(cur_r)
cur_r_new[:,:,self.H_TRANS] = np.random.randint(0, self.img_w, size = (cur_r_new.shape[0], cur_r_new.shape[1]))
d_cur_r_new = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf = cur_r_new.astype(np.int32))
self.prg.compute_z_by_ry(self.queue, cur_z.shape, (1, cur_z.shape[1]),
d_cur_y, d_cur_z, d_cur_r_new, d_transformed_y, d_temp_y, d_z_by_ry_new.data,
np.int32(self.obs.shape[0]), np.int32(self.obs.shape[1]), np.int32(cur_y.shape[0]),
np.int32(self.img_w))
# reject or accept newly proposed transformations on a per-object basis
d_logprior_old = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR,
hostbuf = np.log(abs((cur_r[:,:,self.H_TRANS] > 0) - self.phi)).astype(np.float32))
d_logprior_new = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR,
hostbuf = np.log(abs((cur_r_new[:,:,self.H_TRANS] > 0) - self.phi)).astype(np.float32))
self.prg.sample_r(self.queue, (self.N, ), None,
d_replace_r.data, d_z_by_ry_old.data, d_z_by_ry_new.data,
d_logprior_old, d_logprior_new, self.d_obs, d_rand,
np.int32(self.obs.shape[0]), np.int32(self.obs.shape[1]), np.int32(cur_y.shape[0]),
np.float32(self.lam), np.float32(self.epislon))
replace_r = d_replace_r.get()
cur_r[np.where(replace_r == 1)] = cur_r_new[np.where(replace_r == 1)]
########### Dealing with vertical scaling next ##########
d_cur_r = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf = cur_r.astype(np.int32))
# calculate the z_by_ry_old under old transformations
self.prg.compute_z_by_ry(self.queue, cur_z.shape, (1, cur_z.shape[1]),
d_cur_y, d_cur_z, d_cur_r_new, d_transformed_y, d_temp_y, d_z_by_ry_old.data,
np.int32(self.obs.shape[0]), np.int32(self.obs.shape[1]), np.int32(cur_y.shape[0]),
np.int32(self.img_w))
# calculate the z_by_ry_new under new randomly generated transformations
cur_r_new = np.copy(cur_r)
cur_r_new[:,:,self.V_SCALE] = np.random.randint(-self.img_h+2, self.img_h, size = (cur_r_new.shape[0], cur_r_new.shape[1]))
d_cur_r_new = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf = cur_r_new.astype(np.int32))
self.prg.compute_z_by_ry(self.queue, cur_z.shape, (1, cur_z.shape[1]),
d_cur_y, d_cur_z, d_cur_r_new, d_transformed_y, d_temp_y, d_z_by_ry_new.data,
np.int32(self.obs.shape[0]), np.int32(self.obs.shape[1]), np.int32(cur_y.shape[0]),
np.int32(self.img_w))
# reject or accept newly proposed transformations on a per-object basis
d_logprior_old = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR,
hostbuf = np.log(abs((cur_r[:,:,self.V_SCALE] > 0) - self.phi)).astype(np.float32))
d_logprior_new = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR,
hostbuf = np.log(abs((cur_r_new[:,:,self.V_SCALE] > 0) - self.phi)).astype(np.float32))
self.prg.sample_r(self.queue, (self.N, ), None,
d_replace_r.data, d_z_by_ry_old.data, d_z_by_ry_new.data,
d_logprior_old, d_logprior_new, self.d_obs, d_rand,
np.int32(self.obs.shape[0]), np.int32(self.obs.shape[1]), np.int32(cur_y.shape[0]),
np.float32(self.lam), np.float32(self.epislon))
replace_r = d_replace_r.get()
cur_r[np.where(replace_r == 1)] = cur_r_new[np.where(replace_r == 1)]
########### Dealing with horizontal scaling next ##########
d_cur_r = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf = cur_r.astype(np.int32))
# calculate the z_by_ry_old under old transformations
self.prg.compute_z_by_ry(self.queue, cur_z.shape, (1, cur_z.shape[1]),
d_cur_y, d_cur_z, d_cur_r_new, d_transformed_y, d_temp_y, d_z_by_ry_old.data,
np.int32(self.obs.shape[0]), np.int32(self.obs.shape[1]), np.int32(cur_y.shape[0]),
np.int32(self.img_w))
# calculate the z_by_ry_new under new randomly generated transformations
cur_r_new = np.copy(cur_r)
cur_r_new[:,:,self.H_SCALE] = np.random.randint(-self.img_w+2, self.img_w, size = (cur_r_new.shape[0], cur_r_new.shape[1]))
d_cur_r_new = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf = cur_r_new.astype(np.int32))
self.prg.compute_z_by_ry(self.queue, cur_z.shape, (1, cur_z.shape[1]),
d_cur_y, d_cur_z, d_cur_r_new, d_transformed_y, d_temp_y, d_z_by_ry_new.data,
np.int32(self.obs.shape[0]), np.int32(self.obs.shape[1]), np.int32(cur_y.shape[0]),
np.int32(self.img_w))
# reject or accept newly proposed transformations on a per-object basis
d_logprior_old = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR,
hostbuf = np.log(abs((cur_r[:,:,self.H_SCALE] > 0) - self.phi)).astype(np.float32))
d_logprior_new = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR,
hostbuf = np.log(abs((cur_r_new[:,:,self.H_SCALE] > 0) - self.phi)).astype(np.float32))
self.prg.sample_r(self.queue, (self.N, ), None,
d_replace_r.data, d_z_by_ry_old.data, d_z_by_ry_new.data,
d_logprior_old, d_logprior_new, self.d_obs, d_rand,
np.int32(self.obs.shape[0]), np.int32(self.obs.shape[1]), np.int32(cur_y.shape[0]),
np.float32(self.lam), np.float32(self.epislon))
replace_r = d_replace_r.get()
cur_r[np.where(replace_r == 1)] = cur_r_new[np.where(replace_r == 1)]
self.gpu_time += time() - a_time
return cur_r
    def _logprob(self, sample):
        """Calculate the joint log probability of data and model given a sample.

        @param sample: a (cur_y, cur_z, cur_r) tuple
        @return: log prior + log likelihood; a large negative sentinel when
            the sample has no features at all
        """
        cur_y, cur_z, cur_r = sample
        log_prior = 0
        log_lik = 0
        # degenerate sample with zero features: return a sentinel logprob
        if cur_z.shape[1] == 0: return -999999999.9
        if self.cl_mode:
            a_time = time()
            # upload the sample to the device
            d_cur_z = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf = cur_z.astype(np.int32))
            d_cur_y = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf = cur_y.astype(np.int32))
            d_cur_r = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf = cur_r.astype(np.int32))
            d_z_by_ry = cl.Buffer(self.ctx, self.mf.READ_WRITE | self.mf.COPY_HOST_PTR,
                                  hostbuf = np.empty(shape = self.obs.shape, dtype = np.int32))
            # per-object scratch space for the transformed feature images
            transformed_y = np.empty(shape = (self.obs.shape[0], cur_z.shape[1], self.obs.shape[1]), dtype = np.int32)
            d_transformed_y = cl.Buffer(self.ctx, self.mf.READ_WRITE | self.mf.COPY_HOST_PTR,
                                        hostbuf = transformed_y)
            d_temp_y = cl.Buffer(self.ctx, self.mf.READ_WRITE | self.mf.COPY_HOST_PTR,
                                 hostbuf = transformed_y)
            # calculate the log prior of Z
            d_logprior_z = cl.array.empty(self.queue, cur_z.shape, np.float32)
            self.prg.logprior_z(self.queue, cur_z.shape, (1, cur_z.shape[1]),
                                d_cur_z, d_logprior_z.data,
                                cl.LocalMemory(cur_z[0].nbytes), #cl.LocalMemory(cur_z.nbytes),
                                np.int32(self.N), np.int32(cur_y.shape[1]), np.int32(cur_z.shape[1]),
                                np.float32(self.alpha))
            # calculate the loglikelihood of data
            # first transform the feature images and calculate z_by_ry
            self.prg.compute_z_by_ry(self.queue, cur_z.shape, (1, cur_z.shape[1]),
                                     d_cur_y, d_cur_z, d_cur_r, d_transformed_y, d_temp_y, d_z_by_ry,
                                     np.int32(self.obs.shape[0]), np.int32(self.obs.shape[1]), np.int32(cur_y.shape[0]),
                                     np.int32(self.img_w))
            loglik = np.empty(shape = self.obs.shape, dtype = np.float32)
            d_loglik = cl.Buffer(self.ctx, self.mf.READ_WRITE | self.mf.COPY_HOST_PTR, hostbuf = loglik)
            self.prg.loglik(self.queue, self.obs.shape, None,
                            d_z_by_ry, self.d_obs, d_loglik,
                            np.int32(self.N), np.int32(cur_y.shape[1]), np.int32(cur_z.shape[1]),
                            np.float32(self.lam), np.float32(self.epislon))
            cl.enqueue_copy(self.queue, loglik, d_loglik)
            log_lik = loglik.sum()
            self.gpu_time += time() - a_time
            # calculate the prior probability of Y (iid Bernoulli(theta) pixels)
            num_on, num_off = (cur_y == 1).sum(), (cur_y == 0).sum()
            log_prior = num_on * np.log(self.theta) + num_off * np.log(1 - self.theta) + d_logprior_z.get().sum()
            # calculate the prior probability of R
            # we implement a slight bias towards no transformation
            log_prior += (cur_r > 0).sum() * np.log(1 - self.phi) + (cur_r == 0).sum() * np.log(self.phi)
        else:
            # calculate the prior probability of Z via the sequential IBP
            # construction: feat_count[n,k] counts owners of k up to row n
            feat_count = cur_z.cumsum(axis = 0)
            for n in xrange(cur_z.shape[0]):
                num_novel = 0
                for k in xrange(cur_z.shape[1]):
                    # m = number of earlier objects owning feature k
                    m = feat_count[n,k] - cur_z[n,k]#cur_z[:n,k].sum()
                    if m > 0:
                        # existing feature: owned with probability m / (n+1)
                        if cur_z[n,k] == 1: log_prior += np.log(m / (n+1.0))
                        else: log_prior += np.log(1 - m / (n + 1.0))
                    else:
                        # feature first introduced by object n counts as novel
                        if cur_z[n,k] == 1: num_novel += 1
                if num_novel > 0:
                    # novel feature counts follow Poisson(alpha / (n+1))
                    log_prior += poisson.logpmf(num_novel, self.alpha / (n+1.0))
            # calculate the prior probability of Y
            num_on = (cur_y == 1).sum()
            num_off = (cur_y == 0).sum()
            log_prior += num_on * np.log(self.theta) + num_off * np.log(1 - self.theta)
            # calculate the prior probability of R
            # we implement a slight bias towards no transformation
            log_prior += (cur_r > 0).sum() * np.log(1 - self.phi) + (cur_r == 0).sum() * np.log(self.phi)
            # calculate the logliklihood
            log_lik = self._loglik(cur_y = cur_y, cur_z = cur_z, cur_r = cur_r)
        return log_prior + log_lik
class GibbsPredictor(BasePredictor):
    # Scores held-out test images against posterior samples of a latent-feature
    # (IBP-style) image model: Z assigns features to images, Y gives the
    # per-feature pixel images.

    def __init__(self, cl_mode = True, cl_device = None,
                 alpha = 1.0, lam = 0.98, theta = 0.01, epislon = 0.02, init_k = 4):
        """Initialize the predictor.

        alpha: concentration parameter of the novel-feature Poisson term.
        lam: probability a pixel turns on given an owning feature has it on.
        theta: prior probability that a feature-image pixel is on.
        epislon: background noise probability (original spelling kept).
        init_k: accepted but never stored -- TODO confirm callers expect this.
        """
        BasePredictor.__init__(self, cl_mode = cl_mode, cl_device = cl_device)
        self.alpha = alpha
        self.lam = lam
        self.theta = theta
        self.epislon = epislon

    def read_test_csv(self, file_path, header=True):
        """Read the test cases and convert values to integer.
        """
        BasePredictor.read_test_csv(self, file_path, header)
        # Binary pixel observations as an int32 matrix.
        self.obs = np.array(self.obs, dtype=np.int32)
        return

    def read_samples_csv(self, var_name, file_path, header = True):
        """Read samples from a csv file.

        Each CSV row is expected as [num_columns, flattened matrix values];
        rows are reshaped to (-1, num_columns).
        """
        BasePredictor.read_samples_csv(self, var_name, file_path, header)
        new_samples = []
        for sample in self.samples[var_name]:
            if len(sample) > 1: # remove null feature samples
                sample = np.array(sample, dtype=np.int32)
                # first element is the column count; the rest is the matrix
                sample = np.reshape(sample[1:], (-1, sample[0]))
                new_samples.append(sample)
        self.samples[var_name] = new_samples

    def predict(self, thining = 0, burnin = 0, use_iter=None, output_file = None):
        """Predict the test cases

        NOTE(review): thining, burnin, use_iter and output_file are accepted
        but unused in this implementation -- confirm intended.
        Returns (max, std) over samples of the per-observation scores.
        NOTE(review): despite the name, ``logprob_result`` holds plain summed
        probabilities (``prob.sum()``), not log-probabilities.
        """
        assert('y' in self.samples and 'z' in self.samples)
        assert(len(self.samples['y']) == len(self.samples['z']))
        num_sample = len(self.samples['y'])
        num_obs = len(self.obs)
        logprob_result = np.empty((num_sample, num_obs))
        for i in xrange(num_sample):
            cur_y = self.samples['y'][i]
            cur_z = self.samples['z'][i]
            # generate all possible Zs
            num_feature = cur_z.shape[1]
            all_z = []
            for n in xrange(num_feature+1):
                # all 0/1 rows with exactly n features on
                base = [1] * n + [0] * (num_feature - n)
                all_z.extend(list(set(itertools.permutations(base))))
            all_z = np.array(all_z, dtype=np.int32)
            # BEGIN p(z|z_inferred) calculation
            # the following lines of code may be a bit tricky to parse
            # first, calculate the probability of features that already exist
            # since features are additive within an image, we can just prod them
            prior_off_prob = 1.0 - cur_z.sum(axis = 0) / float(cur_z.shape[0])
            prior_prob = np.abs(all_z - prior_off_prob)
            # then, locate the novel features in all_z
            mask = np.ones(all_z.shape)
            mask[:,np.where(cur_z.sum(axis = 0) > 0)] = 0
            novel_all_z = all_z * mask
            # temporarily mark those cells to have probability 1
            prior_prob[novel_all_z==1] = 1
            # we can safely do row product now, still ignoring new features
            prior_prob = prior_prob.prod(axis = 1)
            # let's count the number of new features for each row
            num_novel = novel_all_z.sum(axis = 1)
            # calculate the probability
            novel_prob = poisson.pmf(num_novel, self.alpha / float(cur_z.shape[0]))
            # ignore the novel == 0 special case
            novel_prob[num_novel==0] = 1.
            # multiply it by prior prob
            prior_prob = prior_prob * novel_prob
            # END p(z|z_inferred) calculation
            # BEGIN p(x|z, y_inferred)
            # n_by_d[r, d]: number of owned features with pixel d on, for row r
            n_by_d = np.dot(all_z, cur_y)
            not_on_p = np.power(1. - self.lam, n_by_d) * (1. - self.epislon)
            for j in xrange(len(self.obs)):
                prob = np.abs(self.obs[j] - not_on_p).prod(axis=1)
                # NOTE(review): prior_prob weighting is commented out here,
                # so the score ignores p(z|z_inferred) -- confirm intended.
                prob = prob #* prior_prob
                prob = prob.sum()
                logprob_result[i,j] = prob
            # END
        return logprob_result.max(axis=0), logprob_result.std(axis=0)
|
<filename>pandas/tseries/plotting.py
"""
Adapted from scikits.timeseries by <NAME> & <NAME>
"""
#!!! TODO: Use the fact that axis can have units to simplify the process
from matplotlib import pylab
from matplotlib.axes import Subplot
from matplotlib.figure import Figure
from matplotlib.ticker import Formatter, Locator
from matplotlib.transforms import nonsingular
import numpy as np
import pandas.core.datetools as datetools
from pandas.core.datetools import Period
from pandas.core.index import PeriodIndex
from pandas.core.series import Series
import warnings
# Generic documentation ......................................................
_doc_parameters = dict(
figsize="""figsize : {None, tuple}
Size of the figure, as a tuple (width, height) in inches.
If None, defaults to rc figure.figsize.""",
dpi="""dpi : {None, int}, optional
Resolution in dots per inches.
If None, defaults to rc figure.dpi.""",
facecolor="""facecolor : {None, string}, optional
Background color.
If None, defaults to rc figure.facecolor.""",
edgecolor="""edgecolor : {None, string}, optional
Border color.
If None, defaults to rc figure.edgecolor.""",
linewidth="""linewidth : {float, None}
Width of the patch edge line.""",
frameon="""frameon : {True, False}
Whether to draw the frame around the figure.""",
subplotpars="""subplotpars : {None, var}
A :class:`SubplotParams` instance, defaults to rc""",
mandatoryplotargs="""args : var
Mandatory arguments for the creation of the subplot.
These arguments should be given as ``nb_of_rows``, ``nb_of_columns``,
``plot_number``, or as a single 3-digit number if the 3 previous numbers
are all lower than 10.""" )
#####---------------------------------------------------------------------------
#---- --- Matplotlib extensions ---
#####---------------------------------------------------------------------------
def add_generic_subplot(figure_instance, *args, **kwargs):
    """
    Generalizes the :meth:`matplotlib.Figure.add_subplot` method
    of :class:`~matplotlib.figure.Figure` to generic subplots.
    The specific Subplot object class to add is given through the keywords
    ``SubplotClass`` or ``subclass``.

    Parameters
    ----------
    figure_instance : Figure object
        Figure to which the generic subplot should be attached.
    args : {var}
        Miscellaneous arguments to the subplot.
    kwargs : {Dictionary}
        Optional keywords.
        The same keywords as ``Subplot`` are recognized, with the addition of:

        + *SubplotClass* : {string}
          Type of subplot.
        + *subclass* : {string}
          Shortcut to SubplotClass.
        + any keyword required by the ``SubplotClass`` subclass.
    """
    # NOTE(review): relies on private matplotlib internals (_make_key,
    # _axstack), which are fragile across matplotlib versions -- confirm
    # against the matplotlib release this module targets.
    key = figure_instance._make_key(*args, ** kwargs)
    #!!!: Find why, sometimes, key is not hashable (even if tuple)
    # else, there's a fix below
    try:
        key.__hash__()
    except TypeError:
        key = str(key)
    # Reuse an already-registered axes for this key, if any.
    if figure_instance._axstack.get(key):
        ax = figure_instance._axstack[key]
        figure_instance.sca(ax)
        return ax
    # "subclass" wins over "SubplotClass"; default is a plain Subplot.
    SubplotClass = kwargs.pop("SubplotClass", Subplot)
    SubplotClass = kwargs.pop("subclass", SubplotClass)
    if isinstance(args[0], Subplot):
        # A ready-made Subplot instance: it must already belong to this figure.
        a = args[0]
        assert(a.get_figure() is figure_instance)
    else:
        a = SubplotClass(figure_instance, *args, **kwargs)
    figure_instance.axes.append(a)
    figure_instance._axstack.add(key, a)
    figure_instance.sca(a)
    return a
##### -------------------------------------------------------------------------
#---- --- Locators ---
##### -------------------------------------------------------------------------
def _get_default_annual_spacing(nyears):
"""
Returns a default spacing between consecutive ticks for annual data.
"""
if nyears < 11:
(min_spacing, maj_spacing) = (1, 1)
elif nyears < 20:
(min_spacing, maj_spacing) = (1, 2)
elif nyears < 50:
(min_spacing, maj_spacing) = (1, 5)
elif nyears < 100:
(min_spacing, maj_spacing) = (5, 10)
elif nyears < 200:
(min_spacing, maj_spacing) = (5, 25)
elif nyears < 600:
(min_spacing, maj_spacing) = (10, 50)
else:
factor = nyears // 1000 + 1
(min_spacing, maj_spacing) = (factor * 20, factor * 100)
return (min_spacing, maj_spacing)
def period_break(dates, period):
    """
    Returns the indices where the given period changes.

    Parameters
    ----------
    dates : PeriodIndex
        Array of intervals to monitor.
    period : string
        Name of the period to monitor.
    """
    cur_vals = getattr(dates, period)
    prev_vals = getattr(dates - 1, period)
    # indices where the period field differs from the previous interval's
    return np.nonzero(cur_vals - prev_vals)[0]
def has_level_label(label_flags, vmin):
    """
    Return True if ``label_flags`` marks at least one visible label for
    this level.

    When the minimum view limit is not an exact integer the first tick's
    label is not drawn, so a single flag at position 0 does not count.
    """
    no_labels = label_flags.size == 0
    only_hidden_first = (label_flags.size == 1
                         and label_flags[0] == 0
                         and vmin % 1 > 0.0)
    return not (no_labels or only_hidden_first)
# Integer frequency codes. Codes come in blocks of 1000 so that
# ``get_freq_group`` can recover the family via (freq // 1000) * 1000.
FR_ANN = 1000    # annual
FR_QTR = 2000    # quarterly
FR_MTH = 3000    # monthly
FR_WK = 4000     # weekly
FR_BUS = 5000    # business (week)daily
FR_DAY = 6000    # calendar daily
FR_HR = 7000     # hourly
FR_MIN = 8000    # minutely
FR_SEC = 9000    # secondly
FR_UND = -10000  # undefined frequency
def get_freq_group(freq):
    """Return the frequency-group code (a multiple of 1000) for *freq*.

    *freq* may be an integer code or a frequency string.
    """
    code = get_freq(freq)
    return (code // 1000) * 1000
def get_freq(freq):
    """Return the integer frequency code for *freq*.

    Strings are resolved through ``datetools._get_freq_code``; integer
    codes pass through unchanged.
    """
    if not isinstance(freq, basestring):
        return freq
    base, _mult = datetools._get_freq_code(freq)
    return base
def _daily_finder(vmin, vmax, freq):
    # Tick/label finder for daily-or-finer frequencies (second, minute, hour,
    # calendar/business day, weekly, undefined). Returns a structured array
    # with fields: 'val' (ordinal), 'maj'/'min' (tick flags), 'fmt' (strftime
    # format string, empty when no label is drawn).
    periodsperday = -1
    if freq >= FR_HR:
        if freq == FR_SEC:
            periodsperday = 24 * 60 * 60
        elif freq == FR_MIN:
            periodsperday = 24 * 60
        elif freq == FR_HR:
            periodsperday = 24
        else:
            raise ValueError("unexpected frequency: %s" % freq)
        periodsperyear = 365 * periodsperday
        periodspermonth = 28 * periodsperday
    elif freq == FR_BUS:
        # 261 business days/year, 19/month (approximate)
        periodsperyear = 261
        periodspermonth = 19
    elif freq == FR_DAY:
        periodsperyear = 365
        periodspermonth = 28
    elif get_freq_group(freq) == FR_WK:
        periodsperyear = 52
        periodspermonth = 3
    elif freq == FR_UND:
        # arbitrary defaults for undefined frequencies
        periodsperyear = 100
        periodspermonth = 10
    else:
        raise ValueError("unexpected frequency")
    # save this for later usage
    vmin_orig = vmin
    (vmin, vmax) = (Period(value=int(vmin), freq=freq),
                    Period(value=int(vmax), freq=freq))
    span = vmax.ordinal - vmin.ordinal + 1
    dates_ = PeriodIndex(start=vmin, end=vmax, freq=freq)
    # Initialize the output
    info = np.zeros(span,
                    dtype=[('val', int), ('maj', bool), ('min', bool),
                           ('fmt', '|S20')])
    info['val'][:] = dates_.values
    info['fmt'][:] = ''
    # endpoints always get a major tick
    info['maj'][[0, -1]] = True
    # .. and set some shortcuts (views into ``info``)
    info_maj = info['maj']
    info_min = info['min']
    info_fmt = info['fmt']

    def first_label(label_flags):
        # Index of the first label that will actually be drawn: when the
        # view minimum is fractional the tick at position 0 is not labeled.
        if (label_flags[0] == 0) and (label_flags.size > 1) and \
                ((vmin_orig % 1) > 0.0):
            return label_flags[1]
        else:
            return label_flags[0]

    # Case 1. Less than a month
    if span <= periodspermonth:
        day_start = period_break(dates_, 'day')
        month_start = period_break(dates_, 'month')

        def _hour_finder(label_interval, force_year_start):
            # Label every ``label_interval`` hours; majors at day breaks.
            _hour = dates_.hour
            _prev_hour = (dates_ - 1).hour
            hour_start = (_hour - _prev_hour) != 0
            info_maj[day_start] = True
            info_min[hour_start & (_hour % label_interval == 0)] = True
            year_start = period_break(dates_, 'year')
            info_fmt[hour_start & (_hour % label_interval == 0)] = '%H:%M'
            info_fmt[day_start] = '%H:%M\n%d-%b'
            info_fmt[year_start] = '%H:%M\n%d-%b\n%Y'
            if force_year_start and not has_level_label(year_start, vmin_orig):
                info_fmt[first_label(day_start)] = '%H:%M\n%d-%b\n%Y'

        def _minute_finder(label_interval):
            # Label every ``label_interval`` minutes; majors at hour breaks.
            hour_start = period_break(dates_, 'hour')
            _minute = dates_.minute
            _prev_minute = (dates_ - 1).minute
            minute_start = (_minute - _prev_minute) != 0
            info_maj[hour_start] = True
            info_min[minute_start & (_minute % label_interval == 0)] = True
            year_start = period_break(dates_, 'year')
            # local rebinding; same view of info['fmt'] as the enclosing scope
            info_fmt = info['fmt']
            info_fmt[minute_start & (_minute % label_interval == 0)] = '%H:%M'
            info_fmt[day_start] = '%H:%M\n%d-%b'
            info_fmt[year_start] = '%H:%M\n%d-%b\n%Y'

        def _second_finder(label_interval):
            # Label every ``label_interval`` seconds; majors at minute breaks.
            minute_start = period_break(dates_, 'minute')
            _second = dates_.second
            _prev_second = (dates_ - 1).second
            second_start = (_second - _prev_second) != 0
            info['maj'][minute_start] = True
            info['min'][second_start & (_second % label_interval == 0)] = True
            year_start = period_break(dates_, 'year')
            info_fmt = info['fmt']
            info_fmt[second_start & (_second % label_interval == 0)] = '%H:%M:%S'
            info_fmt[day_start] = '%H:%M:%S\n%d-%b'
            info_fmt[year_start] = '%H:%M:%S\n%d-%b\n%Y'

        # Choose label density from the span, finest first.
        if span < periodsperday / 12000.0: _second_finder(1)
        elif span < periodsperday / 6000.0: _second_finder(2)
        elif span < periodsperday / 2400.0: _second_finder(5)
        elif span < periodsperday / 1200.0: _second_finder(10)
        elif span < periodsperday / 800.0: _second_finder(15)
        elif span < periodsperday / 400.0: _second_finder(30)
        elif span < periodsperday / 150.0: _minute_finder(1)
        elif span < periodsperday / 70.0: _minute_finder(2)
        elif span < periodsperday / 24.0: _minute_finder(5)
        elif span < periodsperday / 12.0: _minute_finder(15)
        elif span < periodsperday / 6.0: _minute_finder(30)
        elif span < periodsperday / 2.5: _hour_finder(1, False)
        elif span < periodsperday / 1.5: _hour_finder(2, False)
        elif span < periodsperday * 1.25: _hour_finder(3, False)
        elif span < periodsperday * 2.5: _hour_finder(6, True)
        elif span < periodsperday * 4: _hour_finder(12, True)
        else:
            # several days to a month: majors at months, minors at days
            info_maj[month_start] = True
            info_min[day_start] = True
            year_start = period_break(dates_, 'year')
            info_fmt = info['fmt']
            info_fmt[day_start] = '%d'
            info_fmt[month_start] = '%d\n%b'
            info_fmt[year_start] = '%d\n%b\n%Y'
            if not has_level_label(year_start, vmin_orig):
                if not has_level_label(month_start, vmin_orig):
                    info_fmt[first_label(day_start)] = '%d\n%b\n%Y'
                else:
                    info_fmt[first_label(month_start)] = '%d\n%b\n%Y'
    # Case 2. Less than three months
    elif span <= periodsperyear // 4:
        month_start = period_break(dates_, 'month')
        info_maj[month_start] = True
        if freq < FR_HR:
            info['min'] = True
        else:
            day_start = period_break(dates_, 'day')
            info['min'][day_start] = True
        week_start = period_break(dates_, 'week')
        year_start = period_break(dates_, 'year')
        info_fmt[week_start] = '%d'
        info_fmt[month_start] = '\n\n%b'
        info_fmt[year_start] = '\n\n%b\n%Y'
        if not has_level_label(year_start, vmin_orig):
            if not has_level_label(month_start, vmin_orig):
                info_fmt[first_label(week_start)] = '\n\n%b\n%Y'
            else:
                info_fmt[first_label(month_start)] = '\n\n%b\n%Y'
    # Case 3. Less than 14 months ...............
    elif span <= 1.15 * periodsperyear:
        year_start = period_break(dates_, 'year')
        month_start = period_break(dates_, 'month')
        week_start = period_break(dates_, 'week')
        info_maj[month_start] = True
        info_min[week_start] = True
        # year/month breaks are majors, so clear their minor flags
        info_min[year_start] = False
        info_min[month_start] = False
        info_fmt[month_start] = '%b'
        info_fmt[year_start] = '%b\n%Y'
        if not has_level_label(year_start, vmin_orig):
            info_fmt[first_label(month_start)] = '%b\n%Y'
    # Case 4. Less than 2.5 years ...............
    elif span <= 2.5 * periodsperyear:
        year_start = period_break(dates_, 'year')
        quarter_start = period_break(dates_, 'quarter')
        month_start = period_break(dates_, 'month')
        info_maj[quarter_start] = True
        info_min[month_start] = True
        info_fmt[quarter_start] = '%b'
        info_fmt[year_start] = '%b\n%Y'
    # Case 4. Less than 4 years .................
    elif span <= 4 * periodsperyear:
        year_start = period_break(dates_, 'year')
        month_start = period_break(dates_, 'month')
        info_maj[year_start] = True
        info_min[month_start] = True
        info_min[year_start] = False
        # label only January and July month breaks
        month_break = dates_[month_start].month
        jan_or_jul = month_start[(month_break == 1) | (month_break == 7)]
        info_fmt[jan_or_jul] = '%b'
        info_fmt[year_start] = '%b\n%Y'
    # Case 5. Less than 11 years ................
    elif span <= 11 * periodsperyear:
        year_start = period_break(dates_, 'year')
        quarter_start = period_break(dates_, 'quarter')
        info_maj[year_start] = True
        info_min[quarter_start] = True
        info_min[year_start] = False
        info_fmt[year_start] = '%Y'
    # Case 6. More than 12 years ................
    else:
        year_start = period_break(dates_, 'year')
        # NOTE(review): '.years' (plural) -- PeriodIndex commonly exposes
        # '.year'; confirm this attribute exists in the targeted pandas.
        year_break = dates_[year_start].years
        nyears = span / periodsperyear
        (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
        major_idx = year_start[(year_break % maj_anndef == 0)]
        info_maj[major_idx] = True
        minor_idx = year_start[(year_break % min_anndef == 0)]
        info_min[minor_idx] = True
        info_fmt[major_idx] = '%Y'
    #............................................
    return info
def _monthly_finder(vmin, vmax, freq):
    # Tick/label finder for monthly data. Ordinals are months; a value with
    # ``ordinal % 12 == 1`` marks the start of a year. Returns the same
    # structured array layout as _daily_finder.
    if isinstance(freq, basestring):
        freq = get_freq(freq)
    if freq != FR_MTH:
        raise ValueError("Unexpected frequency")
    periodsperyear = 12
    vmin_orig = vmin
    (vmin, vmax) = (int(vmin), int(vmax))
    span = vmax - vmin + 1
    #..............
    # Initialize the output
    info = np.zeros(span,
                    dtype=[('val', int), ('maj', bool), ('min', bool),
                           ('fmt', '|S8')])
    info['val'] = np.arange(vmin, vmax + 1)
    dates_ = info['val']
    info['fmt'] = ''
    year_start = (dates_ % 12 == 1).nonzero()[0]
    info_maj = info['maj']
    info_fmt = info['fmt']
    #..............
    # Less than ~14 months: label every month
    if span <= 1.15 * periodsperyear:
        info_maj[year_start] = True
        info['min'] = True
        info_fmt[:] = '%b'
        info_fmt[year_start] = '%b\n%Y'
        if not has_level_label(year_start, vmin_orig):
            # no year break visible: force a year label near the start
            if dates_.size > 1:
                idx = 1
            else:
                idx = 0
            info_fmt[idx] = '%b\n%Y'
    #..............
    elif span <= 2.5 * periodsperyear:
        # NOTE(review): missing '[0]' after .nonzero() here (unlike
        # year_start above); the tuple still works for fancy indexing.
        quarter_start = (dates_ % 3 == 1).nonzero()
        info_maj[year_start] = True
        # TODO: Check the following : is it really info['fmt'] ?
        # NOTE(review): writing True into the string field looks like a typo
        # for info['min']; it is overwritten by the '%b' assignment below.
        info['fmt'][quarter_start] = True
        info['min'] = True
        info_fmt[quarter_start] = '%b'
        info_fmt[year_start] = '%b\n%Y'
    #..............
    elif span <= 4 * periodsperyear:
        info_maj[year_start] = True
        info['min'] = True
        # label only January and July
        jan_or_jul = (dates_ % 12 == 1) | (dates_ % 12 == 7)
        info_fmt[jan_or_jul] = '%b'
        info_fmt[year_start] = '%b\n%Y'
    #..............
    elif span <= 11 * periodsperyear:
        quarter_start = (dates_ % 3 == 1).nonzero()
        info_maj[year_start] = True
        info['min'][quarter_start] = True
        info_fmt[year_start] = '%Y'
    #..................
    else:
        # Very long spans: thin the year labels out
        nyears = span / periodsperyear
        (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
        years = dates_[year_start] // 12 + 1
        major_idx = year_start[(years % maj_anndef == 0)]
        info_maj[major_idx] = True
        info['min'][year_start[(years % min_anndef == 0)]] = True
        info_fmt[major_idx] = '%Y'
    #..............
    return info
def _quarterly_finder(vmin, vmax, freq):
    # Tick/label finder for quarterly data. Ordinals are quarters; a value
    # with ``ordinal % 4 == 1`` marks the start of a year. '%q'/'%F' are
    # Period strftime directives for quarter number and fiscal year.
    if isinstance(freq, basestring):
        freq = get_freq(freq)
    if get_freq_group(freq) != FR_QTR:
        raise ValueError("Unexpected frequency")
    periodsperyear = 4
    vmin_orig = vmin
    (vmin, vmax) = (int(vmin), int(vmax))
    span = vmax - vmin + 1
    #............................................
    info = np.zeros(span,
                    dtype=[('val', int), ('maj', bool), ('min', bool),
                           ('fmt', '|S8')])
    info['val'] = np.arange(vmin, vmax + 1)
    info['fmt'] = ''
    dates_ = info['val']
    info_maj = info['maj']
    info_fmt = info['fmt']
    year_start = (dates_ % 4 == 1).nonzero()[0]
    #..............
    # Less than ~3.5 years: label every quarter
    if span <= 3.5 * periodsperyear:
        info_maj[year_start] = True
        info['min'] = True
        info_fmt[:] = 'Q%q'
        info_fmt[year_start] = 'Q%q\n%F'
        if not has_level_label(year_start, vmin_orig):
            # no year break visible: force a year label near the start
            if dates_.size > 1:
                idx = 1
            else:
                idx = 0
            info_fmt[idx] = 'Q%q\n%F'
    #..............
    elif span <= 11 * periodsperyear:
        info_maj[year_start] = True
        info['min'] = True
        info_fmt[year_start] = '%F'
    #..............
    else:
        # Very long spans: thin the year labels out
        years = dates_[year_start] // 4 + 1
        nyears = span / periodsperyear
        (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
        major_idx = year_start[(years % maj_anndef == 0)]
        info_maj[major_idx] = True
        info['min'][year_start[(years % min_anndef == 0)]] = True
        info_fmt[major_idx] = '%F'
    #..............
    return info
def _annual_finder(vmin, vmax, freq):
    """Tick/label finder for annual data.

    Returns a structured array with fields 'val' (year ordinal),
    'maj'/'min' (tick flags) and 'fmt' (strftime format, '' when no
    label is drawn).
    """
    if isinstance(freq, basestring):
        freq = get_freq(freq)
    if get_freq_group(freq) != FR_ANN:
        raise ValueError("Unexpected frequency")
    vmin, vmax = int(vmin), int(vmax + 1)
    span = vmax - vmin + 1
    info = np.zeros(span,
                    dtype=[('val', int), ('maj', bool), ('min', bool),
                           ('fmt', '|S8')])
    years = np.arange(vmin, vmax + 1)
    info['val'] = years
    info['fmt'] = ''
    # Thin majors/minors out according to the span length.
    minor_spacing, major_spacing = _get_default_annual_spacing(span)
    is_major = years % major_spacing == 0
    info['maj'][is_major] = True
    info['min'][years % minor_spacing == 0] = True
    info['fmt'][is_major] = '%Y'
    return info
def get_finder(freq):
    """Return the tick-finder function matching *freq*.

    Raises NotImplementedError for frequencies with no dedicated finder.
    """
    if isinstance(freq, basestring):
        freq = get_freq(freq)
    fgroup = get_freq_group(freq)
    if fgroup == FR_ANN:
        return _annual_finder
    if fgroup == FR_QTR:
        return _quarterly_finder
    if freq == FR_MTH:
        return _monthly_finder
    # anything daily or finer (incl. business days, weeks, undefined)
    if freq >= FR_BUS or freq == FR_UND or fgroup == FR_WK:
        return _daily_finder
    raise NotImplementedError("Unsupported frequency: %s" % (freq,))
class TimeSeries_DateLocator(Locator):
    """
    Locates the ticks along an axis controlled by a :class:`Series`.

    Parameters
    ----------
    freq : {var}
        Valid frequency specifier.
    minor_locator : {False, True}, optional
        Whether the locator is for minor ticks (True) or not.
    dynamic_mode : {True, False}, optional
        Whether the locator should work in dynamic mode.
    base : {int}, optional
    quarter : {int}, optional
    month : {int}, optional
    day : {int}, optional
    """

    def __init__(self, freq, minor_locator=False, dynamic_mode=True,
                 base=1, quarter=1, month=1, day=1, plot_obj=None):
        if isinstance(freq, basestring):
            freq = get_freq(freq)
        self.freq = freq
        self.base = base
        self.quarter = quarter
        self.month = month
        self.day = day
        self.isminor = minor_locator
        self.isdynamic = dynamic_mode
        self.offset = 0
        # the owning TimeSeriesPlot; caches tick info between calls
        self.plot_obj = plot_obj
        self.finder = get_finder(freq)

    def asminor(self):
        "Returns the locator set to minor mode."
        self.isminor = True
        return self

    def asmajor(self):
        "Returns the locator set to major mode."
        self.isminor = False
        return self

    def _get_default_locs(self, vmin, vmax):
        "Returns the default locations of ticks."
        plot_obj = self.plot_obj
        if plot_obj.date_axis_info is None:
            plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq)
        tick_info = plot_obj.date_axis_info
        field = 'min' if self.isminor else 'maj'
        return np.compress(tick_info[field], tick_info['val'])

    def __call__(self):
        'Return the locations of the ticks.'
        view = tuple(self.axis.get_view_interval())
        if view != self.plot_obj.view_interval:
            # the view changed: the cached tick info is stale
            self.plot_obj.date_axis_info = None
        self.plot_obj.view_interval = view
        vmin, vmax = min(view), max(view)
        if self.isdynamic:
            return self._get_default_locs(vmin, vmax)
        # static mode: ticks at fixed multiples of self.base
        base = self.base
        start = (vmin // base + 1) * base
        return range(start, vmax + 1, base)

    def autoscale(self):
        """
        Sets the view limits to the nearest multiples of base that contain the
        data.
        """
        # requires matplotlib >= 0.98.0
        (dmin, dmax) = self.axis.get_data_interval()
        locs = self._get_default_locs(dmin, dmax)
        (vmin, vmax) = locs[[0, -1]]
        if vmin == vmax:
            vmin -= 1
            vmax += 1
        return nonsingular(vmin, vmax)
#####-------------------------------------------------------------------------
#---- --- Formatter ---
#####-------------------------------------------------------------------------
class TimeSeries_DateFormatter(Formatter):
    """
    Formats the ticks along an axis controlled by a :class:`PeriodIndex`.

    Parameters
    ----------
    freq : {int, string}
        Valid frequency specifier.
    minor_locator : {False, True}
        Whether the current formatter should apply to minor ticks (True) or
        major ticks (False).
    dynamic_mode : {True, False}
        Whether the formatter works in dynamic mode or not.
    """

    def __init__(self, freq, minor_locator=False, dynamic_mode=True,
                 plot_obj=None):
        if isinstance(freq, basestring):
            freq = get_freq(freq)
        self.format = None
        self.freq = freq
        self.locs = []
        # maps tick location -> strftime format; built lazily by set_locs
        self.formatdict = None
        self.isminor = minor_locator
        self.isdynamic = dynamic_mode
        self.offset = 0
        self.plot_obj = plot_obj
        self.finder = get_finder(freq)

    def asminor(self):
        "Returns the formatter set to minor mode."
        self.isminor = True
        return self

    def asmajor(self):
        "Returns the formatter set to major mode."
        self.isminor = False
        return self

    def _set_default_format(self, vmin, vmax):
        "Returns the default ticks spacing."
        plot_obj = self.plot_obj
        if plot_obj.date_axis_info is None:
            plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq)
        info = plot_obj.date_axis_info
        if self.isminor:
            # minor labels only where no major label already exists
            selected = np.compress(info['min'] & np.logical_not(info['maj']),
                                   info)
        else:
            selected = np.compress(info['maj'], info)
        self.formatdict = dict((entry['val'], entry['fmt'])
                               for entry in selected)
        return self.formatdict

    def set_locs(self, locs):
        'Sets the locations of the ticks'
        # the locs themselves are ignored when computing formats -- the
        # axis view interval drives everything (matplotlib requires the arg)
        self.locs = locs
        view = tuple(self.axis.get_view_interval())
        if view != self.plot_obj.view_interval:
            self.plot_obj.date_axis_info = None
        self.plot_obj.view_interval = view
        vmin, vmax = view
        if vmax < vmin:
            vmin, vmax = vmax, vmin
        self._set_default_format(vmin, vmax)

    def __call__(self, x, pos=0):
        if self.formatdict is None:
            return ''
        # pop: each location is formatted at most once per set_locs pass
        fmt = self.formatdict.pop(x, '')
        return Period(int(x), self.freq).strftime(fmt)
#####--------------------------------------------------------------------------
#---- --- TimeSeries plots ---
#####--------------------------------------------------------------------------
class TimeSeriesPlot(Subplot, object):
    """
    Based on : :class:`~matplotlib.axes.SubPlot`
    Defines a subclass of :class:`matplotlib.axes.Subplot` to plot time series.

    A :class:`~Series` is associated with the plot. This time series is
    usually specified at the creation of the plot, through the optional
    parameter ``series``. If no series is given at the creation, the first
    time series being plotted will be used as associated series.

    The associated series is stored in the :attr:`~TimeSeriesPlot.series`
    attribute. It gives its frequency to the plot. This frequency can be
    accessed through the attribute :attr:`freq`. All the other series that
    will be plotted will be first converted to the :attr:`freq` frequency,
    using their :meth:`~asfreq` method.

    The same parameters used for the instantiation of a standard
    :class:`matplotlib.axes.Subplot` are recognized.

    Parameters
    ----------
    series : {None, Series}, optional
        The time series allocated to the plot.

    Attributes
    ----------
    freq : int
        Frequency of the plot.
    xdata : PeriodIndex
        The array of dates corresponding to the x axis.
    legendsymbols : list
    legendlabels : list
        List of the labels associated with each plot.
        The first label corresponds to the first plot, the second label to the
        second plot, and so forth.

    Warnings
    --------
    Because the series to plot are first converted to the plot frequency, it
    is recommended when plotting several series to associate the plot with
    the series with the highest frequency, in order to keep a good level of
    detail.
    """

    def __init__(self, fig=None, *args, **kwargs):
        """
        Creates the subplot, optionally binding it to a time series given
        through the ``series`` keyword (or inherited from *fig*).
        """
        # Retrieve the series ...................
        _series = kwargs.pop('series', getattr(fig, 'series', None))
        Subplot.__init__(self, fig, *args, **kwargs)
        # Process options .......................
        self.set_series(series=_series)
        # NOTE(review): '_austoscale' looks like a typo for '_autoscale'; it
        # is only written here (never read in this module), so the name is
        # kept for compatibility -- confirm before renaming.
        self._austoscale = False
        # Get the data to plot
        self.legendsymbols = []
        self.legendlabels = []
        # keep track of axis format and tick info
        self.date_axis_info = None
        # used to keep track of current view interval to determine if we need
        # to reset date_axis_info
        self.view_interval = None

    def set_series(self, series=None):
        """
        Sets the time series associated with the plot. If ``series`` is a
        valid :class:`~Series` object, the :attr:`xdata` attribute is updated
        to the index of ``series``.
        """
        if series is not None:
            self._series = series.values
            self.xdata = series.index
        else:
            self._series = None
            self.xdata = None

    def get_series(self):
        """
        Returns the data part of the time series associated with the plot,
        as a (subclass of) :class:`ndarray`.
        """
        return self._series

    series = property(fget=get_series, fset=set_series,
                      doc="Underlying time series.")

    def set_ydata(self, series=None):
        """Deprecated alias for :meth:`set_series`."""
        errmsg = ("The use of 'set_ydata' is deprecated. "
                  "Please use 'set_series' instead")
        # BUG FIX: the original called warnings.DepreciationWarning(errmsg),
        # which raises AttributeError ('DepreciationWarning' is misspelled
        # and is not an attribute of the warnings module) instead of
        # actually emitting a warning.
        warnings.warn(errmsg, DeprecationWarning)
        return self.set_series(series)

    def get_ydata(self):
        """Deprecated alias for :meth:`get_series`."""
        errmsg = ("The use of 'get_ydata' is deprecated. "
                  "Please use 'get_series' instead")
        # BUG FIX: same as set_ydata above.
        warnings.warn(errmsg, DeprecationWarning)
        return self.get_series()

    ydata = property(fget=get_ydata, fset=set_ydata,
                     doc="Underlying time series.")

    def get_freq(self):
        """
        Returns the underlying frequency of the plot
        """
        return getattr(self.xdata, 'freq', None)

    freq = property(fget=get_freq, doc="Underlying frequency.")

    #......................................................
    def _check_plot_params(self, *args):
        """
        Defines the plot coordinates (and basic plotting arguments).

        Normalizes ``args`` into ``[x, y(, fmt), ...]`` triples/pairs,
        filling in :attr:`xdata`/:attr:`series` where arguments are missing
        and forcing PeriodIndex arguments to the plot frequency.

        Raises
        ------
        ValueError
            If date information is needed but none is available.
        """
        remaining = list(args)
        noinfo_msg = "No date information available!"
        # No args ? Use defaults, if any
        if len(args) == 0:
            if self.xdata is None:
                raise ValueError(noinfo_msg)
            return (self.xdata, self.series)
        output = []
        while len(remaining) > 0:
            a = remaining.pop(0)
            # The argument is a format: use default dates/
            if isinstance(a, str):
                if self.xdata is None:
                    raise ValueError(noinfo_msg)
                else:
                    output.extend([self.xdata, self.series, a])
            # The argument is a Series: use its dates for x
            elif isinstance(a, Series):
                (x, y) = (a.index, a.values)
                if len(remaining) > 0 and isinstance(remaining[0], str):
                    b = remaining.pop(0)
                    output.extend([x, y, b])
                else:
                    output.extend([x, y])
            # The argument is a PeriodIndex............
            elif isinstance(a, PeriodIndex):
                # Force to current freq
                if self.freq is not None:
                    if a.freq != self.freq:
                        a = a.asfreq(self.freq)
                # There's an argument after
                if len(remaining) > 0:
                    #...and it's a format string
                    if isinstance(remaining[0], str):
                        b = remaining.pop(0)
                        if self.series is None:
                            raise ValueError(noinfo_msg)
                        else:
                            output.extend([a, self.series, b])
                    #... and it's another date: use the default
                    elif isinstance(remaining[0], PeriodIndex):
                        if self.series is None:
                            raise ValueError(noinfo_msg)
                        else:
                            output.extend([a, self.series])
                    #... and it must be some data
                    else:
                        b = remaining.pop(0)
                        if len(remaining) > 0:
                            if isinstance(remaining[0], str):
                                c = remaining.pop(0)
                                output.extend([a, b, c])
                            else:
                                output.extend([a, b])
                        else:
                            # NOTE(review): when (a, b) are the last two
                            # arguments and self.series is set, (a, b) are
                            # silently dropped here -- looks like a missing
                            # output.extend([a, b]); behavior kept as-is.
                            if self.series is None:
                                raise ValueError(noinfo_msg)
                # NOTE(review): a PeriodIndex given as the *last* argument
                # is also silently dropped (no output.extend on that path).
            # Otherwise..............................
            elif len(remaining) > 0 and isinstance(remaining[0], str):
                b = remaining.pop(0)
                if self.xdata is None:
                    raise ValueError(noinfo_msg)
                else:
                    output.extend([self.xdata, a, b])
            elif self.xdata is None:
                raise ValueError(noinfo_msg)
            else:
                output.extend([self.xdata, a])
        # Reinitialize the plot if needed ...........
        if self.xdata is None:
            self.xdata = output[0]
        # Force the xdata to the current frequency
        elif output[0].freq != self.freq:
            output = list(output)
            output[0] = output[0].asfreq(self.freq)
        return output

    #......................................................
    def tsplot(self, *args, **kwargs):
        """
        Plots the data parsed in argument to the current axes. This command
        accepts the same optional keywords as :func:`matplotlib.pyplot.plot`.

        The argument ``args`` is a variable length argument, allowing for
        multiple data to be plotted at once. Acceptable combinations are:

        No arguments or a format string: The time series associated with the
        subplot is plotted with the given format. If no format string is
        given, the default format is used instead. For example, to plot the
        underlying time series with the default format, use:

        >>> tsplot()

        To plot the underlying time series with a red solid line, use the
        command:

        >>> tsplot('r-')

        a :class:`~Series` object or one of its subclass with or without a
        format string: The given time series is plotted with the given format.
        If no format string is given, the default format is used instead.

        an array or sequence, with or without a format string: The data is
        plotted with the given format using the :attr:`~TimeSeriesPlot.xdata`
        attribute of the plot as abscissae.

        two arrays or sequences, with or without a format string: The data are
        plotted with the given format, using the first array as abscissae and
        the second as ordinates.

        Parameters
        ----------
        args : var
            Sequence of arguments, as described previously.
        kwargs : var
            Optional parameters.
            The same parameters are accepted as for
            :meth:`matplotlib.axes.Subplot.plot`.
        """
        args = self._check_plot_params(*args)
        self.legendlabels.append(kwargs.get('label', None))
        plotted = Subplot.plot(self, *args, **kwargs)
        self.format_dateaxis()

        # when adding a right axis (using add_yaxis), for some reason the
        # x axis limits don't get properly set. This gets around the problem
        xlim = self.get_xlim()
        if xlim[0] == 0.0 and xlim[1] == 1.0:
            # if xlim still at default values, autoscale the axis
            self.autoscale_view()
        self.reset_datelimits()
        return plotted

    #......................................................
    def format_dateaxis(self):
        """
        Pretty-formats the date axis (x-axis).

        Major and minor ticks are automatically set for the frequency of the
        current underlying series. As the dynamic mode is activated by
        default, changing the limits of the x axis will intelligently change
        the positions of the ticks.
        """
        # Get the locator class .................
        majlocator = TimeSeries_DateLocator(self.freq, dynamic_mode=True,
                                            minor_locator=False, plot_obj=self)
        minlocator = TimeSeries_DateLocator(self.freq, dynamic_mode=True,
                                            minor_locator=True, plot_obj=self)
        self.xaxis.set_major_locator(majlocator)
        self.xaxis.set_minor_locator(minlocator)
        # Get the formatter .....................
        majformatter = TimeSeries_DateFormatter(self.freq, dynamic_mode=True,
                                                minor_locator=False,
                                                plot_obj=self)
        minformatter = TimeSeries_DateFormatter(self.freq, dynamic_mode=True,
                                                minor_locator=True,
                                                plot_obj=self)
        self.xaxis.set_major_formatter(majformatter)
        self.xaxis.set_minor_formatter(minformatter)
        pylab.draw_if_interactive()

    #......................................................
    def set_dlim(self, start_date=None, end_date=None):
        """
        Sets the date limits of the plot to ``start_date`` and ``end_date``.
        The dates can be given as :class:`~Period` objects, strings or
        integers.

        Parameters
        ----------
        start_date : {var}
            Starting date of the plot. If None, the current left limit
            (earliest date) is used.
        end_date : {var}
            Ending date of the plot. If None, the current right limit (latest
            date) is used.
        """
        freq = self.freq
        if freq is None:
            raise ValueError("Undefined frequency! Date limits can't be set!")

        # TODO : Shouldn't we make get_datevalue a more generic function ?
        def get_datevalue(date, freq):
            # Convert a Period/string/number date spec to an axis value.
            if isinstance(date, Period):
                return date.asfreq(freq).value
            elif isinstance(date, str):
                return Period(date, freq).value
            elif isinstance(date, (int, float)) or \
                    (isinstance(date, np.ndarray) and (date.size == 1)):
                return date
            elif date is None:
                return None
            raise ValueError("Unrecognizable date '%s'" % date)

        # Fix left limit ..............
        xleft = get_datevalue(start_date, freq)
        # Fix right limit .......
        xright = get_datevalue(end_date, freq)
        self.set_xlim(xleft, xright)
        return (xleft, xright)

    def reset_datelimits(self):
        """
        Reset the date range of the x axis to the date range of the underlying
        time series.
        """
        return self.set_xlim(self.xdata[[0, -1]])

    def get_dlim(self):
        """
        Returns the limits of the x axis as a :class:`~PeriodIndex`.
        """
        xlims = self.get_xlim()
        return PeriodIndex(xlims, freq=self.freq)
TSPlot = TimeSeriesPlot
def add_yaxis(fsp=None, position='right', yscale=None, basey=10, subsy=None):
    """
    Adds a second y-axis to a :class:`TimeSeriesPlot`.
    This function can also be used as a method.

    Parameters
    ----------
    fsp : {None, TimeSeriesPlot}
        Subplot to which the secondary y-axis is added.
        If None, the current subplot is selected: in that case, it should be
        a valid :class:`TimeSeriesPlot`.
        When used as a :class:`TimeSeriesPlot` method, this parameter points
        automatically to the calling subplot.
    position : {string}
        Position of the new axis, as either ``'left'`` or ``'right'``.
    yscale : {string}
        Scale of the new axis, as either ``'log'``, ``'linear'`` or ``None``.
        If None, uses the same scale as the first y axis.
    basey : {integer}
        Base of the logarithm for the new axis (if needed).
    subsy : {sequence}
        Sequence of the location of the minor ticks;
        None defaults to autosubs, which depend on the number of decades in
        the plot.
        Eg for base 10, ``subsy=(1,2,5)`` will put minor ticks on 1, 2, 5, 11,
        12,15, 21, ....
        To turn off minor ticking, set ``subsy=[]``.

    Returns
    -------
    The newly created frameless subplot, sharing its x axis with ``fsp``.

    Raises
    ------
    TypeError
        If the selected subplot is not a valid :class:`TimeSeriesPlot` object.
    """
    if fsp is None:
        fsp = pylab.gca()
    if not isinstance(fsp, TimeSeriesPlot):
        raise TypeError("The current plot is not a TimeSeriesPlot")
    fig = fsp.figure
    # NOTE(review): `axisini` is captured but never used afterwards —
    # presumably left over from debugging; confirm before removing.
    axisini = fsp.axis()
    # `_rows`, `_cols` and `_num` are private Subplot attributes; the twin
    # subplot is created in the next grid slot, sharing the x axis.
    fsp_alt_args = (fsp._rows, fsp._cols, fsp._num + 1)
    fsp_alt = fig.add_tsplot(frameon=False, position=fsp.get_position(),
                             sharex=fsp, *fsp_alt_args)
    # Set position ....................
    if position.lower() == 'right':
        (inipos, newpos) = ('left', 'right')
    else:
        (inipos, newpos) = ('right', 'left')
    # Force scales tics to one side ...
    fsp.yaxis.set_ticks_position(inipos)
    fsp.yaxis.set_label_position(inipos)
    # Force 2nd ticks to the other side..
    fsp_alt.yaxis.set_ticks_position(newpos)
    fsp_alt.yaxis.set_label_position(newpos)
    # Force period axis scale..........
    if yscale is None:
        yscale = fsp.get_yscale()
        try:
            # Reuse the base of the existing log locator, if there is one.
            basey = fsp.yaxis.get_major_locator()._base
        except AttributeError:
            basey = 10.
    fsp_alt.set_yscale(yscale, basey=basey, subsy=subsy)
    # Refresh immediately when running interactively.
    pylab.draw_if_interactive()
    return fsp_alt
TimeSeriesPlot.add_yaxis = add_yaxis
#####--------------------------------------------------------------------------
#---- --- TimeSeries Figures ---
#####--------------------------------------------------------------------------
class TimeSeriesFigure(Figure):
    """
    Based on :class:`matplotlib.figure.Figure`

    Create a new :class:`~matplotlib.figure.Figure` object.
    All the subplots share the same time series.

    The same parameters used for the creation of a standard
    :class:`~matplotlib.figure.Figure` are accepted.

    Parameters
    ----------
    series : {None, TimeSeries}, optional
        Underlying time series.
        All the subplots of the figure will share the same series.
    figsize : {None, tuple}
        Size of the figure, as a tuple (width, height) in inches.
        If None, defaults to rc figure.figsize.
    dpi : {None, int}, optional
        Resolution in dots per inches.
        If None, defaults to rc figure.dpi
    facecolor : {None, string}, optional
        Background color.
        If None, defaults to rc figure.facecolor.
    edgecolor : {None, string}, optional
        Border color.
        If None, defaults to rc figure.edgecolor.
    linewidth : {float, None}
        Width of the patch edge line.
    frameon : {True, False}
        Whether to draw the frame around the figure.
    """
    def __init__(self, **kwargs):
        self._series = series = kwargs.pop('series', None)
        # FIX: pop our custom 'fspnum' keyword *before* delegating to
        # Figure.__init__ — matplotlib's Figure does not accept arbitrary
        # keyword arguments, so leaving it in kwargs raised a TypeError
        # whenever fspnum was supplied.
        fspnum = kwargs.pop('fspnum', None)
        Figure.__init__(self, **kwargs)
        if fspnum is not None:
            # Immediately create the requested subplot(s).
            self.add_tsplot(fspnum, series=series)
    #.........
    def add_tsplot(self, *args, **kwargs):
        """
        Adds a :class:`TimeSeriesPlot` subplot to the current figure.

        Parameters
        ----------
        args : var
            Mandatory arguments for the creation of the subplot.
            These arguments should be given as ``nb_of_rows``,
            ``nb_of_columns``, ``plot_number``, or as a single 3-digit number
            if the 3 previous numbers are all lower than 10.
        kwargs : var
            Optional arguments, as recognized by `add_subplot`.
        """
        kwargs.update(SubplotClass=TimeSeriesPlot)
        if self._series is not None:
            kwargs.update(series=self._series)
        return add_generic_subplot(self, *args, **kwargs)
    add_subplot = add_tsplot
TSFigure = TimeSeriesFigure
#................................................
def tsfigure(num=None, figsize=None, dpi=None, facecolor=None,
             edgecolor=None, frameon=True, subplotpars=None,
             FigureClass=TSFigure):
    """
    Creates a new :class:`TimeSeriesFigure` object.

    Parameters
    ----------
    num : {None, int}, optional
        Number of the figure.
        If None, a new figure is created and ``num`` is incremented.
    %(figsize)s
    %(dpi)s
    %(facecolor)s
    %(edgecolor)s
    %(frameon)s
    %(subplotpars)s
    FigureClass : FigureClass
        Class of the figure to create
    """
    # Forward every user-supplied option to pylab.figure.
    # FIX: `edgecolor` was accepted as a parameter but omitted from this
    # dict, so it was silently ignored.
    figargs = dict(num=num, figsize=figsize, dpi=dpi, facecolor=facecolor,
                   edgecolor=edgecolor, frameon=frameon,
                   FigureClass=FigureClass, subplotpars=subplotpars)
    fig = pylab.figure(**figargs)
    return fig
tsfigure.__doc__ %= _doc_parameters
def tsplot(series=None, num=None, figsize=None, dpi=None,
           facecolor=None, edgecolor=None, frameon=True, subplotpars=None,
           FigureClass=TSFigure):
    """
    Creates a new :class:`TimeSeriesFigure` object and plots a series.

    Parameters
    ----------
    series : {None, TimeSeries}, optional
        Time series to plot in the new figure.
    num : {None, int}, optional
        Number of the figure.
        If None, a new figure is created and ``num`` is incremented.
    %(figsize)s
    %(dpi)s
    %(facecolor)s
    %(edgecolor)s
    %(frameon)s
    %(subplotpars)s
    FigureClass : FigureClass
        Class of the figure to create
    """
    # Create the figure, add a single full-size TimeSeriesPlot, and plot
    # the series on it.
    fig = tsfigure(num=num, figsize=figsize, dpi=dpi,
                   facecolor=facecolor, edgecolor=edgecolor,
                   frameon=frameon, subplotpars=subplotpars,
                   FigureClass=FigureClass)
    sub = fig.add_tsplot(111)
    ret = sub.tsplot(series)
    return ret
tsplot.__doc__ %= _doc_parameters
###############################################################################
|
import rhinoscriptsyntax as rs
import math
import clr
def countDeskFunc(k):
    """Return the number of desks in the collection *k*."""
    return len(k)
def lenCurveFunc(k):
    """Key function: the length of curve *k* (used with max/sort)."""
    curve_length = rs.CurveLength(k)
    return curve_length
def filterEdges(e):
    """Predicate: keep only edges whose curve length exceeds one unit."""
    return 1 < rs.CurveLength(e)
def scaleLine(line, scale):
    """
    Scale *line* in place about its midpoint, in the XY plane only
    (Z scale factor is 0, so the curve stays at its original height).

    Returns the identifier of the scaled object.
    """
    midpoint = rs.CurveMidPoint(line)
    # FIX: removed an unused `rs.PlaneFromNormal(...)` construction — its
    # result was never referenced.
    return rs.ScaleObject(line, midpoint, [scale, scale, 0], copy=False)
def placeRowOfDesks(s, edges, offsetEdge, offsetDir, desks, deskPoints, deskDirections, deskWidth, deskLength, rowOffset, columns):
    """
    Place a double row of desk rectangles along *offsetEdge*.

    For each division point of the edge, two candidate desks are created
    (one on each side of the edge).  A candidate is kept only if it does
    not intersect any column or boundary edge AND it intersects the
    surface outline *s*; kept desks (and their anchor points/directions)
    are appended to the mutable lists passed in by the caller.

    Returns the number of desks actually placed.

    NOTE(review): `rowOffset` is accepted but never used in this body.
    """
    dCount = 0
    # Number of desks that fit along the edge, by length.
    deskCount = rs.CurveLength(offsetEdge) / deskLength
    if deskCount >= 2:
        deskPlacements = rs.DivideCurve(offsetEdge, deskCount) #PointsAtEqualSegmentLength PointsAtEqualChordLength
        perpDir = rs.CurveTangent(offsetEdge, 0.5)
        for deskP in deskPlacements:
            # Offsets of half / one-and-a-half desk widths on either side.
            vec1 = rs.VectorScale(offsetDir, deskWidth * 0.5)
            vec2 = rs.VectorReverse(vec1)
            vec3 = rs.VectorScale(offsetDir, deskWidth * 1.5)
            vec4 = rs.VectorReverse(vec3)
            r1 = rs.MoveObject(deskP, vec1)
            r2 = rs.MoveObject(deskP, vec2)
            r3 = rs.MoveObject(deskP, vec3)
            r4 = rs.MoveObject(deskP, vec4)
            # Candidate rectangles on each side of the edge.
            desk = rs.AddRectangle(rs.PlaneFromFrame(r1, offsetDir, perpDir), deskWidth, deskLength)
            desk2 = rs.AddRectangle(rs.PlaneFromFrame(r2, offsetDir, perpDir), deskWidth, deskLength)
            intersects1 = False
            intersects2 = False
            # Reject candidates clashing with columns ...
            for col in columns:
                if rs.CurveCurveIntersection(col, desk):
                    intersects1 = True
                if rs.CurveCurveIntersection(col, desk2):
                    intersects2 = True
            # ... or with the surface boundary edges.
            for e in edges:
                if rs.CurveCurveIntersection(e, desk):
                    intersects1 = True
                if rs.CurveCurveIntersection(e, desk2):
                    intersects2 = True
            if not intersects1 and rs.CurveCurveIntersection(s, desk):
                desks.append(desk)
                dCount += 1
                deskPoints.append(r3)
                deskDirections.append(rs.VectorCreate(r1, r3))
            else:
                # Rejected: free the geometry immediately.
                desk.Dispose()
                r3.Dispose()
            if not intersects2 and rs.CurveCurveIntersection(s, desk2):
                desks.append(desk2)
                dCount += 1
                deskPoints.append(r4)
                deskDirections.append(rs.VectorCreate(r2, r4))
            else:
                desk2.Dispose()
                r4.Dispose()
            vec1.Dispose()
            vec2.Dispose()
            r1.Dispose()
            r2.Dispose()
        perpDir.Dispose()
    return dCount
# --- Grasshopper component body ---------------------------------------------
# Accumulators, one sub-list per input surface.
desks = []
deskPoints = []
deskDirections = []
deskCounts = []
# NOTE(review): `surfaces`, `allColumns`, `deskWidth`, `deskLength` and
# `rowOffset` are not defined in this file — presumably Grasshopper
# component inputs; confirm.
if surfaces and allColumns:
    for i in range(len(surfaces)):
        surface = surfaces[i]
        # figure out direction
        deskCount = 0
        desksGroup = []
        deskPointsGroup = []
        deskDirectionsGroup = []
        edges = rs.DuplicateEdgeCurves(surface, True)
        # Longest edge (longer than 1 unit) drives the row direction.
        filteredEdges = list(filter(filterEdges, edges))
        edge = max(filteredEdges, key=lenCurveFunc)
        centerPt = rs.EvaluateSurface(surface, 0.5, 0.5)
        midpoint = rs.CurveMidPoint(edge)
        # Pick the edge normal that points toward the surface center.
        offsetDir0 = rs.VectorCreate(centerPt, midpoint)
        offsetDir1 = rs.CurveCurvature(edge, 0.5)[1]
        offsetDir2 = rs.VectorReverse(offsetDir1)
        offsetDir = offsetDir1
        if rs.VectorAngle(offsetDir0, offsetDir1) > 90:
            offsetDir = offsetDir2
        j = 0
        # March row by row into the surface until the offset edge no longer
        # intersects it.
        while True:
            offset = str(j) + ": " + str((rowOffset + deskWidth) + ((rowOffset + (deskWidth*2)) * j))
            offsetEdge = rs.MoveObject(edge, offsetDir*((rowOffset + deskWidth) + ((rowOffset + (deskWidth*2)) * j)))
            offsetEdge2 = scaleLine(offsetEdge, 20.0)
            offsetEdge3 = rs.CurveSurfaceIntersection(offsetEdge2, surface)
            # NOTE(review): indices [1] and [2] assume the intersection
            # result has at least three entries — confirm.
            offsetEdge4 = rs.AddCurve([offsetEdge3[1],offsetEdge3[2]])
            if offsetEdge4:
                dCount = placeRowOfDesks(surface, edges, offsetEdge4, offsetDir, desksGroup, deskPointsGroup, deskDirectionsGroup, deskWidth, deskLength, rowOffset, allColumns[i])
                deskCount += dCount
            offsetEdge.Dispose()
            offsetEdge2.Dispose()
            for e in offsetEdge3:
                e.Dispose()
            if not offsetEdge3:
                break
            j += 1
        centerPt.Dispose()
        midpoint.Dispose()
        offsetDir.Dispose()
        offsetDir0.Dispose()
        offsetDir1.Dispose()
        offsetDir2.Dispose()
        #pc.Dispose()
        deskCounts.append(deskCount)
        desks.append(desksGroup)
        deskPoints.append(deskPointsGroup)
        deskDirections.append(deskDirectionsGroup)
# Component outputs (each wrapped in a single-element list).
a = [desks]
b = [deskPoints]
c = [deskDirections]
d = [deskCounts]
|
<gh_stars>1-10
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import json
import random
import tensorflow as tf
import model as bm
import util
def get_training_examples(config, test_fold=-1):
    """
    Load training examples (one JSON document per line).

    Args:
        config: dict with keys "train_path", "second_train_path" and
            "cross_validation_fold".
        test_fold: index of the held-out cross-validation fold; lines whose
            index falls in that fold are skipped.  With the default of -1
            (or when cross_validation_fold <= 1) every document is kept.

    Returns:
        List of example dicts; each is tagged with 'main_train' marking
        whether it came from the primary training file.
    """
    num_fold = config['cross_validation_fold']
    train_examples = []
    cnt = 0
    # FIX: use context managers so the file handles are closed
    # deterministically instead of being left to the garbage collector.
    with open(config["train_path"]) as train_file:
        for i, line in enumerate(train_file):
            example = json.loads(line)
            example['main_train'] = True
            cnt += 1
            if num_fold <= 1 or i % num_fold != test_fold:
                train_examples.append(example)
    print("Find %d documents from %s use %d" % (cnt, config['train_path'], len(train_examples)))
    if config["second_train_path"]:
        cnt = 0
        with open(config["second_train_path"]) as second_file:
            for line in second_file:
                cnt += 1
                example = json.loads(line)
                example['main_train'] = False
                train_examples.append(example)
        print("Using %d additional documents from %s." % (cnt, config['second_train_path']))
    return train_examples
if __name__ == "__main__":
    config = util.initialize_from_env()
    num_fold = config['cross_validation_fold']
    report_frequency = config["report_frequency"]
    eval_frequency = config["eval_frequency"]
    max_step = config["max_step"]
    if num_fold > 1:
        # Cross-validation mode: train one model per held-out fold, each in
        # its own log directory under the root log dir.
        root_log_dir = config["log_dir"]
        # FIX: `xrange` only exists on Python 2; `range` is equivalent and
        # works on both, consistent with the __future__ imports above.
        for test_fold in range(num_fold):
            print("\n\nStarting %d of %d fold" % (test_fold+1,num_fold))
            tf.reset_default_graph()
            # NOTE(review): the directory name uses the 0-based fold index
            # while the banner above prints it 1-based — confirm intent.
            config["log_dir"] = util.mkdirs(os.path.join(root_log_dir, '%d_of_%d' % (test_fold, num_fold)))
            model = bm.BridgingModel(config)
            saver = tf.train.Saver(max_to_keep=1)
            log_dir = config["log_dir"]
            writer = tf.summary.FileWriter(log_dir, flush_secs=20)
            session_config = tf.ConfigProto()
            session_config.gpu_options.allow_growth = True
            session_config.allow_soft_placement = True
            with tf.Session(config=session_config) as session:
                session.run(tf.global_variables_initializer())
                accumulated_loss = 0.0
                # Resume from an existing checkpoint, if any.
                ckpt = tf.train.get_checkpoint_state(log_dir)
                if ckpt and ckpt.model_checkpoint_path:
                    print("Restoring from: {}".format(ckpt.model_checkpoint_path))
                    saver.restore(session, ckpt.model_checkpoint_path)
                initial_time = time.time()
                tf_global_step = 0
                train_examples = [model.tensorize_example(example, is_training=True) for example in
                                  get_training_examples(config, test_fold)]
                while max_step <= 0 or tf_global_step < max_step:
                    random.shuffle(train_examples)
                    for example in train_examples:
                        feed_dict = dict(zip(model.input_tensors, model.truncate_example(*example)))
                        tf_loss, tf_global_step, _ = session.run([model.loss, model.global_step, model.train_op], feed_dict=feed_dict)
                        accumulated_loss += tf_loss
                        if tf_global_step % report_frequency == 0:
                            total_time = time.time() - initial_time
                            steps_per_second = tf_global_step / total_time
                            average_loss = accumulated_loss / report_frequency
                            print("[{}] loss={:.2f}, steps/s={:.2f}".format(tf_global_step, average_loss, steps_per_second))
                            writer.add_summary(util.make_summary({"loss": average_loss}), tf_global_step)
                            accumulated_loss = 0.0
                        if tf_global_step % eval_frequency == 0:
                            # No dev evaluation in CV mode: just checkpoint.
                            saver.save(session, os.path.join(log_dir, "model"), global_step=tf_global_step)
                            util.copy_checkpoint(os.path.join(log_dir, "model-{}".format(tf_global_step)),
                                                 os.path.join(log_dir, "model.max.ckpt"))
    else:
        # Single-model mode: periodic dev-set evaluation; the checkpoint
        # with the best F1 is kept as model.max.ckpt.
        model = bm.BridgingModel(config)
        saver = tf.train.Saver(max_to_keep=1)
        train_examples = get_training_examples(config)
        log_dir = config["log_dir"]
        writer = tf.summary.FileWriter(log_dir, flush_secs=20)
        session_config = tf.ConfigProto()
        session_config.gpu_options.allow_growth = True
        session_config.allow_soft_placement = True
        max_f1 = -1
        best_step = 0
        with tf.Session(config=session_config) as session:
            session.run(tf.global_variables_initializer())
            accumulated_loss = 0.0
            ckpt = tf.train.get_checkpoint_state(log_dir)
            if ckpt and ckpt.model_checkpoint_path:
                print("Restoring from: {}".format(ckpt.model_checkpoint_path))
                saver.restore(session, ckpt.model_checkpoint_path)
            initial_time = time.time()
            tf_global_step = 0
            train_examples = [model.tensorize_example(example, is_training=True) for example in
                              get_training_examples(config)]
            while max_step <= 0 or tf_global_step < max_step:
                random.shuffle(train_examples)
                for example in train_examples:
                    feed_dict = dict(zip(model.input_tensors, model.truncate_example(*example)))
                    tf_loss, tf_global_step, _ = session.run([model.loss, model.global_step, model.train_op], feed_dict=feed_dict)
                    accumulated_loss += tf_loss
                    if tf_global_step % report_frequency == 0:
                        total_time = time.time() - initial_time
                        steps_per_second = tf_global_step / total_time
                        average_loss = accumulated_loss / report_frequency
                        print("[{}] loss={:.2f}, steps/s={:.2f}".format(tf_global_step, average_loss, steps_per_second))
                        writer.add_summary(util.make_summary({"loss": average_loss}), tf_global_step)
                        accumulated_loss = 0.0
                    if tf_global_step % eval_frequency == 0:
                        saver.save(session, os.path.join(log_dir, "model"), global_step=tf_global_step)
                        eval_summary, eval_f1 = model.evaluate(session)
                        if eval_f1 > max_f1:
                            max_f1 = eval_f1
                            best_step = tf_global_step
                            util.copy_checkpoint(os.path.join(log_dir, "model-{}".format(tf_global_step)), os.path.join(log_dir, "model.max.ckpt"))
                        writer.add_summary(eval_summary, tf_global_step)
                        writer.add_summary(util.make_summary({"max_eval_f1": max_f1}), tf_global_step)
                        print("[{}] evaL_f1={:.2f}, max_f1={:.2f} at step {}".format(tf_global_step, eval_f1, max_f1, best_step))
|
<reponame>stautonico/blackhat-simulator
__package__ = "blackhat.bin"
from os import system, remove
from secrets import token_hex
from ..helpers import Result
from ..lib.input import ArgParser
from ..lib.output import output
from ..lib.unistd import read, write
from ..lib.sys.stat import stat
from ..lib.fcntl import creat
__COMMAND__ = "nano"
__DESCRIPTION__ = ""
__DESCRIPTION_LONG__ = ""
__VERSION__ = "1.1"
def parse_args(args=[], doc=False):
    """
    Handle parsing of arguments and flags. Generates docs using help from `ArgParser`

    Args:
        args (list): argv passed to the binary
        doc (bool): If the function should generate and return manpage

    Returns:
        Processed args and a copy of the `ArgParser` object if not `doc` else a `string` containing the generated manpage
    """
    # TODO: Fix this, I copied and pasted from reboot and forgot to change it
    # NOTE(review): the flags below (--poweroff/--reboot) belong to the
    # reboot binary, not nano; main() builds its own parser, so this
    # function appears to affect only manpage generation — confirm.
    parser = ArgParser(prog=__COMMAND__, description=f"{__COMMAND__} - {__DESCRIPTION__}")
    parser.add_argument("-p", "--poweroff", action="store_true",
                        help="Power-off the machine, regardless of which one of the two commands is invoked.")
    parser.add_argument("--reboot", action="store_true",
                        help="Reboot the machine, regardless of which one of the three commands is invoked.")
    args = parser.parse_args(args)
    arg_helps_with_dups = parser._actions
    arg_helps = []
    # De-duplicate the parser actions while preserving their order.
    [arg_helps.append(x) for x in arg_helps_with_dups if x not in arg_helps]
    # The **...*/ markers are the game's own manpage markup, not typos.
    NAME = f"**NAME*/\n\t{__COMMAND__} - {__DESCRIPTION__}"
    SYNOPSIS = f"**SYNOPSIS*/\n\t{__COMMAND__} [OPTION]... "
    DESCRIPTION = f"**DESCRIPTION*/\n\t{__DESCRIPTION_LONG__}\n\n"
    for item in arg_helps:
        # Its a positional argument
        if len(item.option_strings) == 0:
            # If the argument is optional:
            if item.nargs == "?":
                SYNOPSIS += f"[{item.dest.upper()}] "
            elif item.nargs == "+":
                SYNOPSIS += f"[{item.dest.upper()}]... "
            else:
                SYNOPSIS += f"{item.dest.upper()} "
        else:
            # Boolean flag
            if item.nargs == 0:
                if len(item.option_strings) == 1:
                    DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/\t{item.help}\n\n"
                else:
                    DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/\n\t\t{item.help}\n\n"
            elif item.nargs == "+":
                DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/=[{item.dest.upper()}]...\n\t\t{item.help}\n\n"
            else:
                DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/={item.dest.upper()}\n\t\t{item.help}\n\n"
    if doc:
        return f"{NAME}\n\n{SYNOPSIS}\n\n{DESCRIPTION}\n\n"
    else:
        return args, parser
def main(args: list, pipe: bool) -> Result:
    """
    Open `source` in the host machine's real nano editor, then copy the
    edited buffer back into the in-game filesystem via write().

    # TODO: Add docstring for manpage
    """
    parser = ArgParser(prog=__COMMAND__)
    parser.add_argument("source")
    parser.add_argument("--version", action="store_true", help=f"output version information and exit")
    args = parser.parse_args(args)
    if parser.error_message:
        if not args.version:
            return output(f"{__COMMAND__}: {parser.error_message}", pipe, success=False)
    if args.version:
        return output(f"{__COMMAND__} (blackhat coreutils) {__VERSION__}", pipe)
    # If we specific -h/--help, args will be empty, so exit gracefully
    if not args:
        return output("", pipe)
    else:
        # Normalize a bare filename into an explicit relative path.
        source_path = "./" + args.source if "/" not in args.source else args.source
        find_response = stat(source_path)
        if not find_response.success:
            # Target doesn't exist: check that its parent directory does.
            find_response = stat("/".join(args.source.split("/")[:-1]))
            if not find_response.success:
                return output(f"{__COMMAND__}: No such file or directory", pipe, success=False)
            else:
                # Create the new file
                create_file_response = creat(source_path, 0o644)
                if not create_file_response.success:
                    return output(f"{__COMMAND__}: Unable to create file: {args.source}", pipe, success=False)
                else:
                    file_to_write = stat(args.source).data
        else:
            # If this exists, the file already existed, we can read its contents
            # and write it into the physical file
            # NOTE(review): `exists` is never read afterwards, and the block
            # that would pre-populate the temp file is commented out below,
            # so editing an existing file starts from an empty buffer.
            exists = True
            file_to_write = find_response.data
        if not file_to_write.st_isfile:
            return output(f"{__COMMAND__}: {args.source}: Is a directory", pipe, success=False)
        read_file_result = read(source_path)
        if not read_file_result.success:
            return output(f"{__COMMAND__}: {args.source}: Permission denied", pipe, success=False)
        # Create a temporary dir in the real /tmp to write to and read from because im not re-writing nano from scratch
        # for this dumb ass game
        temp_file = token_hex(6)
        try:
            # if exists:
            #     with open(f"/tmp/{temp_file}", "w") as f:
            #         f.write(read_file_result.data)
            # Launch the real editor on the host; blocks until it exits.
            system(f"nano /tmp/{temp_file}")
            with open(f"/tmp/{temp_file}", "r") as f:
                file_content = f.read()
            remove(f"/tmp/{temp_file}")
            # Copy the edited buffer into the in-game file.
            write_result = write(source_path, file_content)
            if not write_result.success:
                return output(f"{__COMMAND__}: {args.source}: Permission denied", pipe, success=False)
            return output("", pipe)
        except Exception as e:
            print(e)
            return output(f"{__COMMAND__}: Failed to write file!", pipe, success=False)
|
import argparse
import os
from time import time as t
import matplotlib.pyplot as plt
import numpy as np
import torch
from torchvision import transforms
from tqdm import tqdm
from sklearn.preprocessing import normalize
from scipy.stats import entropy
from bindsnet import ROOT_DIR
from bindsnet.analysis.plotting import (
plot_assignments,
plot_input,
plot_performance,
plot_spikes,
plot_voltages,
plot_weights,
)
from bindsnet.datasets import MNIST, DataLoader
from bindsnet.encoding import PoissonEncoder
from bindsnet.evaluation import all_activity, assign_labels, proportion_weighting
from bindsnet.models import DiehlAndCook2015
from bindsnet.network.monitors import Monitor
from bindsnet.utils import get_square_assignments, get_square_weights
# Command-line configuration for the Diehl & Cook 2015 MNIST experiment.
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--n_neurons", type=int, default=100)
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--n_epochs", type=int, default=1)
parser.add_argument("--n_test", type=int, default=10000)
parser.add_argument("--n_train", type=int, default=60000)
parser.add_argument("--n_workers", type=int, default=-1)
parser.add_argument("--update_steps", type=int, default=256)
parser.add_argument("--exc", type=float, default=22.5)
parser.add_argument("--inh", type=float, default=120)
parser.add_argument("--theta_plus", type=float, default=0.05)
parser.add_argument("--time", type=int, default=100)
# FIX: dt is a simulation timestep with a float default; `type=int` would
# truncate any fractional value passed on the command line.
parser.add_argument("--dt", type=float, default=1.0)
parser.add_argument("--intensity", type=float, default=128)
parser.add_argument("--progress_interval", type=int, default=10)
parser.add_argument("--train", dest="train", action="store_true")
parser.add_argument("--test", dest="train", action="store_false")
parser.add_argument("--plot", dest="plot", action="store_true")
# NOTE(review): passing --gpu *disables* the GPU (store_false with a True
# default); the flag name and action look inverted — confirm before changing.
parser.add_argument("--gpu", dest="gpu", action="store_false")
parser.set_defaults(plot=True, gpu=True)
args = parser.parse_args()

# Unpack the parsed arguments into module-level names used below.
seed = args.seed
n_neurons = args.n_neurons
batch_size = args.batch_size
n_epochs = args.n_epochs
n_test = args.n_test
n_train = args.n_train
n_workers = args.n_workers
update_steps = args.update_steps
exc = args.exc
inh = args.inh
theta_plus = args.theta_plus
time = args.time
dt = args.dt
intensity = args.intensity
progress_interval = args.progress_interval
train = args.train
plot = args.plot
gpu = args.gpu
plot = True  # NOTE(review): unconditionally overrides the --plot flag.
update_interval = update_steps * batch_size
# NOTE(review): device is hardcoded to "cpu" even when `gpu` is True, while
# the network/inputs are moved to CUDA later — confirm intended mix.
device = "cpu"
torch.manual_seed(seed)
torch.set_num_threads(os.cpu_count() - 1)
print("Running on Device = ", device)
# Determines number of workers to use
if n_workers == -1:
    n_workers = 0  # gpu * 1 * torch.cuda.device_count()
# Side length of the square grid used when plotting neuron weights.
n_sqrt = int(np.ceil(np.sqrt(n_neurons)))
start_intensity = intensity
# Build network.
network = DiehlAndCook2015(
    n_inpt=784,
    n_neurons=n_neurons,
    exc=exc,
    inh=inh,
    dt=dt,
    norm=78.4,
    nu=(1e-4, 1e-2),
    theta_plus=theta_plus,
    inpt_shape=(1, 28, 28),
)
# Directs network to GPU
if gpu:
    network.to("cuda")
# Load MNIST data (Poisson-encoded, pixel values scaled by `intensity`).
dataset = MNIST(
    PoissonEncoder(time=time, dt=dt),
    None,
    "../../data/MNIST",
    download=True,
    transform=transforms.Compose(
        [transforms.ToTensor(), transforms.Lambda(lambda x: x * intensity)]
    ),
)
# Selecting classes 1,2,3,5,6,8
# idx = (dataset.targets==1) | (dataset.targets==2) | (dataset.targets==3) | (dataset.targets==5) | (dataset.targets==6) | (dataset.targets==8)
# dataset.targets = dataset.targets[idx]
# dataset.data = dataset.data[idx]
# Neuron assignments and spike proportions.
# NOTE(review): these live on `device` ("cpu") while the network may be on
# CUDA — confirm the cross-device comparisons behave as intended.
n_classes = 10
assignments = -torch.ones(n_neurons, device=device)
proportions = torch.zeros((n_neurons, n_classes), device=device)
rates = torch.zeros((n_neurons, n_classes), device=device)
# Sequence of accuracy estimates.
accuracy = {"all": [], "proportion": []}
# Voltage recording for excitatory and inhibitory layers.
exc_voltage_monitor = Monitor(
    network.layers["Ae"], ["v"], time=int(time / dt), device=device
)
inh_voltage_monitor = Monitor(
    network.layers["Ai"], ["v"], time=int(time / dt), device=device
)
network.add_monitor(exc_voltage_monitor, name="exc_voltage")
network.add_monitor(inh_voltage_monitor, name="inh_voltage")
# Set up monitors for spikes and voltages
spikes = {}
for layer in set(network.layers):
    spikes[layer] = Monitor(
        network.layers[layer], state_vars=["s"], time=int(time / dt), device=device
    )
    network.add_monitor(spikes[layer], name="%s_spikes" % layer)
voltages = {}
for layer in set(network.layers) - {"X"}:
    voltages[layer] = Monitor(
        network.layers[layer], state_vars=["v"], time=int(time / dt), device=device
    )
    network.add_monitor(voltages[layer], name="%s_voltages" % layer)
# Plot handles, populated lazily by the plotting helpers.
inpt_ims, inpt_axes = None, None
spike_ims, spike_axes = None, None
weights_im = None
assigns_im = None
perf_ax = None
voltage_axes, voltage_ims = None, None
# Rolling buffer of excitatory spikes over the last `update_interval` samples.
spike_record = torch.zeros((update_interval, int(time / dt), n_neurons), device=device)
# Train the network.
print("\nBegin training.\n")
start = t()
model_to_save = None
running_perf = 0
confusion_matrix = np.zeros((10,10))
mae_from_uniform = []
for epoch in range(n_epochs):
    labels = []
    if epoch % progress_interval == 0:
        print("\n Progress: %d / %d (%.4f seconds)" % (epoch, n_epochs, t() - start))
        start = t()
    # Create a dataloader to iterate and batch data
    train_dataloader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=n_workers,
        pin_memory=gpu,
    )
    pbar_training = tqdm(total=n_train)
    for step, batch in enumerate(train_dataloader):
        if step > n_train:
            break
        # Get next input sample.
        inputs = {"X": batch["encoded_image"]}
        if gpu:
            inputs = {k: v.cuda() for k, v in inputs.items()}
        # Every `update_steps` batches: score the last window of spikes and
        # re-assign neuron labels.
        if step % update_steps == 0 and step > 0:
            # Convert the array of labels into a tensor
            label_tensor = torch.tensor(labels, device=device)
            # Get network predictions.
            all_activity_pred = all_activity(
                spikes=spike_record, assignments=assignments, n_labels=n_classes
            )
            proportion_pred = proportion_weighting(
                spikes=spike_record,
                assignments=assignments,
                proportions=proportions,
                n_labels=n_classes,
            )
            # Compute network accuracy according to available classification strategies.
            # new_running_perf = 100 * torch.sum(label_tensor.long() == all_activity_pred).item() / len(label_tensor)
            # if (step > 50 and new_running_perf / running_perf > 1.8):
            #     question = "before: "+str(running_perf)+"after: "+str(new_running_perf)+"step: "+str(step) + " -- would you like to save (Y/N)?"
            #     save = input(question)
            #     if save == "Y":
            #         torch.save(network, "../saved models/"+str(n_neurons)+"_"+str(step*batch_size)+"_after")
            #         quit()
            # torch.save(network, "../saved models/"+str(n_neurons)+"_before")
            # running_perf = new_running_perf
            accuracy["all"].append(
                100
                * torch.sum(label_tensor.long() == all_activity_pred).item()
                / len(label_tensor)
            )
            confusion_matrix = np.zeros((10,10))
            # Keep track of the confusion matrix
            for i,label_ in enumerate(label_tensor):
                real = label_tensor[i]
                pred = all_activity_pred[i]
                confusion_matrix[real][pred] += 1
            accuracy["proportion"].append(
                100
                * torch.sum(label_tensor.long() == proportion_pred).item()
                / len(label_tensor)
            )
            print(
                "\nAll activity accuracy: %.2f (last), %.2f (average), %.2f (best)"
                % (
                    accuracy["all"][-1],
                    np.mean(accuracy["all"]),
                    np.max(accuracy["all"]),
                )
            )
            print(
                "Proportion weighting accuracy: %.2f (last), %.2f (average), %.2f"
                " (best)\n"
                % (
                    accuracy["proportion"][-1],
                    np.mean(accuracy["proportion"]),
                    np.max(accuracy["proportion"]),
                )
            )
            # Assign labels to excitatory layer neurons.
            assignments, proportions, rates = assign_labels(
                spikes=spike_record,
                labels=label_tensor,
                n_labels=n_classes,
                rates=rates,
            )
            # Reset the label window for the next scoring interval.
            labels = []
        labels.extend(batch["label"].tolist())
        input_exc_weights = network.connections[("X", "Ae")].w
        # Getting the weights before changing them for the sake of seeing the weight changes
        pre_weights = get_square_weights(
            input_exc_weights.view(784, n_neurons), n_sqrt, 28
        )
        # Run the network on the input.
        network.run(inputs=inputs, time=time, input_time_dim=1)
        input_exc_weights = network.connections[("X", "Ae")].w
        # Getting the weights after changing them for the sake of seeing the weight changes
        post_weights = get_square_weights(
            input_exc_weights.view(784, n_neurons), n_sqrt, 28
        )
        # The change of the weights from one batch
        weight_changes = post_weights - pre_weights
        weight_change_count = np.count_nonzero(weight_changes)
        # weight_change_count = np.count_nonzero(weight_changes)
        # change_arr.append(weight_change_count)
        # Add to spikes recording (ring-buffer write into spike_record).
        s = spikes["Ae"].get("s").permute((1, 0, 2))
        spike_record[
            (step * batch_size)
            % update_interval : (step * batch_size % update_interval)
            + s.size(0)
        ] = s
        # Get voltage recording.
        exc_voltages = exc_voltage_monitor.get("v")
        inh_voltages = inh_voltage_monitor.get("v")
        # Optionally plot various simulation information.
        if step % update_steps == 0 and step > 0:
            if plot:
                image = batch["image"][:, 0].view(28, 28)
                inpt = inputs["X"][:, 0].view(time, 784).sum(0).view(28, 28)
                lable = batch["label"][0]
                input_exc_weights = network.connections[("X", "Ae")].w
                square_weights = get_square_weights(
                    input_exc_weights.view(784, n_neurons), n_sqrt, 28
                )
                # weights_im = plot_weights(square_weights, im=weights_im, save="../weights/"+str(step)+".png")
                # perf_ax = plot_performance(
                #     accuracy, x_scale=update_steps * batch_size, ax=perf_ax
                # )
                # weight_changes = torch.from_numpy(normalize(weight_changes))
                # weight_changes = get_square_weights(weight_changes.view(784, n_neurons), n_sqrt, 28)
                # save_loc = "../weight_changes/"+str(step)+".png"
                # weights_im = plot_weights(weight_changes, im=weights_im, save=save_loc)
                # Render and save the confusion matrix for this interval.
                fig, ax = plt.subplots()
                im = ax.imshow(confusion_matrix)
                # Rotate the tick labels and set their alignment.
                plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
                         rotation_mode="anchor")
                # Loop over data dimensions and create text annotations.
                for i in range(10):
                    for j in range(10):
                        text = ax.text(j, i, confusion_matrix[i, j],
                                       ha="center", va="center", color="w")
                ax.set_title("Confusion matrix of MNIST w/ SNN at " + str(step * batch_size))
                ax.set_xlabel("predicted label")
                ax.set_ylabel("true label")
                fig.tight_layout()
                plt.savefig("../confusion_matrices/"+str(step)+".png")
                plt.close(fig)
                # Histogram of how often each digit was predicted.
                fig, ax = plt.subplots()
                prediction_freq = np.sum(confusion_matrix, axis=0)
                ax.bar(np.arange(10), prediction_freq)
                ax.set_xlabel("predicted label")
                ax.set_ylabel("number of predictions")
                plt.savefig("../confusion_matrices/freq_"+str(step)+".png")
                plt.close(fig)
                # Entropy of the prediction distribution, rescaled to ~0-100
                # (2.3 ~= ln(10), the maximum entropy over 10 classes).
                fig, ax = plt.subplots()
                mae_from_uniform_current = (entropy(prediction_freq) * 100) / 2.3
                mae_from_uniform.append(mae_from_uniform_current)
                ax.plot(np.arange(len(accuracy["all"])), accuracy["all"], label="accuracy")
                ax.plot(np.arange(len(accuracy["all"])), mae_from_uniform, label="entropy")
                ax.set_title("correlation between entropy and accuracy")
                ax.set_xlabel("number of batches seen")
                ax.set_ylabel("accuracy (percent)")
                ax.legend()
                plt.savefig("../entropy_vs_acc/"+str(step)+".png")
                plt.close(fig)
                print(np.corrcoef(mae_from_uniform, accuracy["all"]))
                plt.pause(1e-8)
        # indices = np.arange(len(change_arr) )
        # plt.plot(indices, change_arr, color='r')
        # plt.xlabel('training batch number')
        # plt.ylabel('number of weights changed')
        # plt.show()
        # plt.title("Number of weights changed per update (400 neruons)")
        # plt.pause(1e-8)
        network.reset_state_variables()  # Reset state variables.
        pbar_training.update(batch_size)
    print("Progress: %d / %d (%.4f seconds)" % (epoch + 1, n_epochs, t() - start))
print("Training complete.\n")
# Load MNIST test data (held-out split).
test_dataset = MNIST(
    PoissonEncoder(time=time, dt=dt),
    None,
    root=os.path.join(ROOT_DIR, "data", "MNIST"),
    download=True,
    train=False,
    transform=transforms.Compose(
        [transforms.ToTensor(), transforms.Lambda(lambda x: x * intensity)]
    ),
)
# Create a dataloader to iterate and batch data
test_dataloader = DataLoader(
    test_dataset,
    batch_size=batch_size,
    shuffle=True,
    num_workers=n_workers,
    pin_memory=gpu,
)
# Accuracy accumulators (counts of correctly classified samples).
accuracy = {"all": 0, "proportion": 0}
print("\nBegin testing\n")
network.train(mode=False)  # inference mode: disable plasticity
start = t()
pbar = tqdm(total=n_test)
# FIX: iterate the batched DataLoader. The original looped over
# `test_dataset` directly, leaving the just-built `test_dataloader`
# unused and evaluating one un-batched sample at a time.
for step, batch in enumerate(test_dataloader):
    if step > n_test:
        break
    # Get next input batch.
    inputs = {"X": batch["encoded_image"]}
    if gpu:
        inputs = {k: v.cuda() for k, v in inputs.items()}
    # Run the network on the input.
    network.run(inputs=inputs, time=time, input_time_dim=1)
    # Add to spikes recording.
    spike_record = spikes["Ae"].get("s").permute((1, 0, 2))
    # Convert the array of labels into a tensor
    label_tensor = torch.tensor(batch["label"], device=device)
    # Get network predictions.
    all_activity_pred = all_activity(
        spikes=spike_record, assignments=assignments, n_labels=n_classes
    )
    proportion_pred = proportion_weighting(
        spikes=spike_record,
        assignments=assignments,
        proportions=proportions,
        n_labels=n_classes,
    )
    # Compute network accuracy according to available classification strategies.
    accuracy["all"] += float(torch.sum(label_tensor.long() == all_activity_pred).item())
    accuracy["proportion"] += float(
        torch.sum(label_tensor.long() == proportion_pred).item()
    )
    network.reset_state_variables()  # Reset state variables.
    pbar.set_description_str("Test progress: ")
    # Advance the bar by the number of samples in this batch.
    pbar.update(len(batch["label"]))
plt.show()
print("\nAll activity accuracy: %.2f" % (accuracy["all"] / n_test))
print("Proportion weighting accuracy: %.2f \n" % (accuracy["proportion"] / n_test))
# NOTE(review): `epoch` here is left over from the training loop above.
print("Progress: %d / %d (%.4f seconds)" % (epoch + 1, n_epochs, t() - start))
print("Testing complete.\n")
|
# using activated venv, so no shebang
import os
import unittest

from app import create_test_app
from models import User, Student, Group, Mentor, Session, SessionAction, Payment
# The `flask.ext` import namespace was removed in Flask 1.0; the extension
# must be imported under its real package name.
from flask_sqlalchemy import SQLAlchemy

# Module-level app/db shared by every test case below.
app = create_test_app()
db = SQLAlchemy(app)
class TestCase(unittest.TestCase):
    """Integration tests for the ORM models against a real database.

    Each test gets a freshly created schema (setUp) that is dropped again
    afterwards (tearDown), so tests never see each other's rows.
    """

    def setUp(self):
        # Switch the shared app into test mode and point it at the test DB.
        app.config['TESTING'] = True
        app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL']
        app.config['WTF_CSRF_ENABLED'] = False
        self.app = app.test_client()
        db.create_all()

    def tearDown(self):
        # Discard the session and wipe the schema for the next test.
        db.session.remove()
        db.drop_all()

    def test_pop(self):
        """A bare User can be persisted."""
        solo_user = User(username='john', email='<EMAIL>')
        db.session.add(solo_user)
        db.session.commit()

    def test_parents_to_students(self):
        """Students may have several parents; links are visible from both sides."""
        parent_a = User(username='john', email='<EMAIL>')
        parent_b = User(username='kylo', email='<EMAIL>')
        db.session.add_all([parent_a, parent_b])
        db.session.commit()
        fin = Student(name='Fin')
        ray = Student(name='Ray')
        db.session.add_all([fin, ray])
        db.session.commit()
        parent_a.add_student(fin)
        parent_a.add_student(ray)
        self.assertTrue(parent_a.has_student(fin))
        self.assertTrue(parent_a.has_student(ray))
        self.assertTrue(fin.has_parent(parent_a))
        # A second parent can be attached to an already-parented student.
        parent_b.add_student(fin)
        self.assertTrue(fin.has_parent(parent_b))

    def test_mentor_to_group(self):
        """A mentor may own several groups; the group sees its mentor back."""
        mentor = Mentor(name='Snoke')
        group_a = Group(name='Solo')
        group_b = Group(name='Generals')
        ray = Student(name='Ray')
        kylo = Student(name='Kylo')
        hux = Student(name='Hux')
        db.session.add_all([mentor, group_a, group_b, ray, kylo, hux])
        db.session.commit()
        group_a.add_student(ray)
        group_a.add_student(kylo)
        group_b.add_student(hux)
        mentor.add_group(group_a)
        mentor.add_group(group_b)
        self.assertEqual(group_a.mentor.name, 'Snoke')
        self.assertTrue(mentor.has_group(group_b))

    def test_session_to_group(self):
        """A session attached to a group records the group's id."""
        mentor = Mentor(name='Snoke')
        group_a = Group(name='Solo')
        group_b = Group(name='Generals')
        ray = Student(name='Ray')
        kylo = Student(name='Kylo')
        hux = Student(name='Hux')
        db.session.add_all([mentor, group_a, group_b, ray, kylo, hux])
        db.session.commit()
        lesson = Session()
        group_a.add_session(lesson)
        db.session.add(lesson)
        db.session.commit()
        group_a.add_student(ray)
        group_a.add_student(kylo)
        group_b.add_student(hux)
        self.assertTrue(group_a.has_session(lesson))
        self.assertEqual(lesson.group_id, group_a.id)

    def test_SessionActions1(self):
        """Session actions attach to sessions and remember the acting user."""
        han = User(username='Han')
        leia = User(username='Leia')
        group = Group(name='Solo')
        ray = Student(name='Ray')
        kylo = Student(name='Kylo')
        lesson_a = Session()
        lesson_b = Session()
        action_moved = SessionAction(type='moved')
        action_confirmed = SessionAction(type='confirmed')
        db.session.add_all([han, leia, group, ray, kylo,
                            lesson_a, lesson_b, action_moved, action_confirmed])
        db.session.commit()
        group.add_student(ray)
        group.add_student(kylo)
        group.add_session(lesson_a)
        group.add_session(lesson_b)
        action_moved.user_id = han.id
        action_confirmed.user_id = leia.id
        lesson_a.add_SessionAction(action_moved)
        lesson_b.add_SessionAction(action_confirmed)
        self.assertTrue(lesson_b.has_SessionAction(action_confirmed))
        self.assertTrue(lesson_a.has_SessionAction(action_moved))

    def test_payments_users(self):
        """Payments link back to the paying user via payer_id."""
        han = User(username='Han')
        leia = User(username='Leia')
        payment_a = Payment(total=400)
        payment_b = Payment(total=600)
        db.session.add_all([han, leia, payment_a, payment_b])
        db.session.commit()
        han.add_payment(payment_a)
        leia.add_payment(payment_b)
        self.assertTrue(han.has_payment(payment_a))
        self.assertTrue(leia.has_payment(payment_b))
        self.assertEqual(payment_a.payer_id, han.id)
        self.assertEqual(payment_b.payer_id, leia.id)
# Run the suite when executed directly (stray trailing artifact removed).
if __name__ == '__main__':
    unittest.main()
#!/usr/bin/env python3
import pygame
import constants as c
from scene import Scene
from level_scene import LevelScene
from high_score_table import HighScoreTable
from transition_gui import TransitionGui
class HighScoreScene(Scene):
    """Scene that slides in the high-score tables between levels.

    Shows a recent-scores table and an all-time table, processes stream chat
    commands (!recolor, !score, !vote), and fades out to the next LevelScene.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Ensure every active player appears on the scoreboard, even with 0 points.
        for player in self.game.players:
            if player not in [item.name for item in self.game.scoreboard.scores]:
                self.game.scoreboard.add_score(player, 0)
        # Board starts one screen-height above the view and slides down in update().
        self.board_offset = -c.WINDOW_HEIGHT
        self.table = HighScoreTable(self.game)
        # Second table with an effectively infinite window: the all-time list.
        self.table_all = HighScoreTable(self.game, hours_to_display=10**9)
        self.table.pose.x = c.WINDOW_WIDTH * 0.3
        self.table_all.pose.x = self.table.pose.x
        self.age = 0  # seconds since scene start
        # Full-screen black overlay used for fade-in/fade-out.
        self.shade = pygame.Surface(c.WINDOW_SIZE)
        self.shade.fill(c.BLACK)
        self.shade_alpha = 255
        self.scene_over = False
        self.side_gui = TransitionGui(self.game)
        # Duck the music while the score screen is up.
        pygame.mixer.music.set_volume(0.25)

    def next_scene(self):
        """Restore music volume and hand control to a fresh LevelScene."""
        pygame.mixer.music.set_volume(1.0)
        return LevelScene(self.game)

    def update(self, dt, events):
        """Advance animations, handle input and chat commands for one frame."""
        self.age += dt
        # After 25s, ease the board down toward offset 0 (speed proportional
        # to remaining distance, capped at 2 screen-heights per second).
        if self.age > 25 and self.board_offset < 0:
            speed = 4
            d = abs(self.board_offset)
            self.board_offset += min(d * dt * speed, c.WINDOW_HEIGHT*dt*2)
            if self.board_offset > 0:
                self.board_offset = 0
        # Enter key skips straight to the next scene.
        for event in events:
            if event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
                self.scene_over = True
        if self.side_gui.countdown_over():
            self.scene_over = True
        self.table.update(dt, events)
        self.table_all.update(dt, events)
        self.side_gui.update(dt, events)
        # Drain pending chat messages and dispatch recognized commands.
        for message in self.game.stream.queue_flush():
            if message.text.lower() == '!recolor':
                if message.user in self.game.players:
                    self.game.players[message.user].recolor()
                    self.game.recolor_flag(message.user)
            elif message.text.lower() == '!score':
                board = self.game.scoreboard.get_total_by_player(c.SCORE_EXPIRATION)
                if message.user in board:
                    score = self.game.scoreboard.get_total_by_player(c.SCORE_EXPIRATION)[message.user].score
                    self.game.alertManager.alert("Your score is "+str(score), message.user)
                else:
                    self.game.alertManager.alert("You have not played in the last " + str(c.SCORE_EXPIRATION) + " hours", message.user)
            elif message.text.lower()[:5] == "!vote":
                split = message.text.lower().split()
                if len(split) != 2:
                    self.game.alertManager.alert("Invalid number of arguments for !vote", message.user)
                    continue
                player_name = message.user
                argument = split[1]
                # NOTE(review): self.game.current_scene is presumably this scene,
                # so this should equal self.side_gui.vote(...) — confirm.
                self.game.current_scene.side_gui.vote(player_name, argument)
        # Fade the shade in while the scene is ending, out while it is starting.
        speed = 800
        if self.scene_over:
            self.shade_alpha += speed*dt
        else:
            self.shade_alpha -= speed*dt
        self.shade_alpha = max(0, min(255, self.shade_alpha))
        # Only stop once the fade-out has fully covered the screen.
        if self.scene_over and self.shade_alpha == 255:
            self.is_running = False

    def draw(self, surface, offset=(0, 0)):
        """Render tables, side GUI and the fade overlay onto `surface`."""
        surface.fill(c.BLACK)
        surface.blit(self.table.background_surface, (0, 0))
        # Recent table rides one screen below the all-time table; both share board_offset.
        self.table.draw(surface, (offset[0], offset[1] + self.board_offset + c.WINDOW_HEIGHT))
        self.table_all.draw(surface, (offset[0], offset[1] + self.board_offset))
        self.side_gui.draw(surface, offset)
        if self.shade_alpha > 0:
            self.shade.set_alpha(self.shade_alpha)
            surface.blit(self.shade, (0, 0))
|
# -*- coding: utf-8 -*-
import pygame
import numpy as np
import random
import os.path
import json
import base64
import hashlib
from cryptography.fernet import Fernet
from io import BytesIO
from sys import exit
class SpaceInvaders:
"""
Space Invaders Game
Copyright © 2022-2222 <NAME> & <NAME>
"""
def __init__(self, screen):
pygame.font.init()
pygame.mixer.init()
# set up display surfaces
self.screen = screen
self.screen_size = np.array(self.screen.get_size())
self.mid_screen = self.screen_size // 2
self.background_color = (0, 0, 0)
self.screen_titles = screen.copy()
self.screen_titles.set_colorkey(self.background_color)
self.screen_credits = self.screen.copy()
self.screen_credits.set_colorkey(self.background_color)
self.screen_instructions = self.screen.copy()
self.screen_instructions.set_colorkey(self.background_color)
self.screen_info = screen.copy()
self.screen_info.set_colorkey(self.background_color)
self.title_color = (220, 220, 160)
self.score_color = (200, 200, 0)
self.angle = 0
self.angle_add = 0
self.info_screen = 'titles'
self.info_screen_next = 'credits'
self.f = Fernet(base64.urlsafe_b64encode(hashlib.md5('<password here>'.encode()).hexdigest().encode("utf-8")))
self.namehints = {'p':'png','j':'jpg','o':'ogg','m':'mp3','w':'wav','t':'txt'}
# load data files - images
(fileobj, namehint) = self.load_dat('ship p')
self.ship_pic = pygame.image.load(fileobj, namehint).convert()
self.ship_pic.set_colorkey((255, 255, 255))
shield_size = int(np.max(np.asarray(self.ship_pic.get_size()) * 1.6))
(fileobj, namehint) = self.load_dat('shield_small p')
self.ship_shield_pic = pygame.transform.scale(pygame.image.load(fileobj, namehint).convert(), (shield_size, shield_size))
self.ship_shield_pic.set_colorkey((0, 0, 0))
self.ship_shield_pic.set_alpha(96)
(fileobj, namehint) = self.load_dat('alien 1 p')
self.alien1_pic = pygame.image.load(fileobj, namehint).convert()
self.alien1_pic.set_colorkey((255, 255, 255))
(fileobj, namehint) = self.load_dat('alien 2 p')
self.alien2_pic = pygame.image.load(fileobj, namehint).convert()
self.alien2_pic.set_colorkey((0, 0, 0))
(fileobj, namehint) = self.load_dat('alien 3 p')
self.alien3_pic = pygame.image.load(fileobj, namehint).convert()
self.alien3_pic.set_colorkey((0, 0, 0))
(fileobj, namehint) = self.load_dat('alien 4 p')
self.alien4_pic = pygame.image.load(fileobj, namehint).convert()
self.alien4_pic.set_colorkey((0, 0, 0))
(fileobj, namehint) = self.load_dat('alien 5 p')
self.alien5_pic = pygame.image.load(fileobj, namehint).convert()
self.alien5_pic.set_colorkey((0, 0, 0))
(fileobj, namehint) = self.load_dat('alien 6 p')
self.alien6_pic = pygame.image.load(fileobj, namehint).convert()
self.alien6_pic.set_colorkey((0, 0, 0))
(fileobj, namehint) = self.load_dat('alien 7 p')
self.alien7_pic = pygame.image.load(fileobj, namehint).convert()
self.alien7_pic.set_colorkey((0, 0, 0))
(fileobj, namehint) = self.load_dat('alien 8 p')
self.alien8_pic = pygame.image.load(fileobj, namehint).convert()
self.alien8_pic.set_colorkey((0, 0, 0))
(fileobj, namehint) = self.load_dat('alien 9 p')
self.alien9_pic = pygame.image.load(fileobj, namehint).convert()
self.alien9_pic.set_colorkey((0, 0, 0))
(fileobj, namehint) = self.load_dat('alien 10 p')
self.alien10_pic = pygame.image.load(fileobj, namehint).convert()
self.alien10_pic.set_colorkey((0, 0, 0))
(fileobj, namehint) = self.load_dat('alien 11 p')
self.alien11_pic = pygame.image.load(fileobj, namehint).convert()
self.alien11_pic.set_colorkey((0, 0, 0))
(fileobj, namehint) = self.load_dat('alien 12 p')
self.alien12_pic = pygame.image.load(fileobj, namehint).convert()
self.alien12_pic.set_colorkey((0, 0, 0))
(fileobj, namehint) = self.load_dat('explosion alien w')
self.alien1_sound_explosion = pygame.mixer.Sound(fileobj)
self.alien1_sound_explosion.set_volume(0.2)
(fileobj, namehint) = self.load_dat('alien boss 1 p')
self.alien_boss1_pic = pygame.image.load(fileobj, namehint).convert()
self.alien_boss1_hit_area = pygame.Rect(206 * 2 // 3, 358 * 2 // 3, 100 * 2 // 3, 57 * 2 // 3)
self.alien_boss1_pic = pygame.transform.scale(self.alien_boss1_pic, np.array(self.alien_boss1_pic.get_size()) * 2 // 3)
self.pic_colorkey(self.alien_boss1_pic, (36, 36, 36))
self.alien_boss1_cannon_pos = np.array([[self.alien_boss1_pic.get_size()[0] * 0.2 - 10, self.alien_boss1_pic.get_size()[1] - 5],
[self.alien_boss1_pic.get_size()[0] * 0.8 - 10, self.alien_boss1_pic.get_size()[1] - 5]], dtype=np.float)
(fileobj, namehint) = self.load_dat('alien boss 2 p')
self.alien_boss2_pic = pygame.image.load(fileobj, namehint).convert()
self.alien_boss2_pic.set_colorkey((0, 0, 0))
self.alien_boss2_hit_area = pygame.Rect(87, 300, 106, 65)
self.alien_boss2_cannon_pos = np.array([[self.alien_boss2_pic.get_size()[0] * 0.43 - 10, self.alien_boss2_pic.get_size()[1] - 25],
[self.alien_boss2_pic.get_size()[0] * 0.57 - 10, self.alien_boss2_pic.get_size()[1] - 25]], dtype=np.float)
(fileobj, namehint) = self.load_dat('alien boss 3 p')
self.alien_boss3_pic = pygame.image.load(fileobj, namehint).convert()
self.alien_boss3_pic.set_colorkey((0, 0, 0))
self.alien_boss3_hit_area = pygame.Rect(135, 210, 52, 45)
self.alien_boss3_cannon_pos = np.array([[-10, 225], [110, 210], [192, 210], [312, 225]], dtype=np.float)
(fileobj, namehint) = self.load_dat('alien boss 4 p')
self.alien_boss4_pic = pygame.image.load(fileobj, namehint).convert()
self.alien_boss4_pic.set_colorkey((0, 0, 0))
self.alien_boss4_hit_area = pygame.Rect(153, 340, 72, 35)
self.alien_boss4_cannon_pos = np.array([[27, 368], [146, 350], [212, 350], [321, 368]], dtype=np.float)
(fileobj, namehint) = self.load_dat('alien_ufo p')
self.alien_ufo_pic = pygame.image.load(fileobj, namehint).convert()
self.alien_ufo_pic.set_colorkey((0, 0, 0))
(fileobj, namehint) = self.load_dat('bullet_small p')
self.bullet_alien1_pic = pygame.image.load(fileobj, namehint).convert()
self.bullet_alien1_pic = pygame.transform.flip(pygame.transform.scale(self.bullet_alien1_pic, (np.array(self.bullet_alien1_pic.get_size()) / 2.6).astype(np.int16)), 0, 1)
self.bullet_alien1_pic = self.recolor(self.bullet_alien1_pic, 'B')
self.bullet_alien1_pic.set_colorkey((0, 0, 0))
(fileobj, namehint) = self.load_dat('bullet_double p')
self.bullet_alien2_pic = pygame.image.load(fileobj, namehint).convert()
self.bullet_alien2_pic = pygame.transform.flip(pygame.transform.scale(self.bullet_alien2_pic, np.array(self.bullet_alien2_pic.get_size()) // 2), 0, 1)
self.bullet_alien2_pic.set_colorkey((0, 0, 0))
(fileobj, namehint) = self.load_dat('bullet_medium p')
self.bullet_ship1_pic = pygame.image.load(fileobj, namehint).convert()
self.bullet_ship1_pic = pygame.transform.scale(self.bullet_ship1_pic, np.array(self.bullet_ship1_pic.get_size()) // 3)
self.bullet_ship1_pic.set_colorkey((0, 0, 0))
(fileobj, namehint) = self.load_dat('bullet_ufo p')
self.bullet_ufo1_pic = pygame.image.load(fileobj, namehint).convert()
self.bullet_ufo1_pic.set_colorkey((0, 0, 0))
(fileobj, namehint) = self.load_dat('power_up p')
self.powerup_template = pygame.image.load(fileobj, namehint).convert()
# load data files - sounds
(fileobj, namehint) = self.load_dat('explo 12 p')
self.explosion1_pic = self.adjust_pic(pygame.image.load(fileobj, namehint).convert(), np.array([-2, 7]), np.array([554, 554])) # adjust picture position and size
self.pic_colorkey(self.explosion1_pic, (16, 16, 16))
self.explosion1_pic_grid = np.array([5, 5])
self.explosion1_freq = 2
(fileobj, namehint) = self.load_dat('explosion p')
self.explosion_ship_pic = pygame.image.load(fileobj, namehint).convert()
self.pic_colorkey(self.explosion_ship_pic, (16, 16, 16))
self.explosion_ship_pic_grid = np.array([4, 4])
self.explosion_ship_freq = 2
(fileobj, namehint) = self.load_dat('ship gun w')
self.ship_sound_gun = pygame.mixer.Sound(fileobj)
self.ship_sound_gun.set_volume(0.2)
(fileobj, namehint) = self.load_dat('explosion ship boss w')
self.ship_sound_explosion = pygame.mixer.Sound(fileobj)
self.ship_sound_explosion.set_volume(1.0)
(fileobj, namehint) = self.load_dat('explosion ship boss w')
self.alien_boss_sound_explosion = pygame.mixer.Sound(fileobj)
self.alien_boss_sound_explosion.set_volume(0.5)
(fileobj, namehint) = self.load_dat('explosion ufo m')
self.alien_ufo_sound_explosion = pygame.mixer.Sound(fileobj)
self.alien_ufo_sound_explosion.set_volume(0.5)
(fileobj, namehint) = self.load_dat('miss w')
self.sound_miss = pygame.mixer.Sound(fileobj)
self.sound_miss.set_volume(0.4)
(fileobj, namehint) = self.load_dat('pling w')
self.powerup_sound = pygame.mixer.Sound(fileobj)
self.powerup_sound.set_volume(0.6)
(fileobj, namehint) = self.load_dat('victory w')
self.sound_victory = pygame.mixer.Sound(fileobj)
self.sound_victory.set_volume(0.4)
(fileobj, namehint) = self.load_dat('defeat w')
self.sound_defeat = pygame.mixer.Sound(fileobj)
self.sound_defeat.set_volume(0.4)
# set up powerups
self.font_powerup = pygame.font.SysFont('Arial Black', 10)
self.font_powerup_desc = pygame.font.SysFont('stencil', 16)
self.powerup_data = self.setup_powerups(self.powerup_template, self.font_powerup, self.font_powerup_desc)
self.powerup_desc_width = 0 # filled by the next loop
for pdata in self.powerup_data:
if pdata[3].get_size()[0] > self.powerup_desc_width:
self.powerup_desc_width = pdata[3].get_size()[0]
# set up high scores
self.highscore_file = 'spaceinv_scores t'
self.highscore_nr = 10
self.highscore_name_length = 8
self.latest_hs_nr = 0
self.latest_hs_keys = []
self.highscores = []
self.read_scores()
# level and game data
self.level_data = []
self.setup_level_data()
self.level = 0
self.level_time = 0
self.level_completed_time = 0
self.level_ufo_probability = 0.05
self.level_power_up_probability = 0.8 # probability of getting a power up by shooting an ufo
self.level_new_life = False
self.running = True
self.clock = pygame.time.Clock()
self.fps = 60
self.game_mode = 'start page'
self.game_over = False
self.game_over_time = 0
self.freeze_aliens = False
self.cheat_mode = 0
self.font_title = pygame.font.SysFont('stencil', self.screen_size[0] // 10)
self.font_highscorelist = pygame.font.SysFont('stencil', self.screen_size[1] // 30)
self.font_score = pygame.font.SysFont('stencil', 25)
self.font_game_score = pygame.font.SysFont('stencil', 50)
self.game_score = 0
self.show_score = -1
self.game_over_pic = self.font_game_score.render('GAME OVER', True, self.title_color)
self.game_over_pic.set_colorkey(self.background_color)
self.score_pic = self.font_game_score.render('0', True, self.score_color, self.background_color)
self.score_pic.set_colorkey(self.background_color)
self.score_pic_position = (10, 10)
self.level_pic = self.font_game_score.render('Level 0', True, self.score_color, self.background_color)
self.level_pic.set_colorkey(self.background_color)
self.level_pic_position = (self.screen_size[0] - self.level_pic.get_size()[0] - 10, 10)
self.switch_time = 0
self.switch_time_ms = 2000 # milliseconds between game and start page modes
# game object lists
self.alien_bullets = []
self.aliens = []
self.ufos = []
self.scores = []
self.explosions = []
self.powerups = []
# setup title screen
self.title_hs_pos = np.array([0, 0])
self.title_hs_pos_end = np.array([0, 0])
self.title_hs_rect = np.zeros((4))
self.title_cr_rect = np.zeros((4))
self.title_ins_rect = np.zeros((4))
self.title_rect = np.zeros((4))
self.title_z_pos = 1000
self.setup_titles(0)
self.setup_credits()
self.setup_instructions()
self.screen_info = self.screen_titles.copy() # copy to info screen
self.credits_time = pygame.time.get_ticks()
self.title_change = 0
self.title_change_ctr = 0
# set up a random batch of stars for the background
self.nr_stars = 4000
self.z_range = (50, 2000) # range for Z coordinates of stars
self.stars = np.random.rand(self.nr_stars, 3) * np.array([self.screen_size[0] - 2, self.screen_size[1] - 2, 1.0]) \
+ np.array([-self.mid_screen[0], -self.mid_screen[1], 0.0])
# adjust Z coordinates as more stars needed at distance for a balanced view
self.stars[:, 2] = (self.stars[:, 2] ** 0.5) * (self.z_range[1] - self.z_range[0]) + self.z_range[0]
self.star_move = np.array([0.0, 0.0, -0.5])
# create the ship
self.ship_shoot_freq = 280
self.ship = Ship(self.ship_pic, self.bullet_ship1_pic, self.ship_shield_pic, self.ship_sound_gun, self.ship_sound_explosion,
self.screen_size, 7.0, self.ship_shoot_freq, 3)
# load music data and start looping it
(fileobj, namehint) = self.load_dat('music theme o')
pygame.mixer.music.load(fileobj)
pygame.mixer.music.set_volume(0.2)
pygame.mixer.music.play(loops=-1)
self.nr_channels = pygame.mixer.get_num_channels()
self.channel = 1 # reserving channel 0 for prioritized sounds
# ---------------- main loop ----------------
    def run(self):
        """Main loop: poll input, dispatch on game_mode, render, cap at self.fps.

        Modes: 'start page', 'go to game' (transition), 'game',
        'go to start page' (transition), 'new high score' (name entry).
        """
        prev_time = pygame.time.get_ticks()
        while self.running:
            time = pygame.time.get_ticks()
            # keyboard activity
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.running = False
                if self.game_mode == 'new high score':
                    # While entering a name, collect text input and special keys
                    # into latest_hs_keys for highscore_get_name to consume.
                    if event.type == pygame.TEXTINPUT:
                        unicode = event.text
                        key = ord(unicode)
                        self.latest_hs_keys.append((key, unicode))
                    if event.type == pygame.KEYDOWN:
                        # get special keys
                        key = event.key
                        if key in (pygame.K_RETURN, pygame.K_BACKSPACE):
                            self.latest_hs_keys.append((key, ''))
            if not self.game_mode == 'new high score':
                # Polled (held-down) keys drive movement and firing.
                keys = pygame.key.get_pressed()
                if keys[pygame.K_ESCAPE]:
                    self.running = False
                if keys[pygame.K_SPACE]:
                    if self.game_mode == 'start page':
                        self.game_mode = 'go to game'
                        self.switch_time = time
                # NOTE(review): firing is bound to RETURN, yet the reset below
                # touches ship.space_down — confirm the intended fire key.
                if keys[pygame.K_RETURN]:
                    if self.game_mode == 'game':
                        if self.ship.shoot(time):
                            # shot fired, play sound
                            self.play_sound(self.ship.sound_gun)
                else:
                    self.ship.space_down = False
                if keys[pygame.K_i] and self.angle_add == 0:
                    # start rotating the title cube toward the instructions face
                    self.angle_add = 2
                    self.info_screen_next = 'instructions'
                    self.credits_time = time
                if keys[pygame.K_c]:
                    # cheat toggle; 3 marks "cheat used" so the score is excluded
                    if self.cheat_mode == 0:
                        self.cheat_mode = 1
                    else:
                        self.cheat_mode = 3
                if keys[pygame.K_h] and self.cheat_mode in (1, 3):
                    # cheat: grant a random powerup immediately
                    self.cheat_mode = 2
                    pid = random.randint(0, len(self.powerup_data) - 1)  # randomly pick power up
                    self.powerups.append(PowerUp(pid, self.powerup_data, self.powerup_sound, (0, 0)))
                    powerup = self.powerups[-1]
                    self.ship.add_powerup(powerup, self.ship_shoot_freq)
                    self.powerups.remove(powerup)
                if keys[pygame.K_LEFT] or keys[pygame.K_a]:
                    self.ship.move(-1, self.screen_size)
                if keys[pygame.K_RIGHT] or keys[pygame.K_d]:
                    self.ship.move(1, self.screen_size)
            # clear screen for a new frame
            self.screen.fill(self.background_color)
            # main loop depending on game mode
            if self.game_mode == 'start page':
                self.move_stars(time, prev_time)
                self.plot_stars()
                self.ufo_ops_back(time, prev_time)
                self.copy_titles()
                self.rotate_titles(time)
                self.bullet_ops(time)
                self.ufo_ops_front(time)
            elif self.game_mode == 'go to game':
                # Transition: rotate the starfield into "flying up" motion while
                # fading the title screen out over switch_time_ms.
                star_move_frac = max(1.0 - (time - self.switch_time) / self.switch_time_ms, 0.0)
                self.star_move = np.array([0.0,
                                           125.0 * np.cos(star_move_frac * np.pi / 2.0),
                                           -0.5 * np.sin(star_move_frac * np.pi / 2.0)])
                self.move_stars(time, prev_time)
                self.plot_stars()
                self.ufo_ops_back(time, prev_time)
                self.screen_titles.set_alpha(int(255 * star_move_frac))
                self.copy_titles()
                self.ufo_ops_front(time)
                if time - self.switch_time > self.switch_time_ms + 1000:
                    self.star_move = np.array([0.0, 125.0, 0.0])
                    self.screen_titles.set_alpha(255)
                    self.new_game(time)
            elif self.game_mode == 'go to start page':
                self.move_stars(time, prev_time)
                self.plot_stars()
                if time > self.switch_time + 1000:
                    self.info_screen = 'titles'
                    self.angle = 0
                    self.angle_add = 0
                    # Cheated games (mode 2) never enter the high-score list.
                    if self.cheat_mode in (0, 1) and self.game_score > self.highscores[self.highscore_nr - 1][1]:
                        self.latest_hs_nr = self.highscore_add()
                        self.game_mode = 'new high score'
                        pygame.key.start_text_input()
                    else:
                        self.game_mode = 'start page'
                        self.credits_time = time
            elif self.game_mode == 'new high score':
                self.highscore_get_name(time)
                self.move_stars(time, prev_time)
                self.plot_stars()
                self.setup_titles(time)
                self.copy_titles()
            else:  # actual game mode ('game')
                self.move_stars(time, prev_time)
                self.plot_stars()
                self.level_ops(time)
                self.ship_ops(time, prev_time)
                self.ufo_ops_back(time, prev_time)
                self.alien_ops(time, prev_time)
                self.bullet_ops(time)
                self.explosion_ops()
                self.score_ops(time)
                self.powerup_ops(time)
                self.ufo_ops_front(time)
                self.info_ops()
                if self.game_over:
                    self.game_over_ops(time)
            self.clock.tick(self.fps)  # keeps code running at maximum self.fps frames per second
            pygame.display.flip()
            prev_time = time + 0  # + 0 forces a copy of the tick value
# ---------------- initialize a new game ----------------
def new_game(self, time):
# set up a new game
self.game_mode = 'game'
self.ship = Ship(self.ship_pic, self.bullet_ship1_pic, self.ship_shield_pic, self.ship_sound_gun, self.ship_sound_explosion,
self.screen_size, 7.0, self.ship_shoot_freq, 3)
self.level = 0
self.aliens.clear()
self.alien_bullets.clear()
self.scores.clear()
self.explosions.clear()
self.powerups.clear()
self.game_over = False
self.game_over_time = 0
self.game_score = 0
self.show_score = -1
self.cheat_mode = 0
# ---------------- main loop "operations" for different entities ----------------
def level_ops(self, time):
# test if level completed
if len(self.aliens) == 0 and len(self.ufos) == 0 and not self.game_over:
if (self.level_completed_time > 120 and len(self.explosions) == 0 and len(self.scores) == 0 and self.show_score == self.game_score) or self.level == 0:
self.level_completed_time = 0
self.level += 1
self.setup_level(time)
else:
if self.level_completed_time == 0:
# add score, play fanfare
self.scores.append(Score(self, 200 + self.level * 50, str(200 + self.level * 50),
self.font_game_score, self.screen_size // 2 + np.array([-20, -50]),
self.score_color, self.background_color))
self.play_sound(self.sound_victory, 0)
if self.level_new_life and self.ship.lives < 5:
self.ship.lives += 1 # add extra life
self.level_completed_time += 1
pic = self.font_game_score.render('LEVEL ' + str(self.level) + ' COMPLETE', True, self.title_color, self.background_color)
pic.set_colorkey((self.background_color))
position = ((self.screen_size - pic.get_size()) / 2).astype(np.int)
self.screen.blit(pic, position)
# test if Freeze Alien powerup is active
for powerup in self.ship.powerups:
if self.powerup_data[powerup[0]][2] == 'Freeze Aliens':
self.freeze_aliens = True
break
else:
self.freeze_aliens = False
def bullet_ops(self, time):
# check alien bullets hitting ship or its shield
for bullet in self.alien_bullets:
bullet.move()
if self.ship.shield:
# ship protected by a shield
if bullet.rect.colliderect(self.ship.shield_rect):
mask_offset = (bullet.rect[0] - self.ship.shield_rect[0], bullet.rect[1] - self.ship.shield_rect[1])
if self.ship.shield_mask.overlap(bullet.mask, mask_offset):
self.play_sound(self.sound_miss)
bullet.position[1] = -9999
elif bullet.rect.colliderect(self.ship.rect) and self.ship.status == 0 and self.game_mode == 'game':
mask_offset = (bullet.rect[0] - self.ship.rect[0], bullet.rect[1] - self.ship.rect[1])
if self.ship.mask.overlap(bullet.mask, mask_offset):
self.ship.is_hit(time, self.cheat_mode)
self.play_sound(self.ship.sound_explosion)
bullet.position[1] = -9999
self.explosions.append(Explosion(self.explosion_ship_pic, (int(self.ship.position[0] + self.ship.size[0] / 2),
int(self.ship.position[1] + self.ship.size[1] / 2)),
self.explosion_ship_pic_grid, self.explosion_ship_freq))
if bullet.position[0] < -bullet.size[0] or bullet.position[0] > self.screen_size[0] \
or bullet.position[1] < -bullet.size[1] or bullet.position[1] > self.screen_size[1]:
self.alien_bullets.remove(bullet)
else:
bullet.draw(self.screen)
# check ship bullets hitting ufos, aliens, or powerups
for bullet in self.ship.bullets:
bullet.move()
for ufo in self.ufos:
if ufo.may_be_shot and bullet.rect.colliderect(ufo.rect):
mask_offset = (bullet.rect[0] - ufo.rect[0], bullet.rect[1] - ufo.rect[1])
if ufo.mask.overlap(bullet.mask, mask_offset):
# ufo killed
self.scores.append(Score(self, ufo.score, str(ufo.score), self.font_score,
(int(bullet.position[0] + bullet.size[0]), int(bullet.position[1] - 20)),
self.score_color, self.background_color))
self.play_sound(ufo.sound_explosion)
self.explosions.append(Explosion(self.explosion_ship_pic, (int(ufo.position[0] + ufo.size[0] / 2),
int(ufo.position[1] + ufo.size[1] / 2)),
self.explosion_ship_pic_grid, self.explosion_ship_freq))
self.ufos.remove(ufo)
bullet.position[1] = -9999
# if player is lucky, create a power up
if random.random() <= self.level_power_up_probability:
pos = (int(ufo.position[0] + ufo.size[0] / 2), int(ufo.position[1] + ufo.size[1] / 2))
self.create_random_powerup(pos)
break
else:
for alien in self.aliens:
if bullet.rect.colliderect(alien.rect):
mask_offset = (bullet.rect[0] - alien.rect[0], bullet.rect[1] - alien.rect[1])
if alien.mask.overlap(bullet.mask, mask_offset):
# check if bullet hit the "damage area", if it is defined
if not alien.hit_area or bullet.rect.colliderect(alien.hit_area.move(alien.position)):
# hit caused damage
alien.hit_nr -= 1
self.play_sound(alien.sound_explosion)
if alien.hit_nr == 0:
# alien killed
self.scores.append(Score(self, alien.score, str(alien.score), self.font_score,
(int(bullet.position[0] + bullet.size[0]), int(bullet.position[1] - 20)),
self.score_color, self.background_color))
if alien.hit_total > 5:
# Boss alien - big explosion
self.explosions.append(Explosion(self.explosion_ship_pic, (int(alien.position[0] + alien.size[0] / 2),
int(alien.position[1] + alien.size[1] / 2)),
self.explosion_ship_pic_grid, self.explosion_ship_freq))
# Boss killed - create TWO powerups
pos = (int(alien.position[0] + alien.size[0] / 2 + random.randint(-50, 50)),
int(alien.position[1] + alien.size[1] / 2) + random.randint(-50, 50))
self.create_random_powerup(pos)
pos = (int(alien.position[0] + alien.size[0] / 2 + random.randint(-50, 50)),
int(alien.position[1] + alien.size[1] / 2) + random.randint(-50, 50))
self.create_random_powerup(pos)
else:
# normal alien
self.explosions.append(Explosion(self.explosion1_pic, (int(alien.position[0] + alien.size[0] / 2),
int(alien.position[1] + alien.size[1] / 2)),
self.explosion1_pic_grid, self.explosion1_freq))
self.aliens.remove(alien)
else:
# hit but not killed
self.scores.append(Score(self, int(alien.score / alien.hit_total), str(int(alien.score / alien.hit_total)), self.font_score,
(int(bullet.position[0] + bullet.size[0]), int(bullet.position[1] - 20)),
self.score_color, self.background_color))
self.explosions.append(Explosion(self.explosion1_pic, (int(bullet.position[0] + bullet.size[0] / 2),
int(bullet.position[1])),
self.explosion1_pic_grid, self.explosion1_freq))
else:
# hit did not cause damage
self.play_sound(self.sound_miss)
# mark bullet for removal
bullet.position[1] = -9999
break
else:
for powerup in self.powerups:
if bullet.rect.colliderect(powerup.rect):
mask_offset = (bullet.rect[0] - powerup.rect[0], bullet.rect[1] - powerup.rect[1])
if powerup.mask.overlap(bullet.mask, mask_offset):
self.play_sound(powerup.sound_award, 0)
# show powerup description as score (0 points)
self.scores.append(Score(self, 0, powerup.desc, self.font_score,
(int(bullet.position[0] - 60), int(bullet.position[1] - 40)),
self.score_color, self.background_color))
self.ship.add_powerup(powerup, self.ship_shoot_freq)
self.powerups.remove(powerup)
# mark bullet for removal
bullet.position[1] = -9999
break
if bullet.position[0] < -bullet.size[0] or bullet.position[0] > self.screen_size[0] \
or bullet.position[1] < -bullet.size[1] or bullet.position[1] > self.screen_size[1]:
self.ship.bullets.remove(bullet)
else:
bullet.draw(self.screen)
    def ship_ops(self, time, prev_time):
        """Draw the ship, manage respawn/game-over state, and age powerups.

        Ship status: 0 = normal, 1 = freshly respawned (blinks), 2 = just hit
        (hidden) — presumably set by Ship.is_hit; confirm against the Ship class.
        """
        if self.ship.status == 0 or (self.ship.status == 1 and (time - self.ship.start_time) % 100 < 50):
            # if status = 0 (normal), draw ship. If status = 1 ("new" ship), draw a blinking ship.
            self.ship.draw(self.screen)
        if self.ship.status != 0 and not self.game_over:
            if self.ship.lives == 0:
                self.game_over = True
            elif time - self.ship.start_time > 2000:
                # after a pause, bring next ship (status: "new") and, after a pause, make it "normal".
                self.ship.status -= 1
                self.ship.start_time = time
        # check ship's powerups' life times
        if len(self.ship.powerups) > 0:
            # iterate backwards so del by index is safe while removing
            for i in range(len(self.ship.powerups) - 1, -1, -1):
                powerup = self.ship.powerups[i]
                # if powerup initially has -1 as life_time (item [1]), it will never end (so not decreased)
                if powerup[1] >= 0 and powerup[1] <= time - prev_time:
                    # remaining life fits inside this frame: expire the powerup
                    self.ship.end_powerup(powerup, self.powerup_data, self.ship_shoot_freq)
                else:
                    if powerup[1] > time - prev_time:
                        # decrease life time, but preserve Double Fire life if using Triple Fire
                        if not (self.ship.bullet_type == 3 and self.powerup_data[powerup[0]][2] == 'Double Shot'):
                            powerup[1] -= time - prev_time
                # NOTE(review): presumably end_powerup marks a consumed powerup
                # with a negative id (item [0]) — confirm against Ship.end_powerup.
                if powerup[0] < 0:
                    del self.ship.powerups[i]
    def alien_ops(self, time, prev_time):
        """Move/draw the alien formation, detect ship contact, and let aliens shoot."""
        if self.freeze_aliens:
            self.level_time += time - prev_time  # this keeps bosses frozen
        for alien in self.aliens:
            if self.freeze_aliens:
                alien.last_move += time - prev_time  # this keeps 'normal' aliens frozen
            else:
                alien.move(time, self.level_time, self.screen_size)
            alien.draw(self.screen)
            # test if alien hits the ship or has passed lower than the ship
            if (alien.rect.colliderect(self.ship.rect) or alien.position[1] >= self.ship.position[1]) and not self.game_over:
                mask_offset = (alien.rect[0] - self.ship.rect[0], alien.rect[1] - self.ship.rect[1])
                if self.ship.mask.overlap(alien.mask, mask_offset):
                    # NOTE(review): bullet_ops calls is_hit(time, self.cheat_mode);
                    # here cheat_mode is omitted — confirm whether that is intended
                    # (all lives are forcibly lost below anyway).
                    self.ship.is_hit(time)
                    self.play_sound(self.ship.sound_explosion)
                    self.ship.lives = 0  # all lives lost if aliens make it!
                    self.explosions.append(Explosion(self.explosion_ship_pic, (int(self.ship.position[0] + self.ship.size[0] / 2),
                                                                               int(self.ship.position[1] + self.ship.size[1] / 2)),
                                                     self.explosion_ship_pic_grid, self.explosion_ship_freq))
            # alien shoots (probability scaled so shoot_freq is roughly ms between shots)
            if not self.game_over and not self.freeze_aliens and random.random() < self.fps / alien.shoot_freq:
                for i in range(np.shape(alien.cannon_pos)[0]):
                    self.alien_bullets.append(Bullet(alien.bullet_pic, alien.position + alien.cannon_pos[i, :], np.array([0.0, 6.5]), np.array([0.0, 0.0])))
def ufo_ops_back(self, time, prev_time):
# add random ufos, but only if aliens left (or not in game mode)
if random.random() < self.level_ufo_probability / 50 and (self.game_mode != 'game' or (len(self.aliens) > 0 and not self.freeze_aliens)):
# add a ufo
speed = 0.35 + (self.level ** 0.5) * random.random() * 0.1
from_side = random.randint(0, 1) * 2 - 1
self.ufos.append(Ufo(self.alien_ufo_pic, self.bullet_ufo1_pic, self.alien_ufo_sound_explosion, 150, speed, from_side, self.screen_size))
# move and draw ufo
for ufo in self.ufos:
last_phase = ufo.phase + 0
if self.freeze_aliens:
# slow down ufos, no total freeze
freeze_speed = 0.2
else:
freeze_speed = 1.0
ufo.move(time, freeze_speed, self.screen_size) # moving updates ufo.phase
if ufo.phase >= ufo.turning_point + 75 / 2 and last_phase < ufo.turning_point + 75 / 2:
# ufo crossed aliens (in z coordinate) in mid turn, shoot
for i in range(-2, 3):
self.alien_bullets.append(Bullet(ufo.bullet_pic,
(ufo.position[0] + ufo.size[0] / 2 - ufo.bullet_size[0] / 2, ufo.position[1] + ufo.size[1]),
np.array([i, (2 - abs(i)) * 0.3]), np.array([0.0, 0.2])))
if ufo.phase > ufo.turning_point + 75 and (np.min(ufo.position + ufo.size) < 0 or np.max(ufo.position - self.screen_size)) > 0:
# ufo outside of screen: remove
self.ufos.remove(ufo)
else:
if ufo.phase < ufo.turning_point + 75 / 2:
# ufo behind the aliens
ufo.draw(self.screen)
def ufo_ops_front(self, time):
# draw ufo when in front of the aliens
for ufo in self.ufos:
if ufo.phase >= ufo.turning_point + 75 / 2:
ufo.draw(self.screen)
def score_ops(self, time):
# show scores
for score in self.scores:
if time - score.start_time > score.show_time:
self.scores.remove(score)
else:
score.move()
score.draw(self.screen, time)
def explosion_ops(self):
for explosion in self.explosions:
explosion.draw(self.screen)
explosion.freq_cnt += 1
if explosion.freq_cnt == explosion.freq:
explosion.freq_cnt = 0
explosion.phase[0] += 1
if explosion.phase[0] == explosion.grid[0]:
explosion.phase[0] = 0
explosion.phase[1] += 1
if explosion.phase[1] == explosion.grid[1]:
self.explosions.remove(explosion)
def powerup_ops(self, time):
# show powerups. These are powerups on screen (not ship's powerups).
for powerup in self.powerups:
if time - powerup.start_time > powerup.show_time:
self.powerups.remove(powerup)
else:
powerup.draw(self.screen, time)
    def info_ops(self):
        """Draw the HUD: the ship's active powerups with life-time bars, the
        animated score counter, the level number, and the remaining lives."""
        # show powerups:
        y = self.score_pic_position[1]
        x = 220
        max_w = 260  # maximum width of the life-time bar, in pixels
        for powerup in self.ship.powerups: # powerup is an array of [pid, life_time], powerup_data a list of (pic, life_time, desc, text_pic)
            pid = powerup[0]
            life_time = powerup[1]
            self.screen.blit(self.powerup_data[pid][0], (x, y)) # draw powerup pic
            self.screen.blit(self.powerup_data[pid][3], (x + self.powerup_data[pid][0].get_size()[0] + 6, y + 4)) # add powerup description
            # add bar showing life time (negative life time = eternal: full bar)
            if life_time < 0:
                p_width = max_w
            else:
                p_width = min(max_w, max_w * life_time // self.powerup_data[pid][1])
            # bar color shifts from blue (full) towards red (about to expire)
            color = (int((max_w - p_width) * 200 / max_w), 0, int(p_width * 200 / max_w))
            pygame.draw.rect(self.screen, color, (x + self.powerup_data[pid][0].get_size()[0] + self.powerup_desc_width + 12, y + 4, p_width, 12))
            y += 20
        # show game score: shown score counts up towards the real score in
        # progressively smaller steps, so big gains "spin" quickly at first
        if self.show_score != self.game_score:
            if self.show_score < self.game_score - 500000:
                self.show_score += 10000
            elif self.show_score < self.game_score - 50000:
                self.show_score += 1000
            elif self.show_score < self.game_score - 5000:
                self.show_score += 100
            elif self.show_score < self.game_score - 1000:
                self.show_score += 20
            elif self.show_score < self.game_score - 100:
                self.show_score += 5
            else:
                self.show_score += 1
            # create new score pic only when needed
            self.score_pic = self.font_game_score.render(str(self.show_score), True, self.score_color, self.background_color)
            self.score_pic.set_colorkey(self.background_color)
        self.screen.blit(self.score_pic, self.score_pic_position)
        # show level nr
        self.screen.blit(self.level_pic, self.level_pic_position)
        # show lives
        if self.ship.status == 2:
            lives = self.ship.lives + 1 # ship is dead but not yet using the backup ships
        else:
            lives = self.ship.lives
        # draw one small ship per remaining backup life, right-to-left from the level pic
        for i in range(1, lives):
            self.screen.blit(self.ship.pic_small, (self.level_pic_position[0] - 20 - int(1.2 * i * self.ship.pic_small_size[0]), self.level_pic_position[1]))
def game_over_ops(self, time):
if self.game_over_time == 0:
self.game_over_time = time
self.play_sound(self.sound_defeat, 0)
elif time > self.game_over_time + 2 * self.switch_time_ms and len(self.ship.bullets) == 0 and self.show_score == self.game_score:
self.game_mode = 'go to start page'
self.switch_time = time
self.star_move = np.array([0.0, 0.0, -0.5])
self.level_ufo_probability = 0.05
self.freeze_aliens = False
return
star_move_frac = max(min((time - (self.game_over_time + self.switch_time_ms)) / self.switch_time_ms, 1.0), 0.0)
self.star_move = np.array([0.0,
125.0 * np.cos(star_move_frac * np.pi / 2.0),
-0.5 * np.sin(star_move_frac * np.pi / 2.0)])
position = ((self.screen_size - self.game_over_pic.get_size()) / 2).astype(np.int)
self.screen.blit(self.game_over_pic, position)
# ---------------- new level setup ----------------
    def setup_level(self, time):
        """Set up a new level: render the level-number pic and create the aliens.

        Boss levels (y_move, i.e. level_data item [12], equal to 0) get one
        multi-hit boss centered on screen; other levels get a matrix of aliens.
        When levels are re-used (self.level exceeds the number of defined
        levels), aliens from two different levels are mixed in a checkerboard
        pattern, and hit counts / speeds / shot rates are scaled by level nr.
        """
        self.level_pic = self.font_game_score.render('Level ' + str(self.level), True, self.score_color, self.background_color)
        self.level_pic.set_colorkey(self.background_color)
        self.level_pic_position = (self.screen_size[0] - self.level_pic.get_size()[0] - 10, 10)
        self.level_time = time
        # create aliens
        self.aliens.clear()
        # find maximum "non-boss" level
        # NOTE(review): range stops before index 0, so lvl_max stays unbound if
        # only the first level is non-boss — confirm level_data always has a
        # non-boss level above index 0
        for i in range(len(self.level_data) - 1, 0, -1):
            if self.level_data[i][12] > 0:
                lvl_max = i
                break
        # set level
        lvl = (self.level - 1) % len(self.level_data) # re-use all levels
        lvl_data = self.level_data[lvl]
        self.level_ufo_probability = 1.0 - lvl_data[15] ** (self.level / 2)
        self.level_new_life = lvl_data[16]
        move_delay = lvl_data[13] / (self.level ** 0.5)
        if lvl_data[6] == 1:
            hit_nr = 1
        else:
            hit_nr = int(lvl_data[6] * (self.level / 3) ** 0.5) # nr of hits required to kill
        if lvl_data[12] == 0:
            # y_move = 0: boss level
            shoot_freq = lvl_data[14] // ((self.level / 3) ** 0.3) # the bigger the less shots
            pos = ((self.screen_size - np.array(lvl_data[0].get_size())) / 2).astype(np.int16)
            self.aliens.append(Alien(
                lvl_data[0], lvl_data[1], lvl_data[2], lvl_data[3], lvl_data[4],
                lvl_data[5] * hit_nr, hit_nr, pos, lvl_data[11], lvl_data[12], 0, move_delay, 0, 1, shoot_freq
            ))
        else:
            # group of aliens
            # for "re-used" levels, use aliens from two different levels. If first round, lvl2 = lvl
            lvl2 = (lvl + int((self.level - 1) / len(self.level_data))) % len(self.level_data)
            if self.level_data[lvl2][12] == 0:
                lvl2 = (lvl2 + 1) % len(self.level_data) # boss level --> use the next level
            lvl2_data = self.level_data[lvl2]
            # pick number of aliens and size multipliers from maximum level, if re-using levels
            if lvl2 == lvl:
                lvl_max_data = self.level_data[lvl]
            else:
                lvl_max_data = self.level_data[lvl_max]
            shoot_freq = lvl_data[14] // (self.level ** 0.3) # the bigger the less shots
            # use the larger of the two alien pics for grid spacing
            alien_size = np.maximum(np.array(lvl_data[0].get_size()), np.array(lvl2_data[0].get_size()))
            x_size = int(alien_size[0] * lvl_max_data[9])
            x_times = int(((self.screen_size[0] - alien_size[0]) - (lvl_max_data[7] - 1) * x_size - 10) / lvl_data[11])
            for y in range(lvl_max_data[8]):
                # make sure there's some room between the ship and the aliens
                if 90 + (y + 4) * alien_size[1] * lvl_max_data[10] < self.ship.position[1]:
                    for x in range(lvl_max_data[7]):
                        pos = np.array([10 + x * x_size, 90 + y * alien_size[1] * lvl_max_data[10]]).astype(np.int16)
                        # checkerboard: alternate between the two level's aliens
                        if (y + x) % 2 == 0:
                            self.aliens.append(Alien(
                                lvl_data[0], lvl_data[1], lvl_data[2], lvl_data[3], lvl_data[4],
                                lvl_data[5] * hit_nr, hit_nr, pos, lvl_data[11], lvl_data[12], x_times, move_delay, (y + x) * 30, 1, shoot_freq
                            ))
                        else:
                            self.aliens.append(Alien(
                                lvl2_data[0], lvl2_data[1], lvl2_data[2], lvl2_data[3], lvl2_data[4],
                                lvl2_data[5] * hit_nr, hit_nr, pos, lvl_data[11], lvl_data[12], x_times, move_delay, (y + x) * 30, 1, shoot_freq
                            ))
def setup_level_data(self):
# setup all levels
# data structure for a level:
# [0, 1] alien picture, alien_boss1_hit_area (for bosses)
# [2, 3] alien bullet picture, cannon positions (array)
# [4] alien explosion sound
# [5, 6] alien score per hit, hit nr (hits required before killed - will be adjusted by level nr if > 1)
# [7 - 12] alien x_nr, y_nr (number of aliens in a matrix), x_size, y_size (multipliers defining distances), x_move, y_move (y_move = 0 for boss)
# [13, 14] alien move delay (bigger = slower), shooting frequency (bigger = less shots). Will be adjusted by level nr
# [15, 16] 1 - ufo probability (ill be adjusted by level nr), add life if level completed (True/False)
# level 1
self.level_data.append((
self.alien1_pic, None,
self.bullet_alien1_pic, np.array([[self.alien1_pic.get_size()[0] // 2 - 1, self.alien1_pic.get_size()[1]]], dtype=np.float),
self.alien1_sound_explosion,
25, 1,
6, 5, 1.8, 1.5, 20, 40,
500, 32000,
0.93, False
))
# level 2
self.level_data.append((
self.alien2_pic, None,
self.bullet_alien1_pic, np.array([[self.alien2_pic.get_size()[0] // 2 - 1, self.alien2_pic.get_size()[1]]], dtype=np.float),
self.alien1_sound_explosion,
28, 1,
8, 5, 1.6, 1.4, 20, 45,
500, 32000,
0.93, False
))
# level 3
self.level_data.append((
self.alien3_pic, None,
self.bullet_alien1_pic, np.array([[self.alien3_pic.get_size()[0] // 2 - 1, self.alien3_pic.get_size()[1]]], dtype=np.float),
self.alien1_sound_explosion,
30, 1,
7, 6, 1.6, 1.4, 20, 45,
500, 32000,
0.93, False
))
# level 4 (boss)
self.level_data.append((
self.alien_boss2_pic, self.alien_boss2_hit_area,
self.bullet_alien2_pic, self.alien_boss2_cannon_pos,
self.alien_boss_sound_explosion,
20, 12,
0, 0, 0, 0, 0, 0,
1000, 5000,
0.96, False
))
# level 5
self.level_data.append((
self.alien4_pic, None,
self.bullet_alien1_pic, np.array([[self.alien4_pic.get_size()[0] // 2 - 1, self.alien4_pic.get_size()[1]]], dtype=np.float),
self.alien1_sound_explosion,
32, 1,
7, 7, 1.6, 1.3, 20, 50,
500, 32000,
0.93, False
))
# level 6
self.level_data.append((
self.alien5_pic, None,
self.bullet_alien1_pic, np.array([[self.alien5_pic.get_size()[0] // 2 - 1, self.alien5_pic.get_size()[1]]], dtype=np.float),
self.alien1_sound_explosion,
35, 1,
8, 7, 1.5, 1.4, 22, 45,
500, 32000,
0.93, False
))
# level 7
self.level_data.append((
self.alien6_pic, None,
self.bullet_alien1_pic, np.array([[self.alien6_pic.get_size()[0] // 2 - 1, self.alien6_pic.get_size()[1]]], dtype=np.float),
self.alien1_sound_explosion,
38, 1,
8, 8, 1.5, 1.3, 24, 50,
500, 32000,
0.93, False
))
# level 8 (boss)
self.level_data.append((
self.alien_boss1_pic, self.alien_boss1_hit_area,
self.bullet_alien2_pic, self.alien_boss1_cannon_pos,
self.alien_boss_sound_explosion,
25, 12,
0, 0, 0, 0, 0, 0,
1000, 5000,
0.96, True
))
# level 9
self.level_data.append((
self.alien7_pic, None,
self.bullet_alien1_pic, np.array([[self.alien7_pic.get_size()[0] // 2 - 1, self.alien7_pic.get_size()[1]]], dtype=np.float),
self.alien1_sound_explosion,
40, 1,
8, 8, 1.5, 1.4, 25, 50,
500, 32000,
0.93, False
))
# level 10
self.level_data.append((
self.alien8_pic, None,
self.bullet_alien1_pic, np.array([[self.alien8_pic.get_size()[0] // 2 - 1, self.alien8_pic.get_size()[1]]], dtype=np.float),
self.alien1_sound_explosion,
42, 1,
8, 8, 1.5, 1.3, 25, 50,
500, 32000,
0.93, False
))
# level 11
self.level_data.append((
self.alien9_pic, None,
self.bullet_alien1_pic, np.array([[self.alien9_pic.get_size()[0] // 2 - 1, self.alien9_pic.get_size()[1]]], dtype=np.float),
self.alien1_sound_explosion,
44, 1,
8, 8, 1.5, 1.3, 25, 50,
500, 32000,
0.93, False
))
# level 12 (boss)
self.level_data.append((
self.alien_boss3_pic, self.alien_boss3_hit_area,
self.bullet_alien2_pic, self.alien_boss3_cannon_pos,
self.alien_boss_sound_explosion,
32, 12,
0, 0, 0, 0, 0, 0,
1000, 5000,
0.96, False
))
# level 13
self.level_data.append((
self.alien10_pic, None,
self.bullet_alien1_pic, np.array([[self.alien10_pic.get_size()[0] // 2 - 1, self.alien10_pic.get_size()[1]]], dtype=np.float),
self.alien1_sound_explosion,
46, 1,
9, 8, 1.5, 1.4, 25, 50,
500, 32000,
0.93, False
))
# level 14
self.level_data.append((
self.alien11_pic, None,
self.bullet_alien1_pic, np.array([[self.alien11_pic.get_size()[0] // 2 - 1, self.alien11_pic.get_size()[1]]], dtype=np.float),
self.alien1_sound_explosion,
48, 1,
9, 9, 1.5, 1.4, 25, 50,
500, 32000,
0.93, False
))
# level 15
self.level_data.append((
self.alien12_pic, None,
self.bullet_alien1_pic, np.array([[self.alien12_pic.get_size()[0] // 2 - 1, self.alien12_pic.get_size()[1]]], dtype=np.float),
self.alien1_sound_explosion,
50, 1,
9, 9, 1.5, 1.5, 25, 50,
500, 32000,
0.93, False
))
# level 16 (boss)
self.level_data.append((
self.alien_boss4_pic, self.alien_boss4_hit_area,
self.bullet_alien2_pic, self.alien_boss4_cannon_pos,
self.alien_boss_sound_explosion,
35, 12,
0, 0, 0, 0, 0, 0,
1000, 5000,
0.96, True
))
# ---------------- powerup setup ----------------
def setup_powerups(self, pic, font, font_desc):
# set up a list of power ups.
# if pic has green color, it will be changed to a power up specific color
old_color = (0, 255, 0)
powerup_data = []
desc = 'Double Shot'
powerup_data.append(self.setup_powerups_pic(pic.copy(), old_color, (120, 20, 220), desc, desc[:1], 24000, font, font_desc))
desc = 'Triple Shot'
powerup_data.append(self.setup_powerups_pic(pic.copy(), old_color, (120, 20, 20), desc, desc[:1], 15000, font, font_desc))
desc = 'Rapid Fire'
powerup_data.append(self.setup_powerups_pic(pic.copy(), old_color, (20, 20, 120), desc, desc[:1], 40000, font, font_desc))
desc = 'Auto Fire'
powerup_data.append(self.setup_powerups_pic(pic.copy(), old_color, (220, 20, 120), desc, desc[:1], -1, font, font_desc))
desc = 'Freeze Aliens'
powerup_data.append(self.setup_powerups_pic(pic.copy(), old_color, (160, 220, 160), desc, desc[:1], 5000, font, font_desc))
desc = 'Shield'
powerup_data.append(self.setup_powerups_pic(pic.copy(), old_color, (160, 160, 220), desc, desc[:1], 9000, font, font_desc))
return powerup_data
    def setup_powerups_pic(self, new_pic, old_color, new_color, desc, letter, life_time, font, font_desc):
        """Build one powerup definition tuple: (pic, life_time, desc, desc_pic).

        Recolors old_color pixels of new_pic to new_color, stamps the
        powerup's identifying letter in its middle, and renders the
        description text as its own pic. life_time is in ms; -1 = eternal.
        """
        rgb_array = pygame.surfarray.pixels2d(new_pic)
        self.recolor2(rgb_array, old_color, new_color)
        rgb_array = None  # drop the pixel view so the surface is unlocked again
        new_pic.set_colorkey((255, 255, 255))
        if new_color[0] + new_color[1] + new_color[2] > 3 * 128:
            letter_color = (1, 1, 1) # use black if letter background very light colored
        else:
            letter_color = (220, 220, 220)
        pic_letter = font.render(letter, False, letter_color, (0, 0, 0))
        pic_letter.set_colorkey((0, 0, 0))
        pic_desc = font_desc.render(desc, False, (220, 220, 220))
        pic_desc.set_colorkey((0, 0, 0))
        # center the letter on the powerup pic
        new_pic.blit(pic_letter, ((new_pic.get_size()[0] - pic_letter.get_size()[0]) // 2, (new_pic.get_size()[1] - pic_letter.get_size()[1]) // 2))
        return (new_pic, life_time, desc, pic_desc)
# ---------------- setup titles, credits, and instructions screens ----------------
    def setup_titles(self, time):
        """Build the title screen: game title, high score table, and the
        bottom text rows.

        Also stores the bounding rect of the high-score area in
        self.title_hs_rect for the rotation animation. time is used to blink
        the underscore cursor while a new high score name is being typed.
        """
        # setup title screen
        self.screen_titles.fill(self.background_color)
        title_pos = np.array([0, 80])
        title_pos[1] = self.add_text_row(self.screen_titles, self.font_title, 'SPACE', title_pos[1], None, 1.1)
        title_pos[1] = self.add_text_row(self.screen_titles, self.font_title, 'INVADERS', title_pos[1], None, 2.0)
        # high score list titles
        self.title_hs_pos = title_pos.copy()
        title_pos[1] = self.add_text_row(self.screen_titles, self.font_highscorelist, '- HIGH SCORES -', title_pos[1], None, 1.5)
        title_height = (title_pos[1] - self.title_hs_pos[1]) // 1.5
        # store the left, right and right position of player, score and level
        # (negative x = right-align at abs(x), see add_text_row)
        hs_pos_x = np.array([(self.screen_size[0] - 600) // 2,
                             -((self.screen_size[0] - 600) // 2 + 480),
                             -((self.screen_size[0] - 600) // 2 + 600)])
        title_pos[1] = self.add_text_row(self.screen_titles, self.font_highscorelist, 'PLAYER', title_pos[1], hs_pos_x[0], 0.0)
        title_pos[1] = self.add_text_row(self.screen_titles, self.font_highscorelist, 'SCORE', title_pos[1], hs_pos_x[1], 0.0)
        title_pos[1] = self.add_text_row(self.screen_titles, self.font_highscorelist, 'LVL', title_pos[1], hs_pos_x[2], 1.5)
        # texts at the bottom
        self.title_hs_pos_end = np.array([0, self.screen_size[1] - title_height * 3])
        title_pos[1] = self.title_hs_pos_end[1]
        if self.game_mode == 'new high score':
            title_pos[1] = self.add_text_row(self.screen_titles, self.font_highscorelist, 'CONGRATULATIONS! NEW HIGH SCORE', title_pos[1], None, 1.2)
            title_pos[1] = self.add_text_row(self.screen_titles, self.font_highscorelist, 'ENTER YOUR NAME AND PRESS ENTER', title_pos[1], None, 1.2)
        else:
            title_pos[1] = self.add_text_row(self.screen_titles, self.font_highscorelist, 'PRESS SPACE FOR A NEW GAME', title_pos[1], None, 1.2)
            title_pos[1] = self.add_text_row(self.screen_titles, self.font_highscorelist, 'ESC TO EXIT I FOR INSTRUCTIONS', title_pos[1], None, 1.2)
        # add high scores at the middle
        title_pos[1] = self.title_hs_pos[1] + 1.5 * title_height * 2.0
        for i in range(0, self.highscore_nr):
            hs = self.highscores[i]
            if hs[1] > 0:
                # if getting high score name, add a blinking underscore
                text = hs[0]
                if self.game_mode == 'new high score' and i == self.latest_hs_nr and len(hs[0]) < self.highscore_name_length and time % 1000 > 500:
                    text = text + '_'
                title_pos[1] = self.add_text_row(self.screen_titles, self.font_highscorelist, text, title_pos[1], hs_pos_x[0], 0.0)
                title_pos[1] = self.add_text_row(self.screen_titles, self.font_highscorelist, str(hs[1]), title_pos[1], hs_pos_x[1], 0.0)
                title_pos[1] = self.add_text_row(self.screen_titles, self.font_highscorelist, str(hs[2]), title_pos[1], hs_pos_x[2], 1.1)
                # test for overlap with end texts
                if title_pos[1] >= self.title_hs_pos_end[1]:
                    break
        # bounding rect of actually drawn pixels within the high-score band
        subsurf = self.screen_titles.subsurface((0, self.title_hs_pos[1], self.screen_size[0] - 1, self.title_hs_pos_end[1] - self.title_hs_pos[1] - 1))
        self.title_hs_rect = np.array([
            subsurf.get_bounding_rect()[0] + subsurf.get_offset()[0],
            subsurf.get_bounding_rect()[1] + subsurf.get_offset()[1],
            subsurf.get_bounding_rect()[2],
            subsurf.get_bounding_rect()[3]
            ])
def setup_credits(self):
# setup credits screen. Take a copy of titles screen and clear the middle for credits.
self.screen_credits = self.screen_titles.copy()
self.screen_credits.fill(self.background_color,
(0, self.title_hs_pos[1], self.screen_size[0] - 1, self.title_hs_pos_end[1] - self.title_hs_pos[1] - 1))
title_pos = self.title_hs_pos.copy()
title_pos[1] = self.add_text_row(self.screen_credits, self.font_highscorelist, '- CREDITS -', title_pos[1], None, 3.0)
title_pos[1] = self.add_text_row(self.screen_credits, self.font_highscorelist, 'OWNER/PROGRAMMER', title_pos[1], None, 1.5)
title_pos[1] = self.add_text_row(self.screen_credits, self.font_highscorelist, '<NAME>', title_pos[1], None, 3.0)
title_pos[1] = self.add_text_row(self.screen_credits, self.font_highscorelist, 'PROGRAMMER', title_pos[1], None, 1.5)
title_pos[1] = self.add_text_row(self.screen_credits, self.font_highscorelist, '<NAME>', title_pos[1], None, 3.0)
title_pos[1] = self.add_text_row(self.screen_credits, self.font_highscorelist, 'COPYRIGHT © 2022-2222', title_pos[1], None, 1.1)
subsurf = self.screen_credits.subsurface((0, self.title_hs_pos[1], self.screen_size[0] - 1, self.title_hs_pos_end[1] - self.title_hs_pos[1] - 1))
self.title_cr_rect = np.array([
subsurf.get_bounding_rect()[0] + subsurf.get_offset()[0],
subsurf.get_bounding_rect()[1] + subsurf.get_offset()[1],
subsurf.get_bounding_rect()[2],
subsurf.get_bounding_rect()[3]
])
    def setup_instructions(self):
        """Build the instructions screen and the combined title rect.

        Copies the titles screen, clears its middle band, and renders the
        instruction rows into it. Also computes self.title_rect, the minimum
        rect covering the high scores, credits, and instructions areas, plus
        the z position used for the 3D rotation perspective.
        """
        # setup instructions screen. Take a copy of titles screen and clear the middle for instructions.
        self.screen_instructions = self.screen_titles.copy()
        self.screen_instructions.fill(self.background_color,
                                      (0, self.title_hs_pos[1], self.screen_size[0] - 1, self.title_hs_pos_end[1] - self.title_hs_pos[1] - 1))
        title_pos = self.title_hs_pos.copy()
        title_pos[1] = self.add_text_row(self.screen_instructions, self.font_highscorelist, '- INSTRUCTIONS -', title_pos[1], None, 2.0)
        title_pos[1] = self.add_text_row(self.screen_instructions, self.font_highscorelist, 'ALIEN INVASION IS IMMINENT.', title_pos[1], None, 2.0)
        title_pos[1] = self.add_text_row(self.screen_instructions, self.font_highscorelist, 'YOU AND YOUR THREE SPACESHIPS', title_pos[1], None, 1.1)
        title_pos[1] = self.add_text_row(self.screen_instructions, self.font_highscorelist, 'ARE ALL THAT IS LEFT TO STOP THEM.', title_pos[1], None, 1.5)
        title_pos[1] = self.add_text_row(self.screen_instructions, self.font_highscorelist, 'WATCH OUT FOR UFOS AND THEIR BOMBS!', title_pos[1], None, 1.1)
        title_pos[1] = self.add_text_row(self.screen_instructions, self.font_highscorelist, 'UFOS CAN ONLY BE SHOT IN THE MIDDLE', title_pos[1], None, 1.1)
        title_pos[1] = self.add_text_row(self.screen_instructions, self.font_highscorelist, 'OF THEIR ATTACK PATTERN. THEY', title_pos[1], None, 1.1)
        title_pos[1] = self.add_text_row(self.screen_instructions, self.font_highscorelist, 'MAY LEAVE POWER-UPS IF SHOT DOWN.', title_pos[1], None, 1.5)
        title_pos[1] = self.add_text_row(self.screen_instructions, self.font_highscorelist, 'USE LEFT AND RIGHT ARROW KEYS', title_pos[1], None, 1.1)
        title_pos[1] = self.add_text_row(self.screen_instructions, self.font_highscorelist, '(OR A AND D KEYS) TO MOVE', title_pos[1], None, 1.1)
        title_pos[1] = self.add_text_row(self.screen_instructions, self.font_highscorelist, 'AND ENTER KEY TO FIRE.', title_pos[1], None, 1.1)
        # bounding rect of actually drawn pixels within the instructions band
        subsurf = self.screen_instructions.subsurface((0, self.title_hs_pos[1], self.screen_size[0] - 1, self.title_hs_pos_end[1] - self.title_hs_pos[1] - 1))
        self.title_ins_rect = np.array([
            subsurf.get_bounding_rect()[0] + subsurf.get_offset()[0],
            subsurf.get_bounding_rect()[1] + subsurf.get_offset()[1],
            subsurf.get_bounding_rect()[2],
            subsurf.get_bounding_rect()[3]
            ])
        # get the minimum Rect which covers all high scores, credits, and instructions.
        self.title_rect = np.array([
            min(self.title_hs_rect[0], self.title_ins_rect[0], self.title_cr_rect[0]),
            min(self.title_hs_rect[1], self.title_ins_rect[1], self.title_cr_rect[1]),
            max(self.title_hs_rect[0] + self.title_hs_rect[2], self.title_ins_rect[0] + self.title_ins_rect[2], self.title_cr_rect[0] + self.title_cr_rect[2]) \
            - min(self.title_hs_rect[0], self.title_ins_rect[0], self.title_cr_rect[0]),
            max(self.title_hs_rect[1] + self.title_hs_rect[3], self.title_ins_rect[1] + self.title_ins_rect[3], self.title_cr_rect[1] + self.title_cr_rect[3]) \
            - min(self.title_hs_rect[1], self.title_ins_rect[1], self.title_cr_rect[1])
            ])
        # set z_pos of titles to control the rotation perspective
        self.title_z_pos = max(self.title_rect[3] * 3, 1.5 * (self.title_rect[3] / 2) / (self.screen_size[0] / self.title_rect[2] - 1))
def add_text_row(self, screen, font, text, y, x=None, spacing=1.5):
# add text to titles/instructions. If x is None then center text, if x<0 then right align at abs(x). Returns next y.
f_screen = font.render(text, True, self.title_color, self.background_color)
f_size = f_screen.get_size()
if x is None:
pos = ((self.screen_size[0] - f_size[0]) // 2, y) # center
elif x >= 0:
pos = (x, y) # left align
else:
pos = (-x - f_size[0], y) # right align at abs(x)
screen.blit(f_screen, pos)
return int(y + f_size[1] * spacing)
def copy_titles(self):
if self.angle != 0:
self.screen.blit(self.screen_info, (0, 0))
elif self.info_screen == 'titles':
self.screen.blit(self.screen_titles, (0, 0))
elif self.info_screen == 'credits':
self.screen.blit(self.screen_credits, (0, 0))
else:
self.screen.blit(self.screen_instructions, (0, 0))
    def rotate_titles(self, time):
        """Animate the 3D flip between the high scores, credits, and
        instructions panels.

        The panel rotates around the horizontal axis; at 90 degrees the source
        image is swapped and rotation continues from -90 degrees. The rotated
        image is drawn into self.screen_info using direct numpy pixel-array
        mapping with a simple perspective projection (controlled by
        self.title_z_pos). Returns early when no rotation is in progress.
        """
        # rotates between high scores and instructions in 3D but only around the horizontal axis.
        # first check if rotation required.
        if time > self.credits_time + 20000 and self.angle_add == 0 and self.info_screen == 'credits':
            self.angle_add = 2
            self.info_screen_next = 'titles'
            self.credits_time = time
        if time > self.credits_time + 15000 and self.angle_add == 0 and self.info_screen == 'titles':
            self.angle_add = 2
            self.info_screen_next = 'credits'
        if self.angle_add != 0:
            self.angle += self.angle_add
            if self.angle == 0:
                self.angle_add = 0  # rotation finished
            elif self.angle == 90:
                # rotated 90 degrees - switch source image and continue from -90 degrees
                self.angle = -90
                if self.info_screen == 'titles':
                    self.info_screen = self.info_screen_next
                else:
                    self.info_screen = 'titles'
        else:
            return # no rotation
        # clear the band and grab the pixel array of the current source panel
        self.screen_info.fill(self.background_color, (0, self.title_rect[1], self.screen_size[0], self.title_rect[3]))
        if self.info_screen == 'titles':
            while self.screen_titles.get_locked():
                self.screen_titles.unlock()
            rgb_array_src = pygame.surfarray.pixels2d(self.screen_titles.subsurface(self.title_rect))
        elif self.info_screen == 'credits':
            while self.screen_credits.get_locked():
                self.screen_credits.unlock()
            rgb_array_src = pygame.surfarray.pixels2d(self.screen_credits.subsurface(self.title_rect))
        else:
            while self.screen_instructions.get_locked():
                self.screen_instructions.unlock()
            rgb_array_src = pygame.surfarray.pixels2d(self.screen_instructions.subsurface(self.title_rect))
        rect_mid = self.title_rect[2:4] // 2
        sa, ca = np.sin(self.angle * np.pi / 180), np.cos(self.angle * np.pi / 180) # sin and cos of angle
        y_top = int(-rect_mid[1] * ca)
        z_top = -rect_mid[1] * sa
        # yr = range of y coordinates covered by destination
        # y_mapr = range of y coordinates mapped to source data
        # zr = range of z coordinates covered by destination (one z for each y in yr)
        # z_multip = multiplier used for getting x coordinate mapping for each z in zr
        if y_top < 0:
            yr = np.arange(y_top, -y_top, dtype=np.int16)
        elif y_top > 0:
            yr = np.arange(y_top - 1, -y_top - 1, -1, dtype=np.int16)
        else:
            return # exit if no rows to draw
        y_num = np.shape(yr)[0]
        y_mapr = np.linspace(0, self.title_rect[3], y_num, False).astype(np.int16)
        zr = np.linspace(z_top, -z_top, y_num)
        z_multip = (zr + self.title_z_pos) / self.title_z_pos
        # destination must be wider than source due to perspective - when zr is negative (closer to viewer). Pick m = maximum multiplier for x.
        m = 1.0 / min(z_multip[0], z_multip[-1])
        # define destination subsurface, wider but less high than source data (self.title_rect)
        dst_rect = np.array([
            int(max(self.title_rect[0] - (m - 1.0) * rect_mid[0], 0)),
            self.title_rect[1] + self.title_rect[3] // 2 - abs(y_top),
            int(min(self.title_rect[2] + 2.0 * (m - 1.0) * rect_mid[0], self.screen_size[0])),
            y_num
            ])
        while self.screen_info.get_locked():
            self.screen_info.unlock()
        rgb_array_dst = pygame.surfarray.pixels2d(self.screen_info.subsurface(dst_rect))
        # map x coordinates and y coordinates: for each destination pixel, one pixel in source data
        # x_map
        #   - get range of all x coordinates in destination np.arange(dst_rect[2])
        #   - subtract the middle dst_rect[2] / 2, and convert to "vertical" array [:, None]
        #   - multiply with z multipliers z_multip to apply perspective, and add source data middle x rect_mid[0]
        #   - result: when the line is close to the viewer ie. z_multip << 0, the wider destination is mapped 1:1 to source ("source widens)
        #       e.g. leftmost pixel in destination is mapped to the leftmost pixel in source, some source pixels are used multiple times.
        #     when the line is far from the viewer ie. z_multip >> 0, the wider destination is mapped "over" the source ("source narrows")
        #       e.g. leftmost pixel in destination is mapped left to the leftmost pixel in source: x coordinate is negative, and at the right edge >> source width
        # y_map_flat
        #   - make a matrix (the same size as x_map) by multiplying the range of y coordinates (y_mapr) with ones for all x coordinates (dst_rect[2])
        #   - filter out the destination coordinates outside of destination surface: [(x_map >= 0) & (x_map < self.title_rect[2])]
        #   - flatten the range
        # x_map_flat
        #   - filter as y_map_flat
        #   - flatten the range
        x_map = ((np.arange(dst_rect[2]) - (dst_rect[2] / 2))[:, None] * z_multip + rect_mid[0]).astype(np.int16)
        y_map_flat = ((y_mapr * np.ones((dst_rect[2]), dtype=np.int16)[:, None])[(x_map >= 0) & (x_map < self.title_rect[2])]).flatten()
        x_map_flat = (x_map[(x_map >= 0) & (x_map < self.title_rect[2])]).flatten()
        # draw all pixels of destination range but filtered as x_map_flat and y_map_flat, by picking the mapped image coordinates from source data
        rgb_array_dst[(x_map >= 0) & (x_map < self.title_rect[2])] = rgb_array_src[x_map_flat, y_map_flat]
# ---------------- handle high scores ----------------
def read_scores(self):
# read high scores from file
if os.path.isfile(self.highscore_file + '.dat'):
(fileobj, namehint) = self.load_dat(self.highscore_file)
# with open(self.highscore_file, newline='') as f:
# self.highscores = json.load(f)[:self.highscore_nr]
self.highscores = json.load(fileobj)[:self.highscore_nr]
# make sure list is full length for easier handling
for i in range(len(self.highscores), self.highscore_nr + 1):
self.highscores.append(['', 0, 0])
def write_scores(self):
# write high scores to file
enc_data = self.f.encrypt(json.dumps(self.highscores[:self.highscore_nr]).encode())
open(self.highscore_file + '.dat', 'wb').write(enc_data)
def highscore_add(self):
# new high score to be added.
i = 0
# find the spot.
while self.highscores[i][1] >= self.game_score:
i += 1
# make room and update.
if i < self.highscore_nr - 1:
self.highscores[i + 1:] = self.highscores[i:-1]
self.highscores[i] = ['', self.game_score, self.level]
# return the index with blank name
return i
    def highscore_get_name(self, time):
        """Consume buffered key presses to build the new high score's name.

        Enter stores the scores and returns to the start page; Backspace
        deletes the last character; other keys are appended if they are on
        the allowed-character list and the name is not yet at its maximum
        length. time is recorded as credits_time when typing finishes.
        """
        # process key presses and build a name
        while len(self.latest_hs_keys) > 0:
            key, unicode = self.latest_hs_keys.pop(0)
            if key == pygame.K_RETURN:
                # typing finished. Store high scores and move on.
                self.write_scores()
                self.latest_hs_keys.clear()
                self.game_mode = 'start page'
                self.credits_time = time
            elif key == pygame.K_BACKSPACE:
                if len(self.highscores[self.latest_hs_nr][0]) > 0:
                    # remove last character
                    self.highscores[self.latest_hs_nr][0] = self.highscores[self.latest_hs_nr][0][:-1]
            elif (((unicode >= 'A' and unicode <= 'Z')
                   or (unicode >= 'a' and unicode <= 'z')
                   or (unicode >= '0' and unicode <= '9')
                   or unicode in 'åäöÅÄÖ-_§!#$%&/()=+?\*.:<>|')
                  and len(self.highscores[self.latest_hs_nr][0]) < self.highscore_name_length):
                # add character, if on allowed characters list and name not too long
                self.highscores[self.latest_hs_nr][0] = self.highscores[self.latest_hs_nr][0] + unicode
# ---------------- 3D stars background ----------------
    def move_stars(self, time, prev_time):
        """Move the 3D star field for one frame (vectorized over all stars).

        self.stars is an (nr_stars, 3) array of (x, y, z) positions centered
        on mid_screen; self.star_move is the (x, y, z) velocity. Stars that
        leave the valid volume are respawned: half along a random vertical
        edge, half along a random horizontal edge, or at maximum depth when
        they get too close or drift off screen.
        """
        # move stars in X,Y depending on their Z coordinate - the closer the faster / bigger move. Hence divide star_move X & Y by star Z
        self.stars += (time - prev_time) * self.star_move / np.hstack((self.stars[:, 2:3], self.stars[:, 2:3], np.ones((self.nr_stars, 1))))
        # return stars outside of X, Y range to the other edge. Here only out of Y down is needed.
        # self.stars[:, 0][self.stars[:, 0] < -self.mid_screen[0]] += self.screen_size[0] - 2
        # self.stars[:, 0][self.stars[:, 0] > self.mid_screen[0] - 2] -= self.screen_size[0] - 2
        # self.stars[:, 1][self.stars[:, 1] < -self.mid_screen[1]] += self.screen_size[1] - 2
        self.stars[:, 1][self.stars[:, 1] > self.mid_screen[1] - 2] -= self.screen_size[1] - 2
        # move stars using Z coordinate and Z move
        if self.star_move[2] != 0.0:
            self.stars[:, 0:2] *= self.stars[:, 2:3] / (self.stars[:, 2:3] + (time - prev_time) * self.star_move[2])
        # if outside of screen, normally replace with a new random star at a random X, Y edge and random Z
        nr_half = self.nr_stars // 2
        # first half: vertical edge
        self.stars[0:nr_half, :][(self.stars[0:nr_half, 2] > self.z_range[1])] = np.hstack((
            np.random.randint(0, 2, (np.shape(self.stars[0:nr_half, :][(self.stars[0:nr_half, 2] > self.z_range[1])])[0], 1)) * (self.screen_size[0] - 2) - self.mid_screen[0],
            np.random.rand(np.shape(self.stars[0:nr_half, :][(self.stars[0:nr_half, 2] > self.z_range[1])])[0], 1) * (self.screen_size[1] - 2) - self.mid_screen[1],
            np.random.rand(np.shape(self.stars[0:nr_half, :][(self.stars[0:nr_half, 2] > self.z_range[1])])[0], 1) * (self.z_range[1] - self.z_range[0]) + self.z_range[0]
            ))
        # second half: horizontal edge
        self.stars[nr_half:, :][(self.stars[nr_half:, 2] > self.z_range[1])] = np.hstack((
            np.random.rand(np.shape(self.stars[nr_half:, :][(self.stars[nr_half:, 2] > self.z_range[1])])[0], 1) * (self.screen_size[0] - 2) - self.mid_screen[0],
            np.random.randint(0, 2, (np.shape(self.stars[nr_half:, :][(self.stars[nr_half:, 2] > self.z_range[1])])[0], 1)) * (self.screen_size[1] - 2) - self.mid_screen[1],
            np.random.rand(np.shape(self.stars[nr_half:, :][(self.stars[nr_half:, 2] > self.z_range[1])])[0], 1) * (self.z_range[1] - self.z_range[0]) + self.z_range[0]
            ))
        # if Z too close OR X, Y out of bounds due to Z move, replace with a new random star at maximum Z
        self.stars[(self.stars[:, 2] < self.z_range[0]) | (abs(self.stars[:, 0] + 1) > self.mid_screen[0] - 1) | (abs(self.stars[:, 1] + 1) > self.mid_screen[1] - 1)] \
            = np.random.rand(np.shape(self.stars[(self.stars[:, 2] < self.z_range[0]) | (abs(self.stars[:, 0] + 1) > self.mid_screen[0] - 1) | (abs(self.stars[:, 1] + 1) > self.mid_screen[1] - 1)])[0], 3) \
            * np.array([self.screen_size[0] - 2, self.screen_size[1] - 2, 0]) + np.array([-self.mid_screen[0], -self.mid_screen[1], self.z_range[1]])
def plot_stars(self):
    # Draw every star as a pixel directly into the screen's RGB array,
    # brighter (and with extra neighboring pixels) the closer it is.
    while self.screen.get_locked():
        self.screen.unlock()
    rgb_array = pygame.surfarray.pixels3d(self.screen)
    # define color as a function of distance
    c_shades = np.array([0.8, 0.8, 1.0])  # percentage of maximum R, G, B color used to tilt to Blue
    # brightness scales linearly from 55 (far) towards 255 (near) based on z
    colors = (c_shades * ((1.0 - self.stars[:, 2:3] / (self.z_range[1] - self.z_range[0])) * 200 + 55)).astype(np.uint8)
    stars_int = (self.stars[:, 0:2]).astype(np.int16)
    # star X, Y are relative to the screen center, hence the mid_screen shift
    rgb_array[(stars_int[:, 0] + self.mid_screen[0]), (stars_int[:, 1] + self.mid_screen[1]), 0:3] = colors
    # add additional pixels to those which are closest (color is above a threshold)
    rgb_array[(stars_int[:, 0][colors[:, 2] > 130] + self.mid_screen[0] + 1),
              (stars_int[:, 1][colors[:, 2] > 130] + self.mid_screen[1]), 0:3] = colors[colors[:, 2] > 130]
    rgb_array[(stars_int[:, 0][colors[:, 2] > 180] + self.mid_screen[0]),
              (stars_int[:, 1][colors[:, 2] > 180] + self.mid_screen[1] + 1), 0:3] = colors[colors[:, 2] > 180]
    rgb_array[(stars_int[:, 0][colors[:, 2] > 220] + self.mid_screen[0] + 1),
              (stars_int[:, 1][colors[:, 2] > 220] + self.mid_screen[1] + 1), 0:3] = colors[colors[:, 2] > 220]
# ---------------- small help functions ----------------
def play_sound(self, sound, channel=None):
    # Play a sound either on the explicitly requested mixer channel or on the
    # next channel in rotation (channels are cycled, wrapping back to 1, so
    # simultaneous sounds do not cut each other off).
    if channel is not None:
        out_channel = pygame.mixer.Channel(channel)
    else:
        out_channel = pygame.mixer.Channel(self.channel)
        # advance the rotation for the next caller
        self.channel += 1
        if self.channel == self.nr_channels:
            self.channel = 1
    out_channel.play(sound)
def create_random_powerup(self, position):
    # Spawn a randomly chosen powerup at the given position, rerolling while
    # the picked powerup is already active on the ship with a negative
    # (i.e. eternal) life time.
    while True:
        pid = random.randint(0, len(self.powerup_data) - 1)  # randomly pick power up
        already_eternal = any(p[0] == pid and p[1] < 0 for p in self.ship.powerups)
        if not already_eternal:
            break
    self.powerups.append(PowerUp(pid, self.powerup_data, self.powerup_sound, position))
def pic_colorkey(self, pic, color):
    # gives pic a colorkey and makes lighter (if light color) or darker (if dark color) shades equal to colorkey (important for some jpgs)
    pic.set_colorkey(color)
    if color != (0, 0, 0) and color != (255, 255, 255):
        # process the image only if color not totally black or white
        pic_array = pygame.surfarray.pixels3d(pic)
        if color[0] + color[1] + color[2] <= 48 * 3:
            # dark colorkey: clamp every pixel at or below the key (per channel) to the exact key color
            pic_array[:, :, 0:3][(pic_array[:, :, 0] <= color[0]) & (pic_array[:, :, 1] <= color[1]) & (pic_array[:, :, 2] <= color[2])] = \
                np.array([color[0], color[1], color[2]], dtype=np.uint8)
        elif color[0] + color[1] + color[2] >= 208 * 3:
            # light colorkey: clamp every pixel at or above the key (per channel) to the exact key color
            pic_array[:, :, 0:3][(pic_array[:, :, 0] >= color[0]) & (pic_array[:, :, 1] >= color[1]) & (pic_array[:, :, 2] >= color[2])] = \
                np.array([color[0], color[1], color[2]], dtype=np.uint8)
def adjust_pic(self, image, offset, size):
    # Adjust a picture by moving it (offset) and fitting it into a new size.
    # Returns a new Surface of the requested size containing the shifted data.
    # first copy the original
    image_copy = image.copy()
    # BUGFIX: np.int was removed in NumPy 1.24 - use the builtin int dtype
    image_size = np.asarray(image.get_size(), dtype=int)
    # create a new one, with desired final size
    image = pygame.Surface(size)
    # positive offsets crop the source image; negative ones shift the blit target
    image_offset = np.maximum(offset, np.zeros(2))
    blit_offset = -np.minimum(offset, np.zeros(2))
    # clip the copied area so it fits both the source and the destination
    blit_size = np.minimum(image_size - image_offset, size - blit_offset)
    # copy original image data back to it
    image.blit(image_copy, blit_offset, (image_offset, blit_size))
    return image
def recolor(self, image, mode):
    # Recolor the image by swapping two of its RGB channels, returning a new
    # surface. mode 'R': swap green/blue, 'G': swap red/blue, 'B': swap red/green.
    rgb_array = pygame.surfarray.pixels3d(image)
    channel_order = {'R': (0, 2, 1), 'G': (2, 1, 0), 'B': (1, 0, 2)}
    if mode in channel_order:
        # fancy indexing reorders the channels into a new (copied) array
        rgb_array = rgb_array[:, :, channel_order[mode]]
    return pygame.surfarray.make_surface(rgb_array)
def recolor2(self, rgb_array, old_color, new_color):
    # Recolor image data in place by overwriting old_color with new_color.
    # The array holds colors packed as single ints (R * 65536 + G * 256 + B);
    # colors are given as (R, G, B) tuples.
    def pack(color):
        return (color[0] << 16) + (color[1] << 8) + color[2]
    rgb_array[rgb_array == pack(old_color)] = pack(new_color)
    return rgb_array
def load_dat(self, filename):
    # load and return a data file and its name hint
    # The '.dat' file on disk is encrypted; self.f decrypts it (presumably a
    # Fernet-style object with a .decrypt method - TODO confirm) and the result
    # is wrapped in an in-memory stream. The name hint is looked up from
    # self.namehints by the last character of the filename.
    enc_data = open(filename + '.dat', 'rb').read()
    dec_data = BytesIO(self.f.decrypt(enc_data))
    return (dec_data, self.namehints[filename[-1:]])
class Ship:
    """
    Player Space Ship

    Holds the ship picture, its bullets, shield and active powerups, plus the
    movement/shooting state. Positions are pixels (top-left corner of the pic).
    """
    def __init__(self, pic, bullet_pic, shield_pic, sound_gun, sound_explosion, screen_size, speed, shoot_freq, lives):
        self.pic = pic
        self.mask = pygame.mask.from_surface(pic)  # pixel mask for collision tests
        self.size = np.asarray(self.pic.get_size())
        # start horizontally centered, 70 pixels above the bottom edge
        self.position = np.array(((screen_size[0] - self.size[0]) / 2, (screen_size[1] - self.size[1] - 70)))
        self.rect = pygame.Rect(self.position, self.size)
        self.bullet_pic = bullet_pic
        self.double_fire_angle = 10.0  # degree angle for double/triple fire
        # pre-rotated bullet pictures for the angled double/triple shots
        self.bullet_pic_left = pygame.transform.rotate(self.bullet_pic, self.double_fire_angle)
        self.bullet_pic_right = pygame.transform.rotate(self.bullet_pic, -self.double_fire_angle)
        # self.bullet_mask = pygame.mask.from_surface(bullet_pic)
        self.bullet_size = np.asarray(self.bullet_pic.get_size())
        self.bullet_left_size = np.asarray(self.bullet_pic_left.get_size())
        self.bullet_right_size = np.asarray(self.bullet_pic_right.get_size())
        self.shield_pic = shield_pic
        self.shield_mask = pygame.mask.from_surface(shield_pic)
        self.shield_size = np.asarray(self.shield_pic.get_size())
        # the shield is centered on the ship, hence the size-difference offset
        self.shield_rect = pygame.Rect(self.position - (self.shield_size - self.size) // 2, self.shield_size)
        self.sound_gun = sound_gun
        self.sound_explosion = sound_explosion
        self.speed = speed
        self.shoot_freq = shoot_freq  # minimum time between shots
        self.last_shot_time = 0
        self.start_time = 0
        self.status = 0  # 0 = normal status; 1 = new ship (momentarily protected), 2 = dead
        self.lives = lives
        self.auto_fire = False    # powerup: keep firing while space is held
        self.shield = False       # powerup: shield currently active
        self.space_down = False   # tracks the space key so one press fires once
        self.bullet_type = 1      # 1 = single, 2 = double, 3 = triple shot
        self.bullets = []
        self.powerups = []  # a list of arrays (pid, life_time) of active powerups
        self.pic_small = self.pic  # pygame.transform.scale(self.pic, (self.size[0] // 2, self.size[1] // 2))
        self.pic_small_size = self.pic_small.get_size()

    def move(self, direction, screen_size):
        # Move horizontally (direction is a signed step factor), clamped to the
        # screen, keeping both collision rects in sync. A dead ship cannot move.
        if self.status in (0, 1):
            self.position[0] += direction * self.speed
            if self.position[0] < 0:
                self.position[0] = 0
            if self.position[0] > screen_size[0] - self.size[0]:
                self.position[0] = screen_size[0] - self.size[0]
            self.rect = pygame.Rect(self.position, self.size)
            self.shield_rect = pygame.Rect(self.position - (self.shield_size - self.size) // 2, self.shield_size)

    def draw(self, screen):
        # Draw the ship and, when active, the shield on top of it.
        screen.blit(self.pic, self.position.astype(np.int16))
        if self.shield:
            screen.blit(self.shield_pic, self.shield_rect)

    def shoot(self, time):
        # Fire bullet(s) if the cooldown passed and space was released since the
        # last shot (or auto fire is on). Returns True when a shot was fired.
        if time > self.last_shot_time + self.shoot_freq and (not self.space_down or self.auto_fire) and self.status in (0, 1):
            self.last_shot_time = time
            self.space_down = True
            # triple shot (3) combines single (1) and double (2)
            if self.bullet_type in (1, 3):
                # straight bullet from the ship's center
                self.bullets.append(Bullet(self.bullet_pic, self.position + np.array([(self.size[0] - self.bullet_size[0]) / 2, -self.bullet_size[1]]),
                                           np.array([0.0, -6.5]), np.array([0.0, 0.0])))
            if self.bullet_type in (2, 3):
                # Double Shot powerup
                # two bullets angled +/- double_fire_angle degrees from vertical
                self.bullets.append(Bullet(self.bullet_pic_left, self.position + np.array([self.size[0] / 2 - self.bullet_left_size[0], -self.bullet_left_size[1]]),
                                           np.array([np.cos((90.0 + self.double_fire_angle) * np.pi / 180) * 6.5,
                                                     np.sin((90.0 + self.double_fire_angle) * np.pi / 180) * -6.5]), np.array([0.0, 0.0])))
                self.bullets.append(Bullet(self.bullet_pic_right, self.position + np.array([self.size[0] / 2, -self.bullet_right_size[1]]),
                                           np.array([np.cos((90.0 - self.double_fire_angle) * np.pi / 180) * 6.5,
                                                     np.sin((90.0 - self.double_fire_angle) * np.pi / 180) * -6.5]), np.array([0.0, 0.0])))
            return True  # shot fired
        else:
            return False  # shot not fired

    def is_hit(self, time, cheat_mode=0):
        # ship is hit
        # cheat modes other than 0 and 1 ignore the hit entirely
        if cheat_mode in (0, 1):
            self.status = 2  # ship marked dead
            self.start_time = time
            self.lives -= 1

    def add_powerup(self, new_powerup, ship_shoot_freq):
        # add a new powerup to the ship, or if already has it, extend its life
        for powerup in self.powerups:
            if powerup[0] == new_powerup.pid:
                # existing powerup - add to life_time only
                powerup[1] += new_powerup.life_time
                break
        else:
            # new powerup (for-else: runs when the loop found no match)
            self.powerups.append(np.array([new_powerup.pid, new_powerup.life_time], dtype=np.int32))
            if new_powerup.desc == 'Rapid Fire':
                self.shoot_freq = ship_shoot_freq // 2
            elif new_powerup.desc == 'Auto Fire':
                self.auto_fire = True
            elif new_powerup.desc == 'Shield':
                self.shield = True
            elif new_powerup.desc[-4:] == 'Shot':
                self.fire_powerup(new_powerup.desc)

    def fire_powerup(self, desc):
        # Upgrade the bullet type; never downgrades an already better type.
        if desc == 'Double Shot' and self.bullet_type < 2:
            self.bullet_type = 2
        elif desc == 'Triple Shot' and self.bullet_type < 3:
            self.bullet_type = 3

    def end_powerup(self, powerup, powerup_data, ship_shoot_freq):
        # remove powerup and its effect
        desc = powerup_data[powerup[0]][2]
        if desc == 'Rapid Fire':
            self.shoot_freq = ship_shoot_freq
        elif desc == 'Auto Fire':
            self.auto_fire = False
        elif desc == 'Shield':
            self.shield = False
        elif desc[-4:] == 'Shot':
            # extra bullets end - check if any other such powerup still active
            self.bullet_type = 1
            for pup in self.powerups:
                if powerup_data[pup[0]][2] != desc and powerup_data[pup[0]][2][-4:] == 'Shot':
                    self.fire_powerup(powerup_data[pup[0]][2])
        # mark for removal
        powerup[0] = -1
class Bullet:
    """
    A single projectile.

    Travels along move_vec, which itself changes by accelerate_vec each step,
    so bullets may fly straight or along curves.
    """
    def __init__(self, pic, start_pos, move_vec, accelerate_vec):
        self.pic = pic
        self.mask = pygame.mask.from_surface(pic)  # pixel mask for collisions
        self.size = self.pic.get_size()
        self.position = start_pos
        self.move_vec = move_vec
        self.accelerate_vec = accelerate_vec
        self.rect = pygame.Rect(self.position, self.size)

    def move(self):
        # Advance one step, apply acceleration, refresh the collision rect.
        self.position += self.move_vec
        self.move_vec += self.accelerate_vec
        self.rect = pygame.Rect(self.position, self.size)

    def draw(self, screen):
        # Blit at integer pixel coordinates.
        screen.blit(self.pic, self.position.astype(np.int16))
class Alien:
    """
    Alien

    A regular alien marches on a grid: x_times sideways steps of x_move pixels
    (one step every move_delay ms), then one y_move step down and a direction
    reversal. A boss alien (x_times == 0) instead floats on a sum-of-sines
    path and shows a life bar when it takes multiple hits.
    """
    def __init__(self, pic, hit_area, bullet_pic, cannon_pos, sound_explosion, score, hit_nr,
                 start_pos, x_move, y_move, x_times, move_delay, start_delay, start_dir, shoot_freq):
        self.pic = pic
        self.mask = pygame.mask.from_surface(pic)
        self.hit_area = hit_area  # hit area is a Rect (within mask) where hit causes damage. If None then not used (all hits cause damage)
        self.bullet_pic = bullet_pic
        self.cannon_pos = cannon_pos
        self.sound_explosion = sound_explosion
        self.score = score
        self.hit_nr = hit_nr      # remaining hits before destruction
        self.hit_total = hit_nr   # total hits needed (drives the life bar)
        self.size = self.pic.get_size()
        self.life_bar_size = np.array([0.8 * self.size[0], 0.04 * self.size[0] + 1]).astype(np.int16)
        self.position = start_pos
        self.x_move = x_move
        self.y_move = y_move
        # four random periods for the boss sine movement components
        self.boss_move = np.random.randint(move_delay * 1.5, move_delay * 4, 4)
        self.x_times = x_times
        self.move_delay = move_delay  # set move_delay == 0 for "boss movement"
        self.direction = start_dir
        self.last_move = pygame.time.get_ticks() + start_delay
        self.x_move_cnt = 0
        self.shoot_freq = shoot_freq
        self.rect = pygame.Rect(self.position, self.size)

    def move(self, time, level_time, screen_size):
        # Advance along the movement pattern (boss float or grid march).
        if self.x_times == 0:
            # boss - move using sine/cosine
            s = np.sin((time - level_time) / self.boss_move)
            # BUGFIX: dtype was np.float, an alias removed in NumPy 1.24;
            # the builtin float is the equivalent dtype.
            self.position = np.array([screen_size[0] * (0.5 + (s[0] + s[1]) * (screen_size[0] - self.size[0] * 1.1)
                                                        / screen_size[0] / 4.0) - (self.size[0] / 2),
                                      screen_size[1] * (0.3 + (s[2] + s[3]) * (screen_size[1] / (0.5 / 0.3) - self.size[1] * 1.1)
                                                        / screen_size[1] / 4.0) - (self.size[1] / 2)], dtype=float)
            self.rect = pygame.Rect(self.position, self.size)
        elif time >= self.last_move + self.move_delay:
            if self.x_move_cnt == self.x_times:
                # end of a sideways run: one step down, then reverse direction
                self.position[1] += self.y_move
                self.x_move_cnt = 0
                self.direction = -self.direction
            else:
                self.position[0] += self.x_move * self.direction
                self.x_move_cnt += 1
            self.last_move += self.move_delay
            self.rect = pygame.Rect(self.position, self.size)

    def draw(self, screen):
        # Draw the alien; multi-hit aliens also get a life bar above them.
        screen.blit(self.pic, self.position.astype(np.int16))
        if self.hit_total > 1:
            # life bar for boss aliens
            pos = self.position + np.array([(self.size[0] - self.life_bar_size[0]) // 2, -self.life_bar_size[1] - 5])
            pygame.draw.rect(screen, (255, 255, 255), (pos - 1, self.life_bar_size + 2), 1)
            pygame.draw.rect(screen, (200, 0, 0), (pos, (self.life_bar_size[0] * self.hit_nr // self.hit_total, self.life_bar_size[1])), 0)
class Ufo:
    """
    Alien ufo

    Contrary to a regular Alien, the ufo has a distance (z coordinate) and can only be shot when at a suitable distance.
    Flight path: enter at one side in the distance, fly across, make a turn
    towards the viewer (only then may it be shot), then retreat out again.
    """
    def __init__(self, pic, bullet_pic, sound_explosion, score, speed, from_side, screen_size):
        self.orig_size = np.array(pic.get_size())
        self.z_pos_start = self.orig_size[0] / 50  # starting z position
        self.size = (self.orig_size / self.z_pos_start).astype(np.int16)  # start size: scaled so the ufo is 50 pixels wide
        # position 3D is relative to screen center
        self.position_3D = np.array([
            from_side * (screen_size[0] + self.size[0]) / 2,  # x start is just outside of screen (either side)
            -screen_size[1] / 2 * (random.random() * 0.2 + 0.05),  # y start is 2.5 to 12.5 % of screen height above the center
            1.0]) * self.z_pos_start
        # perspective projection: divide X, Y by Z and shift to screen coordinates
        self.position = (self.position_3D[0:2] / self.position_3D[2] + screen_size / 2)
        self.orig_pic = pic
        self.pic = pygame.transform.scale(self.orig_pic, self.size)
        self.mask = pygame.mask.from_surface(self.pic)
        self.bullet_pic = bullet_pic
        self.bullet_size = np.array(bullet_pic.get_size())
        self.sound_explosion = sound_explosion
        self.score = score
        self.speed = speed
        self.from_side = from_side  # -1 is left, +1 is right
        self.last_move = pygame.time.get_ticks()
        self.phase = 0  # phase 0 (start) to turning_point to turning_point + 75 to out of screen
        self.turning_point = random.random() * 50 + 1  # turning point between 10 and 60 % of screen width
        self.rect = pygame.Rect(self.position, self.size)
        self.may_be_shot = False

    def move(self, time, freeze_speed, screen_size):
        # Advance the flight phase and update the 3D -> 2D projected position.
        speed_adj = self.speed * freeze_speed * (time - self.last_move) / 20  # adjust speed with time
        self.phase += speed_adj
        self.last_move = time
        if self.phase < self.turning_point:
            # ufo entering in the distance, move sideways
            self.position_3D[0] -= self.from_side * speed_adj * screen_size[0] * self.z_pos_start / 100.0
        elif self.phase < self.turning_point + 75:
            # ufo making a turn and coming closer
            cx = np.cos((self.phase - self.turning_point) * np.pi / 75)  # cx ranges from 1.0 to 0.0 to -1.0
            sz = np.sin((self.phase - self.turning_point) * np.pi / 75)  # sz ranges from 0.0 to 1.0 to 0.0
            self.position_3D[0] -= self.from_side * cx * speed_adj * screen_size[0] * self.z_pos_start / 100.0
            self.position_3D[2] -= sz * 1.3 * self.z_pos_start * speed_adj / 75
            # adjust size and mask as ufo now closer = bigger
            self.size = (self.orig_size / self.position_3D[2]).astype(np.int16)
            if np.min(self.size) > 0:
                self.pic = pygame.transform.scale(self.orig_pic, self.size)
                self.mask = pygame.mask.from_surface(self.pic)
            if self.phase > self.turning_point + 75 / 4 and self.phase < self.turning_point + 75 * 3 / 4:
                # mark ufo as vulnerable when close to turn mid point - may be shot down
                self.may_be_shot = True
        else:
            # after turning, ufo going back to where it came from, Cannot be shot any more.
            self.may_be_shot = False
            self.position_3D[0] += self.from_side * speed_adj * screen_size[0] * self.z_pos_start / 100.0
        # convert 3D position to 2D
        self.position = (self.position_3D[0:2] / self.position_3D[2] + screen_size / 2)
        self.rect = pygame.Rect(self.position, self.size)

    def draw(self, screen):
        screen.blit(self.pic, self.position.astype(np.int16))
class Score:
    """
    Floating score text.

    Creating a Score immediately adds score_amount to the game score; the
    rendered text then drifts up-right while fading out over show_time ms.
    """
    def __init__(self, space_inv, score_amount, score_text, font, start_pos, color, background_color):
        space_inv.game_score += score_amount
        # BUGFIX: astype(np.float) - np.float was removed in NumPy 1.24;
        # the builtin float selects the same dtype.
        self.position = np.array(start_pos).astype(float)
        self.pic = font.render(score_text, True, color)
        self.pic.set_colorkey(background_color)
        self.show_time = 2000  # total display time in milliseconds
        self.start_time = pygame.time.get_ticks()

    def draw(self, screen, time):
        # fade linearly from full alpha down to 35 over show_time
        self.pic.set_alpha(35 + 220 * (1.0 - (time - self.start_time) / self.show_time))
        screen.blit(self.pic, self.position.astype(np.int16))

    def move(self):
        # drift slowly up and to the right
        self.position += np.array([0.2, -0.1])
class Explosion:
    """
    Sprite-sheet explosion animation.

    pic is a grid[0] x grid[1] sheet of animation frames; draw blits the frame
    selected by self.phase, with the frame centered on mid_position.
    """
    def __init__(self, pic, mid_position, grid, freq):
        self.pic = pic
        self.grid = grid
        self.freq = freq
        self.freq_cnt = 0
        self.phase = np.array([0, 0])  # (column, row) of the current frame
        sheet_w, sheet_h = pic.get_size()
        self.size = (sheet_w, sheet_h)
        # size of a single animation frame inside the sheet
        frame_w = sheet_w / grid[0]
        frame_h = sheet_h / grid[1]
        self.grid_size = (frame_w, frame_h)
        # top-left corner so that one frame is centered on mid_position
        self.position = (int(mid_position[0] - frame_w / 2), int(mid_position[1] - frame_h / 2))

    def draw(self, screen):
        # source rectangle of the current frame inside the sheet
        frame_w, frame_h = self.grid_size
        src_rect = (int(self.phase[0] * frame_w), int(self.phase[1] * frame_h), int(frame_w), int(frame_h))
        screen.blit(self.pic, self.position, src_rect)
class PowerUp:
    """
    Collectible power up.

    Appears centered on mid_position and stays visible for show_time ms; its
    picture, life time and description come from powerup_data[pid].
    """
    def __init__(self, pid, powerup_data, sound_award, mid_position):
        self.pid = pid
        data = powerup_data[pid]
        self.pic = data[0]
        self.life_time = data[1]
        self.desc = data[2]
        self.sound_award = sound_award
        self.show_time = 7000  # visible for 7 seconds
        self.start_time = pygame.time.get_ticks()
        self.size = self.pic.get_size()
        self.mask = pygame.mask.from_surface(self.pic)
        # center the powerup picture on mid_position
        self.position = (int(mid_position[0] - self.size[0] / 2), int(mid_position[1] - self.size[1] / 2))
        self.rect = pygame.Rect(self.position, self.size)

    def draw(self, screen, time):
        # time is unused but kept for call compatibility with other sprites
        screen.blit(self.pic, self.position)
if __name__ == '__main__':
    """
    Prepare screen, etc.
    """
    pygame.display.init()
    desktops = pygame.display.get_desktop_sizes()
    # define display/window height based on (the first) desktop size
    desktop_size = int(desktops[0][1])
    disp_size = (900, desktop_size - 90)
    # BUGFIX: the icon path was 'pygame\game_icon.bmp' - '\g' is an invalid
    # escape sequence (a warning since 3.6, an error in newer Pythons).
    # Forward slashes work on all platforms.
    bmp = pygame.image.load('pygame/game_icon.bmp')
    pygame.display.set_icon(bmp)
    pygame.display.set_caption('Space Invaders')
    screen = pygame.display.set_mode(disp_size)
    SpaceInvaders(screen).run()
    # exit; close everything
    pygame.quit()
    exit()
|
import datetime
from sqlalchemy.util import OrderedDict
from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy import orm
from pylons import config
import vdm.sqlalchemy
import vdm.sqlalchemy.stateful
from sqlalchemy import types, Column, Table, ForeignKey, and_
import meta
import core
import package as _package
import types as _types
import extension
import activity
import domain_object
import ckan.lib.dictization
__all__ = ['Resource', 'resource_table',
           'ResourceGroup', 'resource_group_table',
           'ResourceRevision', 'resource_revision_table',
           'ResourceGroupRevision', 'resource_group_revision_table',
           ]

# The editable resource columns that Resource.__init__/as_dict handle
# generically; 'url', 'format', 'description' and 'hash' additionally get
# empty-string defaults in __init__ (see the comment there).
CORE_RESOURCE_COLUMNS = ['url', 'format', 'description', 'hash', 'name',
                         'resource_type', 'mimetype', 'mimetype_inner',
                         'size', 'created', 'last_modified', 'cache_url',
                         'cache_last_updated', 'webstore_url',
                         'webstore_last_updated', 'url_type']

##formally package_resource
resource_table = Table(
    'resource', meta.metadata,
    Column('id', types.UnicodeText, primary_key=True,
           default=_types.make_uuid),
    Column('resource_group_id', types.UnicodeText,
           ForeignKey('resource_group.id')),
    Column('url', types.UnicodeText, nullable=False),
    Column('format', types.UnicodeText),
    Column('description', types.UnicodeText),
    Column('hash', types.UnicodeText),
    Column('position', types.Integer),
    Column('name', types.UnicodeText),
    Column('resource_type', types.UnicodeText),
    Column('mimetype', types.UnicodeText),
    Column('mimetype_inner', types.UnicodeText),
    Column('size', types.BigInteger),
    Column('created', types.DateTime, default=datetime.datetime.now),
    Column('last_modified', types.DateTime),
    Column('cache_url', types.UnicodeText),
    Column('cache_last_updated', types.DateTime),
    Column('webstore_url', types.UnicodeText),
    Column('webstore_last_updated', types.DateTime),
    Column('url_type', types.UnicodeText),
    # site-configured extra fields, stored as a JSON dict
    Column('extras', _types.JsonDictType),
)

# a resource group bundles the resources belonging to one package
resource_group_table = Table(
    'resource_group', meta.metadata,
    Column('id', types.UnicodeText, primary_key=True,
           default=_types.make_uuid),
    Column('package_id', types.UnicodeText, ForeignKey('package.id')),
    Column('label', types.UnicodeText),
    Column('sort_order', types.UnicodeText),
    Column('extras', _types.JsonDictType),
)

# add vdm state columns and create the parallel *_revision tables
vdm.sqlalchemy.make_table_stateful(resource_table)
resource_revision_table = core.make_revisioned_table(resource_table)

vdm.sqlalchemy.make_table_stateful(resource_group_table)
resource_group_revision_table = core.make_revisioned_table(
    resource_group_table)
class Resource(vdm.sqlalchemy.RevisionedObjectMixin,
               vdm.sqlalchemy.StatefulObjectMixin,
               domain_object.DomainObject):
    """A file/link attached to a dataset via a ResourceGroup.

    Revisioned and stateful through vdm; site-configured extra fields are
    stored in the 'extras' JSON column and proxied as attributes (DictProxy).
    """
    # lazily populated from config by get_extra_columns()
    extra_columns = None

    def __init__(self, resource_group_id=None, url=u'',
                 format=u'', description=u'', hash=u'',
                 extras=None,
                 **kwargs):
        self.id = _types.make_uuid()
        if resource_group_id:
            self.resource_group_id = resource_group_id
        self.url = url
        self.format = format
        self.description = description
        self.hash = hash
        # The base columns historically defaulted to empty strings
        # not None (Null). This is why they are separate here.
        base_columns = ['url', 'format', 'description', 'hash']
        # remaining core columns default to None unless passed as keywords
        for key in set(CORE_RESOURCE_COLUMNS) - set(base_columns):
            setattr(self, key, kwargs.pop(key, None))
        self.extras = extras or {}

        extra_columns = self.get_extra_columns()
        for field in extra_columns:
            value = kwargs.pop(field, None)
            if value is not None:
                setattr(self, field, value)
        # anything left over is not a known column - fail loudly
        if kwargs:
            raise TypeError('unexpected keywords %s' % kwargs)

    def as_dict(self, core_columns_only=False):
        """Return the resource as an ordered dict of its columns and extras."""
        _dict = OrderedDict()
        cols = self.get_columns()
        if not core_columns_only:
            cols = ['id', 'resource_group_id'] + cols + ['position']
        for col in cols:
            value = getattr(self, col)
            # datetimes serialize to ISO 8601 strings
            if isinstance(value, datetime.datetime):
                value = value.isoformat()
            _dict[col] = value
        for k, v in self.extras.items() if self.extras else []:
            _dict[k] = v
        if self.resource_group and not core_columns_only:
            _dict["package_id"] = self.resource_group.package_id
        # FIXME format unification needs doing better
        import ckan.lib.dictization.model_dictize as model_dictize
        _dict[u'format'] = model_dictize._unified_resource_format(self.format)
        return _dict

    def get_package_id(self):
        '''Returns the package id for a resource. '''
        # go via the active, current revision of the owning resource group
        query = meta.Session.query(ResourceGroupRevision) \
            .filter(and_(ResourceGroupRevision.id == self.resource_group_id,
                         ResourceGroupRevision.state == u'active',
                         ResourceGroupRevision.current == True))
        resource_group = query.first()
        if resource_group is None:
            return None
        return resource_group.package_id

    @classmethod
    def get(cls, reference):
        '''Returns a resource object referenced by its name or id.'''
        query = meta.Session.query(Resource).filter(Resource.id == reference)
        resource = query.first()
        if resource is None:
            # fall back to lookup by name
            resource = cls.by_name(reference)
        return resource

    @classmethod
    def get_columns(cls, extra_columns=True):
        '''Returns the core editable columns of the resource.'''
        if extra_columns:
            return CORE_RESOURCE_COLUMNS + cls.get_extra_columns()
        else:
            return CORE_RESOURCE_COLUMNS

    @classmethod
    def get_extra_columns(cls):
        """Return (and on first call install) the configured extra fields.

        Each extra field from 'ckan.extra_resource_fields' becomes a
        DictProxy attribute backed by the 'extras' dict.
        """
        if cls.extra_columns is None:
            cls.extra_columns = config.get(
                'ckan.extra_resource_fields', '').split()
            for field in cls.extra_columns:
                setattr(cls, field, DictProxy(field, 'extras'))
        return cls.extra_columns

    def related_packages(self):
        # a resource belongs to exactly one package via its resource group
        return [self.resource_group.package]

    def activity_stream_detail(self, activity_id, activity_type):
        """Build an ActivityDetail record describing a change to this resource."""
        import ckan.model as model

        # Handle 'deleted' resources.
        # When the user marks a resource as deleted this comes through here as
        # a 'changed' resource activity. We detect this and change it to a
        # 'deleted' activity.
        if activity_type == 'changed' and self.state == u'deleted':
            activity_type = 'deleted'

        res_dict = ckan.lib.dictization.table_dictize(self,
                                                      context={'model': model})
        return activity.ActivityDetail(activity_id, self.id, u"Resource",
                                       activity_type,
                                       {'resource': res_dict})
class ResourceGroup(vdm.sqlalchemy.RevisionedObjectMixin,
                    vdm.sqlalchemy.StatefulObjectMixin,
                    domain_object.DomainObject):
    """A group of resources belonging to one package.

    Revisioned and stateful through vdm; extra fields configured via
    'ckan.extra_resource_group_fields' are proxied into the 'extras' dict.
    """
    # lazily populated from config by get_extra_columns()
    extra_columns = None

    def __init__(self, package_id=None, sort_order=u'', label=u'',
                 extras=None, **kwargs):
        if package_id:
            self.package_id = package_id
        self.sort_order = sort_order
        self.label = label
        self.extras = extras or {}
        self.state = 'active'

        extra_columns = self.get_extra_columns()
        for field in extra_columns:
            value = kwargs.pop(field, u'')
            setattr(self, field, value)
        # anything left over is not a known column - fail loudly
        if kwargs:
            raise TypeError('unexpected keywords %s' % kwargs)

    def as_dict(self, core_columns_only=False):
        """Return the resource group as an ordered dict of columns and extras."""
        _dict = OrderedDict()
        cols = self.get_columns()
        if not core_columns_only:
            cols = ['package_id', 'label', 'sort_order'] + cols
        for col in cols:
            _dict[col] = getattr(self, col)
        for k, v in self.extras.items() if self.extras else []:
            _dict[k] = v
        return _dict

    @classmethod
    def get_columns(cls, extra_columns=True):
        '''Returns the core editable columns of the resource.'''
        if extra_columns:
            return ['label', 'sort_order'] + cls.get_extra_columns()
        else:
            return ['label', 'sort_order']

    @classmethod
    def get_extra_columns(cls):
        """Return (and on first call install) the configured extra fields
        as DictProxy attributes backed by the 'extras' dict."""
        if cls.extra_columns is None:
            cls.extra_columns = config.get(
                'ckan.extra_resource_group_fields', '').split()
            for field in cls.extra_columns:
                setattr(cls, field, DictProxy(field, 'extras'))
        return cls.extra_columns
## Mappers

# Resource -> ResourceGroup relation; the backref keeps all resources of a
# group (including deleted) ordered by their 'position' column. Revisioning
# and plugin hooks are attached as mapper extensions.
meta.mapper(Resource, resource_table, properties={
    'resource_group': orm.relation(
        ResourceGroup,
        # all resources including deleted
        # formally package_resources_all
        backref=orm.backref('resources_all',
                            collection_class=ordering_list('position'),
                            cascade='all, delete',
                            order_by=resource_table.c.position,
                            ),
    )
},
    order_by=[resource_table.c.resource_group_id],
    extension=[vdm.sqlalchemy.Revisioner(resource_revision_table),
               extension.PluginMapperExtension(),
               ],
)

# ResourceGroup -> Package relation, sorted by the group's sort_order.
meta.mapper(ResourceGroup, resource_group_table, properties={
    'package': orm.relation(
        _package.Package,
        # all resources including deleted
        backref=orm.backref('resource_groups_all',
                            cascade='all, delete, delete-orphan',
                            order_by=resource_group_table.c.sort_order,
                            ),
    )
},
    order_by=[resource_group_table.c.package_id],
    extension=[vdm.sqlalchemy.Revisioner(resource_group_revision_table),
               extension.PluginMapperExtension(),
               ],
)

## VDM

# Wire revisioning into the mappers and generate the *Revision classes that
# mirror each object's history rows.
vdm.sqlalchemy.modify_base_object_mapper(Resource, core.Revision, core.State)
ResourceRevision = vdm.sqlalchemy.create_object_version(
    meta.mapper, Resource, resource_revision_table)

vdm.sqlalchemy.modify_base_object_mapper(ResourceGroup, core.Revision,
                                         core.State)
ResourceGroupRevision = vdm.sqlalchemy.create_object_version(
    meta.mapper, ResourceGroup, resource_group_revision_table)

# a revision's packages are reached through its continuity (the live object)
ResourceGroupRevision.related_packages = lambda self: [
    self.continuity.package
]
# BUGFIX: the attribute was misspelled 'resouce_group', which raised
# AttributeError whenever related_packages was called on a ResourceRevision.
ResourceRevision.related_packages = lambda self: [
    self.continuity.resource_group.package
]
def resource_identifier(obj):
    """Return the identifier (the 'id' attribute) of a resource-like object."""
    identifier = obj.id
    return identifier
class DictProxy(object):
    """Descriptor that proxies an attribute into a dict attribute of the host
    object, i.e. obj.<target_key> maps to obj.<target_dict>[target_key].

    Values are coerced with data_type (unicode by default) on assignment.
    """

    def __init__(self, target_key, target_dict, data_type=unicode):
        self.target_key = target_key
        self.target_dict = target_dict
        self.data_type = data_type

    def __get__(self, obj, type):
        # BUGFIX: was 'if not obj', which also returned the descriptor for
        # falsy instances; only class-level access (obj is None) should.
        if obj is None:
            return self

        proxied_dict = getattr(obj, self.target_dict)
        if proxied_dict:
            return proxied_dict.get(self.target_key)

    def __set__(self, obj, value):
        proxied_dict = getattr(obj, self.target_dict)
        if proxied_dict is None:
            # first write: create the backing dict on the host object
            proxied_dict = {}
            setattr(obj, self.target_dict, proxied_dict)

        proxied_dict[self.target_key] = self.data_type(value)

    def __delete__(self, obj):
        proxied_dict = getattr(obj, self.target_dict)
        proxied_dict.pop(self.target_key)
|
import pandas as pd
import glob
import yaml
from os.path import join
from . import serialization, history
def read_data(*args, group=False, **kwargs):
    '''
    Read trial data matching a glob pattern. Returns the raw list of
    (config_file, dataframe, config) tuples, or, with group=True, the trials
    aggregated into one grouped dataframe.
    '''
    results = _read_data(*args, **kwargs)
    if not group:
        return list(results)
    return group_trials(results)
def _read_data(pattern, *args, from_csv=False, process_args=None, **kwargs):
    '''
    Yield (config_file, dataframe, config) for every simulation folder
    matching the glob pattern, reading either CSV exports or sqlite
    histories depending on from_csv.
    '''
    if not process_args:
        process_args = {}
    for folder in glob.glob(pattern):
        config_file = glob.glob(join(folder, '*.yml'))[0]
        # BUGFIX: the config file handle was opened without ever being
        # closed; a context manager releases it deterministically.
        with open(config_file) as f:
            config = yaml.load(f, Loader=yaml.SafeLoader)
        df = None
        if from_csv:
            for trial_data in sorted(glob.glob(join(folder,
                                                    '*.environment.csv'))):
                df = read_csv(trial_data, **kwargs)
                yield config_file, df, config
        else:
            for trial_data in sorted(glob.glob(join(folder, '*.sqlite'))):
                df = read_sql(trial_data, **kwargs)
                yield config_file, df, config
def read_sql(db, *args, **kwargs):
    '''Load a trial dataframe from a sqlite history file (read-only, no backup).'''
    hist = history.History(db_path=db, backup=False, readonly=True)
    return hist.read_sql(*args, **kwargs)
def read_csv(filename, keys=None, convert_types=False, **kwargs):
    '''
    Read a CSV in canonical form: ::

        <agent_id, t_step, key, value, value_type>

    Optionally convert value types and/or keep only the given keys, then
    pivot into one column per (key, agent_id).
    '''
    frame = pd.read_csv(filename)
    if convert_types:
        frame = convert_types_slow(frame)
    if keys:
        frame = frame[frame['key'].isin(keys)]
    return process_one(frame)
def convert_row(row):
    '''Deserialize a canonical row's 'value' in place, guided by its 'value_type'.'''
    value_type = row['value_type']
    row['value'] = serialization.deserialize(value_type, row['value'])
    return row
def convert_types_slow(df):
    '''Cast each 'value' to the dtype recorded for its key, then deserialize
    every row. This is a slow operation.'''
    dtypes = get_types(df)
    for key, dtype in dtypes.items():
        # BUGFIX: the cast used to be assigned to a filtered copy
        # (t = df[...]; t['value'] = ...), which never modified df and only
        # raised SettingWithCopyWarning; write back through .loc instead.
        mask = df['key'] == key
        df.loc[mask, 'value'] = df.loc[mask, 'value'].astype(dtype)
    df = df.apply(convert_row, axis=1)
    return df
def split_processed(df):
    '''
    Split a processed (pivoted) dataframe into environment columns (second
    column level 'env' or 'stats') and agent columns (everything else).
    '''
    is_env = df.columns.get_level_values(1).isin(['env', 'stats'])
    return df.loc[:, is_env], df.loc[:, ~is_env]
def split_df(df):
    '''
    Split a canonical dataframe into (environment rows, agent rows), where
    environment rows have agent_id == 'env'. Returns (df, None) when every
    row is environment history and (None, df) when none is.
    '''
    is_env = df['agent_id'] == 'env'
    env_rows = is_env.sum()
    # degenerate cases: all or nothing belongs to the environment
    if env_rows == len(df):
        return df, None
    if env_rows == 0:
        return None, df
    groups = dict(iter(df.groupby(is_env)))
    return groups[True], groups[False]
def process(df, **kwargs):
    '''
    Process a dataframe in canonical form ``(t_step, agent_id, key, value, value_type)`` into
    two dataframes with a column per key: one with the history of the agents, and one for the
    history of the environment.
    '''
    env_df, agent_df = split_df(df)
    env_processed = process_one(env_df, **kwargs)
    agent_processed = process_one(agent_df, **kwargs)
    return env_processed, agent_processed
def get_types(df):
    '''
    Return a mapping from each 'key' to its first recorded 'value_type'.

    Uses Series.items(): .iteritems() was deprecated and removed in pandas 2.0.
    '''
    dtypes = df.groupby(by=['key'])['value_type'].unique()
    return {k: v[0] for k, v in dtypes.items()}
def process_one(df, *keys, columns=None, values='value',
                fill=True, index=None,
                aggfunc='first', **kwargs):
    '''
    Process a dataframe in canonical form ``(t_step, agent_id, key, value, value_type)`` into
    a dataframe with a column per key. Returns None for None input.

    columns and index previously used mutable list defaults (shared between
    calls); None now selects the same canonical defaults.
    '''
    if df is None:
        return df
    # avoid the mutable-default-argument pitfall
    if columns is None:
        columns = ['key', 'agent_id']
    if index is None:
        index = ['t_step']
    if keys:
        df = df[df['key'].isin(keys)]

    df = df.pivot_table(values=values, index=index, columns=columns,
                        aggfunc=aggfunc, **kwargs)
    if fill:
        df = fillna(df)
    return df
def get_count(df, *keys):
    '''
    Count, per row, how often each value occurs under every top-level column
    (key). Returns a dataframe with (key, value) MultiIndex columns.

    Uses DataFrame.items(): .iteritems() was removed in pandas 2.0.
    '''
    if keys:
        df = df[list(keys)]
    counts = pd.DataFrame()
    for key in df.columns.levels[0]:
        g = df[[key]].apply(pd.Series.value_counts, axis=1).fillna(0)
        for value, series in g.items():
            counts[key, value] = series
    counts.columns = pd.MultiIndex.from_tuples(counts.columns)
    return counts
def get_value(df, *keys, aggfunc='sum'):
    '''
    Aggregate the columns of each top-level column group (key) with aggfunc.

    Implemented by transposing and grouping on the index because
    DataFrame.groupby(axis=1) is deprecated (removed in pandas 3.0);
    the transposed form is equivalent and works on all pandas versions.
    '''
    if keys:
        df = df[list(keys)]
    return df.T.groupby(level=0).agg(aggfunc).T
def plot_all(*args, plot_args=None, **kwargs):
    '''
    Read all the trial data and plot the result of applying a function on them.

    plot_args: extra keyword arguments forwarded to DataFrame.plot.
    Previously a shared mutable default ({}); None now means "no extra args".
    '''
    if plot_args is None:
        plot_args = {}
    dfs = do_all(*args, **kwargs)
    ps = []
    for line in dfs:
        f, df, config = line
        # skip empty trials
        if len(df) < 1:
            continue
        df.plot(title=config['name'], **plot_args)
        ps.append(df)
    return ps
def do_all(pattern, func, *keys, include_env=False, **kwargs):
    '''
    Apply func to every non-empty trial dataframe matching pattern, yielding
    (config_file, result, config) tuples.
    '''
    for config_file, df, config in read_data(pattern, keys=keys):
        if len(df) >= 1:
            yield config_file, func(df, *keys, **kwargs), config
def group_trials(trials, aggfunc=('mean', 'min', 'max', 'std')):
    '''
    Concatenate a set of trial dataframes and aggregate them per index entry
    with every function in aggfunc; the aggregation name becomes the first
    level of the resulting column MultiIndex.

    aggfunc is now a tuple: the previous list default was a shared mutable
    default argument.
    '''
    # accept both bare dataframes and (config_file, df, config)-style tuples
    frames = [t[1] if isinstance(t, tuple) else t for t in list(trials)]
    grouped = pd.concat(frames).groupby(level=0).agg(list(aggfunc))
    return grouped.reorder_levels([2, 0, 1], axis=1)
def fillna(df):
    '''Forward-fill missing values down each column.'''
    return df.ffill(axis=0)
|
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.fluid.contrib.slim.quantization.quantization_pass import QuantizationTransformPass
from paddle.fluid.contrib.slim.quantization.quantization_pass import AddQuantDequantPass
from paddle.fluid.contrib.slim.quantization.quantization_pass import _op_real_in_out_name
from paddle.fluid.contrib.slim.quantization import PostTrainingQuantization
import paddlex.utils.logging as logging
import paddle.fluid as fluid
import os
class PaddleXPostTrainingQuantization(PostTrainingQuantization):
    def __init__(self,
                 executor,
                 dataset,
                 program,
                 inputs,
                 outputs,
                 batch_size=10,
                 batch_nums=None,
                 scope=None,
                 algo="KL",
                 quantizable_op_type=["conv2d", "depthwise_conv2d", "mul"],
                 is_full_quantize=False,
                 is_use_cache_file=False,
                 cache_dir="./temp_post_training"):
        '''
        The class utilizes post training quantization method to quantize the
        fp32 model. It uses calibrate data to calculate the scale factor of
        quantized variables, and inserts fake quant/dequant op to obtain the
        quantized model.

        Args:
            executor(fluid.Executor): The executor to load, run and save the
                quantized model.
            dataset(Python Iterator): The data Reader.
            program(fluid.Program): The paddle program, save the parameters for model.
            inputs(dict): The input of program.
            outputs(dict): The output of program.
            batch_size(int, optional): The batch size of DataLoader. Default is 10.
            batch_nums(int, optional): If batch_nums is not None, the number of
                calibrate data is batch_size*batch_nums. If batch_nums is None, use
                all data provided by sample_generator as calibrate data.
            scope(fluid.Scope, optional): The scope of the program, use it to load
                and save variables. If scope=None, get scope by global_scope().
            algo(str, optional): If algo='KL', use KL-divergence method to
                get the more precise scale factor. If algo='direct', use
                abs_max method to get the scale factor. Default is 'KL'.
            quantizable_op_type(list[str], optional): List the type of ops
                that will be quantized. Default is ["conv2d", "depthwise_conv2d",
                "mul"].
            is_full_quantize(bool, optional): If set is_full_quantize as True,
                apply quantization to all supported quantizable op type. If set
                is_full_quantize as False, only apply quantization to the op type
                according to the input quantizable_op_type.
            is_use_cache_file(bool, optional): If set is_use_cache_file as False,
                all temp data will be saved in memory. If set is_use_cache_file as True,
                it will save temp data to disk. When the fp32 model is complex or
                the number of calibrate data is large, we should set is_use_cache_file
                as True. Default is False.
            cache_dir(str, optional): When is_use_cache_file is True, set cache_dir as
                the directory for saving temp data. Default is ./temp_post_training.

        Returns:
            None
        '''
        self._executor = executor
        self._dataset = dataset
        self._batch_size = batch_size
        self._batch_nums = batch_nums
        # PEP 8: compare with None by identity, not equality.
        self._scope = fluid.global_scope() if scope is None else scope
        self._algo = algo
        self._is_use_cache_file = is_use_cache_file
        self._cache_dir = cache_dir
        if self._is_use_cache_file and not os.path.exists(self._cache_dir):
            os.mkdir(self._cache_dir)

        supported_quantizable_op_type = \
            QuantizationTransformPass._supported_quantizable_op_type + \
            AddQuantDequantPass._supported_quantizable_op_type
        if is_full_quantize:
            self._quantizable_op_type = supported_quantizable_op_type
        else:
            self._quantizable_op_type = quantizable_op_type
            # Validate user-supplied op types early instead of failing mid-run.
            for op_type in self._quantizable_op_type:
                assert op_type in supported_quantizable_op_type + \
                    AddQuantDequantPass._activation_type, \
                    op_type + " is not supported for quantization."

        self._place = self._executor.place
        self._program = program
        self._feed_list = list(inputs.values())
        self._fetch_list = list(outputs.values())
        self._data_loader = None

        self._op_real_in_out_name = _op_real_in_out_name
        self._bit_length = 8
        self._quantized_weight_var_name = set()
        self._quantized_act_var_name = set()
        self._sampling_data = {}
        self._quantized_var_scale_factor = {}

    def quantize(self):
        '''
        Quantize the fp32 model. Use calibrate data to calculate the scale factor of
        quantized variables, and inserts fake quant/dequant op to obtain the
        quantized model.

        Args:
            None
        Returns:
            the program of quantized model.
        '''
        self._preprocess()

        batch_id = 0
        for data in self._data_loader():
            self._executor.run(
                program=self._program,
                feed=data,
                fetch_list=self._fetch_list,
                return_numpy=False)
            self._sample_data(batch_id)

            if batch_id % 5 == 0:
                logging.info("run batch: {}".format(batch_id))
            batch_id += 1
            if self._batch_nums and batch_id >= self._batch_nums:
                break
        # Bugfix: the original message lacked the "{}" placeholder, so the
        # batch count was silently dropped from the log line.
        logging.info("all run batch: {}".format(batch_id))

        logging.info("calculate scale factor ...")
        self._calculate_scale_factor()

        logging.info("update the program ...")
        self._update_program()

        self._save_output_scale()
        return self._program

    def save_quantized_model(self, save_model_path):
        '''
        Save the quantized model to the disk.

        Args:
            save_model_path(str): The path to save the quantized model
        Returns:
            None
        '''
        feed_vars_names = [var.name for var in self._feed_list]
        fluid.io.save_inference_model(
            dirname=save_model_path,
            feeded_var_names=feed_vars_names,
            target_vars=self._fetch_list,
            executor=self._executor,
            params_filename='__params__',
            main_program=self._program)

    def _preprocess(self):
        '''
        Load model and set data loader, collect the variable names for sampling,
        and set activation variables to be persistable.
        '''
        feed_vars = [fluid.framework._get_var(var.name, self._program)
                     for var in self._feed_list]
        self._data_loader = fluid.io.DataLoader.from_generator(
            feed_list=feed_vars, capacity=3 * self._batch_size, iterable=True)
        self._data_loader.set_sample_list_generator(
            self._dataset.generator(self._batch_size, drop_last=True),
            places=self._place)

        # collect the variable names for sampling
        persistable_var_names = []
        for var in self._program.list_vars():
            if var.persistable:
                persistable_var_names.append(var.name)

        for op in self._program.global_block().ops:
            op_type = op.type
            if op_type in self._quantizable_op_type:
                if op_type in ("conv2d", "depthwise_conv2d"):
                    self._quantized_act_var_name.add(op.input("Input")[0])
                    self._quantized_weight_var_name.add(op.input("Filter")[0])
                    self._quantized_act_var_name.add(op.output("Output")[0])
                elif op_type == "mul":
                    if self._is_input_all_not_persistable(
                            op, persistable_var_names):
                        op._set_attr("skip_quant", True)
                        logging.warning(
                            "Skip quant a mul op for two input variables are not persistable"
                        )
                    else:
                        self._quantized_act_var_name.add(op.input("X")[0])
                        self._quantized_weight_var_name.add(op.input("Y")[0])
                        self._quantized_act_var_name.add(op.output("Out")[0])
                else:
                    # process other quantizable op type, the input must all not persistable
                    if self._is_input_all_not_persistable(
                            op, persistable_var_names):
                        input_output_name_list = self._op_real_in_out_name[
                            op_type]
                        for input_name in input_output_name_list[0]:
                            for var_name in op.input(input_name):
                                self._quantized_act_var_name.add(var_name)
                        for output_name in input_output_name_list[1]:
                            for var_name in op.output(output_name):
                                self._quantized_act_var_name.add(var_name)

        # set activation variables to be persistable, so can obtain
        # the tensor data in sample_data
        for var in self._program.list_vars():
            if var.name in self._quantized_act_var_name:
                var.persistable = True
|
<reponame>salkku/salkku-sdk-python<filename>src/sdkhttp.py
import requests
import json
from requests.auth import HTTPBasicAuth
class HTTPClient:
    """Thin wrapper around the salkku REST API.

    Authenticates with HTTP basic auth to obtain a token, then sends the
    token in the 'salkku-auth-token' header on every subsequent request.
    """

    def __init__(self, api_uri, user, password, options):
        # type: (str, str, str, Dict[str, bool]) -> None
        self.api_uri = api_uri
        self.user = user
        self.password = password
        # Options used throughout: 'verify' (TLS verification) and
        # 'verbose' (console logging).
        self.options = options

    def __make_authenticated_get_request(self, url, payload=None):
        # type: (str, Dict) -> requests.Response
        """GET *url* with a fresh auth token; raises on any non-200 status."""
        if payload is None:
            payload = {}
        auth_token = self.authenticate()
        response = requests.get(
            url,
            headers={
                'salkku-auth-token': auth_token
            },
            params=payload,
            verify=self.options['verify'],
            allow_redirects=False
        )
        if self.options['verbose']:
            print("GET {}".format(response.url))
        if response.status_code != requests.codes.ok:
            if self.options['verbose']:
                print(response.text)
            response.raise_for_status()
        return response

    def __make_authenticated_post_request(self, url, post_data=None, auth_token=None, headers=None):
        # type: (str, Dict, str, Dict) -> requests.Response
        """POST *post_data* to *url*; authenticates unless a token is given."""
        if headers is None:
            headers = {}
        if post_data is None:
            post_data = {}
        if auth_token is None:
            auth_token = self.authenticate()
        headers['salkku-auth-token'] = auth_token
        response = requests.post(
            url,
            headers=headers,
            data=post_data,
            verify=self.options['verify'],
            allow_redirects=False
        )
        if self.options['verbose']:
            print("POST {}".format(response.url))
        if response.status_code != requests.codes.ok:
            if self.options['verbose']:
                print(response.text)
            response.raise_for_status()
        return response

    def __post_json(self, url, post_data, auth_token=None, headers=None):
        """POST *post_data* serialized as JSON (sets the Content-Type header)."""
        if headers is None:
            headers = {}
        headers['Content-Type'] = 'application/json'
        return self.__make_authenticated_post_request(url, json.dumps(post_data), auth_token, headers)

    def authenticate(self):
        # type: () -> str
        """Log in with basic auth and return the session token from the
        'salkku-auth-token' response header; raises on failure."""
        url = '{}/login'.format(self.api_uri)
        response = requests.post(
            url,
            auth=HTTPBasicAuth(self.user, self.password),
            verify=self.options['verify'],
            allow_redirects=False
        )
        if self.options['verbose']:
            print("POST {}".format(response.url))
        auth_token = ""
        if response.status_code == requests.codes.ok:
            auth_token = response.headers.get('salkku-auth-token')
        else:
            if self.options['verbose']:
                print(response.text)
            response.raise_for_status()
        return auth_token

    def ping(self):
        """Authenticated connectivity check."""
        url = '{}/ping'.format(self.api_uri)
        return self.__make_authenticated_get_request(url)

    def get_currencies(self):
        # type: () -> requests.Response
        if self.options['verbose']:
            print("Getting list of currencies...")
        url = '{}/currency'.format(self.api_uri)
        return self.__make_authenticated_get_request(url)

    def get_exchanges(self):
        # type: () -> requests.Response
        if self.options['verbose']:
            print("Getting list of exchanges...")
        url = '{}/exchange'.format(self.api_uri)
        return self.__make_authenticated_get_request(url)

    def get_exchange_securities(self, exchange_id):
        # type: (str) -> requests.Response
        if self.options['verbose']:
            print("Getting list of exchange {} securities...".format(exchange_id))
        url = '{}/exchange/{}/security'.format(self.api_uri, exchange_id)
        return self.__make_authenticated_get_request(url)

    def get_transaction_types(self):
        # type: () -> requests.Response
        if self.options['verbose']:
            print("Getting list of transaction types...")
        url = '{}/transaction/type'.format(self.api_uri)
        return self.__make_authenticated_get_request(url)

    def search_security(self, search):
        # type: (str) -> requests.Response
        if self.options['verbose']:
            print("Searching security with \"{}\"...".format(search))
        params = {"q": search}
        url = '{}/security'.format(self.api_uri)
        return self.__make_authenticated_get_request(url, params)

    def get_security(self, security_id):
        # type: (str) -> requests.Response
        if self.options['verbose']:
            print("Get security with id \"{}\"...".format(security_id))
        url = '{}/security/{}'.format(self.api_uri, security_id)
        return self.__make_authenticated_get_request(url)

    def get_portfolios(self):
        # type: () -> requests.Response
        if self.options['verbose']:
            # Bugfix: the original message contained a stray '{}' that was
            # printed literally (no .format() call).
            print("Getting all portfolios...")
        url = '{}/portfolio'.format(self.api_uri)
        return self.__make_authenticated_get_request(url)

    def get_portfolio(self, portfolio_id):
        # type: (str) -> requests.Response
        if self.options['verbose']:
            print("Getting portfolio with id {}...".format(portfolio_id))
        url = '{}/portfolio/{}'.format(self.api_uri, portfolio_id)
        return self.__make_authenticated_get_request(url)

    def post_portfolio(self, file_name):
        """Upload a portfolio definition read from a JSON file."""
        with open(file_name, 'r', encoding='utf-8') as f:
            portfolio = json.loads(f.read())
        url = '{}/portfolio'.format(self.api_uri)
        return self.__post_json(url, portfolio)

    def get_portfolio_history(self, portfolio_id):
        # type: (str) -> requests.Response
        if self.options['verbose']:
            print("Getting history of portfolio {}...".format(portfolio_id))
        url = '{}/portfolio/{}/history'.format(self.api_uri, portfolio_id)
        return self.__make_authenticated_get_request(url)

    def get_portfolio_performance(self, portfolio_id):
        # type: (str) -> requests.Response
        if self.options['verbose']:
            print("Getting performance of portfolio {}...".format(portfolio_id))
        url = '{}/portfolio/{}/performance'.format(self.api_uri, portfolio_id)
        return self.__make_authenticated_get_request(url)

    def get_portfolio_transactions(self, portfolio_id, api_format):
        # type: (str, str) -> requests.Response
        if self.options['verbose']:
            print("Getting transactions of portfolio {}...".format(portfolio_id))
        if api_format is not None:
            url = '{}/portfolio/{}/transaction?format={}'.format(
                self.api_uri, portfolio_id, api_format)
        else:
            url = '{}/portfolio/{}/transaction'.format(self.api_uri, portfolio_id)
        return self.__make_authenticated_get_request(url)

    def post_portfolio_transactions(self, portfolio_id, file_name):
        """Upload each transaction in the given JSON file, one POST per item."""
        with open(file_name, 'r', encoding='utf-8') as f:
            transactions = json.loads(f.read())
        # The URL is loop-invariant; build it once instead of per item.
        url = '{}/portfolio/{}/transaction'.format(self.api_uri, portfolio_id)
        for t in transactions:
            self.__post_json(url, t)

    def get_portfolio_dividends(self, portfolio_id, api_format):
        # type: (str, str) -> requests.Response
        if self.options['verbose']:
            print("Getting dividends of portfolio {}...".format(portfolio_id))
        if api_format is not None:
            url = '{}/portfolio/{}/dividend?format={}'.format(
                self.api_uri, portfolio_id, api_format)
        else:
            url = '{}/portfolio/{}/dividend'.format(self.api_uri, portfolio_id)
        return self.__make_authenticated_get_request(url)

    def post_portfolio_dividends(self, portfolio_id, file_name):
        """Upload each dividend in the given JSON file, one POST per item."""
        with open(file_name, 'r', encoding='utf-8') as f:
            dividends = json.loads(f.read())
        # The URL is loop-invariant; build it once instead of per item.
        url = '{}/portfolio/{}/dividend'.format(self.api_uri, portfolio_id)
        for d in dividends:
            self.__post_json(url, d)
|
<gh_stars>0
# coding: utf-8
# In[19]:
import os
import json
import tensorflow as tf
import tensorflow.contrib.slim as slim
from models.nnets import NN
from utils.vocabulary import Vocabulary
from config.config import Config
from models.models import ShowAttendTell
from copy import deepcopy
def update_dict(file_name):
    """
    Add a 'classifications' entry to a COCO-style caption JSON file.

    For every annotation, each caption word present in the vocabulary is
    embedded and assigned a KMeans cluster; the deduplicated cluster ids
    are stored per annotation and the augmented dict is written to
    './<basename>_new.json'.

    NOTE(review): relies on the module-level globals ``v`` (vocabulary),
    ``word2vec`` (embedding matrix), ``kmeans`` and ``np`` that are set up
    in the __main__ section below — calling this before they exist fails.
    """
    with open(file_name, 'r') as f:
        train_captions = json.load(f)
    train_captions_new = deepcopy(train_captions)
    print('*' * 30)
    print('updating dict ' + file_name)
    cls_labels = []
    skip_word = []
    import tqdm
    annotations = tqdm.tqdm(train_captions['annotations'],
                            total=len(train_captions['annotations']))
    for idx, item in enumerate(annotations):
        caption = item[u'caption']
        labels = []
        for word in caption.split(' '):
            if word in v.words:
                word_index = v.word2idx[word]
                word_embed = word2vec[word_index]
                word_label = kmeans.predict(word_embed[np.newaxis, :])
                labels.append(word_label[0])
            else:
                # Out-of-vocabulary words contribute no cluster label.
                skip_word.append(word)
        labels = list(set(labels))
        new_labels = [int(label) for label in labels]
        tmp_dic = {u'image_id': item[u'image_id'], u'id': item[u'id'], u'cls_label': new_labels}
        cls_labels.append(deepcopy(tmp_dic))
    train_captions_new.update({'classifications': cls_labels})
    print('update dict')
    # Bugfix: file_name.split('.')[0] returned '' for paths such as
    # './datasets/.../captions_train2014.json' (leading '.'), so every call
    # wrote to the same './_new.json' and overwrote earlier output.
    base_name = os.path.splitext(os.path.basename(file_name))[0]
    with open('./' + base_name + '_new.json', "w") as f:
        json.dump(train_captions_new, f)
    print('saved')
if __name__ == '__main__':
    # Build the captioning config (CNN frozen; 'train' phase for the rest).
    config = Config()
    config.train_cnn = False
    config.phase = 'train'
    nn = NN(config)
    # In[3]:
    model = ShowAttendTell(config)
    # model.build()
    # In[4]:
    # Load the 7300-word vocabulary used by the captioning model.
    v = Vocabulary(7300)
    v.load('./datasets/vocabulary.csv')
    print(v.words.shape)
    print((v.word2idx[v.words[1]]))
    # In[5]:
    # Recreate the word-embedding variable so its trained weights can be
    # restored from the checkpoint below.
    word = tf.placeholder(tf.int32, shape=[1])
    with tf.variable_scope("word_embedding",reuse=tf.AUTO_REUSE):
        embedding_matrix = tf.get_variable(
            name = 'weights',
            shape = [7300, 512],
            initializer = nn.fc_kernel_initializer,
            regularizer = nn.fc_kernel_regularizer,
            trainable = True)
        word_embed = tf.nn.embedding_lookup(embedding_matrix, word)
    # In[6]:
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
    # In[7]:
    # NOTE(review): the include pattern is 'word_embedding/weight' but the
    # variable above is named 'weights' — confirm which name the checkpoint
    # actually contains.
    include = ['word_embedding/weight']
    variables_to_restore = slim.get_variables_to_restore(include=include)
    # variables_to_restore = slim.get_variables(scope="word_embedding")
    word_embed_list = []
    with tf.Session() as sess:
        checkpoint_path = tf.train.latest_checkpoint('./results/checkpoint/')
        print(checkpoint_path)
        saver = tf.train.Saver(variables_to_restore)
        tf.contrib.framework.get_variables_to_restore()
        saver.restore(sess, checkpoint_path)
        # Pull the trained embedding matrix out as a numpy array.
        word2vec = embedding_matrix.eval()
    # In[10]:
    from sklearn.cluster import KMeans
    kmeans = KMeans(n_clusters=256,
                    init='k-means++',
                    n_init=10,
                    max_iter=3000,
                    tol=0.0001,
                    precompute_distances='auto',
                    verbose=0,
                    random_state=None,
                    copy_x=True,
                    n_jobs=1,
                    algorithm='auto')
    # In[11]:
    print('-'*20)
    print('clustering')
    print('-'*20)
    # Cluster all embeddings except index 0 (presumably the padding token —
    # TODO confirm).
    kmeans.fit(word2vec[1:])
    print('-'*20)
    print('clustering done')
    print('-'*20)
    import numpy as np
    train_caption_file = './datasets/rawjson/captions_train2014.json'
    val_caption_file = './datasets/rawjson/captions_val2014.json'
    update_dict(train_caption_file)
    update_dict(val_caption_file)
    # word_cls ={}
    # for word in v.words:
    #     idx = v.word2idx[word]
    #     embeded = word2vec[idx][np.newaxis,:]
    #     label = kmeans.predict(embeded)[0]
    #     if label in word_cls.keys():
    #         word_cls[label].append(word)
    #     else:
    #         word_cls.update({label:[word]})
    # for key in word_cls.keys():
    #     print(str(key))
    #     print(word_cls[key])
    # # In[ ]:
|
'''
Created on Jul 9, 2013
@author: <NAME>
'''
import time
from multiprocessing.pool import Pool
from SegmentTreeSum import SegmentTreeSum
from bitstring import BitArray
parallelSolve = False
INF = 1 << 31
def solve(par):
    """
    Answer one testcase of an interval fill/empty/invert/sum problem.

    par is (M, Q, construct, queries): the bit string is built by repeating
    each pattern construct[j][1] construct[j][0] times; each query is
    [op, a, b] acting on the bit range (a-1, b], where op is 'F' (fill with
    ones), 'E' (empty to zeros), 'I' (invert) or 'S' (report set-bit sum).
    Returns the 'S' answers, one per line, with a trailing newline.
    """
    M, Q, construct, queries = par
    bits = BitArray()
    # Materialise the full bit string from the run-length construction.
    for m in construct:
        for i in range(m[0]):
            bits.append('0b' + m[1])
    # Coordinate-compress: only query endpoints matter.  Compressed
    # interval i covers the original bits (nodes[i-1], nodes[i]].
    nodes = set()
    for q in queries:  # (i-1, i]
        nodes.add(q[1] - 1)
        nodes.add(q[2])
    nodes = list(nodes)
    nodes.sort()
    # count[i]: number of set bits in compressed interval i.
    count = [0] * len(nodes)
    for i in range(1, len(nodes)):
        count[i] = bits[nodes[i - 1] + 1:nodes[i] + 1].count(1)
    # size[i]: width (bit count) of compressed interval i.
    size = [0] * len(nodes)
    for i in range(1, len(nodes)):
        size[i] = nodes[i] - nodes[i - 1]
    # Segment tree over the per-interval set-bit counts for fast range sums.
    tree = SegmentTreeSum(len(nodes))
    tree.buildTree(count, 1, 0, len(nodes) - 1)
    results = []
    # NOTE(review): nodes.index() is a linear scan inside the query loop;
    # bisect on the sorted list would be O(log n) — confirm before changing.
    for q in queries:
        i1 = nodes.index(q[1] - 1)
        i2 = nodes.index(q[2])
        if q[0] == 'F':
            # Fill: every touched interval becomes all ones.
            for i in range(i1 + 1, i2 + 1):
                count[i] = size[i]
                tree.update(i, size[i])
        if q[0] == 'E':
            # Empty: every touched interval becomes all zeros.
            for i in range(i1 + 1, i2 + 1):
                count[i] = 0
                tree.update(i, 0)
        if q[0] == 'I':
            # Invert: set bits become clear and vice versa.
            for i in range(i1 + 1, i2 + 1):
                count[i] = size[i] - count[i]
                tree.update(i, count[i])
        if q[0] == 'S':
            results.append(tree.query(i1 + 1, i2))
    return '\n'.join(str(e) for e in results) + '\n'
class Solver:
    """
    Reads test cases from 'input.txt', solves them sequentially or with a
    process pool, and writes one result block per test to 'output.txt'.
    """

    def getInput(self):
        """Parse all test cases into self.input as (M, Q, construct, queries)."""
        self.numOfTests = int(self.fIn.readline())
        self.input = []
        for t in range(self.numOfTests):
            # M construction commands: a repeat count and a bit pattern.
            M = int(self.fIn.readline())
            construct = []
            for j in range(M):
                v1 = int(self.fIn.readline())
                v2 = self.fIn.readline().strip()
                construct.append([v1, v2])
            # Q queries: [op, start, end] with numeric bounds.
            Q = int(self.fIn.readline())
            queries = []
            for i in range(Q):
                row = self.fIn.readline().split()
                row[1] = int(row[1])
                row[2] = int(row[2])
                queries.append(row)
            self.input.append((M, Q, construct, queries))

    def __init__(self):
        self.fIn = open('input.txt')
        self.fOut = open('output.txt', 'w')
        self.results = []

    def parallel(self):
        """Solve all test cases with a 4-worker process pool and time the run."""
        self.getInput()
        millis1 = int(round(time.time() * 1000))
        # Context manager terminates the workers on exit; the original code
        # leaked the pool (never called close()/join()).
        with Pool(4) as p:
            self.results = p.map(solve, self.input)
        millis2 = int(round(time.time() * 1000))
        print("Time in milliseconds: %d " % (millis2 - millis1))
        self.makeOutput()

    def sequential(self):
        """Solve all test cases in-process, one after another, and time the run."""
        self.getInput()
        millis1 = int(round(time.time() * 1000))
        for i in self.input:
            self.results.append(solve(i))
        millis2 = int(round(time.time() * 1000))
        print("Time in milliseconds: %d " % (millis2 - millis1))
        self.makeOutput()

    def makeOutput(self):
        """Write each result followed by a blank line, then close both files."""
        for test in range(self.numOfTests):
            self.fOut.write("%s\n" % self.results[test])
        self.fIn.close()
        self.fOut.close()
if __name__ == '__main__':
    # Entry point: pick the execution strategy from the module-level flag.
    runner = Solver()
    if parallelSolve:
        runner.parallel()
    else:
        runner.sequential()
|
import numpy as np
import pytest
from atompack.crystal.spatial import MillerIndex, Orientation, Plane
###########################
# MillerIndex Tests #
###########################
@pytest.mark.parametrize("test_input,expectation", [
    (np.array([1 / 2, 2 / 3, 1]), MillerIndex((4, 3, 2))),
    (np.array([-1 / 2, -2 / 3, -1]), MillerIndex((-4, -3, -2))),
    (np.array([1, np.inf, np.inf]), MillerIndex((1, 0, 0))),
    (np.array([-1, np.inf, np.inf]), MillerIndex((-1, 0, 0))),
    (np.array([1 / 2, np.inf, np.inf]), MillerIndex((2, 0, 0))),
    (np.array([-1 / 2, np.inf, np.inf]), MillerIndex((-2, 0, 0))),
    (np.array([1, 1, 1]), MillerIndex((1, 1, 1))),
    (np.array([-1, -1, -1]), MillerIndex((-1, -1, -1))),
])
def test_miller_index_from_intercepts(test_input, expectation):
    """Axis intercepts (np.inf = parallel to that axis) map to the expected index."""
    res = MillerIndex.from_intercepts(test_input)
    assert res == expectation
@pytest.mark.parametrize("test_input,expectation", [
    (MillerIndex((4, 3, 2)), np.array([1 / 2, 2 / 3, 1])),
    (MillerIndex((-4, -3, -2)), np.array([-1 / 2, -2 / 3, -1])),
    (MillerIndex((1, 0, 0)), np.array([1, np.inf, np.inf])),
    (MillerIndex((-1, 0, 0)), np.array([-1, np.inf, np.inf])),
    (MillerIndex((2, 0, 0)), np.array([1 / 2, np.inf, np.inf])),
    (MillerIndex((-2, 0, 0)), np.array([-1 / 2, np.inf, np.inf])),
    (MillerIndex((1, 1, 1)), np.array([1, 1, 1])),
    (MillerIndex((-1, -1, -1)), np.array([-1, -1, -1])),
])
def test_miller_index_intercepts(test_input, expectation):
    """The intercepts property inverts from_intercepts (0 component -> np.inf)."""
    res = test_input.intercepts
    assert np.allclose(res, expectation)
def test_miller_index_equality():
    """Equality compares component-wise and only against other MillerIndex objects."""
    first = (1, 2, 3)
    second = (3, 2, 1)
    # valid equality
    assert MillerIndex(first) == MillerIndex(first)
    # valid inequality
    assert MillerIndex(first) != MillerIndex(second)
    # invalid inequality: a bare tuple never compares equal
    assert MillerIndex(first) != first
###########################
# Orientation Tests #
###########################
def test_orientation_miller_indices():
    """Round-trip a (plane, direction) pair through an Orientation."""
    in_plane = MillerIndex((1, 0, 0))
    in_direction = MillerIndex((1, 2, 0))
    orientation = Orientation.from_miller_indices(in_plane, in_direction)
    out_plane, out_direction = orientation.as_miller_indices()
    assert out_plane == in_plane
    assert out_direction == in_direction
#####################
# Plane Tests #
#####################
@pytest.mark.parametrize("test_input,expectation", [
    (MillerIndex((4, 3, 2)), np.array([[1 / 2, 0, 0], [0, 2 / 3, 0], [0, 0, 1]])),
    (MillerIndex((1, 0, 0)), np.array([[1, 0, 0], [1, 1, 0], [1, 0, 1]])),
])
def test_plane_from_miller_index(test_input, expectation):
    """A Miller index yields a plane through the expected coplanar points."""
    res = Plane.from_miller_index(test_input)
    assert np.allclose(res.coplanar_points, expectation)
def test_plane_coefficients():
    """Coefficients of the plane through three known points."""
    points = np.array([
        [1, 2, 3],
        [4, 6, 9],
        [12, 11, 9],
    ])
    plane = Plane(points)
    expected = np.array([30, -48, 17, -15])
    assert np.allclose(plane.coefficients, expected)
|
<filename>oura/converters.py
import pandas as pd
class UnitConverter:
    """
    Use this class to convert units for certain dataframe cols

    :param convert_cols: A set of columns to apply predefined conversions
    :type convert_cols: list/set
    """

    # Subclasses override these with the metric names they know how to convert.
    all_dt_metrics = []
    all_sec_metrics = []
    all_metrics = all_dt_metrics + all_sec_metrics

    def __init__(self, convert_cols=None):
        if convert_cols is not None:
            convert_cols = set(convert_cols)
            defaults = set(self.all_metrics)
            invalid = convert_cols - defaults
            # Truthiness test also reports an (invalid) empty-string metric,
            # which the original any(invalid) silently missed.
            if invalid:
                print(f"Ignoring metrics with no conversion: {invalid}")
            self.convert_cols = list(convert_cols & defaults)
        else:
            self.convert_cols = self.all_metrics

    def _rename_converted_cols(self, df, metrics, suffix_str):
        """
        Rename converted cols by adding a suffix to the col name
        For example, 'bedtime_start' becomes 'bedtime_start_dt_adjusted'

        :param df: a dataframe
        :type df: pandas dataframe obj

        :param metrics: metrics to rename
        :type metrics: list of strings

        :param suffix_str: the str to append to each metric name
        :type suffix_str: str
        """
        updated_headers = [header + suffix_str for header in metrics]
        d_to_rename = dict(zip(metrics, updated_headers))
        return df.rename(columns=d_to_rename)

    def _convert_to_dt(self, df, dt_metrics):
        """
        Convert dataframe fields to datetime dtypes

        :param df: dataframe
        :type df: pandas dataframe obj

        :param dt_metrics: List of metrics to be converted to datetime
        :type dt_metrics: List
        """
        # The index from the original enumerate() was unused.
        for dt_metric in dt_metrics:
            df[dt_metric] = pd.to_datetime(df[dt_metric], format="%Y-%m-%d %H:%M:%S")
        df = self._rename_converted_cols(df, dt_metrics, "_dt_adjusted")
        return df

    def _convert_to_hrs(self, df, sec_metrics):
        """
        Convert fields from seconds to hours

        :param df: dataframe
        :type df: pandas dataframe obj

        :param sec_metrics: List of metrics to be converted from sec -> hrs
        :type sec_metrics: List
        """
        # Guard the empty selection so a frame with no matching columns
        # passes through untouched.
        if sec_metrics:
            df[sec_metrics] = df[sec_metrics] / 60 / 60
        df = self._rename_converted_cols(df, sec_metrics, "_in_hrs")
        return df

    def _select_cols(self, df, subset):
        # Only touch columns that exist in df AND were requested at init.
        return [c for c in df.columns if c in set(subset) & set(self.convert_cols)]

    def convert_metrics(self, df):
        """
        Convert metrics to new unit type

        :param df: dataframe
        :type df: pandas dataframe obj
        """
        dt_metrics = self._select_cols(df, self.all_dt_metrics)
        df = self._convert_to_dt(df, dt_metrics)
        sec_metrics = self._select_cols(df, self.all_sec_metrics)
        df = self._convert_to_hrs(df, sec_metrics)
        return df
class SleepConverter(UnitConverter):
    """Unit conversions for the Oura sleep summary dataframe."""

    all_dt_metrics = ["bedtime_end", "bedtime_start"]
    all_sec_metrics = [
        "awake",
        "deep",
        "duration",
        "light",
        "onset_latency",
        "rem",
        "total",
    ]
    hypnogram_5min = ["hypnogram_5min"]
    all_metrics = all_dt_metrics + all_sec_metrics + hypnogram_5min

    def convert_hypnogram_helper(self, hypnogram):
        # Map the API's numeric sleep-phase codes to single-letter labels.
        phase_letters = {"1": "D", "2": "L", "3": "R", "4": "A"}
        return "".join(phase_letters[code] for code in hypnogram)

    def convert_hypnogram(self, sleep_df):
        """Translate the hypnogram_5min column to letter codes, if present."""
        if "hypnogram_5min" in sleep_df.columns:
            sleep_df["hypnogram_5min"] = sleep_df["hypnogram_5min"].apply(
                self.convert_hypnogram_helper
            )
        return sleep_df

    def convert_metrics(self, df):
        """Apply the standard conversions plus the hypnogram translation."""
        df = super().convert_metrics(df)
        if "hypnogram_5min" in self.convert_cols:
            df = self.convert_hypnogram(df)
        return df
class ActivityConverter(UnitConverter):
    # Activity summaries only carry datetime fields; no seconds->hours metrics.
    all_dt_metrics = ["day_end", "day_start"]
    all_metrics = all_dt_metrics
|
import pymultinest
import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pymultinest.solve import solve
import scipy as sp
import scipy.stats
import os
from gp_project.kronecker import gaussian_kernel
# Ensure the MultiNest output directory exists.  makedirs(..., exist_ok=True)
# avoids the exists()/mkdir() race of the original and is a no-op when the
# directory is already present.
os.makedirs("chains", exist_ok=True)
def prior(cube):
    """
    Map unit-cube samples to the model's prior distributions (in place).

    Even slots hold the four means (mu2..mu5, normal prior) and odd slots
    the four standard deviations (sigma2..sigma5, lognormal prior); each
    is pushed through the inverse CDF of its prior.  Returns *cube*.
    """
    # Hyperparameters of the priors.  (The unused sigmasq_a from the
    # original was dead code and has been removed.)
    mu_mu = 0
    mu_sigma = 1
    sigma_s = 0.5
    sigma_mu = 0

    # Take interval 0:1 -> prior distribution through inverse CDF.
    # The four (mu_n, sigma_n) pairs all use the same hyperparameters,
    # so transform them in one loop instead of four copy-pasted stanzas.
    for i in range(0, 8, 2):
        cube[i] = sp.stats.norm.ppf(cube[i], loc=mu_mu, scale=mu_sigma)
        cube[i + 1] = sp.stats.lognorm.ppf(cube[i + 1], s=sigma_s, loc=sigma_mu)
    return cube
def loglike(cube):
    """
    Log-likelihood of the GP model for the sampled (mu_n, sigma_n) pairs.

    Each observed row ydata[i] (coefficient n = i + 2), rescaled by Q**n,
    is modelled as multivariate normal with constant mean mu_n and
    covariance sigma_n**2 * R (plus jitter); a -k(k+1)/2 * log(Q)
    normalisation term is subtracted.  Relies on the module-level globals
    ydata, Q, N, R and jitter defined below.
    """
    mus = [cube[0], cube[2], cube[4], cube[6]]
    sigmas = [cube[1], cube[3], cube[5], cube[7]]

    total = 0.0
    for i, dX in enumerate(ydata):
        n = i + 2
        total += sp.stats.multivariate_normal.logpdf(
            dX / Q**n, mean=mus[i] * np.ones(N),
            cov=sigmas[i]**2 * R + jitter * np.eye(N))

    k = len(ydata) + 2
    return total - k * (k + 1) / 2 * np.log(Q)
# --- Load the trial data -------------------------------------------------
datafile = 'A_data'
df = pd.read_csv("./data/{}.csv".format(datafile), index_col=[0, 1])
idx = pd.IndexSlice
df = df.loc[150]
# Evaluation grid: points 10, 20, ..., 170.
x = np.arange(10, 180, 10)
N = len(x)
Q = df.loc[x, 'Q'].values
# Q is constant over the grid here; keep the scalar.
Q = Q[0]
print(Q)
# Gaussian kernel over x with length scale 35; jitter keeps the covariance
# matrix positive definite.
ls = 35
jitter = 1e-6
R = gaussian_kernel(x, x, ls)
# Observations for coefficients 2..5, one row per coefficient.
ydata = df.loc[x, '2':'5'].values.T
# print(ydata)

# number of dimensions our problem has
parameters = ["mu2", "sigma2", "mu3", "sigma3",
              "mu4", "sigma4", "mu5", "sigma5"]
n_params = len(parameters)
# name of the output files
prefix = "chains/not_iid-"
datafile = prefix + datafile + "_"

# run MultiNest
result = solve(
    LogLikelihood=loglike, Prior=prior,
    n_dims=n_params, outputfiles_basename=datafile, resume=False, verbose=True)

# json.dump(parameters, open(datafile + 'params.json', 'w')) # save parameter names
# # plot the distribution of a posteriori possible models
# fig = plt.figure()
# cax = fig.add_subplot(131)
# # plt.plot(x, ydata.T, '+ ', color='red', label='data')
# a = pymultinest.Analyzer(outputfiles_basename=datafile, n_params=n_params)
# colors = ['orange', 'green', 'blue', 'red']
# mulist, siglist = zip(*a.get_equal_weighted_posterior()[::, :-1])
# for (mu, sigma) in a.get_equal_weighted_posterior()[::100, :-1]:
#     for i, dX in enumerate(ydata):
#         n = i + 2
#         plt.plot(x, dX/Q**n, '-', color=colors[i],
#                  alpha=0.2, label='c{}'.format(n))
# sig_ax = fig.add_subplot(132)
# mu_ax = fig.add_subplot(133)
# sig_ax.hist(siglist, bins='auto', normed=True, alpha=0.5)
# mu_ax.hist(mulist, bins='auto', normed=True, alpha=0.5)
# # plt.legend()
# plt.tight_layout()
# plt.show()
# plt.savefig(datafile + 'posterior.pdf')
# plt.close()
|
<reponame>swryan/OpenMDAO-Framework<filename>openmdao.util/src/openmdao/util/publickey.py
"""
Support for generation, use, and storage of public/private key pairs.
The :func:`pk_encrypt`, :func:`pk_decrypt`, :func:`pk_sign`, and
:func:`pk_verify` functions provide a thin interface over
:class:`Crypto.PublicKey.RSA` methods for easier use and to work around some
issues found with some keys read from ssh ``id_rsa`` files.
"""
import base64
import cPickle
import getpass
import os.path
import socket
import sys
import threading
from Crypto.PublicKey import RSA
from Crypto.Random import get_random_bytes
from Crypto.Util.number import bytes_to_long
if sys.platform == 'win32': #pragma no cover
try:
import win32api
import win32con
import win32security
import ntsecuritycon
except ImportError:
HAVE_PYWIN32 = False
else:
HAVE_PYWIN32 = True
else:
HAVE_PYWIN32 = False
from openmdao.util.log import NullLogger
# Cache of client key pairs indexed by user.
_KEY_CACHE = {}
_KEY_CACHE_LOCK = threading.Lock()
def get_key_pair(user_host, logger=None,
                 overwrite_cache=False, ignore_ssh=False):
    """
    Returns RSA key containing both public and private keys for the user
    identified in `user_host`. This can be an expensive operation, so
    we avoid generating a new key pair whenever possible.
    If ``~/.ssh/id_rsa`` exists and is private, that key is returned.

    user_host: string
        Format ``user@host``.

    logger: :class:`logging.Logger`
        Used for debug messages.

    overwrite_cache: bool
        If True, a new key is generated and forced into the cache of existing
        known keys. Used for testing.

    ignore_ssh: bool
        If True, ignore any existing ssh id_rsa key file. Used for testing.

    .. note::

        To avoid unnecessary key generation, the public/private key pair for
        the current user is stored in the private file ``~/.openmdao/keys``.
        On Windows this requires the pywin32 extension. Also, the public
        key is stored in ssh form in ``~/.openmdao/id_rsa.pub``.

    """
    logger = logger or NullLogger()
    # The whole lookup/generate path runs under the cache lock so concurrent
    # threads never generate duplicate keys for the same user.
    with _KEY_CACHE_LOCK:
        if overwrite_cache:
            key_pair = _generate(user_host, logger)
            _KEY_CACHE[user_host] = key_pair
            return key_pair

        # Look in previously generated keys.
        try:
            key_pair = _KEY_CACHE[user_host]
        except KeyError:
            # If key for current user (typical), check filesystem.
            # TODO: file lock to protect from separate processes.
            user, host = user_host.split('@')
            if user == getpass.getuser():
                current_user = True
                key_pair = None
                # Try to re-use SSH key. Exceptions should *never* be exercised!
                if not ignore_ssh:
                    id_rsa = \
                        os.path.expanduser(os.path.join('~', '.ssh', 'id_rsa'))
                    if is_private(id_rsa):
                        try:
                            with open(id_rsa, 'r') as inp:
                                key_pair = RSA.importKey(inp.read())
                        except Exception as exc: #pragma no cover
                            logger.warning('ssh id_rsa import: %r', exc)
                        else:
                            generate = False
                    else: #pragma no cover
                        logger.warning('Ignoring insecure ssh id_rsa.')

                if key_pair is None:
                    # Look for OpenMDAO key.
                    key_file = \
                        os.path.expanduser(os.path.join('~', '.openmdao', 'keys'))
                    if is_private(key_file):
                        try:
                            with open(key_file, 'rb') as inp:
                                key_pair = cPickle.load(inp)
                        except Exception:
                            # Unreadable/corrupt keyfile; fall back to a
                            # freshly generated pair below.
                            generate = True
                        else:
                            generate = False
                    else:
                        logger.warning('Insecure keyfile! Regenerating keys.')
                        os.remove(key_file)
                        generate = True

            # Difficult to run test as non-current user.
            else: #pragma no cover
                current_user = False
                generate = True

            if generate:
                key_pair = _generate(user_host, logger)

                if current_user:
                    key_dir = os.path.dirname(key_file)
                    if not os.path.exists(key_dir):
                        os.mkdir(key_dir)

                    # Save key pair in protected file.
                    if sys.platform == 'win32' and not HAVE_PYWIN32: #pragma no cover
                        logger.debug('No pywin32, not saving keyfile')
                    else:
                        make_private(key_dir)  # Private while writing keyfile.
                        with open(key_file, 'wb') as out:
                            cPickle.dump(key_pair, out,
                                         cPickle.HIGHEST_PROTOCOL)
                        try:
                            make_private(key_file)
                        # Hard to cause (recoverable) error here.
                        except Exception: #pragma no cover
                            os.remove(key_file)  # Remove unsecured file.
                            raise

                        # Save public key in ssh form.
                        users = {user_host: key_pair.publickey()}
                        filename = os.path.join(key_dir, 'id_rsa.pub')
                        write_authorized_keys(users, filename, logger)

            _KEY_CACHE[user_host] = key_pair

    return key_pair
def _generate(user_host, logger):
    """ Return a freshly generated RSA key pair for `user_host`. """
    logger.debug('generating public key for %r...', user_host)
    # Without pywin32 we can't protect the saved keyfile, so favor speed.
    if sys.platform == 'win32' and not HAVE_PYWIN32: #pragma no cover
        bits = 1024  # Much quicker to generate.
    else:
        bits = 2048
    new_pair = RSA.generate(bits, get_random_bytes)
    logger.debug('    done')
    return new_pair
def pk_encrypt(data, public_key):
    """
    Return list of chunks of `data` encrypted by `public_key`.

    data: string
        The message to be encrypted.

    public_key: :class:`Crypto.PublicKey.RSA`
        Public portion of key pair.
    """
    # Normally we would use 8 rather than 16 here, but for some reason at least
    # some keys read from ssh id_rsa files don't work correctly with 8.
    # Floor division keeps chunk_size an int; true division ('/') would
    # produce a float under Python 3 or future-division and break slicing.
    chunk_size = public_key.size() // 16
    chunks = []
    while data:
        chunks.append(public_key.encrypt(data[:chunk_size], ''))
        data = data[chunk_size:]
    return chunks
def pk_decrypt(encrypted, private_key):
    """
    Return `encrypted` decrypted by `private_key` as a string.

    encrypted: list
        Chunks of encrypted data returned by :func:`pk_encrypt`.

    private_key: :class:`Crypto.PublicKey.RSA`
        Private portion of key pair.
    """
    # ''.join() assembles the result in one pass instead of the quadratic
    # behavior of repeated string concatenation.
    return ''.join(private_key.decrypt(chunk) for chunk in encrypted)
def pk_sign(hashed, private_key):
    """
    Return signature for `hashed` using `private_key`.

    hashed: string
        A hash value of the data to be signed.

    private_key: :class:`Crypto.PublicKey.RSA`
        Private portion of key pair.
    """
    # The obvious private_key.sign(hashed, '') fails for at least some keys
    # loaded from ssh id_rsa files, so compute the RSA signature directly
    # via the 'slowmath' route: m = c**d mod n.
    as_long = bytes_to_long(hashed)
    signed = pow(as_long, private_key.d, private_key.n)
    return (signed,)
def pk_verify(hashed, signature, public_key):
    """
    Verify `hashed` based on `signature` and `public_key`.

    hashed: string
        A hash for the data that is signed.

    signature: tuple
        Value returned by :func:`pk_sign`.

    public_key: :class:`Crypto.PublicKey.RSA`
        Public portion of key pair.
    """
    # Delegate entirely to the key object's own verification.
    result = public_key.verify(hashed, signature)
    return result
def is_private(path):
    """
    Return True if `path` is accessible only by 'owner'.

    path: string
        Path to file or directory to check.

    A nonexistent path is considered private.  On Windows without pywin32
    there is no way to check, so False is returned.

    .. note::

        On Windows this requires the pywin32 extension.
    """
    if not os.path.exists(path):
        return True  # Nonexistent file is secure ;-)

    if sys.platform == 'win32':  #pragma no cover
        if not HAVE_PYWIN32:
            return False  # No way to know.

        # Find the SIDs for user and system.
        username = win32api.GetUserNameEx(win32con.NameSamCompatible)

        # Map Cygwin 'root' to 'Administrator'. Typically these are intended
        # to be identical, but /etc/passwd might configure them differently.
        if username.endswith('\\root'):
            username = username.replace('\\root', '\\Administrator')
        user, domain, type = win32security.LookupAccountName('', username)
        system, domain, type = win32security.LookupAccountName('', 'System')

        # Find the DACL part of the Security Descriptor for the file
        sd = win32security.GetFileSecurity(path,
                                        win32security.DACL_SECURITY_INFORMATION)
        dacl = sd.GetSecurityDescriptorDacl()

        # Verify the DACL contains just the two entries we expect
        # (current user and System), each granting access only to them.
        count = dacl.GetAceCount()
        if count != 2:
            return False
        for i in range(count):
            ace = dacl.GetAce(i)
            if ace[2] != user and ace[2] != system:
                return False
        return True
    else:
        # Private means no group/other permission bits set.  0o77 (modern
        # octal literal, same value as the old 0077) masks those bits; the
        # new spelling is also valid under Python 3.
        return (os.stat(path).st_mode & 0o77) == 0
def make_private(path):
    """
    Make `path` accessible only by 'owner'.

    path: string
        Path to file or directory to be made private.

    Raises ImportError on Windows without pywin32, and RuntimeError if the
    path could not actually be made private.

    .. note::

        On Windows this requires the pywin32 extension.
    """
    if sys.platform == 'win32':  #pragma no cover
        if not HAVE_PYWIN32:
            raise ImportError('No pywin32')

        # Find the SIDs for user and system.
        username = win32api.GetUserNameEx(win32con.NameSamCompatible)

        # Map Cygwin 'root' to 'Administrator'. Typically these are intended
        # to be identical, but /etc/passwd might configure them differently.
        if username.endswith('\\root'):
            username = username.replace('\\root', '\\Administrator')
        user, domain, type = win32security.LookupAccountName('', username)
        system, domain, type = win32security.LookupAccountName('', 'System')

        # Find the DACL part of the Security Descriptor for the file
        sd = win32security.GetFileSecurity(path,
                                        win32security.DACL_SECURITY_INFORMATION)

        # Create a blank DACL and add the ACEs we want:
        # full access for the current user and for System, nobody else.
        dacl = win32security.ACL()
        dacl.AddAccessAllowedAce(win32security.ACL_REVISION,
                                 ntsecuritycon.FILE_ALL_ACCESS, user)
        dacl.AddAccessAllowedAce(win32security.ACL_REVISION,
                                 ntsecuritycon.FILE_ALL_ACCESS, system)

        # Put our new DACL into the Security Descriptor and update the file
        # with the updated SD.
        sd.SetSecurityDescriptorDacl(1, dacl, 0)
        win32security.SetFileSecurity(path,
                                      win32security.DACL_SECURITY_INFORMATION,
                                      sd)
    else:
        # Normal chmod() works on test machines with ACLs enabled, but a user
        # in the field reported a situation where it didn't. This code tries
        # using libacl if it can. Doesn't seem to cause any problems, not
        # verified that it helps though.
        try:
            # From pylibacl, which requires 'libacl1-dev'.
            import posix1e
        except ImportError:
            # Modern 0o... octal literals: same values as the old 0700/0600,
            # but also valid under Python 3.
            mode = 0o700 if os.path.isdir(path) else 0o600
            os.chmod(path, mode)  # Owner-only Read/Write(/Execute).
        else:
            if os.path.isdir(path):
                acl = posix1e.ACL(text='u::rwx,g::-,o::-')
            else:
                acl = posix1e.ACL(text='u::rw,g::-,o::-')
            acl.applyto(path)

    # Double-check the result; fail loudly rather than leave secrets exposed.
    if not is_private(path):
        raise RuntimeError("Can't make %r private" % path)
def encode_public_key(key):
    """
    Return base64 text representation of public key `key`.

    key: public key
        Public part of key pair.
    """
    # Just being defensive, this should never happen.
    if key.has_private():  #pragma no cover
        key = key.publickey()
    pickled = cPickle.dumps(key, cPickle.HIGHEST_PROTOCOL)
    return base64.b64encode(pickled)
def decode_public_key(text):
    """
    Return public key from text representation.

    text: string
        base64 encoded key data.
    """
    raw = base64.b64decode(text)
    return cPickle.loads(raw)
def read_authorized_keys(filename=None, logger=None):
    """
    Return dictionary of public keys, indexed by user, read from `filename`.
    The file must be in ssh format, and only RSA keys are processed.
    If the file is not private, then no keys are returned.

    filename: string
        File to read from. The default is ``~/.ssh/authorized_keys``.

    logger: :class:`logging.Logger`
        Used for log messages.

    Raises RuntimeError if the file does not exist, is not private (where
    that can be determined), or if any line is malformed or unresolvable.
    """
    if not filename:
        filename = \
            os.path.expanduser(os.path.join('~', '.ssh', 'authorized_keys'))
    logger = logger or NullLogger()
    if not os.path.exists(filename):
        raise RuntimeError('%r does not exist' % filename)
    if not is_private(filename):
        if sys.platform != 'win32' or HAVE_PYWIN32:
            raise RuntimeError('%r is not private' % filename)
        else: #pragma no cover
            # Without pywin32 we cannot check privacy on Windows;
            # warn and continue rather than refusing outright.
            logger.warning('Allowed users file %r is not private', filename)
    # Process every line, counting errors; raise only after the full scan so
    # the log shows all problems at once.
    errors = 0
    keys = {}
    with open(filename, 'r') as inp:
        for line in inp:
            line = line.rstrip()
            # Strip trailing '#' comments and skip blank lines.
            sharp = line.find('#')
            if sharp >= 0:
                line = line[:sharp]
            if not line:
                continue
            # Expected format: '<key-type> <base64-key-data> <user>@<host>'.
            key_type, blank, rest = line.partition(' ')
            if key_type != 'ssh-rsa':
                logger.error('unsupported key type: %r', key_type)
                errors += 1
                continue
            key_data, blank, user_host = rest.partition(' ')
            if not key_data:
                logger.error('bad line (missing key data):')
                logger.error(line)
                errors += 1
                continue
            try:
                user, host = user_host.split('@')
            except ValueError:
                logger.error('bad line (require user@host):')
                logger.error(line)
                errors += 1
                continue
            logger.debug('user %r, host %r', user, host)
            # Require the host to resolve; rejects stale/bogus entries.
            # (The resolved address itself is not used further.)
            try:
                ip_addr = socket.gethostbyname(host)
            except socket.gaierror:
                logger.error('unknown host %r', host)
                logger.error(line)
                errors += 1
                continue
            # Decode the ssh wire format: a uint32 length followed by the
            # name 'ssh-rsa', then length-prefixed exponent e and modulus n.
            data = base64.b64decode(key_data)
            start = 0
            name_len = _longint(data, start, 4)
            start += 4
            name = data[start:start+name_len]
            if name != 'ssh-rsa':
                logger.error('name error: %r vs. ssh-rsa', name)
                logger.error(line)
                errors += 1
                continue
            start += name_len
            e_len = _longint(data, start, 4)
            start += 4
            e = _longint(data, start, e_len)
            start += e_len
            n_len = _longint(data, start, 4)
            start += 4
            n = _longint(data, start, n_len)
            start += n_len
            # All bytes must be consumed exactly, or the blob is corrupt.
            if start != len(data):
                logger.error('length error: %d vs. %d', start, len(data))
                logger.error(line)
                errors += 1
                continue
            try:
                pubkey = RSA.construct((n, e))
            except Exception as exc:
                logger.error('key construct error: %r', exc)
                errors += 1
            else:
                keys[user_host] = pubkey
    if errors:
        raise RuntimeError('%d errors in %r, check log for details'
                           % (errors, filename))
    return keys
def _longint(buf, start, length):
""" Return long value from binary string. """
value = long(0)
for i in range(length):
value = (value << 8) + ord(buf[start])
start += 1
return value
def write_authorized_keys(allowed_users, filename, logger=None):
    """
    Write `allowed_users` to `filename` in ssh format.
    The file will be made private if supported on this platform.

    allowed_users: dict
        Dictionary of public keys indexed by user.

    filename: string
        File to write to.

    logger: :class:`logging.Logger`
        Used for log messages.
    """
    logger = logger or NullLogger()
    with open(filename, 'w') as out:
        for user in sorted(allowed_users.keys()):
            pubkey = allowed_users[user]
            # ssh wire format: each field (name 'ssh-rsa', exponent e,
            # modulus n) is a 4-byte big-endian length followed by the data;
            # the concatenation is then base64 encoded.
            parts = []
            for field in ('ssh-rsa', _longstr(pubkey.e), _longstr(pubkey.n)):
                parts.append(_longstr(len(field), 4))
                parts.append(field)
            data = base64.b64encode(''.join(parts))
            out.write('ssh-rsa %s %s\n\n' % (data, user))
    if sys.platform == 'win32' and not HAVE_PYWIN32: #pragma no cover
        logger.warning("Can't make authorized keys file %r private", filename)
    else:
        make_private(filename)
def _longstr(num, length=0):
""" Return binary string representation of `num`. """
buf = chr(num & 0xff)
num >>= 8
while num:
buf = chr(num & 0xff) + buf
num >>= 8
while len(buf) < length:
buf = chr(0) + buf
return buf
|
# coding: utf-8
"""
Pdf4me
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class OcrAction(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    # swagger_types: attribute name -> attribute type.
    swagger_types = {
        'stapel': 'str',
        'businesss_card_reco': 'bool',
        'full_text_search': 'bool',
        'output_type': 'str',
        'action_id': 'str'
    }

    # attribute_map: attribute name -> JSON key in the API definition.
    attribute_map = {
        'stapel': 'stapel',
        'businesss_card_reco': 'businesssCardReco',
        'full_text_search': 'fullTextSearch',
        'output_type': 'outputType',
        'action_id': 'actionId'
    }

    def __init__(self, stapel=None, businesss_card_reco=None, full_text_search=None, output_type=None, action_id=None):  # noqa: E501
        """OcrAction - a model defined in Swagger"""  # noqa: E501
        self._stapel = None
        self._businesss_card_reco = None
        self._full_text_search = None
        self._output_type = None
        self._action_id = None
        self.discriminator = None
        # Route non-None arguments through the property setters so any
        # validation (e.g. on output_type) is applied.
        initial = (('stapel', stapel),
                   ('businesss_card_reco', businesss_card_reco),
                   ('full_text_search', full_text_search),
                   ('output_type', output_type),
                   ('action_id', action_id))
        for name, value in initial:
            if value is not None:
                setattr(self, name, value)

    @property
    def stapel(self):
        """The stapel of this OcrAction.

        :rtype: str
        """
        return self._stapel

    @stapel.setter
    def stapel(self, stapel):
        """Set the stapel of this OcrAction.

        :type: str
        """
        self._stapel = stapel

    @property
    def businesss_card_reco(self):
        """The businesss_card_reco of this OcrAction.

        :rtype: bool
        """
        return self._businesss_card_reco

    @businesss_card_reco.setter
    def businesss_card_reco(self, businesss_card_reco):
        """Set the businesss_card_reco of this OcrAction.

        :type: bool
        """
        self._businesss_card_reco = businesss_card_reco

    @property
    def full_text_search(self):
        """The full_text_search of this OcrAction.

        :rtype: bool
        """
        return self._full_text_search

    @full_text_search.setter
    def full_text_search(self, full_text_search):
        """Set the full_text_search of this OcrAction.

        :type: bool
        """
        self._full_text_search = full_text_search

    @property
    def output_type(self):
        """The output_type of this OcrAction.

        :rtype: str
        """
        return self._output_type

    @output_type.setter
    def output_type(self, output_type):
        """Set the output_type of this OcrAction.

        Rejects values outside the set the API accepts.

        :type: str
        """
        allowed_values = ["undef", "txt", "docx", "xlsx", "pptx", "pdfSearchable", "xml", "rtf", "rtt", "vcf", "json"]  # noqa: E501
        if output_type not in allowed_values:
            raise ValueError(
                "Invalid value for `output_type` ({0}), must be one of {1}"  # noqa: E501
                .format(output_type, allowed_values)
            )
        self._output_type = output_type

    @property
    def action_id(self):
        """The action_id of this OcrAction.

        :rtype: str
        """
        return self._action_id

    @action_id.setter
    def action_id(self, action_id):
        """Set the action_id of this OcrAction.

        :type: str
        """
        self._action_id = action_id

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in six.iterkeys(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                converted = {}
                for key, item in value.items():
                    converted[key] = (item.to_dict()
                                      if hasattr(item, "to_dict") else item)
                result[attr] = converted
            else:
                result[attr] = value
        # Subclasses of dict also fold in their own items.
        if issubclass(OcrAction, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, OcrAction):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.