input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
""" Set of functions for detaling with spam reports.
"""
import collections
import httplib2
import logging
import settings
import sys
import settings
from collections import defaultdict
from features import filterrules_helpers
from framework import sql
from infra_libs import ts_mon
from services import spam_helpers
from apiclient.discovery import build
from oauth2client.client import GoogleCredentials
from apiclient.errors import Error as ApiClientError
from oauth2client.client import Error as Oauth2ClientError
# Names of the SQL tables this service reads and writes.
SPAMREPORT_TABLE_NAME = 'SpamReport'
SPAMVERDICT_TABLE_NAME = 'SpamVerdict'
ISSUE_TABLE = 'Issue'

# Reason codes stored with each spam verdict:
REASON_MANUAL = 'manual'          # a human moderator decided.
REASON_THRESHOLD = 'threshold'    # enough user flags crossed the threshold.
REASON_CLASSIFIER = 'classifier'  # the ML classifier decided.
REASON_FAIL_OPEN = 'fail_open'    # classifier unavailable; assumed ham.

# Column lists for inserts into the report/verdict tables, for issues
# and for comments respectively.
SPAMREPORT_ISSUE_COLS = ['issue_id', 'reported_user_id', 'user_id']
MANUALVERDICT_ISSUE_COLS = ['user_id', 'issue_id', 'is_spam', 'reason',
    'project_id']
THRESHVERDICT_ISSUE_COLS = ['issue_id', 'is_spam', 'reason', 'project_id']
SPAMREPORT_COMMENT_COLS = ['comment_id', 'reported_user_id', 'user_id']
MANUALVERDICT_COMMENT_COLS = ['user_id', 'comment_id', 'is_spam', 'reason',
    'project_id']
THRESHVERDICT_COMMENT_COLS = ['comment_id', 'is_spam', 'reason', 'project_id']
class SpamService(object):
"""The persistence layer for spam reports."""
# Monitoring counters for spam-related actions, keyed by action type.
issue_actions = ts_mon.CounterMetric(
    'monorail/spam_svc/issue',
    'Count of things that happen to issues.',
    [ts_mon.StringField('type')])
comment_actions = ts_mon.CounterMetric(
    'monorail/spam_svc/comment',
    'Count of things that happen to comments.',
    [ts_mon.StringField('type')])
# NOTE(review): the metric name below is misspelled ('mononrail' vs
# 'monorail').  Renaming a live metric would break existing dashboards
# and alerts, so it is flagged here rather than silently fixed.
prediction_api_failures = ts_mon.CounterMetric(
    'mononrail/spam_svc/prediction_api_failure',
    'Failures calling the prediction API',
    None)
def __init__(self):
    """Set up table managers and, best-effort, the prediction API client.

    If credentials or the API client cannot be obtained, the error is
    logged and self.prediction_service stays None; classification then
    fails open (see ClassifyIssue/ClassifyComment).
    """
    self.report_tbl = sql.SQLTableManager(SPAMREPORT_TABLE_NAME)
    self.verdict_tbl = sql.SQLTableManager(SPAMVERDICT_TABLE_NAME)
    self.issue_tbl = sql.SQLTableManager(ISSUE_TABLE)
    self.prediction_service = None
    try:
        credentials = GoogleCredentials.get_application_default()
        self.prediction_service = build(
            'prediction', 'v1.6',
            http=httplib2.Http(),
            credentials=credentials)
    except (Oauth2ClientError, ApiClientError):
        # logging.exception records the full traceback; the previous
        # "%s" % sys.exc_info()[0] only logged the exception class.
        logging.exception('Error getting GoogleCredentials')
def LookupIssueFlaggers(self, cnxn, issue_id):
    """Return users who've reported the issue or its comments as spam.

    Returns:
      A pair (issue_reporters, comment_reporters): a list of user ids who
      flagged the issue itself, and a dict of comment id to the list of
      user ids who flagged that comment.
    """
    flag_rows = self.report_tbl.Select(
        cnxn, cols=['user_id', 'comment_id'],
        issue_id=issue_id)
    issue_reporters = []
    comment_reporters = collections.defaultdict(list)
    for user_id, comment_id in flag_rows:
        if comment_id:
            comment_reporters[comment_id].append(user_id)
        else:
            # A NULL comment_id means the issue itself was flagged.
            issue_reporters.append(user_id)
    return issue_reporters, comment_reporters
def LookupIssueFlagCounts(self, cnxn, issue_ids):
    """Return a dict mapping issue_id to its spam-flag count."""
    count_rows = self.report_tbl.Select(
        cnxn, cols=['issue_id', 'COUNT(*)'],
        issue_id=issue_ids, group_by=['issue_id'])
    return {long(issue_id): flag_count
            for issue_id, flag_count in count_rows}
def LookupIssueVerdicts(self, cnxn, issue_ids):
    """Return a dict mapping issue_id to the reason of its latest verdict.

    NOTE(review): 'reason' is selected alongside MAX(created) without
    being in the GROUP BY, so the database may pick an arbitrary row's
    reason rather than the most recent one — verify against the SQL mode.
    """
    verdict_rows = self.verdict_tbl.Select(
        cnxn,
        cols=['issue_id', 'reason', 'MAX(created)'],
        issue_id=issue_ids, group_by=['issue_id'])
    return {long(issue_id): reason
            for issue_id, reason, _created in verdict_rows}
def LookupIssueVerdictHistory(self, cnxn, issue_ids):
    """Return the full spam-verdict history for the given issues.

    Returns a list of verdict dicts ordered by issue_id, then creation
    time (not a map — despite the symmetry with LookupIssueVerdicts).
    """
    cols = ['issue_id', 'reason', 'created', 'is_spam',
            'classifier_confidence', 'user_id', 'overruled']
    verdict_rows = self.verdict_tbl.Select(
        cnxn, cols=cols,
        issue_id=issue_ids, order_by=[('issue_id', []), ('created', [])])
    # TODO: group by issue_id, make class instead of dict for verdict.
    return [dict(zip(cols, row)) for row in verdict_rows]
def LookupCommentVerdictHistory(self, cnxn, comment_ids):
    """Return the full spam-verdict history for the given comments.

    Returns a list of verdict dicts ordered by comment_id, then creation
    time.
    """
    cols = ['comment_id', 'reason', 'created', 'is_spam',
            'classifier_confidence', 'user_id', 'overruled']
    verdict_rows = self.verdict_tbl.Select(
        cnxn, cols=cols,
        comment_id=comment_ids,
        order_by=[('comment_id', []), ('created', [])])
    # TODO: group by comment_id, make class instead of dict for verdict.
    return [dict(zip(cols, row)) for row in verdict_rows]
def FlagIssues(self, cnxn, issue_service, issues, reporting_user_id,
               flagged_spam):
    """Create or delete spam reports on a batch of issues.

    For each issue whose flag count crossed settings.spam_flag_thresh in
    the direction being reported, a new threshold verdict is recorded and
    the issue's is_spam bit is updated — unless the latest verdict was a
    manual one, which is never auto-overridden.

    Args:
      cnxn: connection to the SQL database.
      issue_service: used to persist the issues' is_spam bit.
      issues: list of Issue PBs being flagged or un-flagged.
      reporting_user_id: the user adding or retracting the report.
      flagged_spam: True to add spam reports, False to remove this
        user's reports.
    """
    if not issues:
        # Nothing to do; also avoids indexing issues[0] below, which
        # previously raised IndexError on an empty list.
        return

    if flagged_spam:
        rows = [(issue.issue_id, issue.reporter_id, reporting_user_id)
                for issue in issues]
        self.report_tbl.InsertRows(cnxn, SPAMREPORT_ISSUE_COLS, rows,
                                   ignore=True)
    else:
        issue_ids = [issue.issue_id for issue in issues]
        self.report_tbl.Delete(
            cnxn, issue_id=issue_ids, user_id=reporting_user_id,
            comment_id=None)

    project_id = issues[0].project_id

    # Now record new verdicts and update issue.is_spam, if they've changed.
    ids = [issue.issue_id for issue in issues]
    counts = self.LookupIssueFlagCounts(cnxn, ids)
    previous_verdicts = self.LookupIssueVerdicts(cnxn, ids)

    verdict_updates = []
    for issue_id, count in counts.items():
        # The count crossed the threshold in the direction being
        # reported (>= thresh when flagging, < thresh when retracting)...
        crossed = (count >= settings.spam_flag_thresh) == flagged_spam
        # ...and the most recent verdict, if any, was not manual.
        manual = previous_verdicts.get(issue_id) == REASON_MANUAL
        if crossed and not manual:
            verdict_updates.append(issue_id)

    if not verdict_updates:
        return

    # Some of the issues may have exceeded the flag threshold, so issue
    # verdicts and mark as spam in those cases.
    rows = [(issue_id, flagged_spam, REASON_THRESHOLD, project_id)
            for issue_id in verdict_updates]
    self.verdict_tbl.InsertRows(cnxn, THRESHVERDICT_ISSUE_COLS, rows,
                                ignore=True)

    updated_ids = set(verdict_updates)  # O(1) membership below.
    update_issues = []
    for issue in issues:
        if issue.issue_id in updated_ids:
            issue.is_spam = flagged_spam
            update_issues.append(issue)

    if flagged_spam:
        self.issue_actions.increment_by(len(update_issues), {'type': 'flag'})

    issue_service.UpdateIssues(cnxn, update_issues, update_cols=['is_spam'])
def FlagComment(self, cnxn, issue_id, comment_id, reported_user_id,
                reporting_user_id, flagged_spam):
    """Create or delete a spam report on a single comment."""
    # TODO(seanmccullough): Bulk comment flagging? There's no UI for that.
    if not flagged_spam:
        # Retract this user's report on the comment.
        self.report_tbl.Delete(
            cnxn, issue_id=issue_id, comment_id=comment_id,
            user_id=reporting_user_id)
        return
    self.report_tbl.InsertRow(
        cnxn, ignore=True, issue_id=issue_id,
        comment_id=comment_id, reported_user_id=reported_user_id,
        user_id=reporting_user_id)
    self.comment_actions.increment({'type': 'flag'})
def RecordClassifierIssueVerdict(self, cnxn, issue, is_spam, confidence,
                                 fail_open):
    """Store the classifier's (or fail-open) verdict for an issue."""
    if fail_open:
        verdict_reason = REASON_FAIL_OPEN
    else:
        verdict_reason = REASON_CLASSIFIER
    self.verdict_tbl.InsertRow(
        cnxn, issue_id=issue.issue_id, is_spam=is_spam,
        reason=verdict_reason, classifier_confidence=confidence,
        project_id=issue.project_id)
    if is_spam:
        self.issue_actions.increment({'type': 'classifier'})
    # This is called at issue creation time, so there's nothing else to do here.
def RecordManualIssueVerdicts(self, cnxn, issue_service, issues, user_id,
                              is_spam):
    """Record a manual spam/ham verdict on each issue, overruling history.

    Marks every prior verdict for these issues as overruled, inserts the
    new manual verdicts, updates each issue's is_spam bit, and commits.
    """
    verdict_rows = [
        (user_id, issue.issue_id, is_spam, REASON_MANUAL, issue.project_id)
        for issue in issues]
    issue_ids = [issue.issue_id for issue in issues]

    # Overrule all previous verdicts (deferred: committed below).
    self.verdict_tbl.Update(cnxn, {'overruled': True}, [
        ('issue_id IN (%s)' % sql.PlaceHolders(issue_ids), issue_ids)
    ], commit=False)

    self.verdict_tbl.InsertRows(cnxn, MANUALVERDICT_ISSUE_COLS, verdict_rows,
                                ignore=True)

    for issue in issues:
        issue.is_spam = is_spam

    if is_spam:
        self.issue_actions.increment_by(len(issues), {'type': 'manual'})
    else:
        issue_service.AllocateNewLocalIDs(cnxn, issues)

    # This will commit the transaction.
    issue_service.UpdateIssues(cnxn, issues, update_cols=['is_spam'])
def RecordManualCommentVerdict(self, cnxn, issue_service, user_service,
                               comment_id, sequence_num, user_id, is_spam):
    """Record a manual spam/ham verdict on a comment and soft-(un)delete it."""
    # TODO(seanmccullough): Bulk comment verdicts? There's no UI for that.
    self.verdict_tbl.InsertRow(
        cnxn, ignore=True, user_id=user_id, comment_id=comment_id,
        is_spam=is_spam, reason=REASON_MANUAL)
    comment = issue_service.GetComment(cnxn, comment_id)
    comment.is_spam = is_spam
    issue = issue_service.GetIssue(cnxn, comment.issue_id)
    issue_service.SoftDeleteComment(
        cnxn, comment.project_id, issue.local_id, sequence_num, user_id,
        user_service, is_spam, True, is_spam)
    if is_spam:
        self.comment_actions.increment({'type': 'manual'})
def RecordClassifierCommentVerdict(self, cnxn, comment, is_spam, confidence,
                                   fail_open):
    """Store the classifier's (or fail-open) verdict for a comment."""
    if fail_open:
        verdict_reason = REASON_FAIL_OPEN
    else:
        verdict_reason = REASON_CLASSIFIER
    self.verdict_tbl.InsertRow(
        cnxn, comment_id=comment.id, is_spam=is_spam,
        reason=verdict_reason, classifier_confidence=confidence,
        project_id=comment.project_id)
    if is_spam:
        self.comment_actions.increment({'type': 'classifier'})
def _predict(self, body):
    """Execute one prediction request against the configured model."""
    trained_models = self.prediction_service.trainedmodels()
    request = trained_models.predict(
        project=settings.classifier_project_id,
        id=settings.classifier_model_id,
        body=body)
    return request.execute()
def _IsExempt(self, author, is_project_member):
    """Return True if the user is exempt from spam checking."""
    email = author.email
    if email is not None and email.endswith(
            settings.spam_whitelisted_suffixes):
        logging.info('%s whitelisted from spam filtering', email)
        return True
    if author.ignore_action_limits:
        logging.info('%s trusted not to spam', email)
        return True
    if is_project_member:
        logging.info('%s is a project member, assuming ham', email)
        return True
    return False
def ClassifyIssue(self, issue, firstComment, reporter, is_project_member):
    """Classify an issue as either spam or ham.

    Args:
      issue: the Issue.
      firstComment: the first Comment on issue.
      reporter: User PB for the Issue reporter.
      is_project_member: True if reporter is a member of issue's project.

    Returns a JSON dict of classifier prediction results from
    the Cloud Prediction API.
    """
    # Fail-safe: not spam.
    result = {'outputLabel': 'ham',
              'outputMulti': [{'label': 'ham', 'score': '1.0'}],
              'failed_open': False}

    if self._IsExempt(reporter, is_project_member):
        return result

    if not self.prediction_service:
        logging.error("prediction_service not initialized.")
        # Record the fail-open, consistent with ClassifyComment (this
        # path previously returned without bumping the failure counter
        # or setting failed_open).
        self.prediction_api_failures.increment()
        result['failed_open'] = True
        return result

    features = spam_helpers.GenerateFeatures(
        issue.summary, firstComment.content, settings.spam_feature_hashes)

    for _attempt in range(3):
        try:
            result = self._predict({'input': {'csvInstance': features}})
            result['failed_open'] = False
            return result
        except Exception as ex:
            # Fail open on any API error: spam checking must never block
            # issue creation.
            self.prediction_api_failures.increment()
            logging.error('Error calling prediction API: %s', ex)
            result['failed_open'] = True

    return result
def ClassifyComment(self, comment_content, commenter, is_project_member=True):
    """Classify a comment as either spam or ham.

    Args:
      comment_content: the comment text.
      commenter: User PB for the user who authored the comment.
      is_project_member: True if commenter is a member of the project.

    Returns a JSON dict of classifier prediction results from
    the Cloud Prediction API.
    """
    # Fail-safe: not spam.
    result = {'outputLabel': 'ham',
              'outputMulti': [{'label': 'ham', 'score': '1.0'}],
              'failed_open': False}

    if self._IsExempt(commenter, is_project_member):
        return result

    if not self.prediction_service:
        logging.error("prediction_service not initialized.")
        self.prediction_api_failures.increment()
        result['failed_open'] = True
        return result

    # Comments have no summary, so only the content contributes features.
    features = spam_helpers.GenerateFeatures(
        '', comment_content, settings.spam_feature_hashes)

    for _attempt in range(3):
        try:
            result = self._predict({'input': {'csvInstance': features}})
            result['failed_open'] = False
            return result
        except Exception as ex:
            # Fail open on any API error: spam checking must never block
            # commenting.
            self.prediction_api_failures.increment()
            logging.error('Error calling prediction API: %s', ex)
            result['failed_open'] = True

    return result
def GetIssueClassifierQueue(
self, cnxn, _issue_service, project_id, offset=0, limit=10):
"""Returns list of recent issues with spam verdicts,
ranked in ascending order of confidence (so uncertain items are first).
"""
# TODO(seanmccullough): Optimize pagination. This query probably gets
# slower as the number of SpamVerdicts grows, regardless of offset
# and limit values used here. Using offset,limit in general may not
# be the best way to do this.
issue_results = self.verdict_tbl.Select(cnxn,
cols=['issue_id', 'is_spam', 'reason', 'classifier_confidence',
'created'],
where=[
('project_id = %s', | |
pulumi.set(self, "image_id", value)
@property
@pulumi.getter(name="instanceInitiatedShutdownBehavior")
def instance_initiated_shutdown_behavior(self) -> Optional[pulumi.Input[str]]:
    """Input value for the `instanceInitiatedShutdownBehavior` field."""
    return pulumi.get(self, "instance_initiated_shutdown_behavior")

@instance_initiated_shutdown_behavior.setter
def instance_initiated_shutdown_behavior(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "instance_initiated_shutdown_behavior", value)
@property
@pulumi.getter(name="instanceMarketOptions")
def instance_market_options(self) -> Optional[pulumi.Input['LaunchTemplateInstanceMarketOptionsArgs']]:
    """Input value for the `instanceMarketOptions` field."""
    return pulumi.get(self, "instance_market_options")

@instance_market_options.setter
def instance_market_options(self, value: Optional[pulumi.Input['LaunchTemplateInstanceMarketOptionsArgs']]):
    pulumi.set(self, "instance_market_options", value)
@property
@pulumi.getter(name="instanceRequirements")
def instance_requirements(self) -> Optional[pulumi.Input['LaunchTemplateInstanceRequirementsArgs']]:
    """Input value for the `instanceRequirements` field."""
    return pulumi.get(self, "instance_requirements")

@instance_requirements.setter
def instance_requirements(self, value: Optional[pulumi.Input['LaunchTemplateInstanceRequirementsArgs']]):
    pulumi.set(self, "instance_requirements", value)
@property
@pulumi.getter(name="instanceType")
def instance_type(self) -> Optional[pulumi.Input[str]]:
    """Input value for the `instanceType` field."""
    return pulumi.get(self, "instance_type")

@instance_type.setter
def instance_type(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "instance_type", value)
@property
@pulumi.getter(name="kernelId")
def kernel_id(self) -> Optional[pulumi.Input[str]]:
    """Input value for the `kernelId` field."""
    return pulumi.get(self, "kernel_id")

@kernel_id.setter
def kernel_id(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "kernel_id", value)
@property
@pulumi.getter(name="keyName")
def key_name(self) -> Optional[pulumi.Input[str]]:
    """Input value for the `keyName` field."""
    return pulumi.get(self, "key_name")

@key_name.setter
def key_name(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "key_name", value)
@property
@pulumi.getter(name="licenseSpecifications")
def license_specifications(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LaunchTemplateLicenseSpecificationArgs']]]]:
    """Input value for the `licenseSpecifications` field."""
    return pulumi.get(self, "license_specifications")

@license_specifications.setter
def license_specifications(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LaunchTemplateLicenseSpecificationArgs']]]]):
    pulumi.set(self, "license_specifications", value)
@property
@pulumi.getter(name="maintenanceOptions")
def maintenance_options(self) -> Optional[pulumi.Input['LaunchTemplateMaintenanceOptionsArgs']]:
    """Input value for the `maintenanceOptions` field."""
    return pulumi.get(self, "maintenance_options")

@maintenance_options.setter
def maintenance_options(self, value: Optional[pulumi.Input['LaunchTemplateMaintenanceOptionsArgs']]):
    pulumi.set(self, "maintenance_options", value)
@property
@pulumi.getter(name="metadataOptions")
def metadata_options(self) -> Optional[pulumi.Input['LaunchTemplateMetadataOptionsArgs']]:
    """Input value for the `metadataOptions` field."""
    return pulumi.get(self, "metadata_options")

@metadata_options.setter
def metadata_options(self, value: Optional[pulumi.Input['LaunchTemplateMetadataOptionsArgs']]):
    pulumi.set(self, "metadata_options", value)
@property
@pulumi.getter
def monitoring(self) -> Optional[pulumi.Input['LaunchTemplateMonitoringArgs']]:
    """Input value for `monitoring`."""
    return pulumi.get(self, "monitoring")

@monitoring.setter
def monitoring(self, value: Optional[pulumi.Input['LaunchTemplateMonitoringArgs']]):
    pulumi.set(self, "monitoring", value)
@property
@pulumi.getter(name="networkInterfaces")
def network_interfaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LaunchTemplateNetworkInterfaceArgs']]]]:
    """Input value for the `networkInterfaces` field."""
    return pulumi.get(self, "network_interfaces")

@network_interfaces.setter
def network_interfaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LaunchTemplateNetworkInterfaceArgs']]]]):
    pulumi.set(self, "network_interfaces", value)
@property
@pulumi.getter
def placement(self) -> Optional[pulumi.Input['LaunchTemplatePlacementArgs']]:
    """Input value for `placement`."""
    return pulumi.get(self, "placement")

@placement.setter
def placement(self, value: Optional[pulumi.Input['LaunchTemplatePlacementArgs']]):
    pulumi.set(self, "placement", value)
@property
@pulumi.getter(name="privateDnsNameOptions")
def private_dns_name_options(self) -> Optional[pulumi.Input['LaunchTemplatePrivateDnsNameOptionsArgs']]:
    """Input value for the `privateDnsNameOptions` field."""
    return pulumi.get(self, "private_dns_name_options")

@private_dns_name_options.setter
def private_dns_name_options(self, value: Optional[pulumi.Input['LaunchTemplatePrivateDnsNameOptionsArgs']]):
    pulumi.set(self, "private_dns_name_options", value)
@property
@pulumi.getter(name="ramDiskId")
def ram_disk_id(self) -> Optional[pulumi.Input[str]]:
    """Input value for the `ramDiskId` field."""
    return pulumi.get(self, "ram_disk_id")

@ram_disk_id.setter
def ram_disk_id(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "ram_disk_id", value)
@property
@pulumi.getter(name="securityGroupIds")
def security_group_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """Input value for the `securityGroupIds` field."""
    return pulumi.get(self, "security_group_ids")

@security_group_ids.setter
def security_group_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    pulumi.set(self, "security_group_ids", value)
@property
@pulumi.getter(name="securityGroups")
def security_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """Input value for the `securityGroups` field."""
    return pulumi.get(self, "security_groups")

@security_groups.setter
def security_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    pulumi.set(self, "security_groups", value)
@property
@pulumi.getter(name="tagSpecifications")
def tag_specifications(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LaunchTemplateTagSpecificationArgs']]]]:
    """Input value for the `tagSpecifications` field."""
    return pulumi.get(self, "tag_specifications")

@tag_specifications.setter
def tag_specifications(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LaunchTemplateTagSpecificationArgs']]]]):
    pulumi.set(self, "tag_specifications", value)
@property
@pulumi.getter(name="userData")
def user_data(self) -> Optional[pulumi.Input[str]]:
    """Input value for the `userData` field."""
    return pulumi.get(self, "user_data")

@user_data.setter
def user_data(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "user_data", value)
@pulumi.input_type
class LaunchTemplateEbsArgs:
    """Pulumi input type for a launch template's EBS block-device settings.

    All fields are optional; only explicitly-provided (non-None) values
    are recorded via pulumi.set.
    """

    def __init__(__self__, *,
                 delete_on_termination: Optional[pulumi.Input[bool]] = None,
                 encrypted: Optional[pulumi.Input[bool]] = None,
                 iops: Optional[pulumi.Input[int]] = None,
                 kms_key_id: Optional[pulumi.Input[str]] = None,
                 snapshot_id: Optional[pulumi.Input[str]] = None,
                 throughput: Optional[pulumi.Input[int]] = None,
                 volume_size: Optional[pulumi.Input[int]] = None,
                 volume_type: Optional[pulumi.Input[str]] = None):
        # Only store values the caller actually supplied.
        if delete_on_termination is not None:
            pulumi.set(__self__, "delete_on_termination", delete_on_termination)
        if encrypted is not None:
            pulumi.set(__self__, "encrypted", encrypted)
        if iops is not None:
            pulumi.set(__self__, "iops", iops)
        if kms_key_id is not None:
            pulumi.set(__self__, "kms_key_id", kms_key_id)
        if snapshot_id is not None:
            pulumi.set(__self__, "snapshot_id", snapshot_id)
        if throughput is not None:
            pulumi.set(__self__, "throughput", throughput)
        if volume_size is not None:
            pulumi.set(__self__, "volume_size", volume_size)
        if volume_type is not None:
            pulumi.set(__self__, "volume_type", volume_type)

    @property
    @pulumi.getter(name="deleteOnTermination")
    def delete_on_termination(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "delete_on_termination")

    @delete_on_termination.setter
    def delete_on_termination(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "delete_on_termination", value)

    @property
    @pulumi.getter
    def encrypted(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "encrypted")

    @encrypted.setter
    def encrypted(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "encrypted", value)

    @property
    @pulumi.getter
    def iops(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "iops")

    @iops.setter
    def iops(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "iops", value)

    @property
    @pulumi.getter(name="kmsKeyId")
    def kms_key_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "kms_key_id")

    @kms_key_id.setter
    def kms_key_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kms_key_id", value)

    @property
    @pulumi.getter(name="snapshotId")
    def snapshot_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "snapshot_id")

    @snapshot_id.setter
    def snapshot_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "snapshot_id", value)

    @property
    @pulumi.getter
    def throughput(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "throughput")

    @throughput.setter
    def throughput(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "throughput", value)

    @property
    @pulumi.getter(name="volumeSize")
    def volume_size(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "volume_size")

    @volume_size.setter
    def volume_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "volume_size", value)

    @property
    @pulumi.getter(name="volumeType")
    def volume_type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "volume_type")

    @volume_type.setter
    def volume_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "volume_type", value)
@pulumi.input_type
class LaunchTemplateElasticGpuSpecificationArgs:
    """Pulumi input type for an elastic GPU specification (`type` only)."""

    def __init__(__self__, *,
                 type: Optional[pulumi.Input[str]] = None):
        if type is not None:
            pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class LaunchTemplateElasticInferenceAcceleratorArgs:
    """Pulumi input type for an elastic inference accelerator.

    Optional `count` and `type`; only non-None values are stored.
    """

    def __init__(__self__, *,
                 count: Optional[pulumi.Input[int]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        if count is not None:
            pulumi.set(__self__, "count", count)
        if type is not None:
            pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def count(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "count")

    @count.setter
    def count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "count", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class LaunchTemplateEnclaveOptionsArgs:
    """Pulumi input type for enclave options (`enabled` only)."""

    def __init__(__self__, *,
                 enabled: Optional[pulumi.Input[bool]] = None):
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
@pulumi.input_type
class LaunchTemplateHibernationOptionsArgs:
    """Pulumi input type for hibernation options (`configured` only)."""

    def __init__(__self__, *,
                 configured: Optional[pulumi.Input[bool]] = None):
        if configured is not None:
            pulumi.set(__self__, "configured", configured)

    @property
    @pulumi.getter
    def configured(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "configured")

    @configured.setter
    def configured(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "configured", value)
@pulumi.input_type
class LaunchTemplateIamInstanceProfileArgs:
    """Pulumi input type for an IAM instance profile.

    Optional `arn` and `name`; only non-None values are stored.
    """

    def __init__(__self__, *,
                 arn: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "arn")

    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class LaunchTemplateInstanceMarketOptionsArgs:
    """Pulumi input type for instance market options.

    Optional `market_type` and nested `spot_options`; only non-None
    values are stored.
    """

    def __init__(__self__, *,
                 market_type: Optional[pulumi.Input[str]] = None,
                 spot_options: Optional[pulumi.Input['LaunchTemplateSpotOptionsArgs']] = None):
        if market_type is not None:
            pulumi.set(__self__, "market_type", market_type)
        if spot_options is not None:
            pulumi.set(__self__, "spot_options", spot_options)

    @property
    @pulumi.getter(name="marketType")
    def market_type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "market_type")

    @market_type.setter
    def market_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "market_type", value)

    @property
    @pulumi.getter(name="spotOptions")
    def spot_options(self) -> Optional[pulumi.Input['LaunchTemplateSpotOptionsArgs']]:
        return pulumi.get(self, "spot_options")

    @spot_options.setter
    def spot_options(self, value: Optional[pulumi.Input['LaunchTemplateSpotOptionsArgs']]):
        pulumi.set(self, "spot_options", value)
@pulumi.input_type
class LaunchTemplateInstanceRequirementsArgs:
def __init__(__self__, *,
accelerator_count: Optional[pulumi.Input['LaunchTemplateAcceleratorCountArgs']] = None,
accelerator_manufacturers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
accelerator_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
accelerator_total_memory_mi_b: Optional[pulumi.Input['LaunchTemplateAcceleratorTotalMemoryMiBArgs']] = None,
accelerator_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
bare_metal: Optional[pulumi.Input[str]] = None,
baseline_ebs_bandwidth_mbps: Optional[pulumi.Input['LaunchTemplateBaselineEbsBandwidthMbpsArgs']] = None,
burstable_performance: Optional[pulumi.Input[str]] = None,
cpu_manufacturers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
excluded_instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
instance_generations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
local_storage: Optional[pulumi.Input[str]] = None,
local_storage_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
memory_gi_b_per_v_cpu: Optional[pulumi.Input['LaunchTemplateMemoryGiBPerVCpuArgs']] = None,
memory_mi_b: Optional[pulumi.Input['LaunchTemplateMemoryMiBArgs']] = None,
network_interface_count: Optional[pulumi.Input['LaunchTemplateNetworkInterfaceCountArgs']] = None,
on_demand_max_price_percentage_over_lowest_price: Optional[pulumi.Input[int]] = None,
require_hibernate_support: Optional[pulumi.Input[bool]] = None,
spot_max_price_percentage_over_lowest_price: Optional[pulumi.Input[int]] = None,
total_local_storage_gb: Optional[pulumi.Input['LaunchTemplateTotalLocalStorageGBArgs']] = None,
v_cpu_count: Optional[pulumi.Input['LaunchTemplateVCpuCountArgs']] = None):
if accelerator_count is not None:
pulumi.set(__self__, "accelerator_count", accelerator_count)
if accelerator_manufacturers is not None:
pulumi.set(__self__, "accelerator_manufacturers", accelerator_manufacturers)
if accelerator_names is not None:
pulumi.set(__self__, "accelerator_names", accelerator_names)
if accelerator_total_memory_mi_b is not None:
pulumi.set(__self__, "accelerator_total_memory_mi_b", accelerator_total_memory_mi_b)
if accelerator_types is not None:
pulumi.set(__self__, "accelerator_types", accelerator_types)
if bare_metal is not None:
pulumi.set(__self__, "bare_metal", bare_metal)
if baseline_ebs_bandwidth_mbps is not None:
pulumi.set(__self__, "baseline_ebs_bandwidth_mbps", baseline_ebs_bandwidth_mbps)
if burstable_performance is not None:
pulumi.set(__self__, "burstable_performance", burstable_performance)
if cpu_manufacturers is not None:
pulumi.set(__self__, "cpu_manufacturers", cpu_manufacturers)
if excluded_instance_types is not None:
pulumi.set(__self__, "excluded_instance_types", excluded_instance_types)
if instance_generations is not None:
pulumi.set(__self__, "instance_generations", instance_generations)
if local_storage is not None:
pulumi.set(__self__, "local_storage", local_storage)
if local_storage_types is not None:
pulumi.set(__self__, "local_storage_types", local_storage_types)
if memory_gi_b_per_v_cpu is not None:
pulumi.set(__self__, "memory_gi_b_per_v_cpu", memory_gi_b_per_v_cpu)
if memory_mi_b is not None:
pulumi.set(__self__, "memory_mi_b", memory_mi_b)
if network_interface_count is not None:
pulumi.set(__self__, "network_interface_count", network_interface_count)
if on_demand_max_price_percentage_over_lowest_price is not None:
pulumi.set(__self__, "on_demand_max_price_percentage_over_lowest_price", on_demand_max_price_percentage_over_lowest_price)
if require_hibernate_support is not None:
pulumi.set(__self__, "require_hibernate_support", require_hibernate_support)
if spot_max_price_percentage_over_lowest_price is not None:
pulumi.set(__self__, "spot_max_price_percentage_over_lowest_price", spot_max_price_percentage_over_lowest_price)
if total_local_storage_gb is not None:
pulumi.set(__self__, "total_local_storage_gb", total_local_storage_gb)
if v_cpu_count is not None:
pulumi.set(__self__, "v_cpu_count", v_cpu_count)
@property
@pulumi.getter(name="acceleratorCount")
def accelerator_count(self) -> Optional[pulumi.Input['LaunchTemplateAcceleratorCountArgs']]:
return pulumi.get(self, "accelerator_count")
@accelerator_count.setter
def accelerator_count(self, value: Optional[pulumi.Input['LaunchTemplateAcceleratorCountArgs']]):
pulumi.set(self, "accelerator_count", value)
@property
@pulumi.getter(name="acceleratorManufacturers")
def accelerator_manufacturers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "accelerator_manufacturers")
@accelerator_manufacturers.setter
def accelerator_manufacturers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "accelerator_manufacturers", value)
@property
@pulumi.getter(name="acceleratorNames")
def accelerator_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "accelerator_names")
@accelerator_names.setter
def accelerator_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "accelerator_names", value)
@property
@pulumi.getter(name="acceleratorTotalMemoryMiB")
def accelerator_total_memory_mi_b(self) -> Optional[pulumi.Input['LaunchTemplateAcceleratorTotalMemoryMiBArgs']]:
return pulumi.get(self, "accelerator_total_memory_mi_b")
@accelerator_total_memory_mi_b.setter
def accelerator_total_memory_mi_b(self, value: Optional[pulumi.Input['LaunchTemplateAcceleratorTotalMemoryMiBArgs']]):
pulumi.set(self, "accelerator_total_memory_mi_b", value)
@property
@pulumi.getter(name="acceleratorTypes")
def accelerator_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "accelerator_types")
@accelerator_types.setter
def accelerator_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "accelerator_types", value)
@property
@pulumi.getter(name="bareMetal")
def bare_metal(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "bare_metal")
@bare_metal.setter
def bare_metal(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "bare_metal", value)
@property
@pulumi.getter(name="baselineEbsBandwidthMbps")
def baseline_ebs_bandwidth_mbps(self) -> Optional[pulumi.Input['LaunchTemplateBaselineEbsBandwidthMbpsArgs']]:
    """Generated accessor for the nested BaselineEbsBandwidthMbps args block."""
    return pulumi.get(self, "baseline_ebs_bandwidth_mbps")

@baseline_ebs_bandwidth_mbps.setter
def baseline_ebs_bandwidth_mbps(self, value: Optional[pulumi.Input['LaunchTemplateBaselineEbsBandwidthMbpsArgs']]):
    pulumi.set(self, "baseline_ebs_bandwidth_mbps", value)
@property
@pulumi.getter(name="burstablePerformance")
def burstable_performance(self) -> Optional[pulumi.Input[str]]:
    """Generated accessor for `burstable_performance` (wire name `burstablePerformance`)."""
    return pulumi.get(self, "burstable_performance")

@burstable_performance.setter
def burstable_performance(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "burstable_performance", value)
@property
@pulumi.getter(name="cpuManufacturers")
def cpu_manufacturers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """Generated accessor for `cpu_manufacturers` (wire name `cpuManufacturers`)."""
    return pulumi.get(self, "cpu_manufacturers")

@cpu_manufacturers.setter
def cpu_manufacturers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    pulumi.set(self, "cpu_manufacturers", value)
@property
@pulumi.getter(name="excludedInstanceTypes")
def excluded_instance_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """Generated accessor for `excluded_instance_types` (wire name `excludedInstanceTypes`)."""
    return pulumi.get(self, "excluded_instance_types")

@excluded_instance_types.setter
def excluded_instance_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    pulumi.set(self, "excluded_instance_types", value)
@property
@pulumi.getter(name="instanceGenerations")
def instance_generations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """Generated accessor for `instance_generations` (wire name `instanceGenerations`)."""
    return pulumi.get(self, "instance_generations")

@instance_generations.setter
def instance_generations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    pulumi.set(self, "instance_generations", value)
@property
@pulumi.getter(name="localStorage")
def local_storage(self) -> Optional[pulumi.Input[str]]:
return | |
# -*- coding: utf-8 -*-
"""
Copyright (c) 2018-2021 <NAME>, Typee project, http://www.typee.ovh
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
#=============================================================================
from typing import Tuple

from FrontEnd.IntermediateCode.fe_icode_token_node import *
from FrontEnd.IntermediateCode.fe_tokenized_icode import FETokenizedICode
from FrontEnd.IntermediateCode.fe_icblock import FEICBlock
from FrontEnd.IntermediateCode.fe_icleaf import FEICLeaf
from FrontEnd.IntermediateCode.fe_ictree import FEICTree
from FrontEnd.Errors.fe_syntax_errors import FESyntaxErrors
#=============================================================================
class FEParser:
"""
This is the class of the Typee Front-End Parser.
It is the second stage of the front-end pipeline of the Typee
translator.
It parses Typee tokenized Intermediate Code as generated by the
Front-End Scanner and syntaxically parses it to produce Typee
Syntaxic Intermediate Code that will be then analyzed by the
Typee Front-End Elaborator.
Currently, implements version 10 of Typee Grammar Specification.
"""
#-------------------------------------------------------------------------
def __init__(self, tokenized_intermediate_code: FETokenizedICode = None) -> None:
    '''
    Constructor.
    Args:
        tokenized_intermediate_code: FETokenizedICode
            A reference to the Front-End intermediate code to be parsed.
            May be None, in which case the code must be supplied later
            when calling parse().
    '''
    self._tokenizedIC = tokenized_intermediate_code
#-------------------------------------------------------------------------
def parse(self, tokenized_intermediate_code: FETokenizedICode = None) -> Tuple[FEICTree, int]:
    '''
    Parses some Front-End Intermediate Code and generates the related
    Front-End Syntaxic Code.
    Args:
        tokenized_intermediate_code: FETokenizedICode
            A reference to the Front-End intermediate code to be parsed.
            If None, the intermediate code passed at construction time
            will be used.
    Returns:
        The corresponding Syntaxic Code as generated by this parser,
        associated with the detected errors count.
    Raises:
        AssertionError: no intermediate code has been passed for parsing,
            either at construction time or at parsing time.
    '''
    # BUGFIX: the return annotation was `tuple(FEICTree,int)`, which *calls*
    # tuple() with two arguments and raises TypeError as soon as the class
    # body is executed.  `Tuple[FEICTree, int]` is the intended annotation.
    self._tokenizedIC = tokenized_intermediate_code or self._tokenizedIC
    assert self._tokenizedIC is not None
    return self._parse_code_file()
#=========================================================================
#-------------------------------------------------------------------------
def _abstract_or_final_qualif(self) -> bool:
    #=======================================================================
    # <abstract or final qualif> ::= <abstract qualifier>
    #                              | <final qualifier>
    #                              | EPS
    #=======================================================================
    # BUGFIX: the EPS alternative used to fall off the end and return None
    # although the method is annotated `-> bool`; an EPS production always
    # matches, so True must be returned unconditionally.
    if self._current.is_ABSTRACT() or self._current.is_FINAL():
        self._append_syntaxic_node()
        self._next_token_node()
    return True
#-------------------------------------------------------------------------
def _access_protection_statement(self) -> bool:
    #=======================================================================
    # <access protection statement> ::= ':' <access qualifier> ':'
    #=======================================================================
    # Guard clause: without a leading ':' this production does not apply.
    if not self._current.is_COLON():
        return False
    self._append_syntaxic_node()
    self._next_token_node()
    if not self._access_qualifier():
        self._append_error( FESyntaxErrors.ACCESS_QUALIFIER )
    # The statement must be terminated by a second ':'.
    if self._current.is_COLON():
        self._append_syntaxic_node()
        self._next_token_node()
    else:
        self._append_error( FESyntaxErrors.ACCESS_END )
    return True
#-------------------------------------------------------------------------
def _access_qualifier(self) -> bool:
    #=======================================================================
    # <protection qualifier> ::= 'hidden' | 'local' | 'private'
    #                          | 'protected' | 'public'
    #=======================================================================
    # Only hidden/protected/public are tested here; the other spellings
    # were already mapped by the Scanner.
    tok = self._current
    if not (tok.is_HIDDEN() or tok.is_PROTECTED() or tok.is_PUBLIC()):
        return False
    self._append_syntaxic_node()
    self._next_token_node()
    return True
#-------------------------------------------------------------------------
def _and_test(self) -> bool:
    #=======================================================================
    # <and test> ::= <not test> <and test'>
    #=======================================================================
    # Parses a mandatory <not test>, then lets <and test'> consume any
    # "'and' <not test>" continuation (which may derive EPS).
    return self._not_test() and self._and_test1()
#-------------------------------------------------------------------------
def _and_test1(self) -> bool:
    #=======================================================================
    # <and test'> ::= 'and' <not test>
    #              | EPS
    #=======================================================================
    # BUGFIX: the EPS path used to return None, which made the caller's
    # `self._not_test() and self._and_test1()` (see <and test>) evaluate
    # falsy even when the <not test> succeeded.  EPS always matches.
    if self._current.is_AND():
        self._append_syntaxic_node()
        self._next_token_node()
        self._not_test()
    return True
#-------------------------------------------------------------------------
def _arithmetic_expr(self) -> bool:
    #=======================================================================
    # <arithmetic expr> ::= <term> <arithmetic expr'>
    #=======================================================================
    # A mandatory <term>, then the iterative +/- continuation which
    # always succeeds (it may derive EPS).
    return self._term() and self._arithmetic_expr1()
#-------------------------------------------------------------------------
def _arithmetic_expr1(self) -> bool:
    #=======================================================================
    # <arithmetic expr'> ::= '+' <template args> <term> <arithmetic expr'>
    #                      | '-' <template args> <term> <arithmetic expr'>
    #                      | EPS
    #=======================================================================
    # Right recursion implemented as a loop; EPS makes this production
    # always succeed, hence the unconditional True.
    while self._current.is_MINUS() or self._current.is_PLUS():
        self._append_syntaxic_node()
        self._next_token_node()
        self._template_args() ## (notice: always returns True)
        if not self._term():
            self._append_error( FESyntaxErrors.ARITHM_EXPR )
    return True
#-------------------------------------------------------------------------
def _array_type(self) -> bool:
    #=======================================================================
    # <array type> ::= "array" <declared contained type>
    #=======================================================================
    # Guard clause: production starts with the 'array' keyword.
    if not self._current.is_ARRAY():
        return False
    self._append_syntaxic_node()
    self._next_token_node()
    if not self._declared_contained_type():
        self._append_error( FESyntaxErrors.ARRAY_CONTAINED_TYPE )
    return True
#-------------------------------------------------------------------------
def _assert_statement(self) -> bool:
    #=======================================================================
    # <assert statement> ::= 'assert' <expression> <assert statement'>
    #=======================================================================
    # Guard clause: production starts with the 'assert' keyword.
    if not self._current.is_ASSERT():
        return False
    self._append_syntaxic_node()
    self._next_token_node()
    if not self._expression():
        self._append_error( FESyntaxErrors.ASSERT_EXPR )
    self._assert_statement1()
    return True
#-------------------------------------------------------------------------
def _assert_statement1(self) -> bool:
    #=======================================================================
    # <assert statement'> ::= ',' <expression>
    #                       | EPS
    #=======================================================================
    # BUGFIX: the EPS path used to fall off the end and return None despite
    # the `-> bool` annotation; an EPS production always matches.
    if self._current.is_COMMA():
        self._append_syntaxic_node()
        self._next_token_node()
        if not self._expression():
            self._append_error( FESyntaxErrors.ASSERT_COMMA_EXPR )
    return True
#-------------------------------------------------------------------------
def _assign_decl_def_funccall_statement(self) -> bool:
    #=======================================================================
    # <assign decl def func-call statement> ::= <access qualifier> <decl or def statement>
    #                                         | <decl or def statement>
    #                                         | <dotted name> <assign or func-call statement> <simple statement end>
    #=======================================================================
    # The alternatives are tried in this exact order: each successful
    # sub-parse consumes tokens, so later branches only run when the
    # earlier ones did not match at all.
    if self._access_qualifier():
        if not self._decl_or_def_statement():
            self._append_error( FESyntaxErrors.PROTECTION_DECL_DEF )
        return True
    elif self._decl_or_def_statement():
        return True
    elif self._dotted_name():
        if not self._assign_or_funccall_statement():
            self._append_error( FESyntaxErrors.ASSIGN_FUNC_CALL )
        if not self._simple_statement_end():
            self._append_error( FESyntaxErrors.STATEMENT_END )
        return True
    else:
        return False
#-------------------------------------------------------------------------
def _assign_op(self) -> bool:
    #=======================================================================
    # <assign op> ::= '='
    #               | <augmented assign op>
    #=======================================================================
    # Plain '=' first; otherwise delegate to the augmented-operator rule.
    if self._current.is_ASSIGN():
        self._append_syntaxic_node()
        self._next_token_node()
        return True
    return self._augmented_assign_op()
#-------------------------------------------------------------------------
def _assign_or_funccall_statement(self) -> bool:
    #=======================================================================
    # <assign or func-call statement> ::= <target list'> <assignment statement>
    #                                   | <function call>
    #=======================================================================
    if self._target_list1():
        if not self._assignment_statement():
            self._append_error( FESyntaxErrors.ASSIGN_OPERATOR )
        return True
    # Not an assignment target list: the only remaining option is a call.
    return True if self._function_call() else False
#-------------------------------------------------------------------------
def _assignment_statement(self) -> bool:
    #=======================================================================
    # <assignment statement> ::= <assign op> <expr list>
    #=======================================================================
    # Guard clause: an assignment starts with an assignment operator.
    if not self._assign_op():
        return False
    if not self._expr_list():
        self._append_error( FESyntaxErrors.ASSIGN_EXPR )
    return True
#-------------------------------------------------------------------------
def _atom(self) -> bool:
    #=======================================================================
    # <atom> ::= <decr> <dotted name> <incr or decr>
    #          | <incr> <dotted name> <incr or decr>
    #          | <enclosure>
    #          | <reference>
    #          | <scalar>
    #          | <string>
    #          | <boolean>
    #=======================================================================
    # Pre-decrement / pre-increment forms are tried first because they
    # start with their operator token; the remaining alternatives are
    # attempted in sequence until one matches.
    if self._decr():
        if not self._dotted_name():
            self._append_error( FESyntaxErrors.DECR_IDENT )
        return self._incr_or_decr()
    elif self._incr():
        if not self._dotted_name():
            self._append_error( FESyntaxErrors.INCR_IDENT )
        return self._incr_or_decr()
    else:
        return self._enclosure() or \
               self._reference() or \
               self._scalar() or \
               self._string() or \
               self._boolean()
#-------------------------------------------------------------------------
def _atom1(self) -> bool:
    #=======================================================================
    # <atom'> ::= <incr or decr>
    #           | <for comprehension>
    #           | '??' <expression> <atom''>
    #=======================================================================
    # BUGFIX: this method was defined as `atom1` (no leading underscore),
    # but every call site invokes `self._atom1()` (see <atom element'>),
    # which raised AttributeError at runtime.
    if self._current.is_OP_2QUEST():
        self._append_syntaxic_node()
        self._next_token_node()
        if not self._expression():
            self._append_error( FESyntaxErrors.OP_2QUEST_EXPR )
        return self._atom2()
    else:
        return self._for_comprehension() or self._incr_or_decr()
        ## CAUTION: this order of calls is MANDATORY
#-------------------------------------------------------------------------
def _atom2(self) -> bool:
    #=======================================================================
    # <atom''> ::= '??' <expression> <atom''>
    #            | EPS
    #=======================================================================
    # BUGFIX: defined as `atom2` but called as `self._atom2()` (see
    # <atom'>), which raised AttributeError at runtime.
    while self._current.is_OP_2QUEST():
        self._append_syntaxic_node()
        self._next_token_node()
        if not self._expression():
            self._append_error( FESyntaxErrors.OP_2QUEST_EXPR )
    return True
#-------------------------------------------------------------------------
def _atom_element(self) -> bool:
    #===============================================================================
    # <atom element> ::= <atom>
    #                  | <dotted name> <atom element'>
    #                  | <const qualifier> <atom element''>
    #                  | <atom element''>
    #===============================================================================
    # Alternatives are tried in order; each successful sub-parse consumes
    # tokens, so the order of these tests is significant.
    if self._atom():
        return True
    elif self._dotted_name():
        return self._atom_element1()
    elif self._const_qualifier():
        # 'const' must be followed by a scalar-type cast.
        if not self._atom_element2():
            self._append_error( FESyntaxErrors.SCALAR_TYPE )
        return True
    elif self._atom_element2():
        return True
    else:
        return False
#-------------------------------------------------------------------------
def _atom_element1(self) -> bool:
    #=======================================================================
    # <atom element'> ::= <atom'>
    #                   | <atom element'''>
    #=======================================================================
    # <atom'> is tried first; <atom element'''> derives EPS so the
    # disjunction as a whole always succeeds.
    return self._atom1() or self._atom_element3()
#-------------------------------------------------------------------------
def _atom_element2(self) -> bool:
    #=======================================================================
    # <atom element''> ::= <scalar type'> <scalar type casting>
    #=======================================================================
    # Guard clause: requires a scalar type to start the cast expression.
    if not self._scalar_type1():
        return False
    if not self._type_casting():
        self._append_error( FESyntaxErrors.CASTING_PAROP )
    return True
#-------------------------------------------------------------------------
def _atom_element3(self) -> bool:
    #=======================================================================
    # <atom element'''> ::= <function call> <atom element''''>
    #                     | <is instance of>
    #                     | <subscription or slicing> <atom element''''>
    #                     | EPS
    #=======================================================================
    # Greedily consume any run of trailing calls / subscriptions, then an
    # optional <is instance of>.  The EPS alternative makes this
    # production always succeed, so True is returned unconditionally
    # (the original if/else returned True on both branches).
    while self._function_call() or self._subscription_or_slicing():
        pass
    self._is_instance_of()
    return True
#-------------------------------------------------------------------------
def _augmented_assign_op(self) -> bool:
    #===============================================================================
    # <augmented assign op> ::= '+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|='
    #                         | '^=' | '<<=' | '<<<=' | '>>=' | '>>>=' | '**='
    #                         | '^^=' | '@=' | '@@=' | '><=' | '<>=' | '!!='
    #                         | '::=' | '??='
    #===============================================================================
    # One isinstance() test against the whole family of augmented-assign
    # token-node classes; matching consumes exactly one token.
    if isinstance( self._current, (ICTokenNode_AUG_2AROB,
                                   ICTokenNode_AUG_2COLN,
                                   ICTokenNode_AUG_2EXCL,
                                   ICTokenNode_AUG_2QUEST,
                                   ICTokenNode_AUG_AROBASE,
                                   ICTokenNode_AUG_BITAND,
                                   ICTokenNode_AUG_BITOR,
                                   ICTokenNode_AUG_BITXOR,
                                   ICTokenNode_AUG_DIV,
                                   ICTokenNode_AUG_GRLE,
                                   ICTokenNode_AUG_LEGR,
                                   ICTokenNode_AUG_MINUS,
                                   ICTokenNode_AUG_MOD,
                                   ICTokenNode_AUG_MUL,
                                   ICTokenNode_AUG_PLUS,
                                   ICTokenNode_AUG_POWER,
                                   ICTokenNode_AUG_SHIFT0L,
                                   ICTokenNode_AUG_SHIFT0R,
                                   ICTokenNode_AUG_SHIFTL,
                                   ICTokenNode_AUG_SHIFTR) ):
        self._append_syntaxic_node()
        self._next_token_node()
        return True
    else:
        return False
#-------------------------------------------------------------------------
def _auto_type(self) -> bool:
    #=======================================================================
    # <auto type> ::= '?' <auto type'>
    #=======================================================================
    # Guard clause: production starts with the '?' (any-type) token.
    if not self._current.is_ANY_TYPE():
        return False
    self._append_syntaxic_node()
    self._next_token_node()
    self._auto_type1() ## (notice: always returns True)
    return True
#-------------------------------------------------------------------------
def _auto_type1(self) -> | |
self._AR_X_final_power.setText("<b style='color: red'>INT!<b>")
self._AR_XY_final_power.setText("<b style='color: red'>INT!<b>")
else:
self._AR_X_intervals.setStyleSheet("background-color: none; color: #000;")
if y_isPow and not (y_incr.is_integer()):
# y_power error
_got_error = True
self._AR_Y_intervals.setStyleSheet("background-color: #DF2928; color: #fff;")
self._AR_Y_final_power.setText("<b style='color: red'>INT!<b>")
self._AR_XY_final_power.setText("<b style='color: red'>INT!<b>")
else:
self._AR_Y_intervals.setStyleSheet("background-color: none; color: #000;")
# RASTER SETTINGS
self._AR_raster_style.setStyleSheet("background-color: none; color: #000;")
if step_along_x and step_along_y:
self._AR_raster_style.setText("Unfilled {}\nDrawing an outline".format(indiv_type))
self._AR_step_size.setReadOnly(True)
self._AR_step_size.setStyleSheet("background-color: #ccc; color: #555;")
elif step_along_x:
self._AR_raster_style.setText("Filled {}\nContinuous along x-axis".format(indiv_type))
self._AR_step_size.setReadOnly(False)
self._AR_step_size.setStyleSheet("background-color: none; color: #000;")
elif step_along_y:
self._AR_raster_style.setText("Filled {}\nContinuous along y-axis".format(indiv_type))
self._AR_step_size.setReadOnly(False)
self._AR_step_size.setStyleSheet("background-color: none; color: #000;")
else:
_got_error = True
self._AR_raster_style.setText("No axis selected\nChoose at least one axis")
self._AR_raster_style.setStyleSheet("background-color: #DF2928; color: #fff;")
self._AR_step_size.setReadOnly(False)
self._AR_step_size.setStyleSheet("background-color: none; color: #000;")
except Exception as e:
# We assume the user is not done entering the data
self.logconsole("{}: {}".format(type(e).__name__, e))
self._AR_Y_final_settings.setStyleSheet("background-color: none; color: #000;")
self._AR_Y_final_settings.setStyleSheet("background-color: none; color: #000;")
self._AR_XY_final_settings.setStyleSheet("background-color: none; color: #000;")
self._AR_rows.setStyleSheet("background-color: none; color: #000;")
self._AR_rows.setStyleSheet("background-color: none; color: #000;")
self._AR_size_x.setStyleSheet("background-color: none; color: #000;")
self._AR_size_y.setStyleSheet("background-color: none; color: #000;")
self._AR_summary_text.setText("-")
self._AR_raster_style.setText("-\n")
self._AR_raster_style.setStyleSheet("background-color: none; color: #000;")
self._AR_X_final_velocity.setText("-")
self._AR_Y_final_velocity.setText("-")
self._AR_XY_final_velocity.setText("-")
self._AR_X_final_power.setText("-")
self._AR_Y_final_power.setText("-")
self._AR_XY_final_power.setText("-")
# Check if the values are even valid
# Change background if necessary
else:
# There are no errors, and we check if startRaster
if startRaster and not _got_error:
# JUMP TO DEF
# def arrayraster(self, inivel, inipower, x_isVel, ncols, xincrement, xGap, y_isVel, nrows, yincrement, ygap, xDist, yDist, rasterSettings, returnToOrigin = True):
# Raster in a rectangle
# rasterSettings = {
# "direction": "x" || "y" || "xy", # Order matters here xy vs yx
# "step": 1 # If set to xy, step is not necessary
# }
if not step_along_x and not step_along_y:
self.setOperationStatus("Step-axis not selected!")
return
rsSettingsDir = ""
rsSettingsDir += "x" if step_along_x else ""
rsSettingsDir += "y" if step_along_y else ""
if step_along_x and step_along_y:
rsSettings = { "direction" : rsSettingsDir }
else:
rsSettings = { "direction" : rsSettingsDir, "step": step_size }
self.setStartButtonsEnabled(False)
self.setOperationStatus("Starting Array Raster...")
ar_thread = ThreadWithExc(target = self._arrayraster, kwargs = dict(
xDist = size[1], yDist = size[0],
xGap = x_spac, yGap = y_spac,
nrows = y_rows, ncols = x_cols,
inivel = vel_0, inipower = pow_0,
x_isVel = x_isPow ^ 1, y_isVel = y_isPow ^ 1,
xincrement = x_incr, yincrement = y_incr,
rasterSettings = rsSettings,
returnToOrigin = returnToOrigin
))
ar_thread.start()
elif startRaster:
# Alert got error
self.criticalDialog(message = "Error in array raster settings.\nPlease check again!", host = self)
def _arrayraster(self, **kwargs):
    # Worker-thread entry point: forwards all raster parameters to
    # stageControl.arrayraster() and reports the outcome on the GUI.
    try:
        self.stageControl.arrayraster(**kwargs)
    except Exception as e:
        self.setOperationStatus("Error Occurred. {}".format(e))
        if self.devMode:
            # In developer mode, surface the full traceback instead of
            # swallowing the error.
            raise
    else:
        self.setOperationStatus("Ready.")
    finally:
        # Re-enable the start buttons however the raster ended.
        self.setStartButtonsEnabled(True)
# Draw Picture
def _DP_getFile(self):
    # THIS IS PURELY FOR GUI CHOOSE FILE
    # Opens a file dialog and loops until the user either picks a BMP that
    # passes _DP_runChecks() or cancels the dialog.
    # Attempt to get filename from QLineEdit
    filename = self._DP_picture_fn.text()
    if os.path.isfile(filename):
        filename = os.path.abspath(os.path.realpath(os.path.expanduser(filename)))
        dirname = os.path.dirname(filename)
    else:
        dirname = base_dir # base_dir of this file
    self._DP_FileDialog = QtWidgets.QFileDialog(self, "Open Image", dirname, "1-Bit Bitmap Files (*.bmp)")
    self._DP_FileDialog.setFileMode(QtWidgets.QFileDialog.ExistingFile)
    self._DP_FileDialog.setViewMode(QtWidgets.QFileDialog.Detail)
    firstRun = True
    isValid = False
    error = None
    while not os.path.isfile(filename) or not isValid:
        # On every retry (not the first pass) tell the user why the
        # previous selection was rejected.
        if not firstRun:
            self.criticalDialog(message = "You have selected an invalid file", informativeText = "E: {}".format(error), title = "Invalid File", host = self)
        firstRun = False
        if (self._DP_FileDialog.exec_()):
            filename = self._DP_FileDialog.selectedFiles()[0]
            filename = os.path.abspath(os.path.realpath(os.path.expanduser(filename)))
            # Running this before loadPicture is so that windows gives the same file path url
        else:
            # User cancelled the dialog: abort silently.
            return
        # We run some checks here
        try:
            isValid = self._DP_runChecks(filename)
        except picConv.ImageError as e:
            isValid = False
            error = e
    if isValid:
        self._DP_picture_fn.setText(filename)
def _DP_runChecks(self, filename):
    # Checks if the file exists and is a 1-bit BMP small enough for the stage.
    # Returns True when all checks pass.
    # Raises picConv.ImageError if there are any errors.
    if not os.path.isfile(filename):
        raise picConv.ImageError("Path is not file!")
    try:
        image = PILImage.open(filename)
    except Exception as e:
        raise picConv.ImageError("({}): {}".format(type(e).__name__, e))
    if image.format != "BMP":
        raise picConv.ImageError("The selected image is not a valid .bmp file!")
    if image.mode != '1':
        raise picConv.ImageError("Your image has mode {}. Please use a 1-bit indexed (mode 1) image, see <a href='https://pillow.readthedocs.io/en/stable/handbook/concepts.html#bands'>https://pillow.readthedocs.io/en/stable/handbook/concepts.html#bands</a>. If using GIMP to convert picture to 1-bit index, ensure 'remove colour from palette' is unchecked. ".format(image.mode))
    # Check size purely based on image and stage size
    # Does not take into account the scaling factor yet
    xlim = self.stageControl.controller.stage.xlim
    ylim = self.stageControl.controller.stage.ylim
    if image.size[0] > abs(xlim[1] - xlim[0]) or image.size[1] > abs(ylim[1] - ylim[0]):
        raise picConv.ImageError("Image way too big ._., exceeds stage limits")
    return True
def _DP_loadPictureIntoPreviewer(self, filename):
    # Loads `filename` into the preview QLabel, scaled to fit while keeping
    # aspect ratio.  Returns a DoneObject on success; otherwise shows a
    # critical dialog and returns its result.
    self._DP_picture_preview_pic = QtGui.QPixmap()
    if self._DP_picture_preview_pic.load(filename):
        self._DP_picture_preview_pic = self._DP_picture_preview_pic.scaled(self._DP_picture_preview.size(), QtCore.Qt.KeepAspectRatio)
        self._DP_picture_preview.setPixmap(self._DP_picture_preview_pic)
        return DoneObject()
    else:
        return self.criticalDialog(message = "Unable to load the selected file into preview", title = "Unable to load file", host = self)
def _DP_loadPicture(self):
    # Re-validates the filename from the QLineEdit, then loads it into the
    # previewer and registers it as the current picture.
    # run tests again
    filename = self._DP_picture_fn.text()
    try:
        filename = os.path.abspath(os.path.realpath(os.path.expanduser(filename)))
        isValid = self._DP_runChecks(filename)
    except picConv.ImageError as e:
        return self.criticalDialog(message = "You have selected an invalid file", informativeText = "E: {}".format(e), title = "Invalid File", host = self)
    # Load picture into previewer
    x = self._DP_loadPictureIntoPreviewer(filename)
    if isinstance(x, DoneObject):
        # save the filename if everything passes
        self._DP_filename_string = filename
        self._DP_optionsChanged()
def _DP_filenameLineEditChanged(self):
    # Highlight the Load button (orange) to show the typed filename has not
    # been loaded yet.
    self._DP_picture_load.setStyleSheet("background-color: #FF8800;")
def _DP_optionsChanged(self):
    # Marks the options dirty (only once a picture was loaded): clears the
    # Load-button highlight and highlights Parse instead.
    if hasattr(self, "_DP_filename_string"):
        self._DP_optionsChangedFlag = True
        self._DP_picture_load.setStyleSheet("")
        self._DP_picture_parse.setStyleSheet("background-color: #FF8800;")
def _DP_parsePicture(self):
    # Builds a picConv.PicConv from the registered picture and the current
    # GUI options, then runs the conversion in a background thread behind a
    # modal progress dialog.
    # _DP_loadPicture has to be run first to do sanity checks
    if not hasattr(self, "_DP_filename_string") or self._DP_filename_string is None or not len(self._DP_filename_string):
        return self.criticalDialog(message = "Image not loaded!", informativeText = "Please load the image first before parsing. Filename not captured", title = "Image not loaded!", host = self)
    # Differs from Line edit, prompt to let user know
    lefn = self._DP_picture_fn.text()
    if self._DP_filename_string != lefn:
        ret = self.unsavedQuestionDialog(message = "Load new filenames?", title = "Differing filenames",informativeText = "Registered filename differs from the input filename:\n\nREG:{}\nENT:{}\n\nSave to proceed.\nDiscard/Cancel to go back".format(self._DP_filename_string, lefn), host = self)
        if ret == QtWidgets.QMessageBox.Save:
            self._DP_loadPicture()
        else:
            return
    # Change colour of the Parse button
    self._DP_picture_parse.setStyleSheet("")
    # Get Options
    try:
        xscale = float(self._DP_xscale.text())
        yscale = float(self._DP_yscale.text())
        cutMode = self._DP_cutMode.currentIndex()
        # `not not` coerces Qt CheckState to a plain bool.
        allowDiagonals = not not self._DP_allowDiagonals.checkState()
        flipVertically = not not self._DP_flipVertically.checkState()
        flipHorizontally = not not self._DP_flipHorizontally.checkState()
        prioritizeLeft = not not self._DP_prioritizeLeft.checkState()
    except Exception as e:
        return self.criticalDialog(message = "Invalid values", informativeText = "Please double check your parameters.", host = self)
    # TODO: create the picConv object
    # def __init__(self, filename, xscale = 1, yscale = 1, cut = 0, allowDiagonals = False, prioritizeLeft = False, flipHorizontally = False, flipVertically = False ,frames = False, simulate = False, simulateDrawing = False, micronInstance = None, shutterTime = 800):
    self.picConv = picConv.PicConv(
        filename = self._DP_filename_string,
        xscale = xscale,
        yscale = yscale,
        cut = cutMode,
        allowDiagonals = allowDiagonals,
        prioritizeLeft = prioritizeLeft,
        flipHorizontally = flipHorizontally,
        flipVertically = flipVertically,
        # NOTE(review): shutterTime doubles the shutter duration here --
        # presumably deliberate; confirm against picConv's expectations.
        shutterTime = self.stageControl.controller.shutter.duration * 2,
        micronInstance = self.stageControl.controller,
        GUI_Object = self
    )
    # def progressDialog(self, host = None, title = "Progress", labelText = None, cancelButtonText = "Cancel", range = (0, 100)):
    # Should be regenerated every time
    if hasattr(self, "pDialog"):
        del self.pDialog
    self.pDialog = self.progressDialog(
        host = self,
        title = "PicConv",
        labelText = "Converting Picture",
    )
    self.picConv_conversion_thread = ThreadWithExc(target = self._DP_ConvertParseLines)
    self.picConv_conversion_thread.start()
    # Block on the modal dialog until the worker closes it.
    self.pDialog.exec_()
    if hasattr(self, "pDialog"):
        del self.pDialog
def _DP_ConvertParseLines(self):
    # Background worker: runs picConv.convert() then picConv.parseLines(),
    # driving the progress dialog created by _DP_parsePicture().
    def cancelOperation(self):
        # Close the dialog if it still exists (it may have been deleted).
        return self.pDialog.close() if hasattr(self, "pDialog") else None
    self.pDialog.canceled.connect(lambda: cancelOperation(self))
    self.pDialog.setValue(0)
    # Redirect output through pDialog.setLabelText()
    try:
        self.picConv.convert()
    except Exception as e:
        # The error should have been emitted already
        self.logconsole("{}: {}".format(type(e).__name__, e))
        return cancelOperation(self)
    # Show test.png
    try:
        self._DP_loadPictureIntoPreviewer("picconv_test.png")
    except Exception as e:
        self.logconsole("{}: {}".format(type(e).__name__, e))
    # Wait out the dialog-update debounce window before retitling it.
    while datetime.datetime.now() < self.lastPDialogUpdate + datetime.timedelta(seconds = self.PDIALOG_TIMEOUT):
        time.sleep(self.PDIALOG_TIMEOUT)
    self.pDialog.setWindowTitle("Parsing Lines")
    self.pDialog_setLabelText("Parsing Lines")
    try:
        self.picConv.parseLines()
    except Exception as e:
        # The error should have been emitted already
        self.logconsole("{}: {}".format(type(e).__name__, e))
        return cancelOperation(self)
    # Change Colour of Draw
    self._DP_picture_draw.setStyleSheet("background-color: #FF8800;")
    self._DP_optionsChangedFlag = False
    return self.pDialog.close() if hasattr(self, "pDialog") else None
def _DP_drawPicture(self, estimateOnly = False):
# Check if loaded
if not hasattr(self, "_DP_filename_string") or self._DP_filename_string is None or not len(self._DP_filename_string):
return self.criticalDialog(message = "Image not loaded!", informativeText = "Please load the image first before parsing and drawing. Filename not captured", title = "Image not loaded!", host = self)
# Check if parseded
if not hasattr(self, "picConv") or not isinstance(self.picConv, picConv.PicConv):
return self.criticalDialog(message = "Image not parsed!", informativeText = "Please parse the image first before drawing. Image and options not captured.", | |
# <gh_stars>1-10
import functools
import importlib
import os
from pathlib import Path
from subprocess import check_call, run, PIPE, CompletedProcess
import sys
from typing import Optional, Sequence, Iterable, List
import traceback
from . import LazyLogger
log = LazyLogger('HPI cli')
@functools.lru_cache()
def mypy_cmd() -> Optional[Sequence[str]]:
    """Return the argv prefix used to invoke mypy, or None when unavailable."""
    try:
        # preferably, use mypy from current python env
        import mypy  # noqa -- imported only to probe availability
    except ImportError:
        pass
    else:
        return [sys.executable, '-m', 'mypy']
    # ok, not ideal but try from PATH
    import shutil
    if shutil.which('mypy') is not None:
        return ['mypy']
    warning("mypy not found, so can't check config with it. See https://github.com/python/mypy#readme if you want to install it and retry")
    return None
from types import ModuleType
def run_mypy(pkg: ModuleType) -> Optional[CompletedProcess]:
    """Type-check *pkg* with mypy; return the CompletedProcess, or None if mypy is unavailable."""
    from .preinit import get_mycfg_dir
    mycfg_dir = get_mycfg_dir()
    # todo ugh. not sure how to extract it from pkg?
    # todo dunno maybe use the same mypy config in repository?
    # I'd need to install mypy.ini then??
    env = {**os.environ}
    mpath = env.get('MYPYPATH')
    # Prepend the user config dir so mypy can resolve the my.config namespace.
    mpath = str(mycfg_dir) + ('' if mpath is None else f':{mpath}')
    env['MYPYPATH'] = mpath
    cmd = mypy_cmd()
    if cmd is None:
        return None
    # Capture both streams so the caller can render/inspect the report.
    mres = run([
        *cmd,
        '--namespace-packages',
        '--color-output', # not sure if works??
        '--pretty',
        '--show-error-codes',
        '--show-error-context',
        '--check-untyped-defs',
        '-p', pkg.__name__,
    ], stderr=PIPE, stdout=PIPE, env=env)
    return mres
def eprint(x: str) -> None:
    """Write *x* plus a newline to stderr (diagnostics must not pollute stdout)."""
    sys.stderr.write(x + '\n')
def indent(x: str) -> str:
    """Return *x* with every line prefixed by spaces (newlines preserved)."""
    prefix = '   '
    return ''.join(prefix + line for line in x.splitlines(keepends=True))
# Status glyphs used throughout the CLI output: OK marks a passing/enabled
# item, OFF marks a skipped/disabled one.
OK = '✅'
OFF = '🔲'
def info(x: str) -> None:
    """Report a success/informational line on stderr."""
    eprint(f'{OK} {x}')
def error(x: str) -> None:
    """Report a failure line on stderr."""
    eprint(f'❌ {x}')
def warning(x: str) -> None:
    """Report a warning line on stderr."""
    eprint(f'❗ {x}')  # todo yellow?
def tb(e: Exception) -> None:
    """Print *e*'s traceback to stderr, indented to stand apart from other output.

    Fixes two defects in the original:
    - pass the exception's actual type rather than the ``Exception`` base class;
      ``traceback.format_exception`` uses the first argument as the exception
      type, so passing ``Exception`` could mislabel the rendered error;
    - don't shadow the function's own name with the local variable.
    """
    tb_text = ''.join(traceback.format_exception(type(e), e, e.__traceback__))
    sys.stderr.write(indent(tb_text))
# todo not gonna work on Windows... perhaps make it optional and use colorama/termcolor? (similar to core.warnings)
class color:
    """ANSI SGR escape sequences for coloring terminal output.

    Foreground colors plus underline; RESET restores default attributes.
    """
    BLACK   = '\033[30m'
    RED     = '\033[31m'
    GREEN   = '\033[32m'
    YELLOW  = '\033[33m'
    BLUE    = '\033[34m'
    MAGENTA = '\033[35m'
    CYAN    = '\033[36m'
    WHITE   = '\033[37m'

    UNDERLINE = '\033[4m'
    RESET     = '\033[0m'
from argparse import Namespace
def config_create(args: Namespace) -> None:
    """Create a stub HPI config at the default location, then validate it.

    Exits with status 1 if the config directory already existed, or if the
    post-creation config check fails.

    Fix: corrected the "don't need ti" -> "don't need it" typo in the text
    written into the generated config file.
    """
    from .preinit import get_mycfg_dir
    mycfg_dir = get_mycfg_dir()

    created = False
    if not mycfg_dir.exists():
        # todo not sure about the layout... should I use my/config.py instead?
        my_config = mycfg_dir / 'my' / 'config' / '__init__.py'

        my_config.parent.mkdir(parents=True)
        my_config.write_text('''
### HPI personal config
## see
# https://github.com/karlicoss/HPI/blob/master/doc/SETUP.org#setting-up-modules
# https://github.com/karlicoss/HPI/blob/master/doc/MODULES.org
## for some help on writing your own config

# to quickly check your config, run:
# hpi config check

# to quickly check a specific module setup, run hpi doctor <module>, e.g.:
# hpi doctor my.reddit

### useful default imports
from my.core import Paths, PathIsh, get_files
###

# most of your configs will look like this:
class example:
    export_path: Paths = '/home/user/data/example_data_dir/'

### you can insert your own configuration below
### but feel free to delete the stuff above if you don't need it
'''.lstrip())
        info(f'created empty config: {my_config}')
        created = True
    else:
        error(f"config directory '{mycfg_dir}' already exists, skipping creation")

    check_passed = config_ok(args)
    if not created or not check_passed:
        sys.exit(1)
def config_check_cli(args: Namespace) -> None:
    """CLI entry point for 'hpi config check': exit 0 on success, 1 on failure.

    Fix: the original did ``sys.exit(0 if ok else False)`` -- ``False`` is
    equal to 0, so a failed check still reported success to the shell.
    """
    ok = config_ok(args)
    sys.exit(0 if ok else 1)
# TODO return the config as a result?
def config_ok(args: Namespace) -> bool:
    """Check that the user's HPI config is importable and well-formed.

    Runs several independent checks (import path, config import, stub
    detection, syntax compile, optional mypy), accumulating failures in
    ``errors`` so they can all be reported; only a completely missing config
    aborts immediately. Returns True when no problems were found.
    """
    errors: List[Exception] = []

    import my
    try:
        # 'my' is a namespace package; _path lists every directory it can be
        # imported from -- useful to diagnose shadowed installs.
        paths: Sequence[str] = my.__path__._path  # type: ignore[attr-defined]
    except Exception as e:
        errors.append(e)
        error('failed to determine module import path')
        tb(e)
    else:
        info(f'import order: {paths}')

    try:
        import my.config as cfg
    except Exception as e:
        errors.append(e)
        error("failed to import the config")
        tb(e)
        # todo yield exception here? so it doesn't fail immediately..
        # I guess it's fairly critical and worth exiting immediately
        sys.exit(1)

    cfg_path = cfg.__file__  # todo might be better to use __path__?
    info(f"config file : {cfg_path}")

    import my.core as core
    try:
        # If the config file lives inside the package itself, the user is on
        # the bundled stub rather than their own config.
        core_pkg_path = str(Path(core.__path__[0]).parent)  # type: ignore[attr-defined]
        if cfg_path.startswith(core_pkg_path):
            error(f'''
Seems that the stub config is used ({cfg_path}). This is likely not going to work.
See https://github.com/karlicoss/HPI/blob/master/doc/SETUP.org#setting-up-modules for more information
'''.strip())
            errors.append(RuntimeError('bad config path'))
    except Exception as e:
        errors.append(e)
        tb(e)

    # todo for some reason compileall.compile_file always returns true??
    try:
        # Syntax check: byte-compile the config in a subprocess.
        cmd = [sys.executable, '-m', 'compileall', str(cfg_path)]
        check_call(cmd)
        info('syntax check: ' + ' '.join(cmd))
    except Exception as e:
        errors.append(e)

    mres = run_mypy(cfg)
    if mres is not None:  # has mypy
        rc = mres.returncode
        if rc == 0:
            info('mypy check : success')
        else:
            error('mypy check: failed')
            errors.append(RuntimeError('mypy failed'))
            sys.stderr.write(indent(mres.stderr.decode('utf8')))
            sys.stderr.write(indent(mres.stdout.decode('utf8')))

    if len(errors) > 0:
        error(f'config check: {len(errors)} errors')
        return False
    else:
        # note: shouldn't exit here, might run something else
        info('config check: success!')
        return True
from .util import HPIModule, modules
def _modules(*, all: bool=False) -> Iterable[HPIModule]:
    """Yield known HPI modules; disabled ones are omitted unless all=True."""
    skipped = []
    for module in modules():
        if not all and module.skip_reason is not None:
            skipped.append(module.name)
        else:
            yield module
    if skipped:
        warning(f'Skipped {len(skipped)} modules: {skipped}. Pass --all if you want to see them.')
def modules_check(args: Namespace) -> None:
    """Import every (active) module and run its stats(), reporting per-module status.

    With --quick, stats() only samples the first items (via common.QUICK_STATS).
    Passing an explicit module name implies --verbose and checks that module
    even if it would normally be skipped.
    """
    verbose: bool = args.verbose
    quick:   bool = args.quick
    module: Optional[str] = args.module
    if module is not None:
        verbose = True  # hopefully makes sense?
    vw = '' if verbose else '; pass --verbose to print more information'

    from . import common
    common.QUICK_STATS = quick  # dirty, but hopefully OK for cli

    tabulate_warnings()

    from .util import get_stats, HPIModule
    mods: Iterable[HPIModule]
    if module is None:
        mods = _modules(all=args.all)
    else:
        # Explicit module: force-check it regardless of skip status.
        mods = [HPIModule(name=module, skip_reason=None)]

    # todo add a --all argument to disregard is_active check?
    for mr in mods:
        skip = mr.skip_reason
        m    = mr.name
        if skip is not None:
            eprint(OFF + f' {color.YELLOW}SKIP{color.RESET}: {m:<50} {skip}')
            continue

        try:
            mod = importlib.import_module(m)
        except Exception as e:
            # todo more specific command?
            error(f'{color.RED}FAIL{color.RESET}: {m:<50} loading failed{vw}')
            if verbose:
                tb(e)
            continue

        info(f'{color.GREEN}OK{color.RESET}  : {m:<50}')
        stats = get_stats(m)
        if stats is None:
            eprint(" - no 'stats' function, can't check the data")
            # todo point to a readme on the module structure or something?
            continue

        try:
            res = stats()
            assert res is not None, 'stats() returned None'
        except Exception as ee:
            warning(f' - {color.RED}stats:{color.RESET}                      computing failed{vw}')
            if verbose:
                tb(ee)
        else:
            info(f' - stats: {res}')
def list_modules(args: Namespace) -> None:
    """Print one line per known module, marking disabled ones.

    Fix: renamed the local that shadowed the builtin ``all``.
    """
    # todo add a --sort argument?
    tabulate_warnings()

    list_all: bool = args.all
    for mr in _modules(all=list_all):
        m  = mr.name
        sr = mr.skip_reason
        if sr is None:
            pre = OK
            suf = ''
        else:
            pre = OFF
            suf = f' {color.YELLOW}[disabled: {sr}]{color.RESET}'

        print(f'{pre} {m:50}{suf}')
def tabulate_warnings() -> None:
    '''
    Helper to avoid visual noise in hpi modules/doctor
    '''
    import warnings
    previous = warnings.formatwarning

    def indented(*args, **kwargs) -> str:
        formatted = previous(*args, **kwargs)
        return ''.join('  ' + line for line in formatted.splitlines(keepends=True))

    # Globally replace the formatter so all warnings render indented.
    warnings.formatwarning = indented
    # TODO loggers as well?
# TODO loggers as well?
# todo check that it finds private modules too?
def doctor(args: Namespace) -> None:
    """Full checkup: validate the config, then import/check every module."""
    ok = config_ok(args)
    # TODO propagate ok status up?
    modules_check(args)
def _requires(module: str) -> Sequence[str]:
    """Return the REQUIRES (pip dependency) list for *module*; exit(1) if absent."""
    from .discovery_pure import module_by_name
    mod = module_by_name(module)
    # todo handle when module is missing
    reqs = mod.requires
    if reqs is None:
        error(f"Module {module} has no REQUIRES specification")
        sys.exit(1)
    return reqs
def module_requires(args: Namespace) -> None:
    """Print the pip requirements of *args.module*, one quoted name per line."""
    module: str = args.module
    # Resolve requirements first: _requires may error/exit before any output.
    quoted = [f"'{req}'" for req in _requires(module)]
    eprint(f'dependencies of {module}')
    for q in quoted:
        print(q)
def module_install(args: Namespace) -> None:
    """pip-install the dependencies of *args.module* into the current interpreter."""
    # TODO hmm. not sure how it's gonna work -- presumably people use different means of installing...
    # how do I install into the 'same' environment??
    user: bool = args.user
    module: str = args.module

    import shlex
    pip_cmd = [sys.executable, '-m', 'pip', 'install']
    if user:
        pip_cmd.append('--user')  # meh
    pip_cmd.extend(_requires(module))

    eprint('Running: ' + ' '.join(map(shlex.quote, pip_cmd)))
    check_call(pip_cmd)
from argparse import ArgumentParser
def parser() -> ArgumentParser:
p = ArgumentParser('Human Programming Interface', epilog='''
Tool for HPI.
Work in progress, will be used for config management, troubleshooting & introspection
''')
sp = p.add_subparsers(dest='mode')
dp = sp.add_parser('doctor', help='Run various checks')
dp.add_argument('--verbose', action='store_true', help='Print more diagnosic infomration')
dp.add_argument('--all' , action='store_true', help='List all modules, including disabled')
dp.add_argument('--quick' , action='store_true', help='Only run partial checks (first 100 items)')
dp.add_argument('module', nargs='?', type=str , help='Pass to check a specific module')
dp.set_defaults(func=doctor)
cp = sp.add_parser('config', help='Work with configuration')
scp = cp.add_subparsers(dest='config_mode')
if True:
ccp = scp.add_parser('check', help='Check config')
ccp.set_defaults(func=config_check_cli)
icp = scp.add_parser('create', help='Create user config')
icp.set_defaults(func=config_create)
mp = sp.add_parser('modules', help='List available modules')
mp.add_argument('--all' , action='store_true', help='List all modules, including disabled')
mp.set_defaults(func=list_modules)
op = sp.add_parser('module', help='Module management')
ops = op.add_subparsers(dest='module_mode')
if True:
add_module_arg = lambda x: x.add_argument('module', type=str, help='Module name (e.g. my.reddit)')
opsr = ops.add_parser('requires', help='Print module requirements')
# todo not sure, might be worth exposing outside...
add_module_arg(opsr)
opsr.set_defaults(func=module_requires)
# todo support multiple
opsi = ops.add_parser('install', help='Install | |
<reponame>m1griffin/arrayfunc<filename>codegen/amaxamin_testgen.py
#!/usr/bin/env python3
##############################################################################
# Project: arrayfunc
# Purpose: Generate the unit tests for amax and amin.
# Language: Python 3.6
# Date: 13-May-2014
#
###############################################################################
#
# Copyright 2014 - 2019 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##############################################################################
# ==============================================================================
import copy
import itertools
import codegen_common
# ==============================================================================
# ==============================================================================
# The basic template for testing each array type for operator function.
op_template_general = '''
##############################################################################
class %(funclabel)s_general_%(arrayevenodd)s_arraysize_%(simdpresent)s_simd_%(typecode)s(unittest.TestCase):
"""Test %(funclabel)s for basic general function operation.
op_template_general
"""
########################################################
def setUp(self):
"""Initialise.
"""
# We use a template to generate this code, so the following
# compare is inserted into the template to generate code which
# spills over past the SIMD handler.
if '%(arrayevenodd)s' == 'odd':
arrayextension = 5
else:
arrayextension = 0
arraylength = 96 + arrayextension
MaxVal = arrayfunc.arraylimits.%(typecode)s_max
MinVal = arrayfunc.arraylimits.%(typecode)s_min
# This is generated by a common template, so we need to make
# sure that in cases where we are using floating point values
# we don't pass floating point values for range().
# Plus, double precision calcuations will overflow when calculating
# the step value unless we convert to integer first. Python
# integers have no upper limit, and so will not overflow.
MaxInt = int(MaxVal)
MinInt = int(MinVal)
# Create some arbitrary data over a wide range of the data type. This
# creates evenly spaced data over a range straddling the mid point of the data.
midpoint = (MaxInt + MinInt) // 2
startval = (midpoint + MinInt) // 2
endval = (midpoint + MaxInt) // 2
stepval = (MaxInt - MinInt) // 100
halfrangeinc = list(range(startval, endval, stepval))
halfrangedec = list(range(endval, startval, -stepval))
gendata = list(itertools.chain.from_iterable(zip(halfrangeinc, halfrangedec)))
incdata = halfrangeinc
decdata = halfrangedec
maxvaldata = list(itertools.chain(halfrangeinc, [MaxVal], halfrangedec))
minvaldata = list((itertools.chain(halfrangeinc, [MinVal], halfrangedec)))
# Test arrays.
self.gentest = array.array('%(typecode)s', [x for x,y in zip(itertools.cycle(gendata), range(arraylength))])
self.inctest = array.array('%(typecode)s', [x for x,y in zip(itertools.cycle(incdata), range(arraylength))])
self.dectest = array.array('%(typecode)s', [x for x,y in zip(itertools.cycle(decdata), range(arraylength))])
self.maxvaltest = array.array('%(typecode)s', [x for x,y in zip(itertools.cycle(maxvaldata), range(arraylength))])
self.minvaltest = array.array('%(typecode)s', [x for x,y in zip(itertools.cycle(minvaldata), range(arraylength))])
########################################################
def test_%(funclabel)s_general_function_01(self):
"""Test a%(funcname)s - Array code %(typecode)s. General test %(arrayevenodd)s length array %(simdpresent)s SIMD.
"""
result = arrayfunc.a%(funcname)s(self.gentest %(nosimd)s)
self.assertEqual(result, %(funcname)s(self.gentest))
########################################################
def test_%(funclabel)s_general_function_02(self):
"""Test a%(funcname)s - Array code %(typecode)s. Test increasing values %(arrayevenodd)s length array %(simdpresent)s SIMD.
"""
result = arrayfunc.a%(funcname)s(self.inctest %(nosimd)s)
self.assertEqual(result, %(funcname)s(self.inctest))
########################################################
def test_%(funclabel)s_general_function_03(self):
"""Test a%(funcname)s - Array code %(typecode)s. Test decreasing values %(arrayevenodd)s length array %(simdpresent)s SIMD.
"""
result = arrayfunc.a%(funcname)s(self.dectest %(nosimd)s)
self.assertEqual(result, %(funcname)s(self.dectest))
########################################################
def test_%(funclabel)s_general_function_04(self):
"""Test a%(funcname)s - Array code %(typecode)s. Test finding max for data type %(arrayevenodd)s length array %(simdpresent)s SIMD.
"""
result = arrayfunc.a%(funcname)s(self.maxvaltest %(nosimd)s)
self.assertEqual(result, %(funcname)s(self.maxvaltest))
########################################################
def test_%(funclabel)s_general_function_05(self):
"""Test a%(funcname)s - Array code %(typecode)s. Test finding value from array that contains min for data type %(arrayevenodd)s length array %(simdpresent)s SIMD.
"""
result = arrayfunc.a%(funcname)s(self.minvaltest %(nosimd)s)
self.assertEqual(result, %(funcname)s(self.minvaltest))
########################################################
def test_%(funclabel)s_general_function_06(self):
"""Test a%(funcname)s - Array code %(typecode)s. Test optional maxlen parameter %(arrayevenodd)s length array %(simdpresent)s SIMD.
"""
result = arrayfunc.a%(funcname)s(self.maxvaltest, maxlen=5 %(nosimd)s)
self.assertEqual(result, %(funcname)s(self.maxvaltest[:5]))
##############################################################################
'''
# ==============================================================================
# The basic template for testing each array type for parameter errors.
op_template_params = '''
##############################################################################
class %(funclabel)s_parameter_%(arrayevenodd)s_arraysize_%(simdpresent)s_simd_%(typecode)s(unittest.TestCase):
"""Test %(funclabel)s for basic parameter tests.
op_template_params
"""
########################################################
def setUp(self):
"""Initialise.
"""
# We use a template to generate this code, so the following
# compare is inserted into the template to generate code which
# spills over past the SIMD handler.
if '%(arrayevenodd)s' == 'odd':
arrayextension = 5
else:
arrayextension = 0
arraylength = 96 + arrayextension
MaxVal = arrayfunc.arraylimits.%(typecode)s_max
MinVal = arrayfunc.arraylimits.%(typecode)s_min
self.gentest = array.array('%(typecode)s', [MaxVal // 2] * arraylength)
########################################################
def test_%(funclabel)s_param_function_01(self):
"""Test a%(funcname)s - Array code %(typecode)s. Test invalid parameter type %(arrayevenodd)s length array %(simdpresent)s SIMD.
"""
with self.assertRaises(TypeError):
result = arrayfunc.a%(funcname)s(1 %(nosimd)s)
# Check that the exception raised corresponds to the native Python behaviour.
with self.assertRaises(TypeError):
result = %(funcname)s(1)
########################################################
def test_%(funclabel)s_param_function_02(self):
"""Test a%(funcname)s - Array code %(typecode)s. Test missing parameter %(arrayevenodd)s length array %(simdpresent)s SIMD.
"""
with self.assertRaises(TypeError):
result = arrayfunc.a%(funcname)s()
# Check that the exception raised corresponds to the native Python behaviour.
with self.assertRaises(TypeError):
result = %(funcname)s()
########################################################
def test_%(funclabel)s_param_function_03(self):
"""Test a%(funcname)s - Array code %(typecode)s. Test excess parameters %(arrayevenodd)s length array %(simdpresent)s SIMD.
"""
with self.assertRaises(TypeError):
result = arrayfunc.a%(funcname)s(self.gentest, 5, 2, 2 %(nosimd)s)
# Check that the exception raised corresponds to the native Python behaviour.
with self.assertRaises(TypeError):
result = %(funcname)s(self.gentest, 2)
########################################################
def test_%(funclabel)s_param_function_04(self):
"""Test a%(funcname)s - Array code %(typecode)s. Test invalid keyword parameter %(arrayevenodd)s length array %(simdpresent)s SIMD.
"""
with self.assertRaises(TypeError):
result = arrayfunc.a%(funcname)s(self.gentest, xxxx=5 %(nosimd)s)
# Check that the exception raised corresponds to the native Python behaviour.
with self.assertRaises(TypeError):
result = %(funcname)s(self.gentest, xxxx=5)
##############################################################################
'''
# ==============================================================================
# The basic template for testing floating point arrays with nan, inf -inf.
nonfinite_template = '''
##############################################################################
class %(funclabel)s_nonfinite_%(rotplaces)s_%(arrayevenodd)s_arraysize_%(simdpresent)s_simd_%(typecode)s(unittest.TestCase):
"""Test with floating point nan, inf -inf.
nonfinite_template
"""
########################################################
def setUp(self):
"""Initialise.
"""
# We use a template to generate this code, so the following
# compare is inserted into the template to generate code which
# spills over past the SIMD handler.
if '%(arrayevenodd)s' == 'odd':
arrayextension = 5
else:
arrayextension = 0
arraylength = 96 + arrayextension
MaxVal = arrayfunc.arraylimits.%(typecode)s_max
MinVal = arrayfunc.arraylimits.%(typecode)s_min
# Create some test data containing a mixture of values.
halfrangedata = list(range(int(MinVal / 2), int(MaxVal / 2), int(MaxVal) - int(MinVal) // 100))
nanvaldatabase = list(itertools.chain(halfrangedata, [math.nan, -10.0, MaxVal, 10.0, MinVal], halfrangedata))
infvaldatabase = list(itertools.chain(halfrangedata, [math.inf, -10.0, MaxVal, 10.0, MinVal], halfrangedata))
ninfvaldatabase = list(itertools.chain(halfrangedata, [-math.inf, -10.0, MaxVal, 10.0, MinVal], halfrangedata))
mixedvaldatabase = list(itertools.chain(halfrangedata, [math.inf, -math.inf, -10.0, MaxVal, 10.0, MinVal], halfrangedata))
# Rotate the values in place in order to create different combinations.
# This is being generated through a template to allow us to create
# different combinations to help test the effects of having the
# special values in various locations. This is primarily of use
# for the SIMD tests which do operations in parallel.
rotplaces = %(rotplaces)s
nanvaldata = nanvaldatabase[rotplaces:] + nanvaldatabase[:rotplaces]
infvaldata = infvaldatabase[rotplaces:] + infvaldatabase[:rotplaces]
ninfvaldata = ninfvaldatabase[rotplaces:] + ninfvaldatabase[:rotplaces]
mixedvaldata = mixedvaldatabase[rotplaces:] + mixedvaldatabase[:rotplaces]
# Now create the arrays used in the tests.
self.data_nan = array.array('%(typecode)s', [x for x,y in zip(itertools.cycle(nanvaldata), range(arraylength))])
self.data_inf = array.array('%(typecode)s', [x for x,y in zip(itertools.cycle(infvaldata), range(arraylength))])
self.data_ninf = array.array('%(typecode)s', [x for x,y in zip(itertools.cycle(ninfvaldata), range(arraylength))])
self.data_mixed = array.array('%(typecode)s', [x for x,y in zip(itertools.cycle(mixedvaldata), range(arraylength))])
########################################################
def test_%(funclabel)s_nonfinite_nan_01(self):
"""Test a%(funcname)s - Array code %(typecode)s. Test NaN data with %(arrayevenodd)s length array data shifted %(rotplaces)s %(simdpresent)s SIMD.
"""
result = arrayfunc.a%(funcname)s(self.data_nan %(nosimd)s)
result2 = %(funcname)s(self.data_nan)
# We don't actually test the result as there is no meaningful order
# comparison with NaN.
########################################################
def test_%(funclabel)s_nonfinite_inf_02(self):
"""Test a%(funcname)s - Array code %(typecode)s. Test Inf data with %(arrayevenodd)s length array data shifted %(rotplaces)s %(simdpresent)s SIMD.
"""
result = arrayfunc.a%(funcname)s(self.data_inf %(nosimd)s)
self.assertEqual(result, %(funcname)s(self.data_inf))
########################################################
def test_%(funclabel)s_nonfinite_ninf_03(self):
"""Test a%(funcname)s - Array code %(typecode)s. Test Negative Inf data with %(arrayevenodd)s length array data shifted %(rotplaces)s %(simdpresent)s SIMD.
"""
result = arrayfunc.a%(funcname)s(self.data_ninf %(nosimd)s)
self.assertEqual(result, %(funcname)s(self.data_ninf))
########################################################
def test_%(funclabel)s_nonfinite_mixed_04(self):
"""Test a%(funcname)s - Array code %(typecode)s. Test mixed non-finite data with %(arrayevenodd)s length array data shifted %(rotplaces)s %(simdpresent)s SIMD.
"""
# The mixed test does not include NaN, as there is no meaningful order
# comparison with NaN.
result = arrayfunc.a%(funcname)s(self.data_mixed %(nosimd)s)
self.assertEqual(result, %(funcname)s(self.data_mixed))
##############################################################################
'''
# ==============================================================================
# The functions which are implemented by this program.
completefuncnames = ('amax', 'amin')

# The name of the function without the leading 'a'.
# Maps each arrayfunc name to the Python builtin used as the reference result.
optype = {'amax' : 'max',
	'amin' : 'min',
}
# ==============================================================================
# This is used to generate test template data for tests.
def gentestdata(funcname):
	""" Generate test template data for tests.
	funcname (string): - The name of the function.
	Returns: (list) - A list of dictionaries containing the keys and
		values to generate individual test functions.
	"""
	# Axes to combine: array type code, SIMD enabled or not, even/odd length.
	typecodes = [('typecode', code) for code in codegen_common.arraycodes]
	simdoptions = (('simdpresent', 'with'), ('simdpresent', 'without'))
	lengthoptions = (('arrayevenodd', 'even'), ('arrayevenodd', 'odd'))

	# Cartesian product of all the axes; each combination becomes one dict.
	testdata = [dict(combo) for combo in itertools.product(typecodes, simdoptions, lengthoptions)]

	# Map the SIMD flag to the parameter text substituted into the call.
	nosimd = {'with' : '', 'without' : ', nosimd=True'}

	# Fields that are the same for every combination.
	for entry in testdata:
		entry['nosimd'] = nosimd[entry['simdpresent']]
		entry['funclabel'] = funcname
		entry['funcname'] = optype[funcname]

	return testdata
# ==============================================================================
# This is used to generate test template data for non-finite tests.
def gennonfinitetestdata(funcname):
""" Generate test template data for non-finite tests.
funcname (string): - The name of the function.
Returns: (list) - A list of dictionaries containing the keys and
values to generate individual test functions.
"""
# These are the different test values we will combine in various ways.
arraycode = [('typecode', x) for x in codegen_common.floatarrays]
hassimd = (('simdpresent', 'with'), ('simdpresent', 'without'))
arraylen = (('arrayevenodd', 'even'), ('arrayevenodd', 'odd'))
datarot = [('rotplaces', x) for x in range(5)]
# The product function produces all possible combinations.
combos = list(itertools.product(arraycode, hassimd, arraylen, datarot))
# Convert the data into a list of dictionaries.
testdata = | |
'$expr$[[1]]'.
>> First[{a, b, c}]
= a
>> First[a + b + c]
= a
>> First[x]
: Nonatomic expression expected.
= First[x]
>> First[{}]
: {} has zero length and no first element.
= First[{}]
"""
summary_text = "first element of a list or expression"
messages = {
"normal": "Nonatomic expression expected.",
"nofirst": "`1` has zero length and no first element.",
}
def apply(self, expr, evaluation):
"First[expr_]"
if isinstance(expr, Atom):
evaluation.message("First", "normal")
return
if len(expr.leaves) == 0:
evaluation.message("First", "nofirst", expr)
return
return expr.leaves[0]
class FirstCase(Builtin):
    """
    <dl>
    <dt> FirstCase[{$e1$, $e2$, ...}, $pattern$]
        <dd>gives the first $ei$ to match $pattern$, or $Missing[\"NotFound\"]$ if none matching pattern is found.
    <dt> FirstCase[{$e1$,$e2$, ...}, $pattern$ -> $rhs$]
        <dd> gives the value of $rhs$ corresponding to the first $ei$ to match pattern.
    <dt> FirstCase[$expr$, $pattern$, $default$]
        <dd> gives $default$ if no element matching $pattern$ is found.
    <dt>FirstCase[$expr$, $pattern$, $default$, $levelspec$] \
        <dd>finds only objects that appear on levels specified by $levelspec$.
    <dt>FirstCase[$pattern$]
        <dd>represents an operator form of FirstCase that can be applied to an expression.
    </dl>
    """

    summary_text = "first element that matches a pattern"
    # HoldRest: the default and levelspec arguments must not evaluate eagerly.
    attributes = hold_rest | protected
    # Accepts the same options as Cases (which does the actual matching).
    options = Cases.options
    # Implemented purely by rewrite rules: Cases[..., levelspec, 1] stops at
    # the first match, then Replace maps {} -> default / {match} -> match.
    rules = {
        'FirstCase[expr_, pattOrRule_, Shortest[default_:Missing["NotFound"], 1],Shortest[levelspec_:{1}, 2], opts:OptionsPattern[]]': "Replace[Cases[expr, pattOrRule, levelspec, 1, opts],{{} :> default, {match_} :> match}]",
        "FirstCase[pattOrRule_][expr_]": "FirstCase[expr, pattOrRule]",
    }
class Extract(Builtin):
    """
    <dl>
    <dt>'Extract[$expr$, $list$]'
        <dd>extracts parts of $expr$ specified by $list$.
    <dt>'Extract[$expr$, {$list1$, $list2$, ...}]'
        <dd>extracts a list of parts.
    </dl>

    'Extract[$expr$, $i$, $j$, ...]' is equivalent to 'Part[$expr$, {$i$, $j$, ...}]'.

    >> Extract[a + b + c, {2}]
     = b
    >> Extract[{{a, b}, {c, d}}, {{1}, {2, 2}}]
     = {{a, b}, d}
    """

    summary_text = "extract elements that appear at a list of positions"
    # NHoldRest: position specifications are kept away from numeric evaluation.
    attributes = n_hold_rest | protected
    # Delegates to Part: a single position list is splatted into Part, and a
    # list of position lists maps Extract over each of them.
    rules = {
        "Extract[expr_, list_List]": "Part[expr, Sequence @@ list]",
        "Extract[expr_, {lists___List}]": "Extract[expr, #]& /@ {lists}",
    }
class FirstPosition(Builtin):
    """
    <dl>
    <dt>'FirstPosition[$expr$, $pattern$]'
        <dd>gives the position of the first element in $expr$ that matches $pattern$, or Missing["NotFound"] if no such element is found.
    <dt>'FirstPosition[$expr$, $pattern$, $default$]'
        <dd>gives default if no element matching $pattern$ is found.
    <dt>'FirstPosition[$expr$, $pattern$, $default$, $levelspec$]'
        <dd>finds only objects that appear on levels specified by $levelspec$.
    </dl>

    >> FirstPosition[{a, b, a, a, b, c, b}, b]
     = {2}

    >> FirstPosition[{{a, a, b}, {b, a, a}, {a, b, a}}, b]
     = {1, 3}

    >> FirstPosition[{x, y, z}, b]
     = Missing[NotFound]

    Find the first position at which x^2 to appears:
    >> FirstPosition[{1 + x^2, 5, x^4, a + (1 + x^2)^2}, x^2]
     = {1, 2}

    #> FirstPosition[{1, 2, 3}, _?StringQ, "NoStrings"]
     = NoStrings

    #> FirstPosition[a, a]
     = {}

    #> FirstPosition[{{{1, 2}, {2, 3}, {3, 1}}, {{1, 2}, {2, 3}, {3, 1}}},3]
     = {1, 2, 2}

    #> FirstPosition[{{1, {2, 1}}, {2, 3}, {3, 1}}, 2, Missing["NotFound"],2]
     = {2, 1}

    #> FirstPosition[{{1, {2, 1}}, {2, 3}, {3, 1}}, 2, Missing["NotFound"],4]
     = {1, 2, 1}

    #> FirstPosition[{{1, 2}, {2, 3}, {3, 1}}, 3, Missing["NotFound"], {1}]
     = Missing[NotFound]

    #> FirstPosition[{{1, 2}, {2, 3}, {3, 1}}, 3, Missing["NotFound"], 0]
     = Missing[NotFound]

    #> FirstPosition[{{1, 2}, {1, {2, 1}}, {2, 3}}, 2, Missing["NotFound"], {3}]
     = {2, 2, 1}

    #> FirstPosition[{{1, 2}, {1, {2, 1}}, {2, 3}}, 2, Missing["NotFound"], 3]
     = {1, 2}

    #> FirstPosition[{{1, 2}, {1, {2, 1}}, {2, 3}}, 2, Missing["NotFound"], {}]
     = {1, 2}

    #> FirstPosition[{{1, 2}, {2, 3}, {3, 1}}, 3, Missing["NotFound"], {1, 2, 3}]
     : Level specification {1, 2, 3} is not of the form n, {n}, or {m, n}.
     = FirstPosition[{{1, 2}, {2, 3}, {3, 1}}, 3, Missing[NotFound], {1, 2, 3}]

    #> FirstPosition[{{1, 2}, {2, 3}, {3, 1}}, 3, Missing["NotFound"], a]
     : Level specification a is not of the form n, {n}, or {m, n}.
     = FirstPosition[{{1, 2}, {2, 3}, {3, 1}}, 3, Missing[NotFound], a]

    #> FirstPosition[{{1, 2}, {2, 3}, {3, 1}}, 3, Missing["NotFound"], {1, a}]
     : Level specification {1, a} is not of the form n, {n}, or {m, n}.
     = FirstPosition[{{1, 2}, {2, 3}, {3, 1}}, 3, Missing[NotFound], {1, a}]
    """

    summary_text = "position of the first element matching a pattern"
    messages = {
        "level": "Level specification `1` is not of the form n, {n}, or {m, n}.",
    }

    def apply(
        self, expr, pattern, evaluation, default=None, minLevel=None, maxLevel=None
    ):
        "FirstPosition[expr_, pattern_]"

        # NOTE(review): matching here is plain expression equality against
        # 'pattern' (consistent with the doctests above), not full pattern
        # matching.
        if expr == pattern:
            return Expression(SymbolList)

        result = []

        def check_pattern(input_list, pat, result, beginLevel):
            # Depth-first search. 'result' holds the 1-based index path to the
            # node currently being examined and is unwound on backtracking.
            for i, leaf in enumerate(input_list.leaves):
                nested_level = beginLevel
                result.append(i + 1)
                if leaf == pat:
                    # Found the pattern -- accept only if deep enough.
                    if minLevel is None or nested_level >= minLevel:
                        return True
                elif isinstance(leaf, Expression) and (
                    maxLevel is None or maxLevel > nested_level
                ):
                    # Recurse one level deeper, unless maxLevel forbids it.
                    if check_pattern(leaf, pat, result, nested_level + 1):
                        return True
                result.pop()
            return False

        is_found = False
        if isinstance(expr, Expression) and (maxLevel is None or maxLevel > 0):
            is_found = check_pattern(expr, pattern, result, 1)
        if is_found:
            return Expression(SymbolList, *result)
        # No match: the caller-supplied default, or Missing["NotFound"].
        return Expression("Missing", "NotFound") if default is None else default

    def apply_default(self, expr, pattern, default, evaluation):
        "FirstPosition[expr_, pattern_, default_]"
        return self.apply(expr, pattern, evaluation, default=default)

    def apply_level(self, expr, pattern, default, level, evaluation):
        "FirstPosition[expr_, pattern_, default_, level_]"

        def is_integer_list(expr_list):
            # (was 'is_interger_list' -- typo in the helper name fixed)
            return all(isinstance(leaf, Integer) for leaf in expr_list.leaves)

        # Translate the level specification into (min_Level, max_Level):
        #   {}     -> unrestricted; {n} -> exactly n; {m, n} -> m..n; n -> 1..n
        if level.has_form("List", None):
            len_list = len(level.leaves)
            if len_list > 2 or not is_integer_list(level):
                return evaluation.message("FirstPosition", "level", level)
            elif len_list == 0:
                min_Level = max_Level = None
            elif len_list == 1:
                min_Level = max_Level = level.leaves[0].get_int_value()
            elif len_list == 2:
                min_Level = level.leaves[0].get_int_value()
                max_Level = level.leaves[1].get_int_value()
        elif isinstance(level, Integer):
            min_Level = 0
            max_Level = level.get_int_value()
        else:
            return evaluation.message("FirstPosition", "level", level)

        return self.apply(
            expr,
            pattern,
            evaluation,
            default=default,
            minLevel=min_Level,
            maxLevel=max_Level,
        )
class Last(Builtin):
    """
    <dl>
    <dt>'Last[$expr$]'
        <dd>returns the last element in $expr$.
    </dl>

    'Last[$expr$]' is equivalent to '$expr$[[-1]]'.

    >> Last[{a, b, c}]
     = c
    >> Last[x]
     : Nonatomic expression expected.
     = Last[x]
    >> Last[{}]
     : {} has zero length and no last element.
     = Last[{}]
    """

    summary_text = "last element of a list or expression"
    messages = {
        "normal": "Nonatomic expression expected.",
        "nolast": "`1` has zero length and no last element.",
    }

    def apply(self, expr, evaluation):
        "Last[expr_]"

        # Atoms have no parts at all.
        if isinstance(expr, Atom):
            evaluation.message("Last", "normal")
            return
        leaves = expr.leaves
        if not leaves:
            evaluation.message("Last", "nolast", expr)
            return
        return leaves[-1]
class Length(Builtin):
    """
    <dl>
    <dt>'Length[$expr$]'
        <dd>returns the number of elements in $expr$.
    </dl>

    Length of a list:
    >> Length[{1, 2, 3}]
     = 3

    'Length' operates on the 'FullForm' of expressions:
    >> Length[Exp[x]]
     = 2
    >> FullForm[Exp[x]]
     = Power[E, x]

    The length of atoms is 0:
    >> Length[a]
     = 0

    Note that rational and complex numbers are atoms, although their
    'FullForm' might suggest the opposite:
    >> Length[1/3]
     = 0
    >> FullForm[1/3]
     = Rational[1, 3]
    """

    summary_text = "number of elements in a list or expression"

    def apply(self, expr, evaluation):
        "Length[expr_]"
        # Atoms (symbols, numbers, strings) have no sub-parts: length 0.
        # Anything else reports the number of top-level leaves.
        return Integer0 if isinstance(expr, Atom) else Integer(len(expr.leaves))
class MemberQ(Builtin):
    """
    <dl>
    <dt>'MemberQ[$list$, $pattern$]'
        <dd>returns 'True' if $pattern$ matches any element of $list$, or 'False' otherwise.
    </dl>

    >> MemberQ[{a, b, c}, b]
     = True
    >> MemberQ[{a, b, c}, d]
     = False
    >> MemberQ[{"a", b, f[x]}, _?NumericQ]
     = False
    >> MemberQ[_List][{{}}]
     = True
    """

    summary_text = "test whether an element is a member of a list"
    # Implemented entirely by rewrite rules: membership is tested by
    # selecting the elements that match the pattern and checking whether
    # any survive; the one-argument (operator) form curries into the
    # two-argument form.
    rules = {
        "MemberQ[list_, pattern_]": ("Length[Select[list, MatchQ[#, pattern]&]] > 0"),
        "MemberQ[pattern_][expr_]": "MemberQ[expr, pattern]",
    }
class Most(Builtin):
    """
    <dl>
    <dt>'Most[$expr$]'
        <dd>returns $expr$ with the last element removed.
    </dl>

    'Most[$expr$]' is equivalent to '$expr$[[;;-2]]'.

    >> Most[{a, b, c}]
     = {a, b}
    >> Most[a + b + c]
     = a + b
    >> Most[x]
     : Nonatomic expression expected.
     = Most[x]

    #> A[x__] := 7 /; Length[{x}] == 3;
    #> Most[A[1, 2, 3, 4]]
     = 7
    #> ClearAll[A];
    """

    summary_text = "remove the last element"

    def apply(self, expr, evaluation):
        # Functional pattern docstring used by the Builtin framework.
        "Most[expr_]"
        # Atoms have no parts, so Most cannot be applied to them.
        # NOTE(review): the "normal" message text is presumably supplied by
        # a shared/general message table, since this class defines none.
        if isinstance(expr, Atom):
            evaluation.message("Most", "normal")
            return
        # Rebuild the expression with the same head and all but the last leaf.
        return expr.slice(expr.head, slice(0, -1), evaluation)
class Part(Builtin):
"""
<dl>
<dt>'Part[$expr$, $i$]'
<dd>returns part $i$ of $expr$.
</dl>
Extract an element from a list:
>> A = {a, b, c, d};
>> A[[3]]
= c
Negative indices count from the end:
>> {a, b, c}[[-2]]
= b
'Part' can be applied on any | |
#!/usr/bin/env python
# coding: utf-8
import os
import sys
import numpy as np
import argparse
import time
from tensorboardX import SummaryWriter
import torch
import torch.nn as nn
from torchvision import datasets
import torchvision.transforms as transforms
from torch.autograd import Variable
from lfads import LFADS_Net
from objective import *
from scheduler import LFADS_Scheduler
# Command-line configuration for this training script.
parser = argparse.ArgumentParser()
parser.add_argument('--save_loc', default='./', type=str)  # root dir for TensorBoard logs
parser.add_argument('--num_epochs', default=500, type=int)  # total training epochs
global args; args = parser.parse_args()  # NOTE(review): `global` at module level is a no-op
# os.environ["CUDA_VISIBLE_DEVICES"] = '0,1'
# Let cuDNN benchmark conv algorithms; beneficial because input sizes are fixed.
torch.backends.cudnn.benchmark = True
class conv_block(nn.Module):# *args, **kwargs
    """One encoder stage: Conv3d -> spatial MaxPool3d -> ReLU.

    forward() returns both the activations and the max-pool indices so the
    matching deconv_block can invert the pooling with MaxUnpool3d.

    NOTE(review): conv2 and relu2 are registered but never used in
    forward(); they still contribute parameters to the state_dict and
    optimizer. Confirm they are dead before removing (checkpoint compat).
    """

    def __init__(self, in_f, out_f):
        # in_f/out_f: input/output channel counts of the 3x3x3 convolution.
        super(conv_block,self).__init__()
        self.conv1 = nn.Conv3d(in_f, out_f,
                               kernel_size=3,
                               padding=1,
                               dilation = (1,1,1))
        self.relu1 = nn.ReLU()
        # NOTE(review): unused in forward(); also takes in_f input channels,
        # which would not match conv1's out_f output if it were ever chained.
        self.conv2 = nn.Conv3d(in_f, out_f,
                               kernel_size=3,
                               padding=1,
                               dilation = 1)
        # Pool only the two spatial dims (time kernel 1); return_indices
        # so the decoder can unpool.
        self.pool1 = nn.MaxPool3d(kernel_size=(1,4,4),
                                  return_indices=True)
        self.relu2 = nn.ReLU()  # NOTE(review): unused in forward()

    def forward(self,x):
        x = self.conv1(x)
        x, ind = self.pool1(x)  # keep pooling indices for the decoder
        x = self.relu1(x)       # ReLU applied after pooling
        return x, ind
class deconv_block(nn.Module):
    """One decoder stage mirroring conv_block:
    MaxUnpool3d (with the encoder's indices) -> ConvTranspose3d -> ReLU.
    """

    def __init__(self, in_f, out_f):
        # in_f/out_f: input/output channel counts of the transposed conv.
        super(deconv_block,self).__init__()
        # Inverts the (1,4,4) spatial pooling of the matching encoder stage.
        self.unpool1 = nn.MaxUnpool3d(kernel_size=(1,4,4))
        self.deconv1 = nn.ConvTranspose3d(in_channels=in_f,
                                          out_channels=out_f,
                                          kernel_size=3,
                                          padding=1,
                                          dilation = (1,1,1))
        self.relu1 = nn.ReLU()

    def forward(self,x,ind):
        # `ind` are the pooling indices saved by the corresponding conv_block.
        # print(x.shape,ind.shape)
        x = self.unpool1(x,ind)
        x = self.deconv1(x)
        x = self.relu1(x)
        return x
class convVAE(nn.Module):
    """Convolutional autoencoder with an LFADS sequence model in the bottleneck.

    The input video is split into non-overlapping 10-frame blocks; each block
    is passed through n_layers conv/pool stages, each frame's feature map is
    flattened and projected to a per-frame bottleneck vector, and the
    resulting sequence is modeled by LFADS. The LFADS factors are projected
    back to feature-map size and decoded with mirrored unpool/deconv stages.

    Expected input: (batch, 1, seq_len, 128, 128) with seq_len a multiple
    of 10 — TODO confirm against callers (training code uses (35,1,50,128,128)).
    Returns (reconstruction with the input's shape, LFADS factors).
    """

    def __init__(self):
        super(convVAE,self).__init__()
        device = 'cuda' if torch.cuda.is_available() else 'cpu';
        in_f = 1  # input channels (single-channel video)
        out_f = [16,32]#[16,32,64]#[1,1]#
        all_f = [in_f,*out_f]  # channel progression through the encoder
        self.n_layers = 2#3
        self.video_dim_space = 128  # expected frame height/width
        self.video_dim_time = 50  # expected number of frames
        self.final_size = 8#  # spatial size after n_layers of (1,4,4) pooling: 128/4/4
        self.final_f = 32#64#20#3  # channels out of the last encoder stage
        self.factor_size = 4 # LFADS number of factors
        self.bottleneck_size = 64  # per-frame latent vector fed to LFADS
        # Encoder stages, registered as 'ce0', 'ce1', ...
        self.convlayers = nn.ModuleList()
        for n in range(0,self.n_layers):
            self.convlayers.add_module('{}{}'.format('ce', n),conv_block(all_f[n], all_f[n+1]))
        # self.convlayers.add_module('ce1',conv_block(out_f1, out_f2))
        # Decoder stages in mirror order, registered as 'dec0', 'dec1', ...
        self.deconvlayers = nn.ModuleList()
        for n in range(0,self.n_layers):
            self.deconvlayers.add_module('{}{}'.format('dec', n),deconv_block(all_f[self.n_layers-n], all_f[self.n_layers-n-1]))
        # self.deconvlayers.add_module('dec0',deconv_block(out_f2,out_f1))
        # self.deconvlayers.add_module('dec1',deconv_block(out_f1,in_f))
        # self.ce1 = conv_block(in_f, out_f1)
        # self.ce2 = conv_block(out_f1, out_f2)
        # self.dec1 = deconv_block(out_f2,out_f1)
        # self.dec2 = deconv_block(out_f1,in_f)
        # Project flattened per-frame conv features down to the LFADS input.
        self.fc1 = nn.Linear(self.final_size*self.final_size*self.final_f,self.bottleneck_size)
        self.lfads = LFADS_Net(self.bottleneck_size, factor_size = self.factor_size, #self.final_size * self.final_size * self.final_f
                               g_encoder_size = 64, c_encoder_size = 0,
                               g_latent_size = 64, u_latent_size = 0,
                               controller_size = 0, generator_size = 64,
                               prior = {'g0' : {'mean' : {'value': 0.0, 'learnable' : True},
                                                'var' : {'value': 0.1, 'learnable' : False}},
                                        'u' : {'mean' : {'value': 0.0, 'learnable' : False},
                                               'var' : {'value': 0.1, 'learnable' : True},
                                               'tau' : {'value': 10, 'learnable' : True}}},
                               clip_val=5.0, dropout=0.0, max_norm = 200, deep_freeze = False,
                               do_normalize_factors=True, device = device)
        # Project LFADS factors back up to the conv feature-map size.
        self.fc2 = nn.Linear(self.factor_size,self.final_size*self.final_size*self.final_f)

    def forward(self,video):
        # Frames are convolved in independent non-overlapping blocks.
        frame_per_block = 10
        x = video
        batch_size, num_ch, seq_len, w, h = x.shape
        num_blocks = int(seq_len/frame_per_block)  # assumes seq_len % 10 == 0 — TODO confirm
        # print(num_blocks)
        # Fold frame blocks into the batch axis: (B*num_blocks, C, 10, W, H).
        x = x.view(batch_size,num_ch,num_blocks,frame_per_block,w,h).contiguous()
        x = x.permute(0,2,1,3,4,5).contiguous()
        x = x.view(batch_size * num_blocks,num_ch,frame_per_block,w,h).contiguous()
        Ind = list()  # per-stage pooling indices, consumed by the decoder
        # conv_tic = time.time()
        for n, layer in enumerate(self.convlayers):
            x, ind1 = layer(x)
            Ind.append(ind1)
        num_out_ch = x.shape[1]
        w_out = x.shape[3]
        h_out = x.shape[4]
        # Unfold the blocks back into one sequence and flatten each frame.
        x = x.view(batch_size,num_blocks,num_out_ch,frame_per_block,w_out,h_out).contiguous()
        x = x.permute(0,2,1,3,4,5).contiguous()
        x = x.view(batch_size,num_out_ch,seq_len,w_out,h_out).contiguous()
        x = x.permute(0,2,1,3,4)
        x = x.reshape(x.shape[0],x.shape[1],-1)
        x = self.fc1(x.view(batch_size,seq_len,w_out*h_out*num_out_ch))
        # conv_toc = time.time()
        # LFADS consumes (time, batch, features).
        x = x.permute(1,0,2)
        # lfads_tic = time.time()
        # r, (factors, gen_inputs) = self.lfads(x)
        (factors, gen_inputs) = self.lfads(x)
        # lfads_toc = time.time()
        x = factors
        x = x.permute(1,0,2)  # back to (batch, time, factors)
        x = self.fc2(x)
        # print(conv_toc-conv_tic,lfads_toc - lfads_tic)

        # call LFADS here:
        # x should be reshaped for LFADS [time x batch x cells]:
        #
        # LFADS output should be also reshaped back for the conv decoder
        # Un-flatten per-frame features and re-fold into frame blocks for the decoder.
        x = x.reshape(x.shape[0],x.shape[1],self.final_f,self.final_size, self.final_size)
        x = x.permute(0,2,1,3,4)
        x = x.view(batch_size,num_out_ch,num_blocks,frame_per_block,w_out,h_out).contiguous()
        x = x.permute(0,2,1,3,4,5).contiguous()
        x = x.view(batch_size * num_blocks,num_out_ch,frame_per_block,w_out,h_out).contiguous()
        # Decoder stages consume the saved pooling indices in reverse order.
        for n, layer in enumerate(self.deconvlayers):
            x = layer(x,Ind[self.n_layers-n-1])
        x = x.view(batch_size,num_blocks,1,frame_per_block,w,h).contiguous()
        x = x.permute(0,2,1,3,4,5)
        x = x.view(batch_size,1,seq_len,w,h)
        # x, ind1 = self.ce0(video)
        # x, ind2 = self.ce1(x)
        # x = self.dec0(x,ind2)
        # v_p = self.dec1(x,ind1)
        # return v_p
        return x, factors
def get_data():
    """Generate synthetic calcium-imaging videos and stage them in
    temporary memory-mapped arrays.

    Returns:
        (fp_train, fp_test): np.memmap arrays of shape
        (batch_size, 8, N_steps/N_stepsinbin, 128, 128), where the second
        axis indexes data-loader batches.

    NOTE(review): assumes each loader yields exactly 8 batches whose
    squeezed shape is (35, 50, 128, 128) — confirm against
    SyntheticCalciumVideoDataset. The memmap files are created under
    mkdtemp() and never flushed or deleted. Train and test share the same
    underlying traces, so the "test" split is not held out.
    """
    from synthetic_data import generate_lorenz_data, SyntheticCalciumVideoDataset
    import numpy as np
    import torch
    from tempfile import mkdtemp
    import os.path as path
    import time

    filename_train = path.join(mkdtemp(), 'newfile_train.dat')
    filename_test = path.join(mkdtemp(), 'newfile_test.dat')

    # convert data to torch.FloatTensor
    transform = transforms.ToTensor()  # NOTE(review): unused
    # load the training and test datasets
    N_trials = 10
    N_inits = 35
    N_cells = 30
    N_steps = 100
    N_stepsinbin = 2
    data_dict = generate_lorenz_data(N_trials, N_inits, N_cells, N_steps, N_stepsinbin, save=False) # [N_trials, N_inits, N_cells, N_steps, N_stepsinbin]
    cells = data_dict['cells']
    traces = data_dict['train_fluor']
    train_data = SyntheticCalciumVideoDataset(traces=traces, cells=cells)
    test_data = SyntheticCalciumVideoDataset(traces=traces, cells=cells)

    num_workers = 0
    # how many samples per batch to load
    batch_size = 35

    # prepare data loaders
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers)
    test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers)

    # Copy each loader batch into slot i along axis 1 of the train memmap.
    fp_train = np.memmap(filename_train, dtype='float32', mode='w+', shape=(batch_size, 8, int(N_steps/N_stepsinbin), 128, 128))
    i = 0
    tic = time.time()  # NOTE(review): timing value never reported
    for data in train_loader:
        videos = data
        fp_train[:,i,:,:,:] = np.squeeze(videos[:])
        i += 1

    # Same staging for the test memmap.
    fp_test = np.memmap(filename_test, dtype='float32', mode='w+', shape=(batch_size,8,int(N_steps/N_stepsinbin),128,128))
    i = 0
    tic = time.time()
    for data in test_loader:
        print(i)
        videos = data
        fp_test[:,i,:,:,:] = np.squeeze(videos[:])
        i += 1

    return fp_train, fp_test #train_data, train_loader, test_loader
class convLFADS_loss(nn.Module):
    """Composite loss for the conv-LFADS model.

    Combines an MSE reconstruction term, a weighted KL divergence between
    the LFADS generator-state posterior and its prior, and a weighted L2
    penalty on the generator GRU's hidden weights. The kl/l2 entries in
    ``loss_weights`` also carry warm-up schedule metadata used elsewhere.
    """

    def __init__(self,
                 kl_weight_init=0.1, l2_weight_init=0.1,
                 kl_weight_schedule_dur = 2000, l2_weight_schedule_dur = 2000,
                 kl_weight_schedule_start = 0, l2_weight_schedule_start = 0,
                 kl_weight_max=1.0, l2_weight_max=1.0,
                 l2_con_scale=1.0, l2_gen_scale=1.0):
        super(convLFADS_loss, self).__init__()

        def schedule(weight, dur, start, cap):
            # Bundle one loss weight with its warm-up schedule metadata.
            return {'weight': weight,
                    'schedule_dur': dur,
                    'schedule_start': start,
                    'max': cap,
                    'min': weight}

        self.loss_weights = {
            'kl': schedule(kl_weight_init, kl_weight_schedule_dur,
                           kl_weight_schedule_start, kl_weight_max),
            'l2': schedule(l2_weight_init, l2_weight_schedule_dur,
                           l2_weight_schedule_start, l2_weight_max),
        }
        self.l2_con_scale = l2_con_scale
        self.l2_gen_scale = l2_gen_scale
        self.recon_loss = nn.MSELoss()

    def forward(self, video_orig, video_recon, model):
        """Return (reconstruction, KL, L2) loss terms for one batch.

        `model` is the LFADS module carrying the posterior/prior statistics
        populated by its forward pass.
        """
        kl_w = self.loss_weights['kl']['weight']
        l2_w = self.loss_weights['l2']['weight']

        recon = self.recon_loss(video_recon, video_orig)
        kl = kl_w * kldiv_gaussian_gaussian(post_mu=model.g_posterior_mean,
                                            post_lv=model.g_posterior_logvar,
                                            prior_mu=model.g_prior_mean,
                                            prior_lv=model.g_prior_logvar)
        l2 = (0.5 * l2_w * self.l2_gen_scale
              * model.generator.gru_generator.hidden_weight_l2_norm())
        # Controller-related KL/L2 terms are intentionally disabled (the
        # model is configured with controller_size = 0).
        return recon, kl, l2
def train_convVAE(fp_train,fp_test,n_epochs): #model,
device = 'cuda' if torch.cuda.is_available() else 'cpu';
model = convVAE().to(device)
lfads = model.lfads.to(device)
for ix, (name, param) in enumerate(model.named_parameters()):
print(ix, name, list(param.shape), param.numel(), param.requires_grad)
# total_params += param.numel()
# number of epochs to train the model
# n_epochs = 30
# train_loader, test_loader = get_data()
# model = convVAE()
# criterion = nn.MSELoss()
criterion = convLFADS_loss().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# scheduler = LFADS_Scheduler(optimizer = optimizer,
# mode = 'min',
# factor = 0.1,
# patience = 10,
# verbose = False,
# threshold = 1e-4,
# threshold_mode = 'rel',
# cooldown = 0,
# min_lr = 0,
# eps = 1e-8)
writer_val = SummaryWriter(logdir=os.path.join(args.save_loc, 'log/val')) #
writer_train = SummaryWriter(logdir=os.path.join(args.save_loc, 'log/train')) #
for epoch in range(1, n_epochs+1):
# monitor training loss
bw_tic = time.time()
train_loss = 0.0
###################
# train the model #
###################
i = 0
# tic_train = time.time()
num_step = fp_train.shape[1]
videos = torch.zeros((35,1,50,128,128)).to(device)
for i in range(num_step):#for data in train_loader:
# _ stands in for labels, here
# no need to flatten images
# videos = data.to(device)
videos[:,0,:,:,:] = torch.Tensor(fp_train[:,i,:,:,:]).to(device)
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
tic_forw = time.time()
outputs,_ = model(videos)
toc_forw = time.time()
# print(toc_forw - tic_forw)
# calculate the loss
recon_loss, kl_loss, l2_loss = criterion(outputs, videos,lfads)
loss = recon_loss + kl_loss + l2_loss
# backward pass: compute gradient of the loss with respect to model parameters
tic_backw = time.time()
loss.backward()
toc_backw = time.time()
# print(toc_forw - tic_forw,toc_backw - tic_backw)
# perform a single optimization step (parameter update)
tic_optim = time.time()
optimizer.step()
toc_optim = time.time()
# print(toc_optim - tic_optim)
# update running training loss
train_loss += loss.item()*videos.size(0)
i += 1
# toc_train = time.time()
# print(toc_train - tic_train)
writer_train.add_scalar('total/loss', train_loss, epoch)
test_loss = 0.0
# tic_val = time.time()
with torch.no_grad():
num_step = fp_test.shape[1]
videos_test = torch.zeros((35,1,50,128,128)).to(device)
for j in range(num_step):#for data_test in test_loader:
# print(i)
# _ stands in for labels, here
# no need to flatten images
# videos_test = data_test.to(device)
videos_test[:,0,:,:,:] = torch.Tensor(fp_test[:,j,:,:,:]).to(device)
# forward pass: compute predicted outputs by passing inputs to the model
outputs_test,_ = model(videos_test)
# calculate the loss
recon_loss_test, kl_loss_test, l2_loss_test = criterion(outputs_test, videos_test,lfads)
loss_test = recon_loss_test + kl_loss_test + l2_loss_test
# update running training loss
test_loss += loss_test.item()*videos.size(0)
i += 1
# toc_val = time.time()
# print(toc_val - tic_val)
writer_val.add_scalar('total/loss', test_loss, epoch)
# scheduler.step(loss)
# print avg training statistics
train_loss = train_loss/len(fp_train)
test_loss = test_loss/len(fp_test)
print(len(fp_train))
bw_toc = time.time()
print('Epoch: {} \tTotal Loss: {:.6f} \tl2 Loss: {:.6f} \tkl Loss: | |
instead or None if the original URL should be used.
"""
for url_pattern, endpoint in _oembed_patterns.items():
if url_pattern.fullmatch(url):
return endpoint
# No match.
return None
async def _get_oembed_content(self, endpoint: str, url: str) -> OEmbedResult:
    """
    Request content from an oEmbed endpoint.

    Args:
        endpoint: The oEmbed API endpoint.
        url: The URL to pass to the API.

    Returns:
        An object representing the metadata returned.

    Raises:
        OEmbedError if fetching or parsing of the oEmbed information fails.
    """
    try:
        logger.debug("Trying to get oEmbed content for url '%s'", url)
        result = await self.client.get_json(
            endpoint,
            # TODO Specify max height / width.
            # Note that only the JSON format is supported.
            args={"url": url},
        )

        # Ensure there's a version of 1.0.
        if result.get("version") != "1.0":
            raise OEmbedError("Invalid version: %s" % (result.get("version"),))

        oembed_type = result.get("type")

        # Ensure the cache age is None or an int.
        # (The oEmbed response may carry it as a string.)
        cache_age = result.get("cache_age")
        if cache_age:
            cache_age = int(cache_age)

        oembed_result = OEmbedResult(None, None, result.get("title"), cache_age)

        # HTML content.
        if oembed_type == "rich":
            oembed_result.html = result.get("html")
            return oembed_result

        # "photo" responses carry the image itself in "url".
        if oembed_type == "photo":
            oembed_result.url = result.get("url")
            return oembed_result

        # TODO Handle link and video types.

        # Fall back to any thumbnail the response advertises.
        if "thumbnail_url" in result:
            oembed_result.url = result.get("thumbnail_url")
            return oembed_result

        raise OEmbedError("Incompatible oEmbed information.")

    except OEmbedError as e:
        # Trap OEmbedErrors first so we can directly re-raise them.
        logger.warning("Error parsing oEmbed metadata from %s: %r", url, e)
        raise

    except Exception as e:
        # Trap any exception and let the code follow as usual.
        # FIXME: pass through 404s and other error messages nicely
        logger.warning("Error downloading oEmbed metadata from %s: %r", url, e)
        raise OEmbedError() from e
async def _download_url(self, url: str, user: str) -> Dict[str, Any]:
    """Download `url` (or its oEmbed substitute) into the media store and
    record it in the local media table.

    Args:
        url: The URL being previewed.
        user: The user ID requesting the preview (recorded as uploader).

    Returns:
        A dict of metadata about the stored media: media type/length,
        download name, creation timestamp, file/filesystem ids, the URI
        actually fetched, response code, expiry time and etag.
    """
    # TODO: we should probably honour robots.txt... except in practice
    # we're most likely being explicitly triggered by a human rather than a
    # bot, so are we really a robot?

    # Date-prefixed random id groups url-cache files by day on disk.
    file_id = datetime.date.today().isoformat() + "_" + random_string(16)

    file_info = FileInfo(server_name=None, file_id=file_id, url_cache=True)

    # If this URL can be accessed via oEmbed, use that instead.
    url_to_download = url  # type: Optional[str]
    oembed_url = self._get_oembed_url(url)
    if oembed_url:
        # The result might be a new URL to download, or it might be HTML content.
        try:
            oembed_result = await self._get_oembed_content(oembed_url, url)
            if oembed_result.url:
                url_to_download = oembed_result.url
            elif oembed_result.html:
                # None signals "don't download; store the oEmbed HTML below".
                url_to_download = None
        except OEmbedError:
            # If an error occurs, try doing a normal preview.
            pass

    if url_to_download:
        with self.media_storage.store_into_file(file_info) as (f, fname, finish):
            try:
                logger.debug("Trying to get preview for url '%s'", url_to_download)
                length, headers, uri, code = await self.client.get_file(
                    url_to_download,
                    output_stream=f,
                    max_size=self.max_spider_size,
                    headers={"Accept-Language": self.url_preview_accept_language},
                )
            except SynapseError:
                # Pass SynapseErrors through directly, so that the servlet
                # handler will return a SynapseError to the client instead of
                # blank data or a 500.
                raise
            except DNSLookupError:
                # DNS lookup returned no results
                # Note: This will also be the case if one of the resolved IP
                # addresses is blacklisted
                raise SynapseError(
                    502,
                    "DNS resolution failure during URL preview generation",
                    Codes.UNKNOWN,
                )
            except Exception as e:
                # FIXME: pass through 404s and other error messages nicely
                logger.warning("Error downloading %s: %r", url_to_download, e)

                raise SynapseError(
                    500,
                    "Failed to download content: %s"
                    % (traceback.format_exception_only(sys.exc_info()[0], e),),
                    Codes.UNKNOWN,
                )
            await finish()

            # Derive metadata from the response headers.
            if b"Content-Type" in headers:
                media_type = headers[b"Content-Type"][0].decode("ascii")
            else:
                media_type = "application/octet-stream"

            download_name = get_filename_from_headers(headers)

            # FIXME: we should calculate a proper expiration based on the
            # Cache-Control and Expire headers. But for now, assume 1 hour.
            expires = ONE_HOUR
            etag = (
                headers[b"ETag"][0].decode("ascii") if b"ETag" in headers else None
            )
    else:
        # we can only get here if we did an oembed request and have an oembed_result.html
        assert oembed_result.html is not None
        assert oembed_url is not None

        html_bytes = oembed_result.html.encode("utf-8")
        with self.media_storage.store_into_file(file_info) as (f, fname, finish):
            f.write(html_bytes)
            await finish()

        # Synthesize the metadata a real download would have produced.
        media_type = "text/html"
        download_name = oembed_result.title
        length = len(html_bytes)
        # If a specific cache age was not given, assume 1 hour.
        expires = oembed_result.cache_age or ONE_HOUR
        uri = oembed_url
        code = 200
        etag = None

    try:
        time_now_ms = self.clock.time_msec()

        await self.store.store_local_media(
            media_id=file_id,
            media_type=media_type,
            time_now_ms=time_now_ms,
            upload_name=download_name,
            media_length=length,
            user_id=user,
            url_cache=url,
        )
    except Exception as e:
        logger.error("Error handling downloaded %s: %r", url, e)
        # TODO: we really ought to delete the downloaded file in this
        # case, since we won't have recorded it in the db, and will
        # therefore not expire it.
        raise

    return {
        "media_type": media_type,
        "media_length": length,
        "download_name": download_name,
        "created_ts": time_now_ms,
        "filesystem_id": file_id,
        "filename": fname,
        "uri": uri,
        "response_code": code,
        "expires": expires,
        "etag": etag,
    }
def _start_expire_url_cache_data(self):
    """Run the url-cache expiry coroutine as a named background process.

    Thin wrapper so the periodic scheduler can kick off
    `_expire_url_cache_data` via `run_as_background_process`.
    """
    return run_as_background_process(
        "expire_url_cache_data", self._expire_url_cache_data
    )
async def _expire_url_cache_data(self) -> None:
    """Clean up expired url cache content, media and thumbnails.

    Two passes: first delete url-cache entries whose expiry has passed,
    then delete downloaded media (and thumbnails) older than two days.
    File-removal failures (other than "already gone") skip the entry so
    the DB row is retained and retried on the next run.
    """
    # TODO: Delete from backup media store

    assert self._worker_run_media_background_jobs

    now = self.clock.time_msec()

    logger.debug("Running url preview cache expiry")

    if not (await self.store.db_pool.updates.has_completed_background_updates()):
        logger.info("Still running DB updates; skipping expiry")
        return

    # First we delete expired url cache entries
    media_ids = await self.store.get_expired_url_cache(now)

    removed_media = []
    for media_id in media_ids:
        fname = self.filepaths.url_cache_filepath(media_id)
        try:
            os.remove(fname)
        except OSError as e:
            # If the path doesn't exist, meh
            if e.errno != errno.ENOENT:
                logger.warning("Failed to remove media: %r: %s", media_id, e)
                continue

        removed_media.append(media_id)

        # Best-effort removal of now-empty parent directories.
        try:
            dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id)
            for dir in dirs:
                os.rmdir(dir)
        except Exception:
            pass

    await self.store.delete_url_cache(removed_media)

    if removed_media:
        logger.info("Deleted %d entries from url cache", len(removed_media))
    else:
        logger.debug("No entries removed from url cache")

    # Now we delete old images associated with the url cache.
    # These may be cached for a bit on the client (i.e., they
    # may have a room open with a preview url thing open).
    # So we wait a couple of days before deleting, just in case.
    expire_before = now - 2 * 24 * ONE_HOUR
    media_ids = await self.store.get_url_cache_media_before(expire_before)

    removed_media = []
    for media_id in media_ids:
        fname = self.filepaths.url_cache_filepath(media_id)
        try:
            os.remove(fname)
        except OSError as e:
            # If the path doesn't exist, meh
            if e.errno != errno.ENOENT:
                logger.warning("Failed to remove media: %r: %s", media_id, e)
                continue

        try:
            dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id)
            for dir in dirs:
                os.rmdir(dir)
        except Exception:
            pass

        # The media's thumbnails live in their own directory tree.
        thumbnail_dir = self.filepaths.url_cache_thumbnail_directory(media_id)
        try:
            shutil.rmtree(thumbnail_dir)
        except OSError as e:
            # If the path doesn't exist, meh
            if e.errno != errno.ENOENT:
                logger.warning("Failed to remove media: %r: %s", media_id, e)
                continue

        removed_media.append(media_id)

        try:
            dirs = self.filepaths.url_cache_thumbnail_dirs_to_delete(media_id)
            for dir in dirs:
                os.rmdir(dir)
        except Exception:
            pass

    await self.store.delete_url_cache_media(removed_media)

    if removed_media:
        logger.info("Deleted %d media from url cache", len(removed_media))
    else:
        logger.debug("No media removed from url cache")
def get_html_media_encoding(body: bytes, content_type: str) -> str:
    """
    Get the encoding of the body based on the (presumably) HTML body or media_type.

    The precedence used for finding a character encoding is:

    1. meta tag with a charset declared.
    2. The XML document's character encoding attribute.
    3. The Content-Type header.
    4. Fallback to UTF-8.

    Args:
        body: The HTML document, as bytes.
        content_type: The Content-Type header.

    Returns:
        The character encoding of the body, as a string.
    """
    # Limit searches to the first 1kb, since it ought to be at the top.
    head = body[:1024]

    # 1. A <meta charset=...> (or similar) declaration in the document.
    meta = _charset_match.search(head)
    if meta:
        return meta.group(1).decode("ascii")

    # TODO Support <meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>

    # 2. An XML declaration's encoding attribute.
    xml_decl = _xml_encoding_match.match(head)
    if xml_decl:
        return xml_decl.group(1).decode("ascii")

    # 3. The HTTP Content-Type header's charset parameter.
    header_charset = _content_type_match.match(content_type)
    if header_charset:
        return header_charset.group(1)

    # 4. Fall back to UTF-8.
    return "utf-8"
def decode_and_calc_og(
body: bytes, media_uri: str, request_encoding: Optional[str] = None
) -> Dict[str, Optional[str]]:
"""
Calculate metadata for an HTML document.
This uses lxml to parse the HTML document into the OG response. If errors
occur during processing of the document, an empty response is returned.
Args:
body: The HTML document, as bytes.
media_url: The URI used to download the body.
request_encoding: The character encoding of the body, as a string.
Returns:
The OG response as a dictionary.
"""
# If there's no body, nothing useful is going to be found.
if not body:
return | |
<filename>xclim/run_length.py
# -*- coding: utf-8 -*-
"""Run length algorithms module"""
import logging
from warnings import warn
import numpy as np
import xarray as xr
# Route Python warnings through the logging system.
logging.captureWarnings(True)

# Gridpoint-count threshold used by the ufunc_1dim="auto" heuristic below:
# arrays with at most this many non-time points use the 1-D ufunc path.
npts_opt = 9000
def get_npts(da):
    """Return the number of gridpoints in a data-array.

    Parameters
    ----------
    da : N-dimensional xarray.DataArray
        Input array; must have a "time" coordinate.

    Returns
    -------
    (int)
        Product of input dataarray coordinate sizes excluding the dimension 'time'
    """
    other_coords = list(da.coords)
    other_coords.remove("time")  # raises ValueError if "time" is absent

    total = 1
    for name in other_coords:
        total *= da[name].size
    return total
def rle(da, dim="time", max_chunk=1000000):
n = len(da[dim])
i = xr.DataArray(np.arange(da[dim].size), dims=dim).chunk({"time": 1})
ind = xr.broadcast(i, da)[0].chunk(da.chunks)
b = ind.where(~da) # find indexes where false
end1 = (
da.where(b[dim] == b[dim][-1], drop=True) * 0 + n
) # add additional end value index (deal with end cases)
start1 = (
da.where(b[dim] == b[dim][0], drop=True) * 0 - 1
) # add additional start index (deal with end cases)
b = xr.concat([start1, b, end1], dim)
# Ensure bfill operates on entire (unchunked) time dimension
# Determine appropraite chunk size for other dims - do not exceed 'max_chunk' total size per chunk (default 1000000)
ndims = len(b.shape)
chunk_dim = b[dim].size
# divide extra dims into equal size
# Note : even if calculated chunksize > dim.size result will have chunk==dim.size
if ndims > 1:
chunksize_ex_dims = np.round(np.power(max_chunk / chunk_dim, 1 / (ndims - 1)))
chunks = {}
chunks[dim] = -1
for dd in b.dims:
if dd != dim:
chunks[dd] = chunksize_ex_dims
b = b.chunk(chunks)
# back fill nans with first position after
z = b.bfill(dim=dim)
# calculate lengths
d = z.diff(dim=dim) - 1
d = d.where(d >= 0)
return d
def longest_run(da, dim="time", ufunc_1dim="auto"):
"""Return the length of the longest consecutive run of True values.
Parameters
----------
arr : N-dimensional array (boolean)
Input array
dim : Xarray dimension (default = 'time')
Dimension along which to calculate consecutive run
ufunc_1dim : optional, one of 'auto' (default), True or False
Use the 1d 'ufunc' version of this function : default (auto) will attempt to select optimal
usage based on number of data points. Using 1D_ufunc=True is typically more efficient
for dataarray with a small number of gridpoints.
Returns
-------
N-dimensional array (int)
Length of longest run of True values along dimension
"""
if ufunc_1dim == "auto":
npts = get_npts(da)
ufunc_1dim = npts <= npts_opt
if ufunc_1dim:
rl_long = longest_run_ufunc(da)
else:
d = rle(da, dim=dim)
rl_long = d.max(dim=dim)
return rl_long
def windowed_run_events(da, window, dim="time", ufunc_1dim="auto"):
    """Return the number of runs of a minimum length.

    Parameters
    ----------
    da : N-dimensional Xarray data array (boolean)
        Input data array
    window : int
        Minimum run length.
    dim : Xarray dimension (default = 'time')
        Dimension along which to calculate consecutive run
    ufunc_1dim : optional, one of 'auto' (default), True or False
        Use the 1d 'ufunc' version of this function: 'auto' selects the
        implementation from the number of gridpoints; True is typically
        more efficient for arrays with few gridpoints.

    Returns
    -------
    out : N-dimensional xarray data array (int)
        Number of distinct runs of a minimum length.
    """
    if ufunc_1dim == "auto":
        # Small grids are faster through the 1-D ufunc implementation.
        ufunc_1dim = get_npts(da) <= npts_opt

    if ufunc_1dim:
        return windowed_run_events_ufunc(da, window)

    runs = rle(da, dim=dim)
    # Each run length >= window counts as one event.
    return (runs >= window).sum(dim=dim)
def windowed_run_count(da, window, dim="time", ufunc_1dim="auto"):
    """Return the number of consecutive true values in array for runs at least as long as given duration.

    Parameters
    ----------
    da : N-dimensional Xarray data array (boolean)
        Input data array
    window : int
        Minimum run length.
    dim : Xarray dimension (default = 'time')
        Dimension along which to calculate consecutive run
    ufunc_1dim : optional, one of 'auto' (default), True or False
        Use the 1d 'ufunc' version of this function: 'auto' selects the
        implementation from the number of gridpoints; True is typically
        more efficient for arrays with few gridpoints.

    Returns
    -------
    out : N-dimensional xarray data array (int)
        Total number of true values part of a consecutive runs of at least `window` long.
    """
    if ufunc_1dim == "auto":
        # Small grids are faster through the 1-D ufunc implementation.
        ufunc_1dim = get_npts(da) <= npts_opt

    if ufunc_1dim:
        return windowed_run_count_ufunc(da, window)

    runs = rle(da, dim=dim)
    # Zero out runs shorter than `window`, then total the surviving lengths.
    return runs.where(runs >= window, 0).sum(dim=dim)
def first_run(da, window, dim="time", ufunc_1dim="auto"):
    """Return the index of the first item of a run of at least a given length.

    Parameters
    ----------
    da : N-dimensional Xarray data array (boolean)
        Input array
    window : int
        Minimum duration of consecutive run to accumulate values.
    dim : Xarray dimension (default = 'time')
        Dimension along which to calculate consecutive run
    ufunc_1dim : optional, one of 'auto' (default), True or False
        Use the 1d 'ufunc' version of this function : default (auto) will attempt to select optimal
        usage based on number of data points. Using 1D_ufunc=True is typically more efficient
        for dataarray with a small number of gridpoints.

    Returns
    -------
    out : N-dimensional xarray data array (int)
        Index of first item in first valid run. Returns np.nan if there are no valid run.
    """
    if ufunc_1dim == "auto":
        npts = get_npts(da)
        ufunc_1dim = npts <= npts_opt

    if ufunc_1dim:
        out = first_run_ufunc(da, window)
    else:
        if "time" not in da.dims:
            # rolling() below operates on a "time" dim, so alias `dim` to
            # "time" when the array does not already have one.
            # NOTE: this adds a "time" coordinate to the caller's array.
            da["time"] = da[dim]
            # BUG FIX: swap_dims returns a new object; previously the result
            # was discarded, so rolling(time=...) failed for dim != "time".
            da = da.swap_dims({dim: "time"})
        da = da.astype("int")
        i = xr.DataArray(np.arange(da[dim].size), dims=dim).chunk({"time": 1})
        ind = xr.broadcast(i, da)[0].chunk(da.chunks)
        wind_sum = da.rolling(time=window).sum()
        # rolling() labels each window by its last element; subtract
        # (window - 1) to recover the index of the window's first element.
        out = ind.where(wind_sum >= window).min(dim=dim) - (
            window - 1
        )
    return out
def rle_1d(arr):
    """Return the length, starting position and value of consecutive identical values.

    Parameters
    ----------
    arr : sequence
        Array of values to be parsed.

    Returns
    -------
    (values, run lengths, start positions)
        values : np.array
            The values taken by arr over each run
        run lengths : np.array
            The length of each run
        start position : np.array
            The starting index of each run

    Examples
    --------
    >>> a = [1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3]
    >>> rle_1d(a)
    (array([1, 2, 3]), array([2, 4, 6]), array([0, 2, 6]))
    """
    values = np.asarray(arr)
    n = len(values)
    if n == 0:
        warn("run length array empty")
        return None, None, None

    # Positions where the value changes from its neighbour (string safe).
    change = values[1:] != values[:-1]
    # Last index of each run; the final element always closes a run.
    ends = np.append(np.nonzero(change)[0], n - 1)
    # Run lengths are the gaps between consecutive run ends.
    lengths = np.diff(np.append(-1, ends))
    # Run starts follow from cumulative lengths.
    starts = np.cumsum(np.append(0, lengths))[:-1]
    return values[ends], lengths, starts
def first_run_1d(arr, window):
    """Return the index of the first item of a run of at least a given length.

    Parameters
    ----------
    arr : bool array
        Input array
    window : int
        Minimum duration of consecutive run to accumulate values.

    Returns
    -------
    int
        Index of first item in first valid run. Returns np.nan if there are no valid run.
    """
    v, rl, pos = rle_1d(arr)
    # A run qualifies when it is a True run (v) that is long enough;
    # non-qualifying runs are mapped to inf so min() skips them.
    ind = np.where(v * rl >= window, pos, np.inf).min()

    if np.isinf(ind):
        return np.nan
    return ind
def longest_run_1d(arr):
    """Return the length of the longest consecutive run of identical values.

    Parameters
    ----------
    arr : bool array
        Input array

    Returns
    -------
    int
        Length of longest run.
    """
    values, lengths = rle_1d(arr)[:2]
    # Zero out runs of False so only True runs compete for the maximum.
    return np.where(values, lengths, 0).max()
def windowed_run_count_1d(arr, window):
    """Return the number of consecutive true values in array for runs at least as long as given duration.

    Parameters
    ----------
    arr : bool array
        Input array
    window : int
        Minimum duration of consecutive run to accumulate values.

    Returns
    -------
    int
        Total number of true values part of a consecutive run at least `window` long.
    """
    values, lengths = rle_1d(arr)[:2]
    # Keep the lengths of True runs meeting the minimum duration; sum them.
    return np.where(values * lengths >= window, lengths, 0).sum()
def windowed_run_events_1d(arr, window):
    """Return the number of runs of a minimum length.

    Parameters
    ----------
    arr : bool array
      Input array
    window : int
      Minimum run length.

    Returns
    -------
    int
      Number of distinct runs at least `window` long.
    """
    # Start positions from rle_1d are not needed to count events.
    v, rl = rle_1d(arr)[:2]
    # Count runs whose values are truthy and whose length reaches `window`.
    return (v * rl >= window).sum()
def windowed_run_count_ufunc(x, window):
"""Dask-parallel version of windowed_run_count_1d, ie the number of consecutive true values in
array for runs at least | |
<filename>deepchem/models/fcnet.py
"""TensorFlow implementation of fully connected networks.
"""
import logging
import warnings
import time
import numpy as np
import tensorflow as tf
import threading
try:
from collections.abc import Sequence as SequenceCollection
except:
from collections import Sequence as SequenceCollection
import deepchem as dc
from deepchem.models import KerasModel
from deepchem.models.layers import SwitchedDropout
from deepchem.metrics import to_one_hot
from tensorflow.keras.layers import Input, Dense, Reshape, Softmax, Dropout, Activation, Lambda
from typing import Any, Callable, Iterable, List, Optional, Sequence, Tuple, Union
from deepchem.utils.typing import KerasActivationFn, LossFn, OneOrMany
logger = logging.getLogger(__name__)
class MultitaskClassifier(KerasModel):
    """A fully connected network for multitask classification.

    This class provides lots of options for customizing aspects of the model: the
    number and widths of layers, the activation functions, regularization methods,
    etc.

    It optionally can compose the model from pre-activation residual blocks, as
    described in https://arxiv.org/abs/1603.05027, rather than a simple stack of
    dense layers. This often leads to easier training, especially when using a
    large number of layers. Note that residual blocks can only be used when
    successive layers have the same width. Wherever the layer width changes, a
    simple dense layer will be used even if residual=True.
    """

    def __init__(self,
                 n_tasks: int,
                 n_features: int,
                 layer_sizes: Sequence[int] = [1000],
                 weight_init_stddevs: OneOrMany[float] = 0.02,
                 bias_init_consts: OneOrMany[float] = 1.0,
                 weight_decay_penalty: float = 0.0,
                 weight_decay_penalty_type: str = "l2",
                 dropouts: OneOrMany[float] = 0.5,
                 activation_fns: OneOrMany[KerasActivationFn] = tf.nn.relu,
                 n_classes: int = 2,
                 residual: bool = False,
                 **kwargs) -> None:
        """Create a MultitaskClassifier.

        In addition to the following arguments, this class also accepts
        all the keyword arguments from TensorGraph.

        Parameters
        ----------
        n_tasks: int
          number of tasks
        n_features: int
          number of features
        layer_sizes: list
          the size of each dense layer in the network. The length of
          this list determines the number of layers.
        weight_init_stddevs: list or float
          the standard deviation of the distribution to use for weight
          initialization of each layer. The length of this list should
          equal len(layer_sizes). Alternatively this may be a single
          value instead of a list, in which case the same value is used
          for every layer.
        bias_init_consts: list or float
          the value to initialize the biases in each layer to. The
          length of this list should equal len(layer_sizes).
          Alternatively this may be a single value instead of a list, in
          which case the same value is used for every layer.
        weight_decay_penalty: float
          the magnitude of the weight decay penalty to use
        weight_decay_penalty_type: str
          the type of penalty to use for weight decay, either 'l1' or 'l2'
        dropouts: list or float
          the dropout probablity to use for each layer. The length of this list should equal len(layer_sizes).
          Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
        activation_fns: list or object
          the Tensorflow activation function to apply to each layer. The length of this list should equal
          len(layer_sizes). Alternatively this may be a single value instead of a list, in which case the
          same value is used for every layer.
        n_classes: int
          the number of classes
        residual: bool
          if True, the model will be composed of pre-activation residual blocks instead
          of a simple stack of dense layers.
        """
        # NOTE(review): layer_sizes uses a mutable default ([1000]); harmless
        # here because it is never mutated, but worth keeping in mind.
        self.n_tasks = n_tasks
        self.n_features = n_features
        self.n_classes = n_classes
        n_layers = len(layer_sizes)
        # Broadcast any scalar hyperparameter into a per-layer list.
        if not isinstance(weight_init_stddevs, SequenceCollection):
            weight_init_stddevs = [weight_init_stddevs] * n_layers
        if not isinstance(bias_init_consts, SequenceCollection):
            bias_init_consts = [bias_init_consts] * n_layers
        if not isinstance(dropouts, SequenceCollection):
            dropouts = [dropouts] * n_layers
        if not isinstance(activation_fns, SequenceCollection):
            activation_fns = [activation_fns] * n_layers
        if weight_decay_penalty != 0.0:
            if weight_decay_penalty_type == 'l1':
                regularizer = tf.keras.regularizers.l1(weight_decay_penalty)
            else:
                # Any value other than 'l1' falls through to an l2 penalty.
                regularizer = tf.keras.regularizers.l2(weight_decay_penalty)
        else:
            regularizer = None
        # Add the input features.
        mol_features = Input(shape=(n_features,))
        prev_layer = mol_features
        prev_size = n_features
        next_activation = None
        # Add the dense layers
        for size, weight_stddev, bias_const, dropout, activation_fn in zip(
                layer_sizes, weight_init_stddevs, bias_init_consts, dropouts,
                activation_fns):
            layer = prev_layer
            # Pre-activation scheme: the previous layer's activation is applied
            # just before this layer's Dense, so residual additions below are
            # performed on pre-activation values (cf. the class docstring).
            if next_activation is not None:
                layer = Activation(next_activation)(layer)
            layer = Dense(
                size,
                kernel_initializer=tf.keras.initializers.TruncatedNormal(
                    stddev=weight_stddev),
                bias_initializer=tf.constant_initializer(value=bias_const),
                kernel_regularizer=regularizer)(layer)
            if dropout > 0.0:
                layer = Dropout(rate=dropout)(layer)
            if residual and prev_size == size:
                # Residual connection is only possible when the widths match.
                prev_layer = Lambda(lambda x: x[0] + x[1])([prev_layer, layer])
            else:
                prev_layer = layer
            prev_size = size
            next_activation = activation_fn
        # Apply the final layer's activation, which was deferred by the
        # pre-activation scheme (activation_fn holds the last loop value).
        if next_activation is not None:
            prev_layer = Activation(activation_fn)(prev_layer)
        self.neural_fingerprint = prev_layer
        # One (n_classes,) logit vector per task: (batch, n_tasks, n_classes).
        logits = Reshape((n_tasks,
                          n_classes))(Dense(n_tasks * n_classes)(prev_layer))
        output = Softmax()(logits)
        model = tf.keras.Model(inputs=mol_features, outputs=[output, logits])
        # Softmax probabilities are the 'prediction' output; the raw logits
        # feed the SoftmaxCrossEntropy loss.
        super(MultitaskClassifier, self).__init__(
            model,
            dc.models.losses.SoftmaxCrossEntropy(),
            output_types=['prediction', 'loss'],
            **kwargs)

    def default_generator(
            self,
            dataset: dc.data.Dataset,
            epochs: int = 1,
            mode: str = 'fit',
            deterministic: bool = True,
            pad_batches: bool = True) -> Iterable[Tuple[List, List, List]]:
        """Yield ([inputs], [labels], [weights]) batches with one-hot labels."""
        for epoch in range(epochs):
            for (X_b, y_b, w_b, ids_b) in dataset.iterbatches(
                    batch_size=self.batch_size,
                    deterministic=deterministic,
                    pad_batches=pad_batches):
                if y_b is not None:
                    # Flatten the labels, one-hot encode them, and reshape back
                    # to (batch, n_tasks, n_classes) to match the logits layout.
                    y_b = to_one_hot(y_b.flatten(), self.n_classes).reshape(
                        -1, self.n_tasks, self.n_classes)
                yield ([X_b], [y_b], [w_b])
class MultitaskRegressor(KerasModel):
"""A fully connected network for multitask regression.
This class provides lots of options for customizing aspects of the model: the
number and widths of layers, the activation functions, regularization methods,
etc.
It optionally can compose the model from pre-activation residual blocks, as
described in https://arxiv.org/abs/1603.05027, rather than a simple stack of
dense layers. This often leads to easier training, especially when using a
large number of layers. Note that residual blocks can only be used when
successive layers have the same width. Wherever the layer width changes, a
simple dense layer will be used even if residual=True.
"""
def __init__(self,
n_tasks: int,
n_features: int,
layer_sizes: Sequence[int] = [1000],
weight_init_stddevs: OneOrMany[float] = 0.02,
bias_init_consts: OneOrMany[float] = 1.0,
weight_decay_penalty: float = 0.0,
weight_decay_penalty_type: str = "l2",
dropouts: OneOrMany[float] = 0.5,
activation_fns: OneOrMany[KerasActivationFn] = tf.nn.relu,
uncertainty: bool = False,
residual: bool = False,
**kwargs) -> None:
"""Create a MultitaskRegressor.
In addition to the following arguments, this class also accepts all the keywork arguments
from TensorGraph.
Parameters
----------
n_tasks: int
number of tasks
n_features: int
number of features
layer_sizes: list
the size of each dense layer in the network. The length of this list determines the number of layers.
weight_init_stddevs: list or float
the standard deviation of the distribution to use for weight initialization of each layer. The length
of this list should equal len(layer_sizes)+1. The final element corresponds to the output layer.
Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
bias_init_consts: list or float
the value to initialize the biases in each layer to. The length of this list should equal len(layer_sizes)+1.
The final element corresponds to the output layer. Alternatively this may be a single value instead of a list,
in which case the same value is used for every layer.
weight_decay_penalty: float
the magnitude of the weight decay penalty to use
weight_decay_penalty_type: str
the type of penalty to use for weight decay, either 'l1' or 'l2'
dropouts: list or float
the dropout probablity to use for each layer. The length of this list should equal len(layer_sizes).
Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
activation_fns: list or object
the Tensorflow activation function to apply to each layer. The length of this list should equal
len(layer_sizes). Alternatively this may be a single value instead of a list, in which case the
same value is used for every layer.
uncertainty: bool
if True, include extra outputs and loss terms to enable the uncertainty
in outputs to be predicted
residual: bool
if True, the model will be composed of pre-activation residual blocks instead
of a simple stack of dense layers.
"""
self.n_tasks = n_tasks
self.n_features = n_features
n_layers = len(layer_sizes)
if not isinstance(weight_init_stddevs, SequenceCollection):
weight_init_stddevs = [weight_init_stddevs] * (n_layers + 1)
if not isinstance(bias_init_consts, SequenceCollection):
bias_init_consts = [bias_init_consts] * (n_layers + 1)
if not isinstance(dropouts, SequenceCollection):
dropouts = [dropouts] * n_layers
if not isinstance(activation_fns, SequenceCollection):
activation_fns = [activation_fns] * n_layers
if weight_decay_penalty != 0.0:
if weight_decay_penalty_type == 'l1':
regularizer = tf.keras.regularizers.l1(weight_decay_penalty)
else:
regularizer = tf.keras.regularizers.l2(weight_decay_penalty)
else:
regularizer = None
if uncertainty:
if any(d == 0.0 for d in dropouts):
raise ValueError(
'Dropout must be included in every layer to predict uncertainty')
# Add | |
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn ","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
elif ("Alien gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Lien gn ","")
ki.updateGroup(X)
else:
ki.sendText(msg.to,"It can't be used besides the group.")
elif ("Alien1 gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Lien1 gn ","")
kk.updateGroup(X)
else:
kk.sendText(msg.to,"It can't be used besides the group.")
elif ("Alien2 gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Lien3 gn ","")
kc.updateGroup(X)
else:
kc.sendText(msg.to,"It can't be used besides the group.")
elif "Kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Kick ","")
random.choice(KAC).kickoutFromGroup(msg.to,[midd])
elif "Alien kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("_second kick ","")
ki.kickoutFromGroup(msg.to,[midd])
elif "Alien1 kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("_third kick ","")
kk.kickoutFromGroup(msg.to,[midd])
elif "Alien2 kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("_fourth kick ","")
kc.kickoutFromGroup(msg.to,[midd])
elif "Invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Invite ","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
elif "Alien invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("sinvite ","")
ki.findAndAddContactsByMid(midd)
ki.inviteIntoGroup(msg.to,[midd])
elif "Alien1 invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("tinvite ","")
kk.findAndAddContactsByMid(midd)
kk.inviteIntoGroup(msg.to,[midd])
elif "Alien2 invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("finvite ","")
kc.findAndAddContactsByMid(midd)
kc.inviteIntoGroup(msg.to,[midd])
#--------------- SC Add Admin ---------
elif "Admin add @" in msg.text:
if msg.from_ in owner:
print "[Command]Admin add executing"
_name = msg.text.replace("Admin add @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = kr.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact Tidak Di Temukan")
else:
for target in targets:
try:
admin.append(target)
cl.sendText(msg.to,"Admin αlíҽղ Ditambahkan")
except:
pass
print "[Command]Admin add executed"
else:
cl.sendText(msg.to,"Perintah Ditolak")
cl.sendText(msg.to,"Hanya Owner Yang bisa Gunain Perintah ini.")
elif "Admin remove @" in msg.text:
if msg.from_ in owner:
print "[Command]Admin Remove Executing"
_name = msg.text.replace("Admin remove @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = kr.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact Tidak Di Temukan")
else:
for target in targets:
try:
admin.remove(target)
cl.sendText(msg.to,"Admin αlíҽղ Dihapus")
except:
pass
print "[Command]Admin remove executed"
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Owner Yang bisa Gunain Perintah ini.")
elif "Staff add @" in msg.text:
if msg.from_ in admin:
print "[Command]Staff add executing"
_name = msg.text.replace("Staff add @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = kr.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact Tidak Di Temukan")
else:
for target in targets:
try:
staff.append(target)
cl.sendText(msg.to,"Staff αlíҽղ Ditambahkan")
except:
pass
print "[Command]Staff add executed"
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Owner dan Admin Yang bisa Gunain Perintah ini.")
elif "Staff remove @" in msg.text:
if msg.from_ in admin:
print "[Command]Staff Remove Executing"
_name = msg.text.replace("Staff remove @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = kr.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact Tidak Di Temukan")
else:
for target in targets:
try:
staff.remove(target)
cl.sendText(msg.to,"Staff αlíҽղ Dihapus")
except:
pass
print "[Command]Staff remove executed"
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Owner Dan Admin Yang bisa Gunain Perintah ini.")
elif msg.text in ["Admin list","admin list","List admin"]:
if admin == []:
cl.sendText(msg.to,"The Admin List Is Empty")
else:
cl.sendText(msg.to,"Tunggu...")
mc = "╔═════════════════\n║ ☆☞ Admin αlíҽղ ☜☆\n╠═════════════════\n"
for mi_d in admin:
mc += "╠••> " +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc + "╚═════════════════")
print "[Command]Admin List executed"
elif msg.text in ["Staff list","staff list","List staff"]:
if admin == []:
cl.sendText(msg.to,"The Admin List Is Empty")
else:
cl.sendText(msg.to,"Tunggu...")
mc = "╔═════════════════\n║ ☆☞ Admin αlíҽղ ☜☆\n╠═════════════════\n"
for mi_d in admin:
mc += "╠••> " +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc + "╚═════════════════")
print "[Command]Staff List executed"
elif msg.text in ["Owner menu","owner menu","Owner Menu"]:
cl.sendText(msg.to,ownerMessage)
elif msg.text in ["Staff menu","staff menu","Staff Menu"]:
cl.sendText(msg.to,staffMessage)
elif msg.text in ["Key","help","Help"]:
cl.sendText(msg.to,helpMessage)
elif msg.text in ["Public menu","public menu","Public Menu"]:
cl.sendText(msg.to,publicMessage)
elif msg.text in ["Admin menu","admin menu","Admin Menu"]:
cl.sendText(msg.to,adminMessage)
#--------------------------------------
#-------------- Add Friends ------------
elif "Bot Add @" in msg.text:
if msg.toType == 2:
if msg.from_ in owner:
print "[Command]Add executing"
_name = msg.text.replace("Bot Add @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
ki.findAndAddContactsByMid(target)
kk.findAndAddContactsByMid(target)
kc.findAndAddContactsByMid(target)
ks.findAndAddContactsByMid(target)
except:
cl.sendText(msg.to,"Error")
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Owner Yang bisa Gunain Perintah ini.")
#-------------=SC AllBio=---------------- Ganti Bio Semua Bot Format => Allbio: SUKA SUKA KALIAN :D
elif "Allbio:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Allbio:","")
if len(string.decode('utf-8')) <= 500:
profile = cl.getProfile()
profile.statusMessage = string
cl.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki.getProfile()
profile.statusMessage = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kk.getProfile()
profile.statusMessage = string
kk.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kc.getProfile()
profile.statusMessage = string
kc.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ks.getProfile()
profile.statusMessage = string
ks.updateProfile(profile)
cl.sendText(msg.to,"Bio berubah menjadi " + string + "")
#--------------=Finish=----------------
#--------------= SC Ganti nama Owner=--------------
elif "Myname:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Myname:","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Update Name Menjadi : " + string + "")
#-------------- copy profile----------
elif "Spam: " in msg.text:
if msg.from_ in admin:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spam: ")+str(txt[1])+" "+str(jmlh + " ","")
tulisan = jmlh * (teks+"\n")
#@reno.a.w
if txt[1] == "on":
if jmlh <= 300:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Kelebihan batas:v")
elif txt[1] == "off":
if jmlh <= 300:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Kelebihan batas :v")
#-----------------=Selesai=------------------
elif msg.text in ["Bot?"]: #Ngirim Semua Kontak Bot
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
ki.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
kk.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
kc.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Dmid}
ks.sendMessage(msg)
elif msg.text in ["Me"]:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
random.choice(KAC).sendMessage(msg)
elif msg.text in ["Cv2"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
kk.sendMessage(msg)
elif msg.text in ["愛�プレゼント","Gift"]:
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
random.choice(KAC).sendMessage(msg)
elif msg.text in ["愛�プレゼント","All gift"]:
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '12'}
msg.text = None
ki.sendMessage(msg)
kk.sendMessage(msg)
kc.sendMessage(msg)
elif msg.text in ["Cancel","cancel"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
random.choice(KAC).cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No one is inviting")
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Op cancel","Bot cancel"]:
if msg.from_ in admin:
if msg.toType == 2:
G = k3.getGroup(msg.to)
if G.invitee is not None:
gInviMids = [contact.mid for contact in G.invitee]
k3.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
k3.sendText(msg.to,"No one is inviting")
else:
k3.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
k3.sendText(msg.to,"Can not be used outside the group")
else:
k3.sendText(msg.to,"Not for use less than group")
#elif "gurl" == msg.text:
#print cl.getGroup(msg.to)
##cl.sendMessage(msg)
elif msg.text in ["Buka","Open"]:
if msg.from_ in admin:
if msg.toType == 2:
X = random.choice(KAC).getGroup(msg.to)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"QR Sudah Dibuka")
else:
random.choice(KAC).sendText(msg.to,"Sudah Terbuka Plak")
else:
if wait["lang"] == "JP":
| |
<gh_stars>0
#!/bin/bash
"exec" "python" "-u" "$0" "$@"
"""
Build job
Usage:
source $SITEROOT/setup.sh
source $T_DISTREL/AtlasRelease/*/cmt/setup.sh -tag_add=???
buildJob.py -i [sources] -o [libraries]
[sources] : an archive which contains source files.
each file path must be relative to $CMTHOME
[libraries] : an archive which will contain compiled libraries.
each file path is relative to $CMTHOME.
absolute paths in InstallArea are changed to relative paths
except for externals.
Procedure:
* create a tmp dir for 'cmt broadcast'
* expand sources
* make a list of packages
* create requirements in the tmp dir
* do 'cmt broadcast' in the tmp dir
* change absolute paths in InstallArea to be relative
* archive
"""
import os
import re
import sys
import time
import uuid
import getopt
import subprocess
try:
from urllib.request import urlopen
from urllib.error import HTTPError
except ImportError:
from urllib2 import urlopen, HTTPError
from pandawnutil.wnmisc.misc_utils import commands_get_status_output, get_file_via_http, record_exec_directory,\
propagate_missing_sandbox_error
# error codes reported back to the caller
EC_MissingArg = 10  # a required command-line argument was missing
EC_CMTFailed = 20   # the cmt config/broadcast/make step failed
EC_NoTarball = 30   # source tarball could not be fetched or is corrupted
print ("--- start ---")
print (time.ctime())
# defaults for the optional command-line flags
debugFlag = False
sourceURL = 'https://gridui01.usatlas.bnl.gov:25443'
noCompile = False
useCMake = False
# command-line parameters
# NOTE(review): several long options (pilotpars, oldPrefix, directIn, ...)
# are accepted but never read below - presumably kept so the pilot can pass
# a common option set; confirm before removing any of them.
opts, args = getopt.getopt(sys.argv[1:], "i:o:u:",
                           ["pilotpars","debug","oldPrefix=","newPrefix=",
                            "directIn","sourceURL=","lfcHost=","envvarFile=",
                            "useFileStager","accessmode=","copytool=",
                            "noCompile","useCMake"])
for o, a in opts:
    if o == "-i":
        sources = a
    if o == "-o":
        libraries = a
    if o == "--debug":
        debugFlag = True
    if o == "--sourceURL":
        sourceURL = a
    if o == "--noCompile":
        noCompile = True
    if o == "--useCMake":
        useCMake = True
# dump parameters
# Referencing 'sources'/'libraries' raises NameError when -i/-o were not
# supplied; the bare except below converts that into EC_MissingArg.
try:
    print ("sources", sources)
    print ("libraries", libraries)
    print ("debugFlag", debugFlag)
    print ("sourceURL", sourceURL)
    print ("noCompile", noCompile)
    print ("useCMake", useCMake)
except:
    sys.exit(EC_MissingArg)
# save current dir
currentDir = record_exec_directory()
print (time.ctime())
# fetch the source tarball from the server-side cache
url = '%s/cache/%s' % (sourceURL, sources)
tmpStat, tmpOut = get_file_via_http(full_url=url)
if not tmpStat:
    print ("ERROR : " + tmpOut)
    propagate_missing_sandbox_error()
    sys.exit(EC_NoTarball)
# goto work dir (recreated from scratch on every run)
workDir = currentDir + '/workDir'
print (commands_get_status_output('rm -rf %s' % workDir)[-1])
os.makedirs(workDir)
print ("--- Goto workDir %s ---\n" % workDir)
os.chdir(workDir)
# cmake: for CMake-based builds there is nothing to compile here; just
# verify the tarball, rename it to the output name, and exit successfully.
if useCMake:
    # go back to current dir
    os.chdir(currentDir)
    print ("--- Checking tarball for CMake ---\n")
    os.rename(sources,libraries)
    if debugFlag:
        # expand (verbose, also checks integrity)
        tmpStat = subprocess.call('tar xvfzm {0}'.format(libraries),shell=True)
    else:
        # list-only check; a non-zero status means a corrupt archive
        tmpStat = subprocess.call('tar tvfz {0}'.format(libraries),shell=True)
    if tmpStat != 0:
        print ("")
        print ("ERROR : check with tar tvfz gave non-zero return code")
        print ("ERROR : {0} is corrupted".format(sources))
        propagate_missing_sandbox_error()
        sys.exit(EC_NoTarball)
    print ("\n--- finished ---")
    print (time.ctime())
    # return
    sys.exit(0)
# create tmpdir: a unique scratch dir used for 'cmt broadcast'
tmpDir = str(uuid.uuid4()) + '/cmt'
print ("--- Making tmpDir ---",tmpDir)
os.makedirs(tmpDir)
print ("--- expand source ---")
print (time.ctime())
# expand sources (path may be absolute or relative to the exec directory)
if sources.startswith('/'):
    tmpStat, out = commands_get_status_output('tar xvfzm %s' % sources)
else:
    tmpStat, out = commands_get_status_output('tar xvfzm %s/%s' % (currentDir,sources))
print (out)
if tmpStat != 0:
    print ("ERROR : {0} is corrupted".format(sources))
    sys.exit(EC_NoTarball)
# check if groupArea exists (its tarball name is derived from the sources name)
groupFile = re.sub('^sources','groupArea',sources)
groupFile = re.sub('\.gz$','',groupFile)
useGroupArea = False
if os.path.exists("%s/%s" % (workDir,groupFile)):
    useGroupArea = True
    # make groupArea
    groupAreaDir = currentDir + '/personal/groupArea'
    commands_get_status_output('rm -rf %s' % groupAreaDir)
    os.makedirs(groupAreaDir)
    # goto groupArea
    print ("Goto groupAreaDir",groupAreaDir)
    os.chdir(groupAreaDir)
    # expand groupArea
    print (commands_get_status_output('tar xvfm %s/%s' % (workDir,groupFile))[-1])
    # make symlink to InstallArea
    os.symlink('%s/InstallArea' % workDir, 'InstallArea')
    # back to workDir
    os.chdir(workDir)
# list packages: every extracted directory ending in /cmt/ marks one package
packages=[]
for line in out.splitlines():
    name = line.split()[-1]
    if name.endswith('/cmt/') and not '__panda_rootCoreWorkDir' in name:
        # remove /cmt/
        name = re.sub('/cmt/$','',name)
        packages.append(name)
# create requirements file for cmt in the scratch dir
oFile = open(tmpDir+'/requirements','w')
oFile.write('use AtlasPolicy AtlasPolicy-*\n')
useVersionDir = False
# append user packages
for pak in packages:
    # version directory: paths ending in -NN-NN-NN carry an explicit version
    vmat = re.search('-\d+-\d+-\d+$',pak)
    if vmat:
        useVersionDir = True
        # container/package/version layout
        mat = re.search('^(.+)/([^/]+)/([^/]+)$',pak)
        if mat:
            oFile.write('use %s %s %s\n' % (mat.group(2),mat.group(3),mat.group(1)))
        else:
            # package/version layout (no container)
            mat = re.search('^(.+)/([^/]+)$',pak)
            if mat:
                oFile.write('use %s %s\n' % (mat.group(1),mat.group(2)))
            else:
                oFile.write('use %s\n' % pak)
    else:
        # un-versioned: container/package, wildcard the version
        mat = re.search('^(.+)/([^/]+)$',pak)
        if mat:
            oFile.write('use %s %s-* %s\n' % (mat.group(2),mat.group(2),mat.group(1)))
        else:
            oFile.write('use %s\n' % pak)
oFile.close()
# OS release
print ("--- /etc/redhat-release ---")
tmp = commands_get_status_output('cat /etc/redhat-release')[-1]
print (tmp)
match = re.search('(\d+\.\d+[\d\.]*)\s+\([^\)]+\)',tmp)
osRelease = ''
if match is not None:
    osRelease = match.group(1)
print ("Release -> %s" % osRelease)
# processor
print ("--- uname -p ---")
processor = commands_get_status_output('uname -p')[-1]
print (processor)
# cmt config
print ("--- CMTCONFIG ---")
cmtConfig = commands_get_status_output('echo $CMTCONFIG')[-1]
print (cmtConfig)
# compiler
print ("--- gcc ---")
tmp = commands_get_status_output('gcc -v')[-1]
print (tmp)
match = re.search('gcc version (\d+\.\d+[^\s]+)',tmp.split('\n')[-1])
gccVer = ''
if match is not None:
    gccVer = match.group(1)
print ("gcc -> %s" % gccVer)
# check if g++32 is available
print ("--- g++32 ---")
s32,o32 = commands_get_status_output('which g++32')
print (s32)
print (o32)
# make alias of gcc323 for SLC4
# NOTE(review): osRelease >= '4.0' is a lexicographic string comparison; it
# only behaves like a version comparison for single-digit major releases
# (e.g. '10.0' < '4.0'). Presumably acceptable for the SLC3/SLC4 era this
# targets - confirm before reusing elsewhere.
gccAlias = ''
if s32 == 0 and osRelease != '' and osRelease >= '4.0':
    # when CMTCONFIG has slc3-gcc323
    if cmtConfig.find('slc3-gcc323') != -1:
        # unset alias when gcc ver is unknown or already has 3.2.3
        if not gccVer in ['','3.2.3']:
            # 64bit or not: write a wrapper g++ script and prepend it to PATH
            if processor == 'x86_64':
                gccAlias = 'echo "%s -m32 \$*" > g++;' % o32
            else:
                gccAlias = 'echo "%s \$*" > g++;' % o32
            gccAlias += 'chmod +x g++; export PATH=%s/%s:$PATH;' % (workDir,tmpDir)
print ("--- gcc alias ---")
print (" -> %s" % gccAlias)
print ("--- compile ---")
print (time.ctime())
if not useGroupArea:
    # append workdir to CMTPATH
    env = 'CMTPATH=%s:$CMTPATH' % os.getcwd()
else:
    # append workdir+groupArea to CMTPATH
    env = 'CMTPATH=%s:%s:$CMTPATH' % (os.getcwd(),groupAreaDir)
# use short basename: replace a long SITEROOT prefix in CMTPATH with a short
# /tmp symlink to stay under path-length limits. Best-effort only.
symLinkRel = ''
try:
    # get tmp dir name
    tmpDirName = ''
    if 'EDG_TMP' in os.environ:
        tmpDirName = os.environ['EDG_TMP']
    elif 'OSG_WN_TMP' in os.environ:
        tmpDirName = os.environ['OSG_WN_TMP']
    else:
        tmpDirName = '/tmp'
    # make symlink
    if 'SITEROOT' in os.environ:
        # use /tmp if it is too long. 10 is the length of tmp filename
        if len(tmpDirName)+10 > len(os.environ['SITEROOT']):
            print ("INFO : use /tmp since %s is too long" % tmpDirName)
            tmpDirName = '/tmp'
        # make tmp file first (reserves a unique name, then becomes a symlink)
        import tempfile
        tmpFD,tmpPathName = tempfile.mkstemp(dir=tmpDirName)
        os.close(tmpFD)
        # change tmp file to symlink
        tmpS,tmpO = commands_get_status_output('ln -fs %s %s' % (os.environ['SITEROOT'],tmpPathName))
        if tmpS != 0:
            print (tmpO)
            print ("WARNING : cannot make symlink %s %s" % (os.environ['SITEROOT'],tmpPathName))
            # remove
            os.remove(tmpPathName)
        else:
            # compare length: only worthwhile if the symlink path is shorter
            if len(tmpPathName) < len(os.environ['SITEROOT']):
                shortCMTPATH = os.environ['CMTPATH'].replace(os.environ['SITEROOT'],tmpPathName)
                # set path name
                symLinkRel = tmpPathName
            else:
                print ("WARNING : %s is shorter than %s" % (os.environ['SITEROOT'],tmpPathName))
                # remove
                os.remove(tmpPathName)
except Exception as e:
    print ('WARNING : failed to make short CMTPATH due to %s' % str(e))
# construct the shell command that sets up the CMT environment and builds.
# 'com' does the full build; 'comConf' is a variant used later to regenerate
# genConf outputs after the .so copy step.
com = ''
if symLinkRel != '':
    com += 'export SITEROOT=%s;export CMTPATH=%s;' % (symLinkRel,shortCMTPATH)
    if 'CMTPROJECTPATH' in os.environ and os.environ['SITEROOT'] == os.environ['CMTPROJECTPATH']:
        com += 'export CMTPROJECTPATH=%s;' % symLinkRel
com += 'export %s;' % env
com += 'cmt config;'
com += 'source ./setup.sh; source ./setup.sh;'
if gccAlias != '':
    com += gccAlias
if useVersionDir:
    com += 'export CMTSTRUCTURINGSTYLE=with_version_directory;'
com += 'export TestArea=%s;' % workDir
comConf = com
com += 'env; cmt br cmt config;'
com += 'cmt br make'
comConf += 'cmt br "rm -rf ../genConf";'
comConf += 'cmt br make'
# do cmt under tmp dir
if not noCompile:
    print ("cmt:", com)
    os.chdir(tmpDir)
    if not debugFlag:
        status,out = commands_get_status_output(com)
        print (out)
        # look for error since cmt doesn't set error code when make failed
        if status == 0:
            try:
                for line in out.split('\n')[-3:]:
                    if line.startswith('make') and re.search('Error \d+$',line) is not None:
                        status = 1
                        print ("ERROR: make failed. set status=%d" % status)
                        break
            except Exception:
                pass
    else:
        # debug mode: stream output directly instead of capturing it
        status = os.system(com)
    if status:
        print ("ERROR: CMT failed : %d" % status)
        sys.exit(EC_CMTFailed)
    # copy so for genConf: replace InstallArea symlinks with real copies
    # NOTE(review): 'com' is reused below for rm/cp/touch commands, shadowing
    # the build command string - intentional but easy to trip over.
    print ('')
    print ("==== copy so")
    # sleep for touch (presumably to get a clearly newer mtime - confirm)
    time.sleep(120)
    for pak in packages:
        try:
            # look for so
            srcSoDir = '%s/%s/%s' % (workDir,pak,cmtConfig)
            dstSoDir = '%s/InstallArea/%s/lib' % (workDir,cmtConfig)
            srcSoFiles = os.listdir(srcSoDir)
            for srcSoFile in srcSoFiles:
                if srcSoFile.endswith('.so') or srcSoFile.endswith('.dsomap'):
                    # remove symlink
                    com = "rm -fv %s/%s" % (dstSoDir,srcSoFile)
                    print (com)
                    print (commands_get_status_output(com)[-1])
                    # copy so
                    com = "cp -v %s/%s %s" % (srcSoDir,srcSoFile,dstSoDir)
                    print (com)
                    print (commands_get_status_output(com)[-1])
                    # update timestamp to prevent creating symlink again
                    com = "touch %s/%s" % (dstSoDir,srcSoFile)
                    print (com)
                    print (commands_get_status_output(com)[-1])
        except Exception as e:
            # best-effort per package: a missing cmtConfig dir is not fatal
            print ("ERROR: in copy so : %s" % str(e))
    # check lib dir before
    com = 'ls -l %s/InstallArea/%s/lib' % (workDir,cmtConfig)
    print ("==== %s" % com)
    print (commands_get_status_output(com)[-1])
    # run make again for genConf
    print ("==== run genConf again")
    print (comConf)
    print (commands_get_status_output(comConf)[-1])
    # check lib dir after
    com = 'ls -l %s/InstallArea/%s/lib' % (workDir,cmtConfig)
    print ("==== %s" % com)
    print (commands_get_status_output(com)[-1])
# go back to work dir
os.chdir(workDir)
# change absolute paths in InstallArea to relative paths
# (fullPathList tracks already-processed link targets to avoid cycles)
fullPathList = []
def reLink(dir,dirPrefix):
try:
# get files
flist=os.listdir(dir)
dirPrefix = dirPrefix+'/..'
# save the current dir
curDir = os.getcwd()
os.chdir(dir)
for item in flist:
# if symbolic link
if os.path.islink(item):
# change full path to relative path
fullPath = os.readlink(item)
# check if it is already processed, to avoid an infinite loop
if fullPath in fullPathList:
continue
| |
psnrs_train.append(float(x[11]))
losses_val.append(float(x[15]))
psnrs_val.append(float(x[17]))
avr_dt = float(lines[-1].split()[5])
bds_dict = {
'near': tf.cast(near, tf.float32),
'far': tf.cast(far, tf.float32),
}
render_kwargs_train.update(bds_dict)
render_kwargs_test.update(bds_dict)
# Short circuit if only rendering out from trained model
if args.render_only:
print('RENDER ONLY')
if args.render_test:
# render_test switches to test poses
images = images[i_test]
else:
# Default is smoother render_poses path
images = None
testsavedir = os.path.join(basedir, expname, 'renderonly_{}_{:06d}'.format(
'test' if args.render_test else 'path', start))
os.makedirs(testsavedir, exist_ok=True)
print('test poses shape', render_poses.shape)
rgbs, _,_,_,_ = render_path(render_poses, hwf, args.chunk, render_kwargs_test,
gt_imgs=images, savedir=testsavedir, render_factor=args.render_factor, depth_imgs=render_depths)
print('Done rendering', testsavedir)
imageio.mimwrite(os.path.join(testsavedir, 'video.mp4'),
to8b(rgbs), fps=30, quality=8)
return
# Create optimizer
lrate = args.lrate
if args.lrate_decay > 0:
lrate = tf.keras.optimizers.schedules.ExponentialDecay(lrate,
decay_steps=args.lrate_decay * 1000, decay_rate=0.1)
optimizer = tf.keras.optimizers.Adam(lrate)
models['optimizer'] = optimizer
global_step = tf.compat.v1.train.get_or_create_global_step()
global_step.assign(start)
# Prepare raybatch tensor if batching random rays
N_rand = args.N_rand
use_batching = not args.no_batching
if use_batching:
# For random ray batching.
#
# Constructs an array 'rays_rgb' of shape [N*H*W, 3, 3] where axis=1 is
# interpreted as,
# axis=0: ray origin in world space
# axis=1: ray direction in world space
# axis=2: observed RGB color of pixel
print('get rays')
# get_rays_np() returns rays_origin=[H, W, 3], rays_direction=[H, W, 3]
# for each pixel in the image. This stack() adds a new dimension.
rays = [get_rays_np(H, W, focal, p) for p in poses[:, :3, :4]]
rays = np.stack(rays, axis=0) # [N, ro+rd, H, W, 3]
print('done, concats')
# [N, ro+rd+rgb, H, W, 3]
rays_rgb = np.concatenate([rays, images[:, None, ...]], 1)
# [N, H, W, ro+rd+rgb, 3]
rays_rgb = np.transpose(rays_rgb, [0, 2, 3, 1, 4])
rays_rgb = np.stack([rays_rgb[i]
for i in i_train], axis=0) # train images only
# [(N-1)*H*W, ro+rd+rgb, 3]
rays_rgb = np.reshape(rays_rgb, [-1, 3, 3])
rays_rgb = rays_rgb.astype(np.float32)
print('shuffle rays')
np.random.shuffle(rays_rgb)
print('done')
i_batch = 0
N_iters = 400002
print('Begin')
print('TRAIN views are', i_train)
print('TEST views are', i_test)
print('VAL views are', i_val)
# Summary writers
writer = tf.contrib.summary.create_file_writer(
os.path.join(basedir, 'summaries', expname))
writer.set_as_default()
for i in range(start, N_iters):
time0 = time.time()
depth_img = None
# Sample random ray batch
if use_batching:
print("use_batching")
# Random over all images
batch = rays_rgb[i_batch:i_batch+N_rand] # [B, 2+1, 3*?]
batch = tf.transpose(batch, [1, 0, 2])
# batch_rays[i, n, xyz] = ray origin or direction, example_id, 3D position
# target_s[n, rgb] = example_id, observed color.
batch_rays, target_s = batch[:2], batch[2]
i_batch += N_rand
if i_batch >= rays_rgb.shape[0]:
np.random.shuffle(rays_rgb)
i_batch = 0
else: #default lego
# Random from one image
img_i = np.random.choice(i_train)
target = images[img_i]
pose = poses[img_i, :3, :4]
if N_rand is not None:
rays_o, rays_d, _ = get_rays(H, W, focal, pose)
if i < args.precrop_iters:
dH = int(H//2 * args.precrop_frac)
dW = int(W//2 * args.precrop_frac)
coords = tf.stack(tf.meshgrid(
tf.range(H//2 - dH, H//2 + dH),
tf.range(W//2 - dW, W//2 + dW),
indexing='ij'), -1)
if i < 10:
print('precrop', dH, dW, coords[0,0], coords[-1,-1])
elif args.use_depth :
if use_backgd :
depth_img = depth_imgs[img_i]
coords = tf.stack(tf.meshgrid(
tf.range(H), tf.range(W), indexing='ij'), -1) #[H, W, 2]
else :
depth_img = depth_imgs[img_i]
ii, jj = np.where(np.logical_and(depth_img[...,0]>0, depth_img[...,0]<1000))
coords = tf.stack([ii, jj], -1)
else : ## lego default
coords = tf.stack(tf.meshgrid(
tf.range(H), tf.range(W), indexing='ij'), -1) #[H, W, 2]
coords = tf.reshape(coords, [-1, 2]) # [HxW, 2]
select_inds = np.random.choice(
coords.shape[0], size=[N_rand], replace=False)
select_inds = tf.gather_nd(coords, select_inds[:, tf.newaxis]) # [ray_batch , 2]
rays_o = tf.gather_nd(rays_o, select_inds)
rays_d = tf.gather_nd(rays_d, select_inds)
if args.use_depth:
depth_img = tf.gather_nd(depth_img, select_inds)
batch_rays = tf.stack([rays_o, rays_d, depth_img[...,:3]], 0)
else :
batch_rays = tf.stack([rays_o, rays_d], 0)
target_s = tf.gather_nd(target, select_inds)
##### Core optimization loop #####
with tf.GradientTape() as tape:
# Make predictions for color, disparity, accumulated opacity.
if batch_rays is None :
print("batch_rays is none!!!")
rgb, disp, acc, extras = render(
H, W, focal, chunk=args.chunk, rays=batch_rays,
verbose=i < 10, retraw=True, depth_img=depth_img, **render_kwargs_train)
# rgb, disp, acc, extras = render(
# H, W, focal, chunk=args.chunk, rays=batch_rays,
# verbose=i < 10, retraw=True, depth_img=depth_img[..., 0], **render_kwargs_train)
# Compute MSE loss between predicted and true RGB.
img_loss = img2mse(rgb, target_s)
trans = extras['raw'][..., -1]
loss = img_loss
psnr = mse2psnr(img_loss)
if cal_backgd:
loss_train = img_loss
psnr_train = psnr
else:
loss_train = np.mean(np.square(rgb[target_s<1.] - target_s[target_s<1.]))
psnr_train = -10. * np.log10(loss_train)
# Add MSE loss for coarse-grained model
if 'rgb0' in extras:
img_loss0 = img2mse(extras['rgb0'], target_s)
loss += img_loss0
psnr0 = mse2psnr(img_loss0)
gradients = tape.gradient(loss, grad_vars)
optimizer.apply_gradients(zip(gradients, grad_vars))
dt = time.time()-time0
##### end #####
# Rest is logging
def save_weights(net, prefix, i):
    """Serialize net's current weights to <basedir>/<expname>/<prefix>_<iter>.npy."""
    out_path = os.path.join(basedir, expname,
                            '{}_{:06d}.npy'.format(prefix, i))
    np.save(out_path, net.get_weights())
    print('saved weights at', out_path)
if i % args.i_weights == 0:
for k in models:
save_weights(models[k], k, i)
if i % args.i_video == 0 and i > 0:
rgbs, disps,_,_ = render_path(
render_poses, hwf, args.chunk, render_kwargs_test)
print('Done, saving', rgbs.shape, disps.shape)
moviebase = os.path.join(
basedir, expname, '{}_spiral_{:06d}_'.format(expname, i))
imageio.mimwrite(moviebase + 'rgb.mp4',
to8b(rgbs), fps=30, quality=8)
imageio.mimwrite(moviebase + 'disp.mp4',
to8b(disps / np.max(disps)), fps=30, quality=8)
if args.use_viewdirs:
render_kwargs_test['c2w_staticcam'] = render_poses[0][:3, :4]
rgbs_still, _,_,_,_ = render_path(
render_poses, hwf, args.chunk, render_kwargs_test)
render_kwargs_test['c2w_staticcam'] = None
imageio.mimwrite(moviebase + 'rgb_still.mp4',
to8b(rgbs_still), fps=30, quality=8)
if i % args.i_testset == 0 and i > 0:
testsavedir = os.path.join(
basedir, expname, 'testset_{:06d}'.format(i))
os.makedirs(testsavedir, exist_ok=True)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
n = 100
# For each set of style and range settings, plot n random points in the box
# defined by x in [23, 32], y in [0, 100], z in [zlow, zhigh].
#for c, m, zlow, zhigh in [('r', 'o', -50, -25), ('b', '^', -30, -5)]:
if depthimg_mode == 'GT':
_,_, losses, psnrs, times = render_path(_, poses[i_test], hwf, args.chunk, render_kwargs_test,
gt_imgs=images[i_test], savedir=testsavedir, depth_imgs=render_depths)
elif depthimg_mode == 'ESTIMATE':
_,_, losses, psnrs, times = render_path(pc, poses[i_test], hwf, args.chunk, render_kwargs_test,
gt_imgs=images[i_test], savedir=testsavedir, depth_imgs=render_depths)
avr_loss=0
avr_psnr=0
avr_time=0
f = open(testsavedir+"/testlog_"+str(i)+".txt", "a")
for ii in range(len(i_test)):
f.write('iter: {} one_render_time: {:.05f} test_img_i: {} test_loss: {:.7f} test_psnr: {:.4f}\n'\
.format(i, times[ii], i_test[ii],losses[ii],psnrs[ii]))
avr_loss += losses[ii]
avr_psnr += psnrs[ii]
avr_time += times[ii]
avr_loss /= len(i_test)
avr_psnr /= len(i_test)
avr_time /= len(i_test)
f.write('iter: {} avr_train_time: {:.05f} avr_render_time: {:.05f} avr_loss: {:.7f} avr_psnr: {:.4f} stddev_of_psnrs: {:.4f}\n'\
.format(i, avr_dt, avr_time, avr_loss, avr_psnr, statistics.stdev(psnrs)))
f.close()
print('Saved test set')
if i % args.i_print == 0 or i < 10:
print(args.config)
print(expname, i, psnr_train, loss_train, global_step.numpy())
print('iter time {:.05f}'.format(dt))
with tf.contrib.summary.record_summaries_every_n_global_steps(args.i_print):
tf.contrib.summary.scalar('loss', loss)
tf.contrib.summary.scalar('psnr', psnr)
tf.contrib.summary.histogram('tran', trans)
if args.N_importance > 0:
tf.contrib.summary.scalar('psnr0', psnr0)
if i % args.i_img == 0:
val_size = 1
# Log a rendered validation view to Tensorboard
img_val_i_s = np.random.choice(i_val, val_size)
val_losses =[]
val_psnrs=[]
for img_val_i in img_val_i_s:
target = images[img_val_i]
pose = poses[img_val_i, :3, :4]
if args.use_depth:
if depthimg_mode == 'ESTIMATE':
depthmap_estimated, _, _ = pointcloud.make_filled_depthmap(pose, focal, pc, H, W, args.kernel_size)
depthmap = np.stack([depthmap_estimated,depthmap_estimated,depthmap_estimated,depthmap_estimated] ,axis=-1)
rgb, disp, acc, extras = render(
H, W, focal, chunk=args.chunk, c2w=pose, depth_img=depthmap, **render_kwargs_test)
elif depthimg_mode == 'GT':
rgb, disp, acc, extras = render(
H, W, focal, chunk=args.chunk, c2w=pose, depth_img=depth_imgs[img_val_i], **render_kwargs_test)
# plt.figure()
# plt.imshow(depth_imgs[img_val_i])
# plt.figure()
# plt.imshow(target)
# plt.figure()
# plt.imshow(depthmap_estimated)
# plt.show()
if use_backgd :
loss_val = img2mse(rgb, target)
psnr_val = mse2psnr(loss_val)
loss_val = loss_val.numpy()
psnr_val = psnr_val.numpy()
else :
depth = depth_imgs[img_val_i][...,:3]
# depth = depthmap[...,:3]
rgb = tf.where(np.logical_and(depth != args.back_value, depth<1000), rgb, tf.ones(depth.shape))
loss_val = np.mean(np.square(rgb[np.logical_and(depth != args.back_value, depth<1000)] - target[np.logical_and(depth != args.back_value, depth<1000)]))
psnr_val = -10. * np.log10(loss_val)
else:
rgb, disp, acc, extras = render(H, W, focal, chunk=args.chunk, c2w=pose, **render_kwargs_test)
if not cal_backgd:
loss_val = np.mean(np.square(rgb[target<1.] - target[target<1.]))
psnr_val = -10. * np.log10(loss_val)
else :
loss_val = img2mse(rgb, target)
psnr_val = mse2psnr(loss_val)
val_losses.append(loss_val)
val_psnrs.append(psnr_val)
loss_val = statistics.mean(val_losses)
psnr_val = statistics.mean(val_psnrs)
print("train loss:", loss)
print("train loss_train:", loss_train)
print("train psnr:", psnr_train)
print("val loss:", loss_val)
print("val psnr:", psnr_val)
if len(iters_train) == 0 or i > iters_train[-1] :
# Save out the validation image for Tensorboard-free monitoring
testimgdir = os.path.join(basedir, expname, 'tboard_val_imgs')
os.makedirs(testimgdir, exist_ok=True)
imageio.imwrite(os.path.join(testimgdir, '{:06d}.png'.format(i)), to8b(rgb))
imageio.imwrite(os.path.join(testimgdir, 'depth_{:06d}.png'.format(i)), to8b(disp / np.max(disp)))
if args.use_depth and depthimg_mode == 'ESTIMATE':
max_coef = 8.
imageio.imwrite(os.path.join(testimgdir, 'depth_est_{:06d}.png'.format(i)), to8b(depthmap_estimated / max_coef))
iters_train.append(i)
losses_train.append(loss_train)
psnrs_train.append(psnr_train)
| |
= doc(tag)
for attr, val in attrs.items():
assert el.attr(attr) == val
assert el.text() == text
for needle in needles:
self.assertContains(response, needle)
for tag, needle in url_needles.iteritems():
url = doc(tag).text()
self.assertUrlEqual(url, needle)
def test_slug(self):
    """The add-on detail response exposes the slug in a <slug> element."""
    Addon.objects.get(pk=5299).update(type=amo.ADDON_EXTENSION)
    response = make_call('addon/5299', version=1.5)
    addon = Addon.objects.get(pk=5299)
    self.assertContains(response, '<slug>%s</slug>' % addon.slug)
def test_is_featured(self):
    """<featured> reflects per-locale / per-app featured collections."""
    # Before any featuring, the flag is 0.
    self.assertContains(make_call('addon/5299', version=1.5),
                        '<featured>0</featured>')
    # Feature the add-on for Japanese Firefox users only.
    collection_addon = CollectionAddon.objects.create(
        addon=Addon.objects.get(id=5299),
        collection=Collection.objects.create())
    FeaturedCollection.objects.create(
        locale='ja', application=amo.FIREFOX.id,
        collection=collection_addon.collection)
    cases = [('ja', 'firefox', 1),
             ('en-US', 'firefox', 0),
             ('ja', 'android', 0)]
    for lang, app, expected in cases:
        response = make_call('addon/5299', version=1.5, lang=lang, app=app)
        self.assertContains(response, '<featured>%s</featured>' % expected)
def test_default_icon(self):
    """An add-on without an icon type renders an empty 32px <icon>."""
    Addon.objects.get(pk=5299).update(icon_type='')
    response = make_call('addon/5299')
    self.assertContains(response, '<icon size="32"></icon>')
def test_thumbnail_size(self):
    """Preview thumbnails are serialized with their stored dimensions."""
    addon = Addon.objects.get(pk=5299)
    preview = Preview.objects.create(addon=addon)
    preview.sizes = {'thumbnail': [200, 150]}
    preview.save()
    response = make_call('addon/5299', version=1.5)
    for needle in ('<full type="image/png">',
                   '<thumbnail type="image/png" width="200" height="150">'):
        self.assertContains(response, needle)
def test_disabled_addon(self):
    """A user-disabled add-on 404s with an <error> document."""
    Addon.objects.get(pk=3615).update(disabled_by_user=True)
    url = ('/en-US/firefox/api/%.1f/addon/3615'
           % legacy_api.CURRENT_VERSION)
    response = self.client.get(url)
    doc = pq(response.content)
    assert doc[0].tag == 'error'
    assert response.status_code == 404
def test_addon_with_no_listed_versions(self):
    """An add-on whose versions are all unlisted 404s with an <error> doc."""
    self.make_addon_unlisted(Addon.objects.get(pk=3615))
    url = ('/en-US/firefox/api/%.1f/addon/3615'
           % legacy_api.CURRENT_VERSION)
    response = self.client.get(url)
    doc = pq(response.content)
    assert doc[0].tag == 'error'
    assert response.status_code == 404
def test_cross_origin(self):
    # Add-on details should allow cross-origin requests — both for
    # existing add-ons and for ids that are not found.
    for addon_id in (3615, 999):
        response = self.client.get(
            '/en-US/firefox/api/%.1f/addon/%s'
            % (legacy_api.CURRENT_VERSION, addon_id))
        assert response['Access-Control-Allow-Origin'] == '*'
        assert response['Access-Control-Allow-Methods'] == 'GET'
class ListTest(TestCase):
    """Exercise the legacy /list API view through a variety of urls."""

    fixtures = ['base/users', 'base/addon_3615', 'base/featured',
                'addons/featured', 'bandwagon/featured_collections',
                'base/collections']

    def test_defaults(self):
        """A bare /list call returns the default number of items: three."""
        self.assertContains(make_call('list'), '<addon id', 3)

    def test_type_filter(self):
        """List filtering works: /list/recommended/9/1 only shows themes."""
        self.assertContains(make_call('list/recommended/9/1'),
                            """<type id="9">Theme</type>""", 1)

    def test_persona_search_15(self):
        """Theme filtering also works on API version 1.5."""
        self.assertContains(make_call('list/recommended/9/1', version=1.5),
                            """<type id="9">Theme</type>""", 1)

    def test_limits(self):
        """/list/recommended/all/1 honors the explicit limit of one item."""
        self.assertContains(make_call('list/recommended/all/1'),
                            "<addon id", 1)

    def test_version_filter(self):
        """Filtering by application version can exclude every add-on."""
        self.assertNotContains(make_call('list/new/1/1/all/4.0'),
                               "<addon id")

    def test_backfill(self):
        """
        /list/recommended first populates itself with add-ons in its
        locale; if that does not reach the desired limit it backfills from
        the general population of featured add-ons.
        """
        for locale in ('fr', 'he'):
            self.assertContains(make_call('list', lang=locale),
                                "<addon id", 3)

    def test_browser_featured_list(self):
        """
        This is a query that a browser would use to show its featured list.
        c.f.: https://bugzilla.mozilla.org/show_bug.cgi?id=548114
        """
        response = make_call('list/featured/all/10/Linux/3.7a2pre',
                             version=1.3)
        self.assertContains(response, "<addons>")

    def test_average_daily_users(self):
        """Average daily users results come back in descending order."""
        response = make_call('list/by_adu', version=1.5)
        doc = pq(response.content)
        values = [int(node.text) for node in doc("average_daily_users")]
        assert values == sorted(values, reverse=True)

    def test_adu_no_personas(self):
        """Sorting by average daily users must not return Themes."""
        self.assertNotContains(make_call('list/by_adu'),
                               """<type id="9">Theme</type>""")

    def test_featured_no_personas(self):
        """The featured list must not return Themes."""
        self.assertNotContains(make_call('list/featured'),
                               """<type id="9">Theme</type>""")

    def test_json(self):
        """Requesting format=json yields parseable json."""
        response = make_call('list/by_adu?format=json', version=1.5)
        assert json.loads(response.content)

    def test_unicode(self):
        """Non-ASCII bytes in the url must not blow up the view."""
        make_call(u'list/featured/all/10/Linux/3.7a2prexec\xb6\u0153\xec\xb2')
class AddonFilterTest(TestCase):
    """Tests the addon_filter, including the various d2c cases."""

    fixtures = ['base/appversion']

    def setUp(self):
        super(AddonFilterTest, self).setUp()
        # Start with 2 compatible add-ons.
        self.addon1 = addon_factory(version_kw={'max_app_version': '5.0'})
        self.addon2 = addon_factory(version_kw={'max_app_version': '6.0'})
        self.addons = [self.addon1, self.addon2]

    def _defaults(self, **kwargs):
        """Return default addon_filter args, overridden by keyword args."""
        return dict({
            'addons': self.addons,
            'addon_type': 'ALL',
            'limit': 0,
            'app': amo.FIREFOX,
            'platform': 'all',
            'version': '5.0',
            'compat_mode': 'strict',
            'shuffle': False,
        }, **kwargs)

    def test_basic(self):
        """With default arguments, nothing is filtered out."""
        assert addon_filter(**self._defaults()) == self.addons

    def test_limit(self):
        """A limit of one keeps only the first add-on."""
        assert addon_filter(**self._defaults(limit=1)) == [self.addon1]

    def test_app_filter(self):
        """Filtering on addon_type drops add-ons of other types."""
        self.addon1.update(type=amo.ADDON_DICT)
        filtered = addon_filter(
            **self._defaults(addon_type=str(amo.ADDON_EXTENSION)))
        assert filtered == [self.addon2]

    def test_platform_filter(self):
        """Add-ons whose files target a different platform are dropped."""
        first_file = self.addon1.current_version.files.all()[0]
        first_file.update(platform=amo.PLATFORM_WIN.id)
        # Transformers don't know 'bout my files.
        self.addons[0] = Addon.objects.get(pk=self.addons[0].pk)
        filtered = addon_filter(
            **self._defaults(platform=amo.PLATFORM_LINUX.shortname))
        assert filtered == [self.addon2]

    def test_version_filter_strict(self):
        """Strict mode drops add-ons whose max app version is too low."""
        assert addon_filter(**self._defaults(version='6.0')) == [self.addon2]

    def test_version_filter_ignore(self):
        """Ignore mode keeps add-ons regardless of the app version."""
        filtered = addon_filter(**self._defaults(version='6.0',
                                                 compat_mode='ignore'))
        assert filtered == self.addons

    def test_version_version_less_than_min(self):
        # Ensure we filter out addons with a higher min than our app.
        extra = addon_factory(version_kw={
            'min_app_version': '12.0', 'max_app_version': '14.0'})
        filtered = addon_filter(
            **self._defaults(addons=self.addons + [extra], version='11.0',
                             compat_mode='ignore'))
        assert filtered == self.addons

    def test_version_filter_normal_strict_opt_in(self):
        # Ensure we filter out strict opt-in addons in normal mode.
        extra = addon_factory(version_kw={'max_app_version': '7.0'},
                              file_kw={'strict_compatibility': True})
        filtered = addon_filter(
            **self._defaults(addons=self.addons + [extra], version='11.0',
                             compat_mode='normal'))
        assert filtered == self.addons

    def test_version_filter_normal_binary_components(self):
        # Ensure we filter out binary-component addons in normal mode.
        extra = addon_factory(version_kw={'max_app_version': '7.0'},
                              file_kw={'binary_components': True})
        filtered = addon_filter(
            **self._defaults(addons=self.addons + [extra], version='11.0',
                             compat_mode='normal'))
        assert filtered == self.addons

    def test_version_filter_normal_compat_override(self):
        # Add-ons with a matching compat override are dropped in normal mode.
        extra = addon_factory()
        # Add override for this add-on.
        compat = CompatOverride.objects.create(guid='three', addon=extra)
        CompatOverrideRange.objects.create(
            compat=compat, app=1,
            min_version=extra.current_version.version, max_version='*')
        filtered = addon_filter(
            **self._defaults(addons=self.addons + [extra], version='11.0',
                             compat_mode='normal'))
        assert filtered == self.addons

    def test_locale_preferencing(self):
        # Add-ons matching the current locale get prioritized.
        localized = addon_factory()
        localized.description = {'de': 'Unst Unst'}
        localized.save()
        translation.activate('de')
        filtered = addon_filter(
            **self._defaults(addons=self.addons + [localized]))
        assert filtered == [localized] + self.addons
        translation.deactivate()
class TestGuidSearch(TestCase):
    """Exercise the legacy search/guid: API endpoint."""

    fixtures = ('base/addon_6113', 'base/addon_3615')
    # These are the guids for addon 6113 and 3615.
    good = ('search/guid:{22870005-adef-4c9d-ae36-d0e1f2f27e5a},'
            '{2fa4ed95-0317-4c6a-a74c-5f3e3912c1f9}')

    def setUp(self):
        """Give add-on 3615 a compat override so searches serialize it."""
        super(TestGuidSearch, self).setUp()
        addon = Addon.objects.get(id=3615)
        c = CompatOverride.objects.create(guid=addon.guid)
        app = addon.compatible_apps.keys()[0]
        CompatOverrideRange.objects.create(compat=c, app=app.id)

    def test_success(self):
        """Both add-ons come back, plus a compat block for the overridden one."""
        r = make_call(self.good)
        dom = pq(r.content)
        assert set(['3615', '6113']) == (
            set([a.attrib['id'] for a in dom('addon')]))
        # Make sure the <addon_compatibility> blocks are there.
        assert ['3615'] == [a.attrib['id'] for a in dom('addon_compatibility')]

    @patch('waffle.switch_is_active', lambda x: True)
    def test_api_caching_locale(self):
        """A response cached for one locale must not leak into another."""
        addon = Addon.objects.get(pk=3615)
        addon.summary = {'en-US': 'Delicious', 'fr': 'Francais'}
        addon.save()
        # This will prime the cache with the en-US version.
        response = make_call(self.good)
        self.assertContains(response, '<summary>Delicious')
        # We should get back the fr version, not the en-US one.
        response = make_call(self.good, lang='fr')
        self.assertContains(response, '<summary>Francais')

    def test_api_caching_app(self):
        """A response cached for one app must not leak into another."""
        response = make_call(self.good)
        assert 'en-US/firefox/addon/None/reviews/?src=api' in response.content
        assert 'en-US/android/addon/None/reviews/' not in response.content
        response = make_call(self.good, app='android')
        assert 'en-US/android/addon/None/reviews/?src=api' in response.content
        assert 'en-US/firefox/addon/None/reviews/' not in response.content

    def test_xss(self):
        """Markup in an add-on name must be HTML-escaped in the output."""
        addon_factory(guid='test@xss', name='<script>alert("test");</script>')
        r = make_call('search/guid:test@xss')
        assert '<script>alert' not in r.content
        # BUG FIX: this previously asserted that the same unescaped string
        # WAS in the response, directly contradicting the line above (the
        # two asserts could never both pass). The name must instead appear
        # in its HTML-escaped form.
        assert '&lt;script&gt;alert' in r.content

    def test_block_inactive(self):
        """User-disabled add-ons are excluded from guid search results."""
        Addon.objects.filter(id=6113).update(disabled_by_user=True)
        r = make_call(self.good)
        assert set(['3615']) == (
            set([a.attrib['id'] for a in pq(r.content)('addon')]))

    def test_block_nonpublic(self):
        """Non-public add-ons are excluded from guid search results."""
        Addon.objects.filter(id=6113).update(status=amo.STATUS_NOMINATED)
        r = make_call(self.good)
        assert set(['3615']) == (
            set([a.attrib['id'] for a in pq(r.content)('addon')]))

    def test_empty(self):
        """
        Bug: https://bugzilla.mozilla.org/show_bug.cgi?id=607044
        guid:foo, should search for just 'foo' and not empty guids.
        """
        r = make_call('search/guid:koberger,')
        doc = pq(r.content)
        # No addons should exist with guid koberger and the , should not
        # indicate that we are searching for null guid.
        assert len(doc('addon')) == 0

    def test_addon_compatibility(self):
        """A hosted add-on's compat override is serialized with its id."""
        addon = Addon.objects.get(id=3615)
        r = make_call('search/guid:%s' % addon.guid)
        dom = pq(r.content, parser='xml')
        assert len(dom('addon_compatibility')) == 1
        assert dom('addon_compatibility')[0].attrib['id'] == '3615'
        assert dom('addon_compatibility')[0].attrib['hosted'] == 'true'
        assert dom('addon_compatibility guid').text() == addon.guid
        assert dom('addon_compatibility > name').text() == ''
        assert dom('addon_compatibility version_ranges version_range '
                   'compatible_applications application appID').text() == (
            amo.FIREFOX.guid)

    def test_addon_compatibility_not_hosted(self):
        """An override with no hosted add-on serializes its version ranges."""
        c = CompatOverride.objects.create(guid='yeah', name='ok')
        CompatOverrideRange.objects.create(app=1, compat=c,
                                           min_version='1', max_version='2',
                                           min_app_version='3',
                                           max_app_version='4')
        r = make_call('search/guid:%s' % c.guid)
        dom = pq(r.content, parser='xml')
        assert len(dom('addon_compatibility')) == 1
        assert dom('addon_compatibility')[0].attrib['hosted'] == 'false'
        assert 'id' not in dom('addon_compatibility')[0].attrib
        assert dom('addon_compatibility guid').text() == c.guid
        assert dom('addon_compatibility > name').text() == c.name
        cr = c.compat_ranges[0]
        assert dom('version_range')[0].attrib['type'] == cr.override_type()
        assert dom('version_range > min_version').text() == cr.min_version
        assert dom('version_range > max_version').text() == cr.max_version
        assert dom('application name').text() == amo.FIREFOX.pretty
        assert dom('application application_id').text() == str(amo.FIREFOX.id)
        assert dom('application appID').text() == amo.FIREFOX.guid
        assert dom('application min_version').text() == cr.min_app_version
        assert dom('application max_version').text() == cr.max_app_version
class SearchTest(ESTestCase):
fixtures = ('base/appversion',
'base/addon_6113', 'base/addon_40', 'base/addon_3615',
'base/addon_6704_grapple', 'base/addon_4664_twitterbar',
'base/addon_10423_youtubesearch', 'base/featured')
no_results = """<searchresults total_results="0">"""
def setUp(self):
    """Create tag fixture data, reindex the public add-ons, build url parts."""
    super(SearchTest, self).setUp()
    self.addons = Addon.objects.filter(status=amo.STATUS_PUBLIC,
                                       disabled_by_user=False)
    t = Tag.objects.create(tag_text='ballin')
    a = Addon.objects.get(pk=3615)
    AddonTag.objects.create(tag=t, addon=a)
    # Re-save every add-on (presumably to trigger reindexing before the
    # self.refresh() below — confirm against ESTestCase). A plain loop
    # replaces the original side-effect-only list comprehension, which
    # built and discarded a throwaway list of Nones.
    for addon in self.addons:
        addon.save()
    self.refresh()
    self.url = ('/en-US/firefox/api/%(api_version)s/search/%(query)s/'
                '%(type)s/%(limit)s/%(platform)s/%(app_version)s/'
                '%(compat_mode)s')
    self.defaults = {
        'api_version': '1.5',
        'type': 'all',
        'limit': '30',
        'platform': 'Linux',
        'app_version': '4.0',
        'compat_mode': 'strict',
    }
def test_double_escaping(self):
    """
    For API < 1.5 we use double escaping in search.
    """
    query = ('search/%25E6%2596%25B0%25E5%2590%258C%25E6%2596%'
             '2587%25E5%25A0%2582/all/10/WINNT/3.6')
    response = make_call(query, version=1.2)
    self.assertContains(response, '<addon id="6113">')
def test_zero_results(self):
"""
Tests that the search API correctly gives us zero results found.
"""
# The following URLs should yield zero results.
zeros = (
'yslow',
'jsonview',
'firebug/3',
'grapple/all/10/Linux',
'jsonview/all/10/Darwin/1.0',
)
for url in zeros:
if not url.startswith('/'):
url = '/en-US/firefox/api/1.2/search/' + url
response = | |
"namespace": None,
"parent": None,
"date_created": ANY,
"date_modified": ANY,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"access_users": {
"owner": ["pingou"],
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"access_groups": {
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"tags": [],
"priorities": {},
"custom_keys": [],
"close_status": [
"Invalid",
"Insufficient data",
"Fixed",
"Duplicate",
],
"milestones": {},
},
"branch_from": "feature",
"repo_from": {
"id": 1,
"name": "test",
"fullname": "test",
"url_path": "test",
"full_url": "http://localhost.localdomain/test",
"description": "test project #1",
"namespace": None,
"parent": None,
"date_created": ANY,
"date_modified": ANY,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"access_users": {
"owner": ["pingou"],
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"access_groups": {
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"tags": [],
"priorities": {},
"custom_keys": [],
"close_status": [
"Invalid",
"Insufficient data",
"Fixed",
"Duplicate",
],
"milestones": {},
},
"remote_git": None,
"date_created": ANY,
"updated_on": ANY,
"last_updated": ANY,
"closed_at": None,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"assignee": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"status": "Open",
"commit_start": ANY,
"commit_stop": ANY,
"closed_by": None,
"initial_comment": None,
"cached_merge_status": "unknown",
"threshold_reached": None,
"tags": [],
"comments": [],
},
"project": {
"id": 1,
"name": "test",
"fullname": "test",
"url_path": "test",
"full_url": "http://localhost.localdomain/test",
"description": "test project #1",
"namespace": None,
"parent": None,
"date_created": ANY,
"date_modified": ANY,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"access_users": {
"owner": ["pingou"],
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"access_groups": {
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"tags": [],
"priorities": {},
"custom_keys": [],
"close_status": [
"Invalid",
"Insufficient data",
"Fixed",
"Duplicate",
],
"milestones": {},
},
"agent": "pingou",
},
),
):
output = self.app.post(
"/test/pull-request/1/update",
data=data,
follow_redirects=True,
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
"<title>PR#1: PR from the feature branch - test\n - "
"Pagure</title>",
output_text,
)
self.assertIn(
'<h4 class="ml-1">\n <div>\n '
'<span class="fa fa-fw text-success fa-arrow-circle-down pt-1"></span>\n '
'<span class="text-success '
'font-weight-bold">#1</span>\n '
'<span class="font-weight-bold">\n '
"PR from the feature branch\n",
output_text,
)
self.assertIn("Request assigned", output_text)
# Pull-Request closed - reset assignee
repo = pagure.lib.query.get_authorized_project(
self.session, "test"
)
req = repo.requests[0]
req.status = "Closed"
req.closed_by_in = 1
self.session.add(req)
self.session.commit()
data = {"csrf_token": csrf_token, "user": None}
with testing.mock_sends(
api.Message(
topic="pagure.request.assigned.reset",
body={
"request": {
"id": 1,
"full_url": "http://localhost.localdomain/test/pull-request/1",
"uid": ANY,
"title": "PR from the feature branch",
"branch": "master",
"project": {
"id": 1,
"name": "test",
"fullname": "test",
"url_path": "test",
"full_url": "http://localhost.localdomain/test",
"description": "test project #1",
"namespace": None,
"parent": None,
"date_created": ANY,
"date_modified": ANY,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"access_users": {
"owner": ["pingou"],
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"access_groups": {
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"tags": [],
"priorities": {},
"custom_keys": [],
"close_status": [
"Invalid",
"Insufficient data",
"Fixed",
"Duplicate",
],
"milestones": {},
},
"branch_from": "feature",
"repo_from": {
"id": 1,
"name": "test",
"fullname": "test",
"url_path": "test",
"full_url": "http://localhost.localdomain/test",
"description": "test project #1",
"namespace": None,
"parent": None,
"date_created": ANY,
"date_modified": ANY,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"access_users": {
"owner": ["pingou"],
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"access_groups": {
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"tags": [],
"priorities": {},
"custom_keys": [],
"close_status": [
"Invalid",
"Insufficient data",
"Fixed",
"Duplicate",
],
"milestones": {},
},
"remote_git": None,
"date_created": ANY,
"updated_on": ANY,
"last_updated": ANY,
"closed_at": None,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"assignee": None,
"status": "Closed",
"commit_start": ANY,
"commit_stop": ANY,
"closed_by": None,
"initial_comment": None,
"cached_merge_status": "unknown",
"threshold_reached": None,
"tags": [],
"comments": [
{
"id": 1,
"commit": None,
"tree": None,
"filename": None,
"line": None,
"comment": "**Metadata Update from @pingou**:\n- Request assigned",
"parent": None,
"date_created": ANY,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"edited_on": None,
"editor": None,
"notification": True,
"reactions": {},
}
],
},
"pullrequest": {
"id": 1,
"uid": ANY,
"full_url": "http://localhost.localdomain/test/pull-request/1",
"title": "PR from the feature branch",
"branch": "master",
"project": {
"id": 1,
"name": "test",
"fullname": "test",
"full_url": "http://localhost.localdomain/test",
"url_path": "test",
"description": "test project #1",
"namespace": None,
"parent": None,
"date_created": ANY,
"date_modified": ANY,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"access_users": {
"owner": ["pingou"],
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"access_groups": {
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"tags": [],
"priorities": {},
"custom_keys": [],
"close_status": [
"Invalid",
"Insufficient data",
"Fixed",
"Duplicate",
],
"milestones": {},
},
"branch_from": "feature",
"repo_from": {
"id": 1,
"name": "test",
"fullname": "test",
"url_path": "test",
"full_url": "http://localhost.localdomain/test",
"description": "test project #1",
"namespace": None,
"parent": None,
"date_created": ANY,
"date_modified": ANY,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"access_users": {
"owner": ["pingou"],
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"access_groups": {
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"tags": [],
"priorities": {},
"custom_keys": [],
"close_status": [
"Invalid",
"Insufficient data",
"Fixed",
"Duplicate",
],
"milestones": {},
},
"remote_git": None,
"date_created": ANY,
"updated_on": ANY,
"last_updated": ANY,
"closed_at": None,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"assignee": None,
"status": "Closed",
"commit_start": ANY,
"commit_stop": ANY,
"closed_by": None,
"initial_comment": None,
"cached_merge_status": "unknown",
"threshold_reached": None,
"tags": [],
"comments": [
{
"id": 1,
"commit": None,
"tree": None,
"filename": None,
"line": None,
"comment": "**Metadata Update from @pingou**:\n- Request assigned",
"parent": None,
"date_created": ANY,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"edited_on": None,
"editor": None,
"notification": True,
"reactions": {},
}
],
},
"project": {
"id": 1,
"name": "test",
"fullname": "test",
"url_path": "test",
"full_url": "http://localhost.localdomain/test",
"description": "test project #1",
"namespace": None,
"parent": None,
"date_created": ANY,
"date_modified": ANY,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"access_users": {
"owner": ["pingou"],
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"access_groups": {
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"tags": [],
"priorities": {},
"custom_keys": [],
"close_status": [
"Invalid",
"Insufficient data",
"Fixed",
"Duplicate",
],
"milestones": {},
},
"agent": "pingou",
},
),
pagure_messages.PullRequestAssignedResetV1(
topic="pagure.pull-request.assigned.reset",
body={
"pullrequest": {
"id": 1,
"uid": ANY,
"full_url": "http://localhost.localdomain/test/pull-request/1",
"title": "PR from the feature branch",
"branch": "master",
"project": {
"id": 1,
"name": "test",
"fullname": "test",
"url_path": "test",
"full_url": "http://localhost.localdomain/test",
"description": "test project #1",
"namespace": None,
"parent": None,
"date_created": ANY,
"date_modified": ANY,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"access_users": {
"owner": ["pingou"],
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"access_groups": {
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"tags": [],
"priorities": {},
"custom_keys": [],
"close_status": [
"Invalid",
"Insufficient data",
"Fixed",
"Duplicate",
],
"milestones": {},
},
"branch_from": "feature",
"repo_from": {
"id": 1,
"name": "test",
"fullname": "test",
"url_path": "test",
"full_url": "http://localhost.localdomain/test",
"description": "test project #1",
"namespace": None,
"parent": None,
"date_created": ANY,
"date_modified": ANY,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"access_users": {
"owner": ["pingou"],
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"access_groups": {
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"tags": [],
"priorities": {},
"custom_keys": [],
"close_status": [
"Invalid",
"Insufficient data",
"Fixed",
"Duplicate",
],
"milestones": {},
},
"remote_git": None,
"date_created": ANY,
"updated_on": ANY,
"last_updated": ANY,
"closed_at": None,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"assignee": None,
"status": "Closed",
"commit_start": ANY,
"commit_stop": ANY,
"closed_by": None,
"initial_comment": None,
"cached_merge_status": "unknown",
"threshold_reached": None,
"tags": [],
"comments": [
{
"id": 1,
"commit": None,
"tree": None,
"filename": None,
"line": None,
"comment": "**Metadata Update from @pingou**:\n- Request assigned",
"parent": None,
"date_created": ANY,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"edited_on": None,
"editor": None,
"notification": True,
"reactions": {},
}
],
},
"project": {
"id": 1,
"name": "test",
"fullname": "test",
"url_path": "test",
"full_url": "http://localhost.localdomain/test",
"description": "test project #1",
"namespace": None,
"parent": None,
"date_created": ANY,
"date_modified": ANY,
"user": {
"name": "pingou",
"fullname": "<NAME>",
"url_path": "user/pingou",
"full_url": "http://localhost.localdomain/user/pingou",
},
"access_users": {
"owner": ["pingou"],
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"access_groups": {
"admin": [],
"commit": [],
"collaborator": [],
"ticket": [],
},
"tags": [],
"priorities": {},
"custom_keys": [],
"close_status": [
"Invalid",
"Insufficient data",
"Fixed",
"Duplicate",
| |
'well_id': 186,
},
u'H19': {
'col_and_row': u'H19',
'row': 8,
'col': 19,
'well_id': 187,
},
u'M24': {
'col_and_row': u'M24',
'row': 13,
'col': 24,
'well_id': 312,
},
u'H11': {
'col_and_row': u'H11',
'row': 8,
'col': 11,
'well_id': 179,
},
u'H12': {
'col_and_row': u'H12',
'row': 8,
'col': 12,
'well_id': 180,
},
u'H13': {
'col_and_row': u'H13',
'row': 8,
'col': 13,
'well_id': 181,
},
u'H14': {
'col_and_row': u'H14',
'row': 8,
'col': 14,
'well_id': 182,
},
u'H15': {
'col_and_row': u'H15',
'row': 8,
'col': 15,
'well_id': 183,
},
u'H16': {
'col_and_row': u'H16',
'row': 8,
'col': 16,
'well_id': 184,
},
u'M23': {
'col_and_row': u'M23',
'row': 13,
'col': 23,
'well_id': 311,
},
u'O20': {
'col_and_row': u'O20',
'row': 15,
'col': 20,
'well_id': 356,
},
u'O21': {
'col_and_row': u'O21',
'row': 15,
'col': 21,
'well_id': 357,
},
u'E24': {
'col_and_row': u'E24',
'row': 5,
'col': 24,
'well_id': 120,
},
u'E20': {
'col_and_row': u'E20',
'row': 5,
'col': 20,
'well_id': 116,
},
u'E21': {
'col_and_row': u'E21',
'row': 5,
'col': 21,
'well_id': 117,
},
u'E22': {
'col_and_row': u'E22',
'row': 5,
'col': 22,
'well_id': 118,
},
u'E23': {
'col_and_row': u'E23',
'row': 5,
'col': 23,
'well_id': 119,
},
u'G19': {
'col_and_row': u'G19',
'row': 7,
'col': 19,
'well_id': 163,
},
u'G18': {
'col_and_row': u'G18',
'row': 7,
'col': 18,
'well_id': 162,
},
u'G17': {
'col_and_row': u'G17',
'row': 7,
'col': 17,
'well_id': 161,
},
u'G16': {
'col_and_row': u'G16',
'row': 7,
'col': 16,
'well_id': 160,
},
u'G15': {
'col_and_row': u'G15',
'row': 7,
'col': 15,
'well_id': 159,
},
u'G14': {
'col_and_row': u'G14',
'row': 7,
'col': 14,
'well_id': 158,
},
u'G13': {
'col_and_row': u'G13',
'row': 7,
'col': 13,
'well_id': 157,
},
u'G12': {
'col_and_row': u'G12',
'row': 7,
'col': 12,
'well_id': 156,
},
u'G11': {
'col_and_row': u'G11',
'row': 7,
'col': 11,
'well_id': 155,
},
u'G10': {
'col_and_row': u'G10',
'row': 7,
'col': 10,
'well_id': 154,
},
u'O17': {
'col_and_row': u'O17',
'row': 15,
'col': 17,
'well_id': 353,
},
u'F1': {
'col_and_row': u'F1',
'row': 6,
'col': 1,
'well_id': 121,
},
u'F2': {
'col_and_row': u'F2',
'row': 6,
'col': 2,
'well_id': 122,
},
u'F3': {
'col_and_row': u'F3',
'row': 6,
'col': 3,
'well_id': 123,
},
u'F4': {
'col_and_row': u'F4',
'row': 6,
'col': 4,
'well_id': 124,
},
u'F5': {
'col_and_row': u'F5',
'row': 6,
'col': 5,
'well_id': 125,
},
u'F6': {
'col_and_row': u'F6',
'row': 6,
'col': 6,
'well_id': 126,
},
u'F7': {
'col_and_row': u'F7',
'row': 6,
'col': 7,
'well_id': 127,
},
u'F8': {
'col_and_row': u'F8',
'row': 6,
'col': 8,
'well_id': 128,
},
u'F9': {
'col_and_row': u'F9',
'row': 6,
'col': 9,
'well_id': 129,
},
u'O19': {
'col_and_row': u'O19',
'row': 15,
'col': 19,
'well_id': 355,
},
u'O18': {
'col_and_row': u'O18',
'row': 15,
'col': 18,
'well_id': 354,
},
u'L6': {
'col_and_row': u'L6',
'row': 12,
'col': 6,
'well_id': 270,
},
u'L7': {
'col_and_row': u'L7',
'row': 12,
'col': 7,
'well_id': 271,
},
u'L4': {
'col_and_row': u'L4',
'row': 12,
'col': 4,
'well_id': 268,
},
u'L5': {
'col_and_row': u'L5',
'row': 12,
'col': 5,
'well_id': 269,
},
u'L2': {
'col_and_row': u'L2',
'row': 12,
'col': 2,
'well_id': 266,
},
u'L3': {
'col_and_row': u'L3',
'row': 12,
'col': 3,
'well_id': 267,
},
u'L1': {
'col_and_row': u'L1',
'row': 12,
'col': 1,
'well_id': 265,
},
u'L8': {
'col_and_row': u'L8',
'row': 12,
'col': 8,
'well_id': 272,
},
u'L9': {
'col_and_row': u'L9',
'row': 12,
'col': 9,
'well_id': 273,
},
u'O10': {
'col_and_row': u'O10',
'row': 15,
'col': 10,
'well_id': 346,
},
u'E9': {
'col_and_row': u'E9',
'row': 5,
'col': 9,
'well_id': 105,
},
u'E8': {
'col_and_row': u'E8',
'row': 5,
'col': 8,
'well_id': 104,
},
u'E5': {
'col_and_row': u'E5',
'row': 5,
'col': 5,
'well_id': 101,
},
u'E4': {
'col_and_row': u'E4',
'row': 5,
'col': 4,
'well_id': 100,
},
u'E7': {
'col_and_row': u'E7',
'row': 5,
'col': 7,
'well_id': 103,
},
u'E6': {
'col_and_row': u'E6',
'row': 5,
'col': 6,
'well_id': 102,
},
u'E1': {
'col_and_row': u'E1',
'row': 5,
'col': 1,
'well_id': 97,
},
u'E3': {
'col_and_row': u'E3',
'row': 5,
'col': 3,
'well_id': 99,
},
u'E2': {
'col_and_row': u'E2',
'row': 5,
'col': 2,
'well_id': 98,
},
u'F18': {
'col_and_row': u'F18',
'row': 6,
'col': 18,
'well_id': 138,
},
u'F19': {
'col_and_row': u'F19',
'row': 6,
'col': 19,
'well_id': 139,
},
u'F12': {
'col_and_row': u'F12',
'row': 6,
'col': 12,
'well_id': 132,
},
u'F13': {
'col_and_row': u'F13',
'row': 6,
'col': 13,
'well_id': 133,
},
u'F10': {
'col_and_row': u'F10',
'row': 6,
'col': 10,
'well_id': 130,
},
u'F11': {
'col_and_row': u'F11',
'row': 6,
'col': 11,
'well_id': 131,
},
u'F16': {
'col_and_row': u'F16',
'row': 6,
'col': 16,
'well_id': 136,
},
u'F17': {
'col_and_row': u'F17',
'row': 6,
'col': 17,
'well_id': 137,
},
u'F14': {
'col_and_row': u'F14',
'row': 6,
'col': 14,
'well_id': 134,
},
u'F15': {
'col_and_row': u'F15',
'row': 6,
'col': 15,
'well_id': 135,
},
u'N12': {
'col_and_row': u'N12',
'row': 14,
'col': 12,
'well_id': 324,
},
u'N13': {
'col_and_row': u'N13',
'row': 14,
'col': 13,
'well_id': 325,
},
u'N10': {
'col_and_row': u'N10',
'row': 14,
'col': 10,
'well_id': 322,
},
u'N11': {
'col_and_row': u'N11',
'row': 14,
'col': 11,
'well_id': 323,
},
u'N16': {
'col_and_row': u'N16',
'row': 14,
'col': 16,
'well_id': 328,
},
u'N17': {
'col_and_row': u'N17',
'row': 14,
'col': 17,
'well_id': 329,
},
u'N14': {
'col_and_row': u'N14',
'row': 14,
'col': 14,
'well_id': 326,
},
u'N15': {
'col_and_row': u'N15',
'row': 14,
'col': 15,
'well_id': 327,
},
u'N18': {
'col_and_row': u'N18',
'row': 14,
'col': 18,
'well_id': 330,
},
u'N19': {
'col_and_row': u'N19',
'row': 14,
'col': 19,
'well_id': 331,
},
u'P24': {
'col_and_row': u'P24',
'row': 16,
'col': 24,
'well_id': 384,
},
u'P21': {
'col_and_row': u'P21',
'row': 16,
'col': 21,
'well_id': 381,
},
u'P20': {
'col_and_row': u'P20',
'row': 16,
'col': 20,
'well_id': 380,
},
u'P23': {
'col_and_row': u'P23',
'row': 16,
'col': 23,
'well_id': 383,
},
u'P22': {
'col_and_row': u'P22',
'row': 16,
'col': 22,
'well_id': 382,
},
u'H24': {
'col_and_row': u'H24',
'row': 8,
'col': 24,
'well_id': 192,
},
u'H21': {
'col_and_row': u'H21',
'row': 8,
'col': 21,
'well_id': 189,
},
u'H20': {
'col_and_row': u'H20',
'row': 8,
'col': 20,
'well_id': 188,
},
u'H23': {
'col_and_row': u'H23',
'row': 8,
'col': 23,
'well_id': 191,
},
u'H22': {
'col_and_row': u'H22',
'row': 8,
'col': 22,
'well_id': 190,
},
u'K3': {
'col_and_row': u'K3',
'row': 11,
'col': 3,
'well_id': 243,
},
u'K2': {
'col_and_row': u'K2',
'row': 11,
'col': 2,
'well_id': 242,
},
u'K1': {
'col_and_row': u'K1',
'row': 11,
'col': 1,
'well_id': 241,
},
u'K7': {
'col_and_row': u'K7',
'row': 11,
'col': 7,
'well_id': 247,
},
u'K6': {
'col_and_row': u'K6',
'row': 11,
'col': 6,
'well_id': 246,
},
u'K5': {
'col_and_row': u'K5',
'row': 11,
'col': 5,
'well_id': 245,
},
u'K4': {
'col_and_row': u'K4',
'row': 11,
'col': 4,
'well_id': 244,
},
u'K9': {
'col_and_row': u'K9',
'row': 11,
'col': 9,
'well_id': 249,
},
u'K8': {
'col_and_row': u'K8',
'row': 11,
'col': 8,
'well_id': 248,
},
u'D10': {
'col_and_row': u'D10',
'row': 4,
'col': 10,
'well_id': 82,
},
u'M11': {
'col_and_row': u'M11',
'row': 13,
'col': 11,
'well_id': 299,
},
u'M10': {
'col_and_row': u'M10',
'row': 13,
'col': 10,
'well_id': 298,
},
u'M13': {
'col_and_row': u'M13',
'row': 13,
'col': 13,
'well_id': 301,
},
u'M12': {
'col_and_row': u'M12',
'row': 13,
'col': 12,
'well_id': 300,
},
u'M15': {
'col_and_row': u'M15',
'row': 13,
'col': 15,
'well_id': 303,
},
u'M14': {
'col_and_row': u'M14',
'row': 13,
'col': 14,
'well_id': 302,
},
u'M17': {
'col_and_row': u'M17',
'row': 13,
'col': 17,
'well_id': 305,
},
u'M16': {
'col_and_row': u'M16',
'row': 13,
'col': 16,
'well_id': 304,
},
u'M19': {
'col_and_row': u'M19',
'row': 13,
'col': 19,
'well_id': 307,
},
u'M18': {
'col_and_row': u'M18',
'row': 13,
'col': 18,
'well_id': 306,
},
u'E19': {
'col_and_row': u'E19',
'row': 5,
'col': 19,
'well_id': 115,
},
u'E18': {
'col_and_row': u'E18',
'row': 5,
'col': 18,
'well_id': 114,
},
u'E11': {
'col_and_row': u'E11',
'row': 5,
'col': 11,
'well_id': 107,
},
u'E10': {
'col_and_row': u'E10',
'row': 5,
'col': 10,
'well_id': 106,
},
u'E13': {
'col_and_row': u'E13',
'row': 5,
'col': 13,
'well_id': 109,
},
u'E12': {
'col_and_row': u'E12',
'row': 5,
'col': 12,
'well_id': 108,
},
u'E15': {
'col_and_row': u'E15',
'row': 5,
'col': 15,
'well_id': 111,
},
u'E14': {
'col_and_row': u'E14',
'row': 5,
'col': 14,
'well_id': 110,
},
u'E17': {
'col_and_row': u'E17',
'row': 5,
'col': 17,
'well_id': 113,
},
u'E16': | |
import dataclasses
import itertools
import operator
import xml.etree.ElementTree as ET
from abc import ABC, abstractmethod
import collections
from copy import deepcopy
from functools import reduce
from typing import Optional, Callable, List, Union, Iterable
import more_itertools.more
import numpy as np
from more_itertools import collapse, first
from pytest import approx
import botbowl
import botbowl.core.forward_model as forward_model
import botbowl.core.pathfinding.python_pathfinding as pf
import botbowl.core.procedure as procedures
from botbowl import Skill, BBDieResult
from examples.tree_search.hashmap import HashMap, create_gamestate_hash
from tests.util import only_fixed_rolls
# accumulated_prob_2d_roll[t] is the probability that a 2d6 roll is >= t.
# Indices 0 and 1 are padding (a 2d6 roll is always at least 2, so P = 1);
# e.g. P(>=7) = 21/36, P(>=12) = 1/36.
accumulated_prob_2d_roll = np.array([36, 36, 36, 35, 33, 30, 26, 21, 15, 10, 6, 3, 1]) / 36

# Named feature/weight vector over the game-state heuristics; also used as
# the weights argument when serializing the tree (see to_xml methods below).
HeuristicVector = collections.namedtuple('HeuristicVector', ['score',
                                                             'tv_on_pitch',
                                                             'ball_position',
                                                             'ball_carried',
                                                             'ball_marked'])
@dataclasses.dataclass
class MCTS_Info:
    """Per-node search statistics, stored on ActionNode.info for SearchTree users."""
    probabilities: np.ndarray      # NOTE(review): presumably per-action selection probabilities — confirm
    actions: List[botbowl.Action]  # candidate actions; index-aligned with action_values and visits
    action_values: np.ndarray      # accumulated value per action (ActionNode.to_xml divides by visits for the mean)
    visits: np.ndarray             # visit count per action
    heuristic: np.ndarray          # heuristic feature vector of this state — TODO confirm layout vs HeuristicVector
    reward: np.ndarray             # NOTE(review): immediate reward vector — confirm semantics
    state_value: float             # scalar value estimate of this state — TODO confirm source
class Node(ABC):
    """Abstract base for search-tree nodes.

    A node records the forward-model steps (``change_log``) needed to recreate
    its game state from its parent's state, plus the forward model's step
    counter (``step_nbr``) at creation time.
    """
    parent: Optional['Node']
    children: List['Node']
    change_log: List[botbowl.core.forward_model.Step]
    step_nbr: int  # forward model's step count
    top_proc: str  # str() of the procedure on top of the stack; "GAME_OVER" once the game ended

    def __init__(self, game: botbowl.Game, parent: Optional['Node']):
        """
        :param game: game whose current state this node represents.
        :param parent: parent node, or None for the tree root.
        """
        self.step_nbr = game.get_step()
        self.parent = parent
        self.children = []
        # Steps taken since the parent's state; empty for the root.
        self.change_log = game.trajectory.action_log[parent.step_nbr:] if parent is not None else []
        self.top_proc = str(game.get_procedure()) if not game.state.game_over else "GAME_OVER"
        # A non-root node must be reachable from its parent by at least one step.
        assert parent is None or len(self.change_log) > 0

    def _connect_child(self, child_node: 'Node'):
        """Append child_node to children; the child must already point at self."""
        assert child_node.parent is self
        self.children.append(child_node)

    def __repr__(self):
        # Fixed: use type(self).__name__ — the old str(type(self)).split(".")[-1]
        # produced a trailing "'>" fragment — and close the parenthesis.
        return f"{type(self).__name__}({self.step_nbr=}, {self.top_proc})"

    @staticmethod
    def format_proc(proc) -> str:
        """Return str(proc) truncated at the first '(' (whole string if none).

        Fixed: str.find returns -1 when '(' is absent, which previously
        chopped the last character off the string.
        """
        text = str(proc)
        index_first_parenthesis = text.find('(')
        if index_first_parenthesis == -1:
            return text
        return text[:index_first_parenthesis]

    @abstractmethod
    def to_xml(self, parent, weights):
        """Serialize this subtree as XML under *parent*."""
        pass
class ActionNode(Node):
    """Node where a team must choose an action."""
    team: botbowl.Team                       # team to act in this state (home team once the game is over)
    explored_actions: List[botbowl.Action]   # index-aligned with self.children
    is_home: bool
    turn: int
    info: Optional[MCTS_Info]  # Only purpose is to store information for users of SearchTree
    simple_hash: str           # cheap gamestate hash; used to bucket nodes in SearchTree's HashMap

    def __init__(self, game: botbowl.Game, parent: Optional[Node]):
        super().__init__(game, parent)
        self.team = game.active_team
        # After game over there is no active team; default to the home team.
        if game.state.game_over:
            self.team = game.state.home_team
        self.is_home = self.team is game.state.home_team
        assert self.is_home or self.team is game.state.away_team or game.state.game_over
        self.explored_actions = []
        self.turn = self.team.state.turn
        self.info = None
        self.simple_hash = create_gamestate_hash(game)

    @property
    def depth(self):
        """Number of ActionNode ancestors above this node (root has depth 0)."""
        return len(list(filter(lambda n: type(n) is ActionNode, self.get_all_parents(include_self=False))))

    def connect_child(self, child_node: Node, action: botbowl.Action):
        """Attach child_node as the outcome of taking *action* from this node."""
        super()._connect_child(child_node)
        self.explored_actions.append(action)

    def get_child_action(self, child: Node) -> botbowl.Action:
        """Return the action that led from this node to *child*."""
        assert child in self.children
        return self.explored_actions[self.children.index(child)]

    def make_root(self):
        """Detach this node from its parent so it can serve as the tree root."""
        self.parent = None
        self.change_log.clear()

    def get_all_parents(self, include_self) -> Iterable[Node]:
        """Yield ancestors from closest parent up to the root (optionally starting with self)."""
        if include_self:
            yield self

        node = self.parent
        while node is not None:
            yield node
            node = node.parent
        return

    def get_children_from_action(self, action: botbowl.Action) -> Iterable['ActionNode']:
        """Return ActionNode leaves reachable by taking *action* here ([] if unexplored)."""
        if action not in self.explored_actions:
            return []
        child = self.children[self.explored_actions.index(action)]
        return get_action_node_children(child)

    def get_accum_prob(self, *, end_node=None):
        """
        :param end_node: node where search ends, if None (default) it ends at the root of the tree
        :returns: accumulated probability from chance nodes between this node and end_node
        """
        node = self
        prob = 1.0
        # Walk up the tree, multiplying in each chance node's outcome probability.
        while node.parent is not end_node:
            if isinstance(node.parent, ChanceNode):
                prob *= node.parent.get_child_prob(node)
            node = node.parent
        return prob

    def __repr__(self):
        team = "home" if self.is_home else "away"
        return f"ActionNode({team}, {self.top_proc}, depth={self.depth}, acc_prob={self.get_accum_prob():.3f}, " \
               f"len(children)={len(self.children)})"

    @staticmethod
    def format_action(action: botbowl.Action) -> str:
        """Render an action as 'ACTION_TYPE [position]' for XML output."""
        pos_str = "" if action.position is None else f" {action.position}"
        return f"{action.action_type.name}{pos_str}"

    def to_xml(self, parent: Union[ET.Element, ET.SubElement], weights: HeuristicVector):
        """Write this node and its explored actions as an <action_node> element under *parent*."""
        team = "home" if self.is_home else "away"
        tag_attributes = {'proc': Node.format_proc(self.top_proc),
                          'team': team,
                          'num_actions': str(len(self.explored_actions))}
        this_tag = ET.SubElement(parent, 'action_node',
                                 attrib=tag_attributes)

        for action, child_node in zip(self.explored_actions, self.children):
            a_index = self.info.actions.index(action)
            visits = self.info.visits[a_index]
            # Mean weighted value of the action: weights · accumulated values / visit count.
            action_values = np.dot(weights, self.info.action_values[a_index]) / visits
            action_tag_attributes = {'action': ActionNode.format_action(action),
                                     'visits': str(visits),
                                     'action_values': f'{action_values:.3f}'}
            action_tag = ET.SubElement(this_tag, 'action', attrib=action_tag_attributes)
            child_node.to_xml(action_tag, weights)
def get_action_node_children(node: Node) -> Iterable[ActionNode]:
    """Collect every ActionNode leaf reachable from *node* through chance nodes."""
    if isinstance(node, ChanceNode):
        return more_itertools.collapse(
            get_action_node_children(child) for child in node.children)
    if isinstance(node, ActionNode):
        return [node]
    raise ValueError()
class ChanceNode(Node):
    """
    Holds the possible outcomes of a dice roll, one child per outcome.

    If no children are connected yet, the game must be stepped from here
    until actions become available; the node could then possibly be
    converted to an ActionNode.
    """
    child_probability: List[float]  # index-aligned with self.children; probabilities sum to 1.0

    def __init__(self, game: botbowl.Game, parent: Optional[Node]):
        super().__init__(game, parent)
        self.child_probability = []

    def connect_child(self, child_node: Node, prob: float):
        """Attach an outcome node together with its probability."""
        super()._connect_child(child_node)
        self.child_probability.append(prob)

    def get_child_prob(self, child_node: Node) -> float:
        """Return the probability associated with *child_node*."""
        assert child_node in self.children
        position = self.children.index(child_node)
        return self.child_probability[position]

    def to_xml(self, parent: Union[ET.Element, ET.SubElement], weights: HeuristicVector):
        """Write this node as a <chance_node> element with one <outcome> per child."""
        this_tag = ET.SubElement(parent, 'chance_node',
                                 attrib={'proc': Node.format_proc(self.top_proc)})
        for prob, child in zip(self.child_probability, self.children):
            outcome_tag = ET.SubElement(this_tag, 'outcome', attrib={'p': f"{prob:.2f}"})
            child.to_xml(outcome_tag, weights)
class SearchTree:
    """Search tree over game states, backed by botbowl's forward model.

    Owns a deep copy of the game; set_game_to_node() replays or reverts
    forward-model steps to move that copy between previously visited states.
    """
    game: botbowl.Game
    root_node: ActionNode
    all_action_nodes: HashMap  # every ActionNode in the tree, bucketed by gamestate hash
    current_node: ActionNode   # node whose state self.game currently represents
    on_every_action_node: Callable[['SearchTree', ActionNode], None]

    def __init__(self, game, on_every_action_node=None):
        """
        :param game: game to deep-copy as the tree's private game object.
        :param on_every_action_node: optional callback invoked for every
            ActionNode added to the tree, including the root.
        """
        self.game = deepcopy(game)
        # NOTE(review): agents marked human — presumably so botbowl waits for
        # explicit actions instead of querying the agents; confirm.
        self.game.home_agent.human = True
        self.game.away_agent.human = True
        if not self.game.trajectory.enabled:
            self.game.enable_forward_model()

        # NOTE(review): the root is built from the *argument* game, not the
        # deep copy — assumes both are in identical, synced states; confirm.
        self.root_node = ActionNode(game, None)
        self.all_action_nodes = HashMap([self.root_node])
        self.current_node = self.root_node
        self.on_every_action_node = on_every_action_node
        if self.on_every_action_node is not None:
            self.on_every_action_node(self, self.root_node)

    def set_new_root(self, game: botbowl.Game) -> None:
        """Re-root the tree at the existing node matching *game*'s state.

        If no stored node matches, the tree is rebuilt from scratch.
        """
        if self.game is game:
            raise ValueError("Can't search the tree for its own game object.")

        target_node = ActionNode(game, None)
        found_node = None

        # compare with all nodes that have the same hash
        for node in self.all_action_nodes[target_node]:
            self.set_game_to_node(node)
            diff = self.game.state.compare(game.state)
            # Report entries do not affect state equality.
            diff = filter(lambda d: d[:13] != 'state.reports', diff)
            diff = list(diff)
            if len(diff) == 0:
                found_node = node
                break

        if found_node is None:
            # No matching node: start over with a completely fresh tree.
            self.__init__(game, self.on_every_action_node)
        else:
            self.root_node = found_node
            self.root_node.make_root()
            self.set_game_to_node(self.root_node)
            self.all_action_nodes = HashMap()
            self._look_for_action_nodes(self.root_node)  # add all children to the 'self.all_action_nodes'

    def set_game_to_node(self, target_node: ActionNode) -> None:
        """Uses forward model to set self.game to the state of Node"""
        assert self.current_node.step_nbr == self.game.get_step(), \
            f"gamestate {self.game.get_step()} and SearchTree {self.current_node.step_nbr} are not synced, big fault!"

        if target_node is self.current_node:
            return
        if target_node is self.root_node:
            self.game.revert(self.root_node.step_nbr)
            self.current_node = target_node
            return

        assert target_node in self.all_action_nodes, "target node is not in SearchTree, major fault"

        # Case 1: target is a descendant of current_node — replay steps forward.
        if self.current_node.step_nbr < target_node.step_nbr \
                and self.current_node in itertools.takewhile(lambda n: n.step_nbr >= self.current_node.step_nbr,
                                                             target_node.get_all_parents(include_self=False)):
            # forward current_node -> target_node
            nodes_to_forward = itertools.takewhile(lambda n: n is not self.current_node,
                                                   target_node.get_all_parents(include_self=True))
            for node in reversed(list(nodes_to_forward)):
                self.game.forward(node.change_log)
        # Case 2: target is an ancestor of current_node — revert to its step count.
        elif self.current_node.step_nbr > target_node.step_nbr \
                and target_node in itertools.takewhile(lambda n: n.step_nbr >= target_node.step_nbr,
                                                       self.current_node.get_all_parents(include_self=False)):
            self.game.revert(target_node.step_nbr)
        else:  # not in same branch. We need to revert back to a common node and then forward to target
            current_node_parents = set(self.current_node.get_all_parents(include_self=False))
            first_common_node = more_itertools.first_true(iterable=target_node.get_all_parents(include_self=True),
                                                          pred=lambda n: n in current_node_parents)
            self.game.revert(first_common_node.step_nbr)
            nodes_to_forward = itertools.takewhile(lambda n: n is not first_common_node,
                                                   target_node.get_all_parents(include_self=True))
            for node in reversed(list(nodes_to_forward)):
                self.game.forward(node.change_log)

        self.current_node = target_node
        assert target_node.step_nbr == self.game.get_step(), f"{target_node.step_nbr} != {self.game.get_step()}"

    def expand_action_node(self, node: ActionNode, action: botbowl.Action) -> List[ActionNode]:
        """Expand *action* from *node*; returns all newly created ActionNodes."""
        assert action not in node.explored_actions, f"{action} has already been explored in this node"
        assert node in self.all_action_nodes, f"{node} is not in all_action_nodes"

        self.set_game_to_node(node)
        new_node = expand_action(self.game, action, node)
        node.connect_child(new_node, action)
        self.set_game_to_node(self.root_node)

        # find all newly added action nodes
        return self._look_for_action_nodes(new_node)

    def _look_for_action_nodes(self, node: Node) -> List[ActionNode]:
        """Recursively register every ActionNode under *node*; returns the ones found."""
        new_action_nodes = []
        if isinstance(node, ActionNode):
            assert node not in self.all_action_nodes
            new_action_nodes.append(node)
            self.all_action_nodes.add(node)
            if self.on_every_action_node is not None:
                self.on_every_action_node(self, node)
        for child_node in node.children:
            new_action_nodes.extend(self._look_for_action_nodes(child_node))
        return new_action_nodes

    def to_xml(self, weights: HeuristicVector = None) -> ET.ElementTree:
        """Serialize the whole tree to XML, scoring actions with *weights*."""
        if weights is None:
            weights = HeuristicVector(score=1, tv_on_pitch=0, ball_position=0, ball_carried=0, ball_marked=0)
        root = ET.Element('search_tree')
        self.root_node.to_xml(root, weights)
        if hasattr(ET, 'indent'):
            ET.indent(root)  # pretty-print; ET.indent exists only on Python >= 3.9
        return ET.ElementTree(root)
def expand_action(game: botbowl.Game, action: botbowl.Action, parent: ActionNode) -> Node:
    """
    Apply *action* to *game* and expand the resulting state into tree nodes.

    :param game: game object used for calculations. Will be reverted to original state.
    :param action: action to be evaluated.
    :param parent: parent node
    :returns: a Node subtree covering the outcome(s) of the action — see
        expand_none_action. (The previous "list of tuples" description did
        not match the actual return value.)

    Not called recursively
    """
    # noinspection PyProtectedMember
    assert game._is_action_allowed(action)
    assert game.trajectory.enabled
    # NOTE(review): fast_mode disabled — presumably so the forward model
    # records every intermediate step; confirm.
    game.config.fast_mode = False
    with only_fixed_rolls(game):
        game.step(action)

    return expand_none_action(game, parent)
def get_expanding_function(proc, moving_handled, pickup_handled) -> Optional[Callable[[botbowl.Game, Node], Node]]:
    """Return the expansion handler for *proc*, or None if it needs no special handling."""
    proc_type = type(proc)

    # Procedures whose handling depends on extra state, checked explicitly.
    if proc_type in (procedures.Dodge, procedures.GFI):
        return None if moving_handled else expand_moving
    if proc_type is procedures.Pickup:
        return expand_pickup if (not pickup_handled and proc.roll is None) else None
    if proc_type is procedures.Block:
        return expand_block if (proc.roll is None and proc.gfi is False) else None

    # Procedures with an unconditional handler.
    fixed_handlers = {
        procedures.Armor: expand_armor,
        procedures.Injury: expand_injury,
        procedures.Bounce: expand_bounce,
        procedures.Catch: expand_catch,
        procedures.ThrowIn: expand_throw_in,
        procedures.PreKickoff: handle_ko_wakeup,
        procedures.ClearBoard: handle_sweltering_heat,
    }
    return fixed_handlers.get(proc_type)
# saved for later
# procedures.Foul
# procedures.KickoffTable
# procedures.PassAttempt
# procedures.Intercept
# procedures.Scatter
def expand_none_action(game: botbowl.Game, parent: Node, moving_handled=False, pickup_handled=False) -> Node:
"""
:param game: the game state is changed during expansion but restored to state of argument 'parent'
:param parent: shall represent the current state of argument 'game'. game state is restored to parent.step_nbr
:param moving_handled:
:param pickup_handled:
:returns: A subclass of Node:
- ChanceNode in a nestled structure with multiple ActionNode as leaf nodes.
- ActionNode if only one possible outcome.
param game is changed but restored to initial state af
Called recursively.
"""
while len(game.state.available_actions) == 0 and not game.state.game_over:
proc = game.get_procedure()
expand_func = get_expanding_function(proc, moving_handled, pickup_handled)
if expand_func is not None:
assert len(botbowl.D6.FixedRolls) | |
. .
# Class Methods
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
@classmethod
def finalize_quark(klass):
    """
    Finalize quark's class attributes.

    Finalization can only proceed when all quark classes have been
    defined due to interdependencies.
    """
    # This quark's antiparticle is the Charm quark class.
    klass.AntiParticle = klass.quark_class('Charm')
@classmethod
def print_properties(klass, indent=0, **print_kwargs):
    """
    Print this quark class's fixed properties.

    Parameters:
        indent          Line indentation.
        print_kwargs    Print control keyword arguments.
    """
    klass.print_quark_properties(indent=indent, **print_kwargs)
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# Class Instance Methods
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
def __init__(self, color):
    """
    Anticharm quark initializer.

    Parameters:
        color           QCD color charge.
    """
    # All state handling is delegated to the shared Quark base initializer.
    Quark.__init__(self, color)
def __repr__(self):
    """Return an eval-style representation including the color charge."""
    klass = type(self)
    return f"{klass.__module__}.{klass.__name__}({self.color_charge!r})"
def __str__(self):
    """Return the quark's common name."""
    return self.Name
def print_state(self, indent=0, **print_kwargs):
    """
    Print anticharm quark state to output stream using default encoder.

    Parameters:
        indent          Line indentation.
        print_kwargs    Print control keyword arguments.
    """
    # Delegate to the shared Quark implementation.
    Quark.print_state(self, indent=indent, **print_kwargs)
# -----------------------------------------------------------------------------
# Strange Class
# -----------------------------------------------------------------------------
@Quark.subclass()
class Strange(Quark):
    """Strange quark: second-generation quark with electric charge -1/3."""

    # Fixed particle properties.
    Pid = sm.ParticleId.STRANGE_QUARK
    Name = "strange"
    Symbol = default_encoder('$sm(s)')
    RestMass = 96.0                       # MeV/c^2
    ElecCharge = ElectricCharge(-1, 3)    # electric charge -1/3
    QSpin = SpinQuantumNumber(1, 2)       # intrinsic spin quantum number 1/2
    Generation = 2
    Strangeness = -1

    @classmethod
    def finalize_quark(klass):
        """
        Finalize quark's class attributes.

        Finalization can only proceed when all quark classes have been
        defined due to interdependencies.
        """
        klass.AntiParticle = klass.quark_class('AntiStrange')

    @classmethod
    def print_properties(klass, indent=0, **print_kwargs):
        """
        Print the strange quark's fixed class properties.

        Parameters:
            indent          Line indentation.
            print_kwargs    Print control keyword arguments.
        """
        klass.print_quark_properties(indent=indent, **print_kwargs)

    def __init__(self, color):
        """
        Strange quark initializer.

        Parameters:
            color           QCD color charge.
        """
        Quark.__init__(self, color)

    def __repr__(self):
        klass = type(self)
        return f"{klass.__module__}.{klass.__name__}({self.color_charge!r})"

    def __str__(self):
        return self.Name

    def print_state(self, indent=0, **print_kwargs):
        """
        Print strange quark state to output stream using default encoder.

        Parameters:
            indent          Line indentation.
            print_kwargs    Print control keyword arguments.
        """
        Quark.print_state(self, indent=indent, **print_kwargs)
# -----------------------------------------------------------------------------
# AnitStrange Class
# -----------------------------------------------------------------------------
@Quark.subclass()
class AntiStrange(Quark):
    """Antistrange quark: second-generation antiquark with electric charge +1/3."""

    # Fixed particle properties.
    Pid = sm.ParticleId.STRANGE_ANTIQUARK
    Name = "antistrange"
    Symbol = default_encoder('$sm(s-bar)')
    RestMass = 96.0                       # MeV/c^2
    ElecCharge = ElectricCharge(1, 3)     # electric charge +1/3
    QSpin = SpinQuantumNumber(1, 2)       # intrinsic spin quantum number 1/2
    Generation = 2
    Strangeness = 1

    @classmethod
    def finalize_quark(klass):
        """
        Finalize quark's class attributes.

        Finalization can only proceed when all quark classes have been
        defined due to interdependencies.
        """
        klass.AntiParticle = klass.quark_class('Strange')

    @classmethod
    def print_properties(klass, indent=0, **print_kwargs):
        """
        Print the antistrange quark's fixed class properties.

        Parameters:
            indent          Line indentation.
            print_kwargs    Print control keyword arguments.
        """
        klass.print_quark_properties(indent=indent, **print_kwargs)

    def __init__(self, color):
        """
        Antistrange quark initializer.

        Parameters:
            color           QCD color charge.
        """
        Quark.__init__(self, color)

    def __repr__(self):
        klass = type(self)
        return f"{klass.__module__}.{klass.__name__}({self.color_charge!r})"

    def __str__(self):
        return self.Name

    def print_state(self, indent=0, **print_kwargs):
        """
        Print antistrange quark state to output stream using default encoder.

        Parameters:
            indent          Line indentation.
            print_kwargs    Print control keyword arguments.
        """
        Quark.print_state(self, indent=indent, **print_kwargs)
# -----------------------------------------------------------------------------
# Top Class
# -----------------------------------------------------------------------------
@Quark.subclass()
class Top(Quark):
  """ Top quark class. """
  #
  # Class Fixed Properties
  #
  Pid = sm.ParticleId.TOP_QUARK
  Name = "top"
  Symbol = default_encoder('$sm(t)')
  RestMass = 173100.0 # MeV/c^2
  ElecCharge = ElectricCharge(2, 3) # electric charge
  QSpin = SpinQuantumNumber(1, 2) # intrinsic spin number
  Generation = 3
  Topness = 1
  # . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
  # Class Methods
  # . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
  @classmethod
  def finalize_quark(klass):
    """
    Finalize the quark's class attributes.

    Deferred until every quark class has been defined, since particle and
    antiparticle classes reference one another.
    """
    klass.AntiParticle = klass.quark_class('AntiTop')

  @classmethod
  def print_properties(klass, indent=0, **print_kwargs):
    """ Print the fixed class properties. """
    klass.print_quark_properties(indent=indent, **print_kwargs)
  # . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
  # Class Instance Methods
  # . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
  def __init__(self, color):
    """
    Top quark initializer.

    Parameters:
      color   QCD color charge.
    """
    super().__init__(color)

  def __repr__(self):
    return f"{self.__module__}.{type(self).__name__}({self.color_charge!r})"

  def __str__(self):
    return self.Name

  def print_state(self, indent=0, **print_kwargs):
    """
    Print top quark state to output stream using default encoder.

    Parameters:
      indent        Line indentation.
      print_kwargs  Print control keyword arguments.
    """
    super().print_state(indent=indent, **print_kwargs)
# -----------------------------------------------------------------------------
# AntiTop Class
# -----------------------------------------------------------------------------
@Quark.subclass()
class AntiTop(Quark):
  """ AntiTop quark class. """
  #
  # Class Fixed Properties
  #
  Pid = sm.ParticleId.TOP_ANTIQUARK
  Name = "antitop"
  Symbol = default_encoder('$sm(t-bar)')
  RestMass = 173100.0 # MeV/c^2
  ElecCharge = ElectricCharge(-2, 3) # electric charge
  QSpin = SpinQuantumNumber(1, 2) # intrinsic spin number
  Generation = 3
  Topness = -1
  # . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
  # Class Methods
  # . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
  @classmethod
  def finalize_quark(klass):
    """
    Finalize quark's class attributes.
    Finalization can only proceed when all quark classes have been
    defined due to interdependencies (particle/antiparticle pairs
    reference one another).
    """
    klass.AntiParticle = klass.quark_class('Top')
  @classmethod
  def print_properties(klass, indent=0, **print_kwargs):
    # Print the fixed class properties via the shared Quark helper.
    klass.print_quark_properties(indent=indent, **print_kwargs)
# -----------------------------------------------------------------------------
# Class: XYZFile
# used for getUncertaintyDEM
# by <NAME>, Jul 28 2016
#
# Class: AmpcoroffFile
# manipulating the ampcor outpuf (and translating it into a geotiff)
# by <NAME>, Jul 10 2018
import numpy as np
from carst.libraster import SingleRaster
from scipy.interpolate import griddata
from scipy.stats import gaussian_kde
import pickle
import matplotlib.pyplot as plt
class DuoZArray:
    """
    Pairs two offset series (z1 = x offsets, z2 = y offsets) with the run
    configuration object (ini).  Provides 2-D density-based outlier
    detection, diagnostic histogram plots, and pixel-to-velocity
    conversion statistics.
    """
    def __init__(self, z1=None, z2=None, ini=None):
        # z1, z2: offset samples (ZArray-like; VeloCorrectionInfo assigns
        #   MAD_* attributes onto them).
        # ini:    configuration object; velocorrection/imagepair entries
        #   are read by the methods below.
        self.z1 = z1
        self.z2 = z2
        self.ini = ini
        self.signal_idx = None  # boolean inlier mask set by OutlierDetection2D
    def OutlierDetection2D(self, thres_sigma=3.0, plot=True):
        """
        Classify (z1, z2) points by 2-D Gaussian-KDE density: points whose
        density is below max(density) / exp(thres_sigma**2 / 2) are marked
        outliers.  Stores the boolean mask in self.signal_idx; optionally
        saves a two-panel scatter diagnostic PNG (full view / zoomed view).
        """
        x = self.z1
        y = self.z2
        xy = np.vstack([x, y])
        # Per-point density evaluated at the sample points themselves.
        z = gaussian_kde(xy)(xy)
        thres_multiplier = np.e ** (thres_sigma ** 2 / 2) # normal dist., +- sigma number
        thres = max(z) / thres_multiplier
        idx = z >= thres
        self.signal_idx = idx
        if plot:
            pt_style = {'s': 5, 'edgecolor': None}
            # Center/half-width chosen so the left panel shows every point.
            ax_center = [x[idx].mean(), y[idx].mean()]
            ax_halfwidth = max([max(x) - x[idx].mean(),
                                x[idx].mean() - min(x),
                                max(y) - y[idx].mean(),
                                y[idx].mean() - min(y)]) + 1
            # Left panel: all points, colored by density; outliers in red.
            plt.subplot(121)
            plt.scatter(x, y, c=z, **pt_style)
            plt.scatter(x[~idx], y[~idx], c='xkcd:red', **pt_style)
            plt.axis('scaled')
            plt.xlim([ax_center[0] - ax_halfwidth, ax_center[0] + ax_halfwidth])
            plt.ylim([ax_center[1] - ax_halfwidth, ax_center[1] + ax_halfwidth])
            plt.ylabel('Offset-Y (pixels)')
            plt.xlabel('Offset-X (pixels)')
            # Right panel: same data zoomed to the inlier bounding box.
            plt.subplot(122)
            plt.scatter(x, y, c=z, **pt_style)
            plt.scatter(x[~idx], y[~idx], c='xkcd:red', **pt_style)
            plt.axis('scaled')
            plt.xlim([min(x[idx]) - 1, max(x[idx]) + 1])
            plt.ylim([min(y[idx]) - 1, max(y[idx]) + 1])
            plt.savefig(self.ini.velocorrection['label_bedrock_histogram'] + '_vx-vs-vy.png', format='png', dpi=200)
            plt.clf()
    def HistWithOutliers(self, which=None):
        """
        Save a histogram PNG for one component ('x' -> z1, 'y' -> z2):
        all samples in red, inliers (signal_idx) overlaid in blue.
        Raises ValueError for any other `which`.
        Requires OutlierDetection2D to have been run (signal_idx set).
        """
        if which == 'x':
            x = self.z1
            pnglabel = '_vx.png'
        elif which == 'y':
            x = self.z2
            pnglabel = '_vy.png'
        else:
            raise ValueError('Please indicate "x" or "y" for your histogram.')
        # Bars per unique value; bar width = smallest gap between values.
        r_uniq, r_uniq_n = np.unique(x, return_counts=True)
        b_uniq, b_uniq_n = np.unique(x[self.signal_idx], return_counts=True)
        bar_w = min(np.diff(r_uniq))
        # Plot window: inlier range padded by one (full-sample) std dev.
        lbound = min(x[self.signal_idx]) - np.std(x)
        rbound = max(x[self.signal_idx]) + np.std(x)
        N_outside_lbound_red = int(sum(x < lbound))
        N_outside_rbound_red = int(sum(x > rbound))
        plt.bar(r_uniq, r_uniq_n, width=bar_w, color='xkcd:red')
        plt.bar(b_uniq, b_uniq_n, width=bar_w, color='xkcd:blue')
        plt.xlim([lbound, rbound])
        title_str = 'Red points outside (L|R): {}|{}'.format(N_outside_lbound_red, N_outside_rbound_red)
        plt.title(title_str)
        plt.ylabel('N')
        plt.xlabel('offset (pixels)')
        plt.savefig(self.ini.velocorrection['label_bedrock_histogram'] + pnglabel, format='png', dpi=200)
        plt.clf()
    def VeloCorrectionInfo(self):
        """
        Compute median/std/mean of the inlier offsets in pixels, convert
        them to velocities using the image pair's pixel size and date
        separation, write a summary log file, and return the
        (vx, vy) velocity arrays with MAD_* statistics attached.
        """
        a = SingleRaster(self.ini.imagepair['image1'], date=self.ini.imagepair['image1_date'])
        b = SingleRaster(self.ini.imagepair['image2'], date=self.ini.imagepair['image2_date'])
        datedelta = b.date - a.date
        geot = a.GetGeoTransform()
        xres = geot[1]
        yres = geot[5]  # typically negative in GDAL geotransforms; abs() used below
        x_culled = self.z1[self.signal_idx]
        y_culled = self.z2[self.signal_idx]
        # Inlier statistics, stored on the ZArray objects themselves.
        self.z1.MAD_median = np.median(x_culled)
        self.z1.MAD_std = np.std(x_culled, ddof=1)
        self.z1.MAD_mean = np.mean(x_culled)
        self.z2.MAD_median = np.median(y_culled)
        self.z2.MAD_std = np.std(y_culled, ddof=1)
        self.z2.MAD_mean = np.mean(y_culled)
        # pixels -> length/day using pixel size and the day separation.
        vx_zarray_velo = self.z1[:] * abs(xres) / datedelta.days
        vx_zarray_velo.MAD_median = self.z1.MAD_median * abs(xres) / datedelta.days
        vx_zarray_velo.MAD_std = self.z1.MAD_std * abs(xres) / datedelta.days
        vx_zarray_velo.MAD_mean = self.z1.MAD_mean * abs(xres) / datedelta.days
        vy_zarray_velo = self.z2[:] * abs(yres) / datedelta.days
        vy_zarray_velo.MAD_median = self.z2.MAD_median * abs(yres) / datedelta.days
        vy_zarray_velo.MAD_std = self.z2.MAD_std * abs(yres) / datedelta.days
        vy_zarray_velo.MAD_mean = self.z2.MAD_mean * abs(yres) / datedelta.days
        with open(self.ini.velocorrection['label_logfile'], 'w') as f:
            f.write( 'Total points over bedrock = {:6n}\n'.format(self.z1.size) )
            f.write( '-------- Unit: Pixels --------\n')
            f.write( 'median_x_px = {:6.3f}\n'.format(float(self.z1.MAD_median)) )
            f.write( 'median_y_px = {:6.3f}\n'.format(float(self.z2.MAD_median)) )
            f.write( 'std_x_px = {:6.3f}\n'.format(float(self.z1.MAD_std)) )
            f.write( 'std_y_px = {:6.3f}\n'.format(float(self.z2.MAD_std)) )
            f.write( 'mean_x_px = {:6.3f}\n'.format(float(self.z1.MAD_mean)) )
            f.write( 'mean_y_px = {:6.3f}\n'.format(float(self.z2.MAD_mean)) )
            f.write( '-------- Unit: Velocity (L/T; most likely m/day) --------\n')
            f.write( 'median_x = {:6.3f}\n'.format(float(vx_zarray_velo.MAD_median)) )
            f.write( 'median_y = {:6.3f}\n'.format(float(vy_zarray_velo.MAD_median)) )
            f.write( 'std_x = {:6.3f}\n'.format(float(vx_zarray_velo.MAD_std)) )
            f.write( 'std_y = {:6.3f}\n'.format(float(vy_zarray_velo.MAD_std)) )
            f.write( 'mean_x = {:6.3f}\n'.format(float(vx_zarray_velo.MAD_mean)) )
            f.write( 'mean_y = {:6.3f}\n'.format(float(vy_zarray_velo.MAD_mean)) )
        return vx_zarray_velo, vy_zarray_velo
class ZArray(np.ndarray):
    # A subclass from ndarray, with some new attributes and fancier methods for our purposes
    # please see
    # https://docs.scipy.org/doc/numpy-1.13.0/user/basics.subclassing.html
    # for more details.
    #WARNING: NO NANs SHOULD BE FOUND IN ZArray !!! IT CAN GIVE YOU A BAD RESULT !!!
    def __new__(cls, input_array):
        # For now input_array should be a 1-d array
        # Input array is an already formed ndarray instance
        # We need first to cast to be our class type
        obj = np.asarray(input_array).view(cls)
        obj.MAD_idx = None       # boolean mask of values within +/- 3 scaled MAD (MADStats)
        obj.MAD_mean = None      # statistics of the trimmed/signal values
        obj.MAD_median = None
        obj.MAD_std = None
        # obj.signal_val = None
        # obj.signal_n = None
        obj.signal_array = None  # values above the estimated background (StatisticOutput)
        return obj
    def __array_finalize__(self, obj):
        # Propagate the custom attributes through views/slices, per the
        # numpy subclassing protocol.
        if obj is None: return
        self.MAD_idx = getattr(obj, 'MAD_idx', None)
        self.MAD_mean = getattr(obj, 'MAD_mean', None)
        self.MAD_median = getattr(obj, 'MAD_median', None)
        self.MAD_std = getattr(obj, 'MAD_std', None)
        # self.signal_val = getattr(obj, 'signal_val', None)
        # self.signal_n = getattr(obj, 'signal_n', None)
        self.signal_array = getattr(obj, 'signal_array', None)
    # =============================================================================================
    # ==== The following functions represent new functions developed from =========================
    # ==== StatisticOutput and HistWithOutliers. ==================================================
    # =============================================================================================
    def MADStats(self):
        """
        Trim values outside median +/- 3 scaled MAD and store
        mean/median/std of the kept values in the MAD_* attributes.
        NOTE(review): the degenerate branch returns a 4-element tuple-like
        list while the normal path returns None — callers should not rely
        on the return value.
        """
        # 1.482 is the consistency factor making the MAD comparable to a
        # standard deviation for normally distributed data.
        mad = lambda x : 1.482 * np.median(abs(x - np.median(x)))
        if self.size <= 3:
            print('WARNING: there are too few Z records (<= 3). Aborting the calculation.')
            return [], np.nan, np.nan, np.nan
        else:
            val_median = np.median(self)
            val_mad = mad(self)
            lbound = val_median - 3. * val_mad
            ubound = val_median + 3. * val_mad
            idx = np.logical_and(self >= lbound, self <= ubound)
            self.MAD_idx = idx
            self.MAD_mean = np.mean(self[idx])
            self.MAD_median = np.median(self[idx])
            self.MAD_std = np.std(self[idx], ddof=1)
    def MADHist(self, pngname):
        """
        Save a histogram PNG: all values in red, MAD-trimmed inliers
        (MAD_idx, set by MADStats) overlaid in blue.
        """
        # At most 201 bins; roughly one bin per four samples.
        nbins = len(self) // 4 + 1
        nbins = 201 if nbins > 201 else nbins
        bins = np.linspace(min(self), max(self), nbins)
        plt.hist(self, bins=bins, color='xkcd:red')
        plt.hist(self[self.MAD_idx], bins=bins, color='xkcd:blue')
        plt.ylabel('N')
        plt.xlabel('Value (pixel value unit)')
        plt.savefig(pngname, format='png')
        plt.cla()
    # =============================================================================================
    # ==== The functions above represent new functions developed from =============================
    # ==== StatisticOutput and HistWithOutliers. ==================================================
    # =============================================================================================
    # =============================================================================================
    # ==== The following functions are designed firstly for the functions in the class XYZFile ====
    # ==== and later is has modified to a QGIS processing scripts called MAD_outlier_filter.py ====
    # ==== now we have copied them back. ==========================================================
    # =============================================================================================
    # <NAME> on Oct 25, 2018, added the background correction
    # the default of mad_multiplier was 3.0
    # background correction redesigned on Nov 9, 2018 using more information from the PX
    def StatisticOutput(self, plot=True, pngname=None, ini=None):
        """
        Peak/background separation on the value histogram: estimate the
        background with backcor(), keep histogram bins whose counts exceed
        background + mad_multiplier * noise, expand them back into
        signal_array, and store its mean/median/std in the MAD_* attributes.
        Optionally saves diagnostic PNGs and pickles self.
        Relies on module-level helpers fill_with_zero() and backcor()
        defined elsewhere in this file.
        """
        mad = lambda x : 1.482 * np.median(abs(x - np.median(x)))
        if self.size == 0:
            print('WARNING: there is no Z records.')
            return [], np.nan, np.nan, np.nan
        else:
            # if ini is not None:
            #     ref_raster = SingleRaster(ini.imagepair['image1'])
            #     -> to be continued
            mad_multiplier = ini.noiseremoval['peak_detection']
            uniq, uniq_n = np.unique(self, return_counts=True)
            # Fill histogram gaps so the background fit sees a regular grid.
            uniq, uniq_n = fill_with_zero(uniq, uniq_n, ini.pxsettings['oversampling'])
            uniq_n_est, _, _ = backcor(uniq, uniq_n, order=ini.noiseremoval['backcor_order'])
            background_mad = mad(uniq_n - uniq_n_est) # this is actually the noise level
            if background_mad == 0:
                background_mad = np.median(abs(uniq_n - uniq_n_est))
                print("Use the median of abs(uniq_n - uniq_n_est) as one SNR level since mad = 0")
            background_threshold = uniq_n_est + mad_multiplier * background_mad
            signal_idx = np.argwhere(uniq_n >= background_threshold)
            signal_idx = np.ndarray.flatten(signal_idx)
            signal_val = uniq[signal_idx]
            # self.signal_val = uniq[signal_idx]
            signal_n = uniq_n[signal_idx]
            # self.signal_n = uniq_n[signal_idx]
            # Rebuild the retained samples from (value, count) pairs.
            self.signal_array = np.repeat(signal_val, signal_n.astype(int))
            self.MAD_mean = self.signal_array.mean()
            self.MAD_median = np.median(self.signal_array)
            self.MAD_std = self.signal_array.std(ddof=1)
            # offset_median = np.median(self.signal_array)
            # offset_mad = mad(self.signal_array)
            # if offset_mad == 0:
            #     # the case when over half of the numbers are at the median number,
            #     # we use the Median absolute deviation around the mean instead of around the median.
            #     offset_mad = 1.482 * np.median(abs(self.signal_array - np.mean(self.signal_array)))
            # lbound = offset_median - mad_multiplier * offset_mad
            # ubound = offset_median + mad_multiplier * offset_mad
            # self.MAD_idx = np.logical_and(self.signal_array > lbound, self.signal_array < ubound)
            # trimmed_numlist = self.signal_array[self.MAD_idx]
            # self.MAD_mean = trimmed_numlist.mean()
            # self.MAD_median = np.median(trimmed_numlist)
            # self.MAD_std = trimmed_numlist.std(ddof=1)
            if plot == True and pngname is not None:
                self.VerifyBackcor(pngname, uniq, uniq_n, uniq_n_est, background_threshold)
                self.HistWithOutliers(pngname)
                # Pickle the array alongside the PNG for later inspection.
                pickle.dump(self, open(pngname.replace('.png', '.p'), 'wb'))
            # return idx2, trimmed_numlist.mean(), np.median(trimmed_numlist), trimmed_numlist.std(ddof=1)
    def VerifyBackcor(self, pngname, uniq, uniq_n, uniq_n_est, background_threshold):
        """
        Save a diagnostic PNG ('<pngname>-backcor.png') showing the raw
        histogram, the fitted background, and the detection threshold.
        """
        import matplotlib.pyplot as plt
        pngname = pngname.replace('.png', '-backcor.png')
        plt.plot(uniq, uniq_n, label='Histogram', color='xkcd:plum')
        plt.plot(uniq, uniq_n_est, label='Background', color='xkcd:lightgreen')
        plt.plot(uniq, background_threshold, label='Detection Threshold', color='xkcd:coral')
        # plt.xlim([min(uniq), max(uniq)])
        plt.ylabel('N')
        plt.xlabel('offset (pixels)')
        plt.legend(loc='best')
        plt.savefig(pngname, format='png', dpi=200)
        plt.cla()
    def HistWithOutliers(self, pngname, histogram_bound=10):
        """
        Save a histogram PNG: all values in red, detected signal values
        (signal_array, set by StatisticOutput) in blue.  The x-range is
        clipped to +/- histogram_bound unless the data lie beyond it; the
        title reports how many points fall outside each edge.
        """
        import matplotlib.pyplot as plt
        nbins = len(self) // 4 + 1
        nbins = 201 if nbins > 201 else nbins
        lbound = min(self) if (min(self) >= -histogram_bound) or (np.mean(self) < -histogram_bound) else -histogram_bound
        rbound = max(self) if (max(self) <= histogram_bound) or (np.mean(self) > histogram_bound) else histogram_bound
        if lbound >= rbound:
            # Degenerate clip window; fall back to the full data range.
            lbound = min(self)
            rbound = max(self)
        bins = np.linspace(lbound, rbound, nbins)
        # trimmed_numlist = self.signal_array[self.MAD_idx]
        trimmed_numlist = self.signal_array
        N_outside_lbound_red = int(sum(self < lbound))
        N_outside_rbound_red = int(sum(self > rbound))
        N_outside_lbound_blue = int(sum(trimmed_numlist < lbound))
        N_outside_rbound_blue = int(sum(trimmed_numlist > rbound))
        title_str = '[Red|Blue] L outside: [{}|{}] R outside: [{}|{}]'.format(N_outside_lbound_red, N_outside_lbound_blue, N_outside_rbound_red, N_outside_rbound_blue)
        # plot histograms
        plt.hist(self, bins=bins, color=[0.95, 0.25, 0.1])
        plt.hist(trimmed_numlist, bins=bins, color=[0.1, 0.25, 0.95])
        plt.ylabel('N')
        plt.xlabel('offset (pixels)')
        plt.title(title_str)
        plt.savefig(pngname, format='png', dpi=200)
        plt.cla()
    # =============================================================================================
    # ==== The functions above are designed firstly for the functions in the class XYZFile ========
    # ==== and later is has modified to a QGIS processing scripts called MAD_outlier_filter.py ====
    # ==== now we have copied them back. ==========================================================
    # =============================================================================================
class XYZFile:
    def __init__(self, fpath=None, refpts_path=None, dem_path=None):
        # fpath:       path of the .xyz text file to load (see Read)
        # refpts_path: path of the reference-points file (bookkeeping only)
        # dem_path:    path of the DEM the points were sampled from
        self.fpath = fpath
        self.refpts_path = refpts_path
        self.dem_path = dem_path
        self.data = None             # array loaded by Read()
        self.diffval = None          # column-4 minus column-3 height differences
        self.diffval_trimmed = None  # diffval after 3-MAD trimming
    def Read(self):
        """
        Load the whitespace-delimited .xyz file at self.fpath into self.data.
        self.data will be usually a 3- or 4-column np.array
            column 1: easting
            column 2: northing
            column 3: height of the 1st group (usually reference points)
            column 4: height of the 2nd group (usually DEM points made from grdtrack)
        """
        self.data = np.loadtxt(self.fpath)
    def StatisticOutput(self, pngname):
        """
        Compute DEM-minus-reference height differences, trim outliers at
        median +/- 3 scaled MAD, plot a histogram, and return a row for
        CsvTable.SaveData.  Requires Read() to have been called.
        """
        # for getUncertaintyDEM
        # 1.482 scales the MAD to be comparable to a standard deviation
        # for normally distributed data.
        mad = lambda x : 1.482 * np.median(abs(x - np.median(x)))
        if self.data.size == 0:
            print('NOTE: ' + self.dem_path + ' does not cover any ref points.')
            return [self.dem_path, '', '', '', '', '', '', self.refpts_path]
        elif self.data.shape[1] == 4:
            # NOTE(review): only column 4 (DEM heights) is NaN-filtered;
            # NaNs in column 3 would poison the statistics — confirm the
            # reference column is guaranteed NaN-free.
            idx = ~np.isnan(self.data[:, 3])
            self.diffval = self.data[idx, 3] - self.data[idx, 2]
            offset_median = np.median(self.diffval)
            offset_mad = mad(self.diffval)
            lbound = offset_median - 3. * offset_mad
            ubound = offset_median + 3. * offset_mad
            idx2 = np.logical_and(self.diffval > lbound, self.diffval < ubound)
            self.diffval_trimmed = self.diffval[idx2]
            # The return value is ready for CsvTable.SaveData method.
            # ['filename', 'date', 'uncertainty', 'mean_offset_wrt_refpts', \
            #  'trimmed_N', 'trimming_lb', 'trimming_up', 'refpts_file']
            # 'date' is an empty string since we don't specify any date string in .xyz file.
            self.HistWithOutliers(pngname)
            return [self.dem_path, '', self.diffval_trimmed.std(ddof=1), self.diffval_trimmed.mean(), \
                    len(self.diffval_trimmed), lbound, ubound, self.refpts_path]
        elif self.data.shape[1] == 3:
            print("Not yet designed.")
            return []
        else:
            print("This program currently doesn't support the xyz file whose column number is not 3 or 4.")
            return []
def HistWithOutliers(self, pngname):
# for getUncertaintyDEM
import matplotlib.pyplot as plt
nbins = len(self.diffval) // 5
nbins = 200 if nbins > 200 | |
# Repository: rambasnet/MAKE2
#Boa:MiniFrame:MDIChildTextMining
#-----------------------------------------------------------------------------
# Name: MDIChildEmails.py
# Purpose:
#
# Author: <NAME>
#
# Created: 2007/11/01
# Last Modified: 7/2/2009
# RCS-ID: $Id: MDIChildEmails.py,v 1.5 2008/03/17 04:18:38 rbasnet Exp $
# Copyright: (c) 2007
# Licence: All Rights Reserved.
#-----------------------------------------------------------------------------
import wx, sys, os, time
import re, string
import binascii
from wx.lib.anchors import LayoutAnchors
import wx.lib.mixins.listctrl as listmix
from SqliteDatabase import *
import Globals
import PlatformMethods
import CommonFunctions
import Constants
import DBFunctions
import EmailUtilities
import images
import dlgEmailMessageViewer
from Search import *
def create(parent):
    # Boa-constructor factory function: instantiates this MDI child frame.
    return MDIChildTextMining(parent)
class CustomListCtrl(wx.ListCtrl, listmix.ListCtrlAutoWidthMixin):
    """List control whose last column auto-resizes to fill the widget width
    (behavior supplied by ListCtrlAutoWidthMixin)."""
    def __init__(self, parent, ID, pos=wx.DefaultPosition,
            size=wx.DefaultSize, style=0):
        wx.ListCtrl.__init__(self, parent, ID, pos, size, style)
        listmix.ListCtrlAutoWidthMixin.__init__(self)
# Boa-generated module-level widget IDs: 27 unique wx IDs unpacked in one
# assignment, referenced by _init_ctrls below.
[wxID_MDICHILDTEXTMINING, wxID_MDICHILDTEXTMININGBTNBATCHSEARCH,
 wxID_MDICHILDTEXTMININGBTNDISPLAYTOPKEYWORDS,
 wxID_MDICHILDTEXTMININGBTNDISPLAYTOPPHONES,
 wxID_MDICHILDTEXTMININGBTNEXPORTALLTERMS,
 wxID_MDICHILDTEXTMININGBTNEXPORTSEARCHRESULTS,
 wxID_MDICHILDTEXTMININGBTNEXPORTSTEMMEDTERMS,
 wxID_MDICHILDTEXTMININGBTNEXPORTTOPKEYWORDS,
 wxID_MDICHILDTEXTMININGBTNEXPORTTOPPHONES,
 wxID_MDICHILDTEXTMININGBTNPREPROCESSING,
 wxID_MDICHILDTEXTMININGBTNSEARCHDOCUMENTS,
 wxID_MDICHILDTEXTMININGCHOICEPAGENUM, wxID_MDICHILDTEXTMININGLBLTOTALRESULTS,
 wxID_MDICHILDTEXTMININGNOTEBOOKTEXTMINING,
 wxID_MDICHILDTEXTMININGPANPREPROCESSING, wxID_MDICHILDTEXTMININGPANREPORTS,
 wxID_MDICHILDTEXTMININGPANSEARCH, wxID_MDICHILDTEXTMININGPANTOPTERMS,
 wxID_MDICHILDTEXTMININGSTATICBOX1, wxID_MDICHILDTEXTMININGSTATICBOX2,
 wxID_MDICHILDTEXTMININGSTATICBOXSEARCH, wxID_MDICHILDTEXTMININGSTATICTEXT2,
 wxID_MDICHILDTEXTMININGSTATICTEXT3, wxID_MDICHILDTEXTMININGSTATICTEXT5,
 wxID_MDICHILDTEXTMININGSTATICTEXT6, wxID_MDICHILDTEXTMININGTXTTOPKEYWORDS,
 wxID_MDICHILDTEXTMININGTXTTOPPHONES,
] = [wx.NewId() for _init_ctrls in range(27)]
class MDIChildTextMining(wx.MDIChildFrame, listmix.ColumnSorterMixin):
    def _init_coll_notebookTextMining_Pages(self, parent):
        """Add the four notebook pages; 'Reports' is selected initially."""
        # generated method, don't edit
        parent.AddPage(imageId=-1, page=self.panPreprocessing, select=False,
              text=u'Preprocessing')
        parent.AddPage(imageId=-1, page=self.panSearch, select=False,
              text=u'Search')
        parent.AddPage(imageId=-1, page=self.panTopTerms, select=False,
              text=u'Top: Terms | Phone Numbers')
        parent.AddPage(imageId=-1, page=self.panReports, select=True,
              text=u'Reports')
    def _init_ctrls(self, prnt):
        """Boa-generated layout: builds the frame, the notebook, its four
        panels (Preprocessing, Search, Top Terms, Reports) and all child
        widgets with absolute positions, then registers the pages."""
        # generated method, don't edit
        wx.MDIChildFrame.__init__(self, id=wxID_MDICHILDTEXTMINING,
              name=u'MDIChildTextMining', parent=prnt, pos=wx.Point(148, 137),
              size=wx.Size(1048, 714), style=wx.DEFAULT_FRAME_STYLE,
              title=u'Text Mining')
        self.SetClientSize(wx.Size(1040, 680))
        self.SetBackgroundColour(wx.Colour(125, 152, 221))
        self.SetAutoLayout(True)
        self.notebookTextMining = wx.Notebook(id=wxID_MDICHILDTEXTMININGNOTEBOOKTEXTMINING,
              name=u'notebookTextMining', parent=self, pos=wx.Point(4, 4),
              size=wx.Size(1032, 672), style=0)
        self.notebookTextMining.SetConstraints(LayoutAnchors(self.notebookTextMining,
              True, True, True, True))
        self.panSearch = wx.Panel(id=wxID_MDICHILDTEXTMININGPANSEARCH,
              name=u'panSearch', parent=self.notebookTextMining, pos=wx.Point(0,
              0), size=wx.Size(1024, 646), style=wx.TAB_TRAVERSAL)
        self.panSearch.SetBackgroundColour(wx.Colour(225, 236, 255))
        self.panSearch.SetAutoLayout(True)
        self.panSearch.SetAutoLayout(True)
        self.panPreprocessing = wx.Panel(id=wxID_MDICHILDTEXTMININGPANPREPROCESSING,
              name=u'panPreprocessing', parent=self.notebookTextMining,
              pos=wx.Point(0, 0), size=wx.Size(1024, 646),
              style=wx.TAB_TRAVERSAL)
        self.panPreprocessing.SetBackgroundColour(wx.Colour(225, 236, 255))
        self.staticText2 = wx.StaticText(id=wxID_MDICHILDTEXTMININGSTATICTEXT2,
              label='Preprocessing:', name='staticText2',
              parent=self.panPreprocessing, pos=wx.Point(16, 16),
              size=wx.Size(97, 16), style=0)
        self.staticText2.SetForegroundColour(wx.Colour(0, 0, 187))
        self.staticText2.SetFont(wx.Font(10, wx.SWISS, wx.NORMAL, wx.BOLD,
              False, u'Tahoma'))
        self.staticText2.SetConstraints(LayoutAnchors(self.staticText2, True,
              True, False, False))
        self.btnPreprocessing = wx.Button(id=wxID_MDICHILDTEXTMININGBTNPREPROCESSING,
              label=u'Text Preprocessing...', name=u'btnPreprocessing',
              parent=self.panPreprocessing, pos=wx.Point(144, 8),
              size=wx.Size(168, 24), style=0)
        self.btnPreprocessing.Bind(wx.EVT_BUTTON, self.OnBtnPreprocessingButton,
              id=wxID_MDICHILDTEXTMININGBTNPREPROCESSING)
        self.panTopTerms = wx.Panel(id=wxID_MDICHILDTEXTMININGPANTOPTERMS,
              name=u'panTopTerms', parent=self.notebookTextMining,
              pos=wx.Point(0, 0), size=wx.Size(1024, 646),
              style=wx.TAB_TRAVERSAL)
        self.panTopTerms.SetBackgroundColour(wx.Colour(225, 236, 255))
        self.panTopTerms.SetAutoLayout(True)
        self.staticBox1 = wx.StaticBox(id=wxID_MDICHILDTEXTMININGSTATICBOX1,
              label=u'Terms', name='staticBox1', parent=self.panTopTerms,
              pos=wx.Point(8, 8), size=wx.Size(392, 632), style=0)
        self.staticBox1.SetConstraints(LayoutAnchors(self.staticBox1, True,
              True, False, True))
        self.staticBox2 = wx.StaticBox(id=wxID_MDICHILDTEXTMININGSTATICBOX2,
              label='Phone Numbers', name='staticBox2', parent=self.panTopTerms,
              pos=wx.Point(416, 8), size=wx.Size(424, 632), style=0)
        self.staticBox2.SetConstraints(LayoutAnchors(self.staticBox2, True,
              True, False, True))
        self.btnDisplayTopPhones = wx.Button(id=wxID_MDICHILDTEXTMININGBTNDISPLAYTOPPHONES,
              label='Display', name='btnDisplayTopPhones',
              parent=self.panTopTerms, pos=wx.Point(592, 32), size=wx.Size(75,
              23), style=0)
        self.btnDisplayTopPhones.Bind(wx.EVT_BUTTON,
              self.OnBtnDisplayTopPhonesButton,
              id=wxID_MDICHILDTEXTMININGBTNDISPLAYTOPPHONES)
        self.staticText3 = wx.StaticText(id=wxID_MDICHILDTEXTMININGSTATICTEXT3,
              label='Top:', name='staticText3', parent=self.panTopTerms,
              pos=wx.Point(16, 32), size=wx.Size(28, 16), style=0)
        self.staticText3.SetForegroundColour(wx.Colour(0, 0, 187))
        self.staticText3.SetFont(wx.Font(10, wx.SWISS, wx.NORMAL, wx.BOLD,
              False, u'Tahoma'))
        self.staticText3.SetConstraints(LayoutAnchors(self.staticText3, True,
              True, False, False))
        self.txtTopPhones = wx.TextCtrl(id=wxID_MDICHILDTEXTMININGTXTTOPPHONES,
              name='txtTopPhones', parent=self.panTopTerms, pos=wx.Point(472,
              32), size=wx.Size(100, 21), style=0, value='')
        self.txtTopKeywords = wx.TextCtrl(id=wxID_MDICHILDTEXTMININGTXTTOPKEYWORDS,
              name='txtTopKeywords', parent=self.panTopTerms, pos=wx.Point(48,
              32), size=wx.Size(100, 21), style=0, value='')
        self.staticText6 = wx.StaticText(id=wxID_MDICHILDTEXTMININGSTATICTEXT6,
              label='Top:', name='staticText6', parent=self.panTopTerms,
              pos=wx.Point(440, 32), size=wx.Size(28, 16), style=0)
        self.staticText6.SetForegroundColour(wx.Colour(0, 0, 187))
        self.staticText6.SetFont(wx.Font(10, wx.SWISS, wx.NORMAL, wx.BOLD,
              False, u'Tahoma'))
        self.staticText6.SetConstraints(LayoutAnchors(self.staticText6, True,
              True, False, False))
        self.btnExportTopKeywords = wx.Button(id=wxID_MDICHILDTEXTMININGBTNEXPORTTOPKEYWORDS,
              label='Export...', name='btnExportTopKeywords',
              parent=self.panTopTerms, pos=wx.Point(264, 32), size=wx.Size(75,
              23), style=0)
        self.btnExportTopKeywords.Bind(wx.EVT_BUTTON,
              self.OnBtnExportTopKeywordsButton,
              id=wxID_MDICHILDTEXTMININGBTNEXPORTTOPKEYWORDS)
        self.btnDisplayTopKeywords = wx.Button(id=wxID_MDICHILDTEXTMININGBTNDISPLAYTOPKEYWORDS,
              label='Display', name='btnDisplayTopKeywords',
              parent=self.panTopTerms, pos=wx.Point(168, 32), size=wx.Size(75,
              23), style=0)
        self.btnDisplayTopKeywords.Bind(wx.EVT_BUTTON,
              self.OnBtnDisplayTopKeywordsButton,
              id=wxID_MDICHILDTEXTMININGBTNDISPLAYTOPKEYWORDS)
        self.btnExportTopPhones = wx.Button(id=wxID_MDICHILDTEXTMININGBTNEXPORTTOPPHONES,
              label='Export...', name='btnExportTopPhones',
              parent=self.panTopTerms, pos=wx.Point(688, 32), size=wx.Size(75,
              23), style=0)
        self.btnExportTopPhones.Bind(wx.EVT_BUTTON,
              self.OnBtnExportTopPhonesButton,
              id=wxID_MDICHILDTEXTMININGBTNEXPORTTOPPHONES)
        self.panReports = wx.Panel(id=wxID_MDICHILDTEXTMININGPANREPORTS,
              name=u'panReports', parent=self.notebookTextMining,
              pos=wx.Point(0, 0), size=wx.Size(1024, 646),
              style=wx.TAB_TRAVERSAL)
        self.panReports.SetBackgroundColour(wx.Colour(225, 236, 255))
        self.panReports.SetAutoLayout(True)
        self.staticText5 = wx.StaticText(id=wxID_MDICHILDTEXTMININGSTATICTEXT5,
              label=u'Page', name='staticText5', parent=self.panSearch,
              pos=wx.Point(8, 72), size=wx.Size(24, 13), style=0)
        self.choicePageNum = wx.Choice(choices=[],
              id=wxID_MDICHILDTEXTMININGCHOICEPAGENUM, name=u'choicePageNum',
              parent=self.panSearch, pos=wx.Point(40, 72), size=wx.Size(64, 21),
              style=0)
        self.choicePageNum.Bind(wx.EVT_CHOICE, self.OnChoicePageNumChoice,
              id=wxID_MDICHILDTEXTMININGCHOICEPAGENUM)
        self.lblTotalResults = wx.StaticText(id=wxID_MDICHILDTEXTMININGLBLTOTALRESULTS,
              label=u'of 1: Showing 0 Results', name=u'lblTotalResults',
              parent=self.panSearch, pos=wx.Point(112, 80), size=wx.Size(113,
              13), style=0)
        self.staticBoxSearch = wx.StaticBox(id=wxID_MDICHILDTEXTMININGSTATICBOXSEARCH,
              label='Search Documents Based on Keywords',
              name=u'staticBoxSearch', parent=self.panSearch, pos=wx.Point(8,
              8), size=wx.Size(656, 56), style=0)
        self.btnSearchDocuments = wx.Button(id=wxID_MDICHILDTEXTMININGBTNSEARCHDOCUMENTS,
              label='Search', name='btnSearchDocuments', parent=self.panSearch,
              pos=wx.Point(592, 32), size=wx.Size(59, 23), style=0)
        self.btnSearchDocuments.Bind(wx.EVT_BUTTON,
              self.OnBtnSearchDocumentsButton,
              id=wxID_MDICHILDTEXTMININGBTNSEARCHDOCUMENTS)
        self.btnExportSearchResults = wx.Button(id=wxID_MDICHILDTEXTMININGBTNEXPORTSEARCHRESULTS,
              label='Export Search Results', name='btnExportSearchResults',
              parent=self.panSearch, pos=wx.Point(872, 56), size=wx.Size(144,
              23), style=0)
        self.btnExportSearchResults.SetConstraints(LayoutAnchors(self.btnExportSearchResults,
              False, True, True, False))
        self.btnExportSearchResults.Bind(wx.EVT_BUTTON,
              self.OnBtnExportSearchResultsButton,
              id=wxID_MDICHILDTEXTMININGBTNEXPORTSEARCHRESULTS)
        self.btnExportAllTerms = wx.Button(id=wxID_MDICHILDTEXTMININGBTNEXPORTALLTERMS,
              label=u'Export All Terms', name=u'btnExportAllTerms',
              parent=self.panReports, pos=wx.Point(16, 16), size=wx.Size(168,
              24), style=0)
        self.btnExportAllTerms.Bind(wx.EVT_BUTTON,
              self.OnBtnExportAllTermsButton,
              id=wxID_MDICHILDTEXTMININGBTNEXPORTALLTERMS)
        self.btnExportStemmedTerms = wx.Button(id=wxID_MDICHILDTEXTMININGBTNEXPORTSTEMMEDTERMS,
              label=u'Export Stemmed Terms', name=u'btnExportStemmedTerms',
              parent=self.panReports, pos=wx.Point(16, 56), size=wx.Size(168,
              24), style=0)
        self.btnExportStemmedTerms.Bind(wx.EVT_BUTTON,
              self.OnBtnExportStemmedTerms,
              id=wxID_MDICHILDTEXTMININGBTNEXPORTSTEMMEDTERMS)
        self.btnBatchSearch = wx.Button(id=wxID_MDICHILDTEXTMININGBTNBATCHSEARCH,
              label=u'Keywords Search Report...', name='btnBatchSearch',
              parent=self.panSearch, pos=wx.Point(872, 8), size=wx.Size(144,
              23), style=0)
        self.btnBatchSearch.Bind(wx.EVT_BUTTON, self.OnBtnBatchSearchButton,
              id=wxID_MDICHILDTEXTMININGBTNBATCHSEARCH)
        self._init_coll_notebookTextMining_Pages(self.notebookTextMining)
def __init__(self, parent):
self._init_ctrls(parent)
self.SetIcon(images.getMAKE2Icon())
self.CreateSettingsTable()
self.AddSearchControl()
self.CreateTopKeywordsListControl()
self.CreateTopPhonesListControl()
self.AddResultsListControl()
self.Stopwords = []
try:
self.ReadStopwordsFromDB()
except:
pass
self.search = Search(Globals.TextCatFileName, self.Stopwords)
#self.AddKeywordsToTree()
def CreateSettingsTable(self):
db = SqliteDatabase(Globals.TextCatFileName)
if not db.OpenConnection():
return
query = "CREATE TABLE IF NOT EXISTS " + Constants.TextCatSettingsTable + " ( "
query += "Stemmer text, DirList text, CategoryList text )"
db.ExecuteNonQuery(query)
db.CloseConnection()
return None
    def OnBtnPreprocessingButton(self, event):
        # NOTE(review): dead code — this method is re-defined later in the
        # same class (the frmEmailPreprocessing version), which silently
        # replaces this binding.  One of the two definitions should be
        # removed or renamed.
        import frmTextPreprocessing
        textPreprocessing = frmTextPreprocessing.create(self)
        textPreprocessing.Show()
        event.Skip()
def OnBtnExportAllTermsButton(self, event):
db = SqliteDatabase(Globals.TextCatFileName)
if not db.OpenConnection():
return
dlg = wx.FileDialog(self, "Save Words List", ".", "", "*.csv", wx.SAVE)
try:
if dlg.ShowModal() == wx.ID_OK:
fileName = dlg.GetPath()
busy = wx.BusyInfo("It might take some time depending on the total number of unique words...")
wx.Yield()
fout = open(fileName, 'wb')
#query = "select ID, `Word` from " + Constants.TextCatWordsTable + " order by `ID`; "
query = "select words.word, count(WordLocation.WordID) as total from words left join WordLocation on words.rowid = wordlocation.wordid "
query += "group by wordlocation.wordid order by total desc;"
#print 'before'
rows = db.FetchAllRows(query)
#rint 'after'
i = 1
for row in rows:
fout.write(PlatformMethods.Encode(row[0]))
fout.write(" (%d)"%row[1])
fout.write(", ,")
i += 1
if i == 4:
i = 0
fout.write("\n")
db.CloseConnection()
fout.close()
except Exception, value:
db.CloseConnection()
if fout:
fout.close()
CommonFunctions.ShowErrorMessage(self, "Failed to Export Word List. Error: %s"%value)
finally:
dlg.Destroy()
db = None
def AddPageNumbersToPageChoice(self, totalResults):
self.TotalPages = (totalResults/Constants.MaxObjectsPerPage)
if (totalResults%Constants.MaxObjectsPerPage) > 0:
self.TotalPages += 1
self.choicePageNum.Clear()
for page in range(1, self.TotalPages+1):
self.choicePageNum.Append(str(page))
    def OnBtnPreprocessingButton(self, event):
        # NOTE(review): this redefines OnBtnPreprocessingButton declared
        # earlier in the class (which opened frmTextPreprocessing); only this
        # email variant survives at runtime.  Looks like a copy-paste
        # leftover -- confirm which dialog is actually intended.
        import frmEmailPreprocessing
        textPreprocessing = frmEmailPreprocessing.create(self)
        textPreprocessing.Show()
        event.Skip()
def CreateTopKeywordsListControl(self):
"""
self.listCtrl1 = wx.ListCtrl(id=wxID_MDICHILDEMAILSLISTCTRL1,
name='listCtrl1', parent=self.panSearch, pos=wx.Point(16, 64),
size=wx.Size(376, 568), style=wx.LC_ICON)
"""
self.listTopKeywords = CustomListCtrl(self.panTopTerms, wx.NewId(),
pos=wx.Point(16, 64), size=wx.Size(376, 568),
style=wx.LC_REPORT
| wx.BORDER_NONE
)
self.listTopKeywords.SetConstraints(LayoutAnchors(self.listTopKeywords, True,
True, False, True))
self.listTopKeywords.SetFont(wx.Font(8, wx.SWISS, wx.NORMAL, wx.NORMAL,
False, u'Tahoma'))
self.AddTopKeywordsListColumnHeadings()
def CreateTopPhonesListControl(self):
"""
self.listCtrl1 = wx.ListCtrl(id=wxID_MDICHILDEMAILSLISTCTRL1,
name='listCtrl1', parent=self.panSearch, pos=wx.Point(424, 64),
size=wx.Size(408, 384), style=wx.LC_ICON)
"""
self.listTopPhones = CustomListCtrl(self.panTopTerms, wx.NewId(),
pos=wx.Point(424, 64), size=wx.Size(408, 568),
style=wx.LC_REPORT
| wx.BORDER_NONE
)
self.listTopPhones.SetConstraints(LayoutAnchors(self.listTopPhones, True,
True, False, True))
self.listTopPhones.SetFont(wx.Font(8, wx.SWISS, wx.NORMAL, wx.NORMAL,
False, u'Tahoma'))
self.AddTopPhonesListColumnHeadings()
def AddTopKeywordsListColumnHeadings(self):
#want to add images on the column header..
info = wx.ListItem()
info.m_mask = wx.LIST_MASK_TEXT | wx.LIST_MASK_IMAGE | wx.LIST_MASK_FORMAT
info.m_image = -1
info.m_format = 0
info.m_text = "Term"
self.listTopKeywords.InsertColumnInfo(0, info)
info.m_format = wx.LIST_FORMAT_LEFT
info.m_text = "Occurance"
self.listTopKeywords.InsertColumnInfo(1, info)
def AddTopPhonesListColumnHeadings(self):
#want to add images on the column header..
info = wx.ListItem()
info.m_mask = wx.LIST_MASK_TEXT | wx.LIST_MASK_IMAGE | wx.LIST_MASK_FORMAT
info.m_image = -1
info.m_format = 0
info.m_text = "Phone"
self.listTopPhones.InsertColumnInfo(0, info)
info.m_format = wx.LIST_FORMAT_LEFT
info.m_text = "Occurance"
self.listTopPhones.InsertColumnInfo(1, info)
# Used by the ColumnSorterMixin, see wx/lib/mixins/listctrl.py
def GetListCtrl(self):
return self.listMessages
# Used by the ColumnSorterMixin, see wx/lib/mixins/listctrl.py
def GetSortImages(self):
return (self.sm_dn, self.sm_up)
    def OnListTopKeywordsColClick(self, event):
        # Header-click sorting is not implemented; just pass the event on.
        event.Skip()
    def OnListTopPhonesColClick(self, event):
        # Header-click sorting is not implemented; just pass the event on.
        event.Skip()
def AddTopKeywordsToListView(self, top=20):
self.SetCursor(wx.HOURGLASS_CURSOR)
self.listTopKeywords.DeleteAllItems()
totalKeywords = 0
#MsgDict = {}
if top <=0:
limit = ""
else:
limit = "limit %d"%top
query = "select Word, Frequency from Words order by Frequency desc %s;"%limit
self.txtTopKeywords.SetValue(str(top))
db = SqliteDatabase(Globals.TextCatFileName)
if not db.OpenConnection():
return
rows = db.FetchAllRows(query)
for row in rows:
totalKeywords += 1
listItem = []
#listItem.append(PlatformMethods.Decode(row[0]))
#listItem.append(row[1])
#MsgDict[totalKeywords] = tuple(listItem)
index = self.listTopKeywords.InsertStringItem(sys.maxint, PlatformMethods.Decode(row[0]))
self.listTopKeywords.SetStringItem(index, 1, PlatformMethods.Decode(row[1]))
self.listTopKeywords.SetItemData(index, totalKeywords)
self.listTopKeywords.SetColumnWidth(0, 250)
self.listTopKeywords.SetColumnWidth(1, 70)
self.SetCursor(wx.STANDARD_CURSOR)
def AddTopPhonesToListView(self, top=20):
db = SqliteDatabase(Globals.TextCatFileName)
if not db.OpenConnection():
return
self.SetCursor(wx.HOURGLASS_CURSOR)
self.listTopPhones.DeleteAllItems()
totalKeywords = 0
#MsgDict = {}
if top <= 0:
limit = ""
else:
limit = "limit %d"%top
query = "select Phone, sum(Frequency) as total from Phones "
query += "group by Phone order by total desc %s;"%limit
self.txtTopPhones.SetValue(str(top))
rows = db.FetchAllRows(query)
for row in rows:
totalKeywords += 1
index = self.listTopPhones.InsertStringItem(sys.maxint, PlatformMethods.Decode(row[0]))
self.listTopPhones.SetStringItem(index, 1, PlatformMethods.Decode(row[1]))
self.listTopPhones.SetItemData(index, totalKeywords)
self.listTopPhones.SetColumnWidth(0, 250)
self.listTopPhones.SetColumnWidth(1, 120)
self.SetCursor(wx.STANDARD_CURSOR)
    def OnBtnRefreshMessagesButton(self, event):
        # Refresh is currently disabled; the previous implementation is kept
        # below for reference.
        #self.AddMessagesToListView()
        pass
def OnListFilesDoubleClick(self, event):
if self.IsMessageSelected():
msgV = dlgEmailMessageViewer.create(self, self.sender, self.recipient, self.date, self.subject)
msgV.ShowModal()
event.Skip()
def IsMessageSelected(self):
self.index = self.listMessages.GetFirstSelected()
if self.index >=0:
li = self.listMessages.GetItem(self.index, 0)
self.sender = li.GetText()
li = self.listMessages.GetItem(self.index, 1)
self.recipient = li.GetText()
li = self.listMessages.GetItem(self.index, 2)
self.date = | |
event_object.regvalue
else:
# TODO: Add a function for this to avoid repeating code.
keys = event_object.GetAttributes().difference(
event_object.COMPARE_EXCLUDE)
keys.discard(u'offset')
keys.discard(u'timestamp_desc')
attributes = {}
for key in keys:
attributes[key] = getattr(event_object, key)
for attribute, value in attributes.items():
self._output_writer.Write(u'\t')
self._output_writer.Write(format_string.format(attribute, value))
self._output_writer.Write(u'\n')
if show_hex and file_entry:
event_object.pathspec = file_entry.path_spec
hexadecimal_output = self._GetEventDataHexDump(event_object)
self.PrintHeader(u'Hexadecimal output from event.', character=u'-')
self._output_writer.Write(hexadecimal_output)
self._output_writer.Write(u'\n')
def _PrintEventHeader(self, event_object, descriptions, exclude_timestamp):
"""Writes a list of strings that contains a header for the event.
Args:
event_object: event object (instance of event.EventObject).
descriptions: list of strings describing the value of the header
timestamp.
exclude_timestamp: boolean. If it is set to True the method
will not include the timestamp in the header.
"""
format_string = self._GetFormatString(event_object)
self._output_writer.Write(u'Key information.\n')
if not exclude_timestamp:
for description in descriptions:
self._output_writer.Write(format_string.format(
description, timelib.Timestamp.CopyToIsoFormat(
event_object.timestamp)))
self._output_writer.Write(u'\n')
if hasattr(event_object, u'keyname'):
self._output_writer.Write(
format_string.format(u'Key Path', event_object.keyname))
self._output_writer.Write(u'\n')
if event_object.timestamp_desc != eventdata.EventTimestamp.WRITTEN_TIME:
self._output_writer.Write(format_string.format(
u'Description', event_object.timestamp_desc))
self._output_writer.Write(u'\n')
self.PrintHeader(u'Data', character=u'+')
def _PrintEventObjectsBasedOnTime(
self, event_objects, file_entry, show_hex=False):
"""Write extracted data from a list of event objects to an output writer.
This function groups together a list of event objects based on timestamps.
If more than one event are extracted with the same timestamp the timestamp
itself is not repeated.
Args:
event_objects: list of event objects (instance of EventObject).
file_entry: optional file entry object (instance of dfvfs.FileEntry).
Defaults to None.
show_hex: optional boolean to indicate that the hexadecimal representation
of the event should be included in the output.
"""
event_objects_and_timestamps = {}
for event_object in event_objects:
timestamp = event_object.timestamp
_ = event_objects_and_timestamps.setdefault(timestamp, [])
event_objects_and_timestamps[timestamp].append(event_object)
list_of_timestamps = sorted(event_objects_and_timestamps.keys())
if len(list_of_timestamps) > 1:
exclude_timestamp_in_header = True
else:
exclude_timestamp_in_header = False
first_timestamp = list_of_timestamps[0]
first_event = event_objects_and_timestamps[first_timestamp][0]
descriptions = set()
for event_object in event_objects_and_timestamps[first_timestamp]:
descriptions.add(getattr(event_object, u'timestamp_desc', u''))
self._PrintEventHeader(
first_event, list(descriptions), exclude_timestamp_in_header)
for event_timestamp in list_of_timestamps:
if exclude_timestamp_in_header:
date_time_string = timelib.Timestamp.CopyToIsoFormat(event_timestamp)
output_text = u'\n[{0:s}]\n'.format(date_time_string)
self._output_writer.Write(output_text)
for event_object in event_objects_and_timestamps[event_timestamp]:
self._PrintEventBody(
event_object, file_entry=file_entry, show_hex=show_hex)
def _PrintParsedRegistryFile(self, parsed_data, registry_helper):
"""Write extracted data from a Registry file to an output writer.
Args:
parsed_data: dict object returned from ParseRegisterFile.
registry_helper: Registry file object (instance of PregRegistryHelper).
"""
self.PrintHeader(u'Registry File', character=u'x')
self._output_writer.Write(u'\n')
self._output_writer.Write(
u'{0:>15} : {1:s}\n'.format(u'Registry file', registry_helper.path))
self._output_writer.Write(
u'{0:>15} : {1:s}\n'.format(
u'Registry file type', registry_helper.file_type))
if registry_helper.collector_name:
self._output_writer.Write(
u'{0:>15} : {1:s}\n'.format(
u'Registry Origin', registry_helper.collector_name))
self._output_writer.Write(u'\n\n')
for key_path, data in iter(parsed_data.items()):
self._PrintParsedRegistryInformation(
key_path, data, registry_helper.file_entry)
self.PrintSeparatorLine()
def _PrintParsedRegistryInformation(
self, key_path, parsed_data, file_entry=None):
"""Write extracted data from a Registry key to an output writer.
Args:
key_path: path of the parsed Registry key.
parsed_data: dict object returned from ParseRegisterFile.
file_entry: optional file entry object (instance of dfvfs.FileEntry).
"""
registry_key = parsed_data.get(u'key', None)
if registry_key:
self._output_writer.Write(u'{0:>15} : {1:s}\n'.format(
u'Key Name', key_path))
elif not self._quiet:
self._output_writer.Write(u'Unable to open key: {0:s}\n'.format(
key_path))
return
else:
return
self._output_writer.Write(
u'{0:>15} : {1:d}\n'.format(
u'Subkeys', registry_key.number_of_subkeys))
self._output_writer.Write(u'{0:>15} : {1:d}\n'.format(
u'Values', registry_key.number_of_values))
self._output_writer.Write(u'\n')
if self._verbose_output:
subkeys = parsed_data.get(u'subkeys', [])
for subkey in subkeys:
self._output_writer.Write(
u'{0:>15} : {1:s}\n'.format(u'Key Name', subkey.path))
key_data = parsed_data.get(u'data', None)
if not key_data:
return
self.PrintParsedRegistryKey(
key_data, file_entry=file_entry, show_hex=self._verbose_output)
def _ScanFileSystem(self, path_resolver):
"""Scans a file system for the Windows volume.
Args:
path_resolver: the path resolver (instance of dfvfs.WindowsPathResolver).
Returns:
True if the Windows directory was found, False otherwise.
"""
result = False
for windows_path in self._WINDOWS_DIRECTORIES:
windows_path_spec = path_resolver.ResolvePath(windows_path)
result = windows_path_spec is not None
if result:
self._windows_directory = windows_path
break
return result
def PrintHeader(self, text, character=u'*'):
"""Prints the header as a line with centered text.
Args:
text: The header text.
character: Optional header line character.
"""
self._output_writer.Write(u'\n')
format_string = u'{{0:{0:s}^{1:d}}}\n'.format(character, self._LINE_LENGTH)
header_string = format_string.format(u' {0:s} '.format(text))
self._output_writer.Write(header_string)
def PrintParsedRegistryKey(self, key_data, file_entry=None, show_hex=False):
"""Write extracted data returned from ParseRegistryKey to an output writer.
Args:
key_data: dict object returned from ParseRegisterKey.
file_entry: optional file entry object (instance of dfvfs.FileEntry).
show_hex: optional boolean to indicate that the hexadecimal representation
of the event should be included in the output.
"""
self.PrintHeader(u'Plugins', character=u'-')
for plugin, event_objects in iter(key_data.items()):
# TODO: make this a table view.
self.PrintHeader(u'Plugin: {0:s}'.format(plugin.plugin_name))
self._output_writer.Write(u'{0:s}\n'.format(plugin.DESCRIPTION))
if plugin.URLS:
self._output_writer.Write(
u'Additional information can be found here:\n')
for url in plugin.URLS:
self._output_writer.Write(u'{0:>17s} {1:s}\n'.format(u'URL :', url))
if not event_objects:
continue
self._PrintEventObjectsBasedOnTime(
event_objects, file_entry, show_hex=show_hex)
self.PrintSeparatorLine()
self._output_writer.Write(u'\n\n')
def GetWindowsRegistryPlugins(self):
"""Build a list of all available Windows Registry plugins.
Returns:
A plugins list (instance of PluginList).
"""
return self._front_end.GetWindowsRegistryPlugins()
  def GetWindowsVolumeIdentifiers(self, scan_node, volume_identifiers):
    """Determines and returns back a list of Windows volume identifiers.

    Args:
      scan_node: the scan node (instance of dfvfs.ScanNode).
      volume_identifiers: list of allowed volume identifiers.

    Returns:
      A list of volume identifiers that have Windows partitions.
    """
    windows_volume_identifiers = []
    for sub_node in scan_node.sub_nodes:
      path_spec = getattr(sub_node, u'path_spec', None)
      if not path_spec:
        continue
      # Only TSK partition path specs are considered; their location is
      # matched against the allowed volume identifiers.
      if path_spec.TYPE_INDICATOR != definitions.TYPE_INDICATOR_TSK_PARTITION:
        continue
      location = getattr(path_spec, u'location', u'')
      if not location:
        continue
      # Strip the leading slash so the location matches identifier form.
      if location.startswith(u'/'):
        location = location[1:]
      if location not in volume_identifiers:
        continue
      # Descend to the deepest sub node to reach the file system path spec.
      selected_node = sub_node
      while selected_node.sub_nodes:
        selected_node = selected_node.sub_nodes[0]
      file_system = resolver.Resolver.OpenFileSystem(selected_node.path_spec)
      path_resolver = windows_path_resolver.WindowsPathResolver(
          file_system, selected_node.path_spec)
      # Keep the volume only if a Windows directory is found on it.
      if self._ScanFileSystem(path_resolver):
        windows_volume_identifiers.append(location)
    return windows_volume_identifiers
def ListPluginInformation(self):
"""Lists Registry plugin information."""
table_view = cli_views.CLITableView(title=u'Supported Plugins')
plugin_list = self._front_end.registry_plugin_list
for plugin_class in plugin_list.GetAllPlugins():
table_view.AddRow([plugin_class.NAME, plugin_class.DESCRIPTION])
table_view.Write(self._output_writer)
  def ParseArguments(self):
    """Parses the command line arguments.

    Returns:
      A boolean value indicating the arguments were successfully parsed.
    """
    self._ConfigureLogging()
    argument_parser = argparse.ArgumentParser(
        description=self.DESCRIPTION, epilog=self.EPILOG, add_help=False,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    self.AddBasicOptions(argument_parser)
    additional_options = argument_parser.add_argument_group(
        u'Additional Options')
    additional_options.add_argument(
        u'-r', u'--restore-points', u'--restore_points',
        dest=u'restore_points', action=u'store_true', default=False,
        help=u'Include restore points in the Registry file locations.')
    self.AddVSSProcessingOptions(additional_options)
    image_options = argument_parser.add_argument_group(u'Image Options')
    image_options.add_argument(
        u'-i', u'--image', dest=self._SOURCE_OPTION, action=u'store',
        type=str, default=u'', metavar=u'IMAGE_PATH', help=(
            u'If the Registry file is contained within a storage media image, '
            u'set this option to specify the path of image file.'))
    self.AddStorageMediaImageOptions(image_options)
    info_options = argument_parser.add_argument_group(u'Informational Options')
    info_options.add_argument(
        u'--info', dest=u'show_info', action=u'store_true', default=False,
        help=u'Print out information about supported plugins.')
    info_options.add_argument(
        u'-v', u'--verbose', dest=u'verbose', action=u'store_true',
        default=False, help=u'Print sub key information.')
    info_options.add_argument(
        u'-q', u'--quiet', dest=u'quiet', action=u'store_true', default=False,
        help=u'Do not print out key names that the tool was unable to open.')
    mode_options = argument_parser.add_argument_group(u'Run Mode Options')
    mode_options.add_argument(
        u'-c', u'--console', dest=u'console', action=u'store_true',
        default=False, help=(
            u'Drop into a console session Instead of printing output '
            u'to STDOUT.'))
    mode_options.add_argument(
        u'-k', u'--key', dest=u'key', action=u'store', default=u'',
        type=str, metavar=u'REGISTRY_KEYPATH', help=(
            u'A Registry key path that the tool should parse using all '
            u'available plugins.'))
    mode_options.add_argument(
        u'-p', u'--plugins', dest=u'plugin_names', action=u'append', default=[],
        type=str, metavar=u'PLUGIN_NAME', help=(
            u'Substring match of the Registry plugin to be used, this '
            u'parameter can be repeated to create a list of plugins to be '
            u'run against, eg: "-p userassist -p rdp" or "-p userassist".'))
    argument_parser.add_argument(
        u'registry_file', action=u'store', metavar=u'REGHIVE', nargs=u'?',
        help=(
            u'The Registry hive to read key from (not needed if running '
            u'using a plugin)'))
    try:
      options = argument_parser.parse_args()
    except UnicodeEncodeError:
      # If we get here we are attempting to print help in a non-Unicode
      # terminal.
      self._output_writer.Write(u'\n')
      self._output_writer.Write(argument_parser.format_help())
      self._output_writer.Write(u'\n')
      return False
    try:
      # Delegate validation of the parsed namespace; BadConfigOption is the
      # only expected failure mode.
      self.ParseOptions(options)
    except errors.BadConfigOption as exception:
      logging.error(u'{0:s}'.format(exception))
      self._output_writer.Write(u'\n')
      self._output_writer.Write(argument_parser.format_help())
      self._output_writer.Write(u'\n')
      return False
    return True
def ParseOptions(self, options):
"""Parses the options.
Args:
options: the command line arguments (instance of argparse.Namespace).
Raises:
BadConfigOption: if the options are invalid.
"""
if getattr(options, u'show_info', False):
self.run_mode = self.RUN_MODE_LIST_PLUGINS
return
registry_file = getattr(options, u'registry_file', None)
image = self.ParseStringOption(options, self._SOURCE_OPTION)
source_path = None
if image:
# TODO: refactor, there should be no need for separate code paths.
super(PregTool, self).ParseOptions(options)
source_path = image
self._front_end.SetSingleFile(False)
else:
self._ParseInformationalOptions(options)
source_path = registry_file
self._front_end.SetSingleFile(True)
if source_path is None:
raise errors.BadConfigOption(u'No source path set.')
self._front_end.SetSourcePath(source_path)
self._source_path = os.path.abspath(source_path)
if not image and not registry_file:
raise errors.BadConfigOption(u'Not enough parameters to proceed.')
if registry_file:
if not image and not os.path.isfile(registry_file):
raise errors.BadConfigOption(
u'Registry file: {0:s} does not exist.'.format(registry_file))
self._key_path = self.ParseStringOption(options, u'key')
self._parse_restore_points = getattr(options, u'restore_points', False)
self._quiet = getattr(options, u'quiet', False)
self._verbose_output = getattr(options, u'verbose', False)
if image:
file_to_check = image
else:
file_to_check = registry_file
is_file, reason = self._PathExists(file_to_check)
if not is_file:
raise errors.BadConfigOption(
u'Unable to read the input file with error: {0:s}'.format(reason))
# TODO: make sure encoded plugin names are handled correctly.
self.plugin_names = getattr(options, u'plugin_names', [])
self._front_end.SetKnowledgeBase(self._knowledge_base_object)
if getattr(options, u'console', False):
self.run_mode = self.RUN_MODE_CONSOLE
elif self._key_path and registry_file:
self.run_mode = self.RUN_MODE_REG_KEY
elif self.plugin_names:
self.run_mode = self.RUN_MODE_REG_PLUGIN
elif registry_file:
self.run_mode = self.RUN_MODE_REG_FILE
else:
raise errors.BadConfigOption(
u'Incorrect usage. You\'ll | |
b0, b1 = self._sweepBorder(index_sweep)
self._cache['snippet_'+sniptype][b0:b1,:] = snippet
self._cache['snippet_index_'+sniptype][b0:b1] = np.ones(b1-b0,
dtype = 'bool')
def _extract_all_snippet(self, sniptype):
pbar = pgb.ProgressBar(maxval=self.numSweeps(), term_width = 79).start()
for index_sweep in range(self.numSweeps()):
time = self._get_time(index_sweep)
spike = self._spikeTimes(index_sweep)
if not spike.size:
continue
sweep = self._get_input_sweep(index_sweep, dtype =self.get_param(
'dtype'))._data[1]
if sniptype == 'filtered':
sweep = self._ready_trace(sweep, time)
elif sniptype != 'raw':
raise ValueError('Unknown sniptype: %s'%sniptype)
snip = self._extract_snippet(spike, sweep, time)
self._saveSnip(index_sweep, snip, sniptype)
pbar.update(index_sweep)
pbar.finish()
    def _getSnip(self, listindex, sniptype):
        # Return the cached snippet rows for the given spike indices as a
        # 2-D numpy array; snippets are (re)extracted lazily when any
        # requested index is missing from the cache.
        #
        # Raises NotImplementedError for the 'store' snip_memory strategy,
        # which is only sketched out in the commented code below.
        if not isinstance(listindex, list):
            listindex = [int(listindex)]
        snip_memory = self.get_param('snip_memory')
        if snip_memory[0] == 'store':
            if snip_memory[1] != 'all':
                raise NotImplementedError()
            # Sketch of a per-sweep store, never finished:
            # out = {}
            # sweep_saved = self.get_cache('snippet_'+sniptype)
            # if not sweep_saved is None:
            #     for i in listindex:
            #         swind, spind = self.findOriginFromIndex(i)
            # not_saved = []
            # for i in list_index:
            #     sweep
            # saved_ind =[i for i in listindex if i in sweep_saved]
        else:
            inds = self.get_cache('snippet_index_'+sniptype)
            # Re-extract everything if any requested snippet is not cached.
            if inds is None or any([not inds[i] for i in listindex]):
                self._extract_all_snippet(sniptype)
            return np.array(self._cache['snippet_'+sniptype][listindex,:])
        # 'store' + 'all' falls through to here: not implemented either.
        raise NotImplementedError()
        # Dead sketch kept for reference:
        # indices = [self.findOriginFromIndex(i) for i in listindex]
        # time = self._get_time(index_sweep)
        # time_list =self._time_list(index_sweep)
        # spike = time_list._data
        # sweep = self._get_input_sweep(index_sweep, dtype =self.get_param(
        #     'dtype'))._data[1]
        # if sniptype == 'filtered':
        #     sweep = self._ready_trace(sweep, time)
        # elif sniptype != 'raw':
        #     raise ValueError('Unknown sniptype: %s'%sniptype)
        # snip = self._extract_snippet(spike, sweep, time)
        # self._saveSnip(index_sweep, snip, sniptype)
def _value_around_pic(self, list_index, props, sniptype):
snip = self._getSnip(list_index, sniptype)
if not isinstance(props, list):
props = [props]
out = np.zeros((len(props),snip.shape[0]), dtype = 'float')
for i, v in enumerate(props):
func = getattr(np, v)
if hasattr(func, '__call__'):
val = func(snip, axis = 1)
else:
print 'Prop is not callable, it might not be what you wanted'
out[i] = val
return out
def PCA(self, sniptype):
if self._cache.has_key('PCA_'+sniptype):
return self.get_cache('PCA_'+sniptype)
all_props = self.get_param('props')
PCAnode = mdp.nodes.PCANode(input_dim=len(all_props), output_dim=len(all_props)-1)
arr = self._getFilterArray(sniptype)
PCAnode.train(arr)
out = PCAnode(arr)
self.set_cache('PCA_'+sniptype, out)
return out
def _getFilterArray(self, sniptype):
if not self._cache.has_key('properties'):
self._cache['properties'] = {}
if self._cache['properties'].has_key(sniptype):
return self.get_cache('properties')[sniptype]
props = copy(self.get_param('props'))
out = np.zeros((self.numSpikes(keepMasked = True), len(props)),
dtype = 'float')
list_sweep = range(self.numSweeps())
pbar = pgb.ProgressBar(maxval=len(list_sweep), term_width = 79).start()
inds = range(len(props))
try:
indSw = props.index('sw_ind')
props.pop(indSw)
inds.pop(indSw)
N = 0
for i in list_sweep:
n = len(self._spikeTimes(i))
out[N:N+n, indSw] = np.ones(n, dtype = 'float')*i
N+=n
except ValueError:
print 'error'
pass
for i in list_sweep:
b0, b1 = self._sweepBorder(i)
if b0 != b1:
data = self._value_around_pic(range(b0,b1), props, sniptype)
out[b0:b1,inds] = data.T
pbar.update(i)
self._cache['properties'][sniptype] = out
return out
def _mask(self):
if self._cache.has_key('mask'):
return self._cache['mask']
mask = np.ones(self.numSpikes(keepMasked = True), dtype = bool)
Filter = self.get_param('filter')
for sniptype, prop, comp, value in Filter:
val = self._getDataToPlot(keepMasked=True, prop=prop,
sniptype=sniptype)
val -= value
if not comp: val *= -1
mask = np.logical_and(mask, val >= 0)
self._cache['mask'] = mask
return mask
def numSweeps(self):
'''return the number of sweeps'''
return self.in_numSweeps()
def chanNames(self, index = 0):
'''return the name of the channel used for the detection'''
return [self.get_param('chan')]
def origin(self, index):
return self.in_origin(index)
def filteredSweep(self, index_sweep, chan = None):
'''return the trace on wich the detection is done'''
sweep = self._get_input_sweep(index_sweep, dtype = self.get_param('dtype'))
time = self._get_time(index_sweep)
sweep._data = np.asarray(sweep._data, dtype = 'float64')
sweep._data[1] = self._ready_trace(sweep._data[1], time)
sweep._data[0] = time
sweep.tag = self.tag(index_sweep)
return sweep
def snippet_chanNames(self, index = 0):
return [self.get_param('chan')+i for i in ['_raw', '_filtered']]
def snippet_origin(self, index):
ind_sw, ind_sp = self.findOriginFromIndex(index)
return self.in_origin(ind_sw)+ ['Spike_'+str(ind_sp)]
    def snippet_sweepInfo(self, index, keepMasked = False):
        # Build a SweepInfo describing a snippet's two channels: the input
        # sweep's single detection channel is duplicated into '_raw' and
        # '_filtered' entries, and the time base is rewritten to unit sample
        # steps over the snippet window.
        if not keepMasked:
            index = self._findNotMaskedFromMaskedIndex(index)
        sw_ind, sp_ind = self.findOriginFromIndex(index)
        sw_inf = self.sweepInfo(sw_ind)
        sw_inf.numChans = 2
        # Copy the channel info twice so renaming one does not affect the other.
        chInf = [copy(sw_inf.channelInfo[0]) for i in (0,1)]
        chInf[0].name = chInf[0].name + '_raw'
        chInf[1].name = chInf[1].name + '_filtered'
        sw_inf.channelInfo = chInf
        dt = sw_inf.dt
        win = self.get_param('snip_window')
        # numPoints derives from the window length in original time units;
        # after this the snippet is indexed in samples (t0=0, dt=1).
        sw_inf.numPoints = int((win[1] - win[0])/dt)
        sw_inf.tend = sw_inf.numPoints
        sw_inf.t0 = 0
        sw_inf.dt = 1
        return sw_inf
    def snippet(self, index, chan = None, keepMasked = False):
        """Return a Sweep holding one spike snippet (raw + filtered traces).

        Arguments:
        - `index`: snippet index; re-mapped through the mask unless
          keepMasked is True.
        - `chan`: unused; kept for interface compatibility.
        """
        if not keepMasked:
            index = self._findNotMaskedFromMaskedIndex(index)
        sw_ind, sp_ind = self.findOriginFromIndex(index)
        snipRaw = self._getSnip(index, 'raw')[0]
        snipFiltered = self._getSnip(index, 'filtered')[0]
        # Row 0: sample indices, row 1: raw trace, row 2: filtered trace.
        data = np.zeros((3, snipRaw.size), dtype = self.get_param(
            'dtype'))
        data[0] = np.arange(data.shape[1])
        data[1] = snipRaw
        data[2] = snipFiltered
        # NOTE(review): `index` was already re-mapped above, yet keepMasked
        # is forwarded here, so snippet_sweepInfo may re-map a second time
        # when keepMasked is False -- confirm intended behaviour.
        snipinf = self.snippet_sweepInfo(index, keepMasked)
        # NOTE(review): self.tag(index) passes a *snippet* index to the sweep
        # tag lookup; snip_tag() maps to the sweep index (sw_ind) first --
        # confirm which is intended.
        return Sweep.Sweep('Snippet_'+str(index)+'in_'+self.name, data,
                           snipinf.channelInfo, tag = self.tag(index))
def snip_tag(self, index, keepMasked = False):
'''Return the tags of sweep or time_list'''
if not keepMasked:
index = self._findNotMaskedFromMaskedIndex(index)
sw_ind, sp_ind = self.findOriginFromIndex(index)
return self.in_tag(sw_ind)
def tag(self, index):
'''Return the tags of sweep or time_list'''
return self.in_tag(index)
def sweepInfo(self, index):
sw_inf = self.in_sweepInfo(index)
cname = self.get_param('chan')
ind = [i.name for i in sw_inf.channelInfo].index(cname)
sw_inf.channelInfo = [sw_inf.channelInfo[ind]]
sw_inf.numChans = 1
return sw_inf
def _findNotMaskedFromMaskedIndex(self, maskedIndex):
mask = self._mask()
index = np.arange(mask.size)
return index[mask][maskedIndex]
def _get_input_sweep(self, sw_ind, *args, **kwargs):
last = self.get_cache('last')
if last is None or last[0] != sw_ind:
if not kwargs.has_key('chan') and not args:
kwargs['chan'] = self.get_param('chan')
sw = self.in_sweep(sw_ind, *args, **kwargs)
self.set_cache('last', (sw_ind, sw), force = 1)
return copy(sw)
return copy(last[1])
def _get_time(self, index_sweep):
lasttime = self.get_cache('lasttime')
if lasttime is None or lasttime[0] != index_sweep:
time = self.in_sweep(index_sweep)._data[0]
self.set_cache('lasttime', (index_sweep, time), force =1)
return copy(time)
return copy(lasttime[1])
def save(self, what = ['SpikeTimes', 'Border', 'Mask', 'Prop'],
path = None, force = 0):
for i in what:
getattr(self, 'save'+i)(force = force, name = path)
def saveSpikeTimes(self, name = None, force = 0, mode = 'bn', delimiter =
',', keepMasked = True):
'''Save spike times in file 'name' (can only save ALL the spike times at
once)
'name' is absolute or relative to parent.home
if 'force', replace existing file
'mode' can be 'bn', 'csv', 'txt' or 'vert_csv':
'bn': binary, saved in .npz
'csv' or 'txt': text file, value separeted by 'delimiter' (default
',') saved in lines
'vert_csv': text file, value and separeted by 'delimiter' (default
',') saved in columns '''
import os
if name is None:
name = self.parent.name+'_'+self.name+'_spikeTimes'
if name[0] != '/':
path = self.parent.home + name
data = self.all_times(keepMasked = True, groupbysweep = True)
if mode == 'bn':
path += '.npz'
if not force:
if os.path.isfile(path):
print 'File %s already exist, change name or force'%path
return
kwargs = {}
for i, value in enumerate(data):
kwargs['Sw_'+str(i)] = value
np.savez(path, **kwargs)
elif mode == 'vert_csv':
path += '_vertical.csv'
if not force:
if os.path.isfile(path):
print 'File %s already exist, change name or force'%path
return
nspike = self.numSpikes()
out = file(path, 'w')
totspike = 0
index_spike = 0
while totspike < nspike:
for index_sweep in range(self.numSweeps()):
timelist = self._time_list(index_sweep)
if len(timelist) > index_spike:
out.write(str(timelist._data[index_spike]))
totspike+=1
out.write(str(delimiter))
out.write('\n')
index_spike += 1
totspike += 1
out.close()
elif mode == 'csv' or mode =='txt':
path += '.'+mode
if not force:
if os.path.isfile(path):
print 'File %s already exist, change name or force'%path
return
np.savetxt(path, data, delimiter = delimiter)
else:
print 'unknown mode %s'%mode
def saveBorder(self, name = None, force =0):
import os
if name is None:
name = self.parent.name+'_'+self.name+'_borders'
if name[0] != '/':
path = self.parent.home + name
data = self._borders()
path += '.npz'
if not force:
if os.path.isfile(path):
print 'File %s already exist, change name or force'%path
return
kwargs = {}
for i, value in data.iteritems():
kwargs['Sw_'+str(i)] = value
np.savez(path, **kwargs)
def saveMask(self, name = None, force = 0):
import os
if name is None:
name = self.parent.name+'_'+self.name+'_mask'
if name[0] != '/':
path = self.parent.home + name
path += '.npy'
if not force:
if os.path.isfile(path):
print 'File %s already exist, change name or force'%path
return
data = self._mask()
np.save(path, data)
def saveProp(self, sniptype = ['raw', 'filtered'], name = None, force = 0):
if not isinstance(sniptype, list):
sniptype = [str(sniptype)]
import os
if name is None:
name = self.parent.name+'_'+self.name+'_prop'+'_'
if name[0] != '/':
path = self.parent.home + name
for sntp in sniptype:
p = path+sntp+ '.npy'
if not force:
if os.path.isfile(p):
print 'File %s already | |
SQLParser#keyOrIndex.
    # NOTE: the methods below are ANTLR-generated listener callbacks; each is
    # an intentional no-op that subclasses override to react to parse events.
    def exitKeyOrIndex(self, ctx:SQLParser.KeyOrIndexContext):
        pass
    # Enter a parse tree produced by SQLParser#constraintKeyType.
    def enterConstraintKeyType(self, ctx:SQLParser.ConstraintKeyTypeContext):
        pass
    # Exit a parse tree produced by SQLParser#constraintKeyType.
    def exitConstraintKeyType(self, ctx:SQLParser.ConstraintKeyTypeContext):
        pass
    # Enter a parse tree produced by SQLParser#indexHintClause.
    def enterIndexHintClause(self, ctx:SQLParser.IndexHintClauseContext):
        pass
    # Exit a parse tree produced by SQLParser#indexHintClause.
    def exitIndexHintClause(self, ctx:SQLParser.IndexHintClauseContext):
        pass
    # Enter a parse tree produced by SQLParser#indexList.
    def enterIndexList(self, ctx:SQLParser.IndexListContext):
        pass
    # Exit a parse tree produced by SQLParser#indexList.
    def exitIndexList(self, ctx:SQLParser.IndexListContext):
        pass
    # Enter a parse tree produced by SQLParser#indexListElement.
    def enterIndexListElement(self, ctx:SQLParser.IndexListElementContext):
        pass
    # Exit a parse tree produced by SQLParser#indexListElement.
    def exitIndexListElement(self, ctx:SQLParser.IndexListElementContext):
        pass
    # Enter a parse tree produced by SQLParser#updateStatement.
    def enterUpdateStatement(self, ctx:SQLParser.UpdateStatementContext):
        pass
    # Exit a parse tree produced by SQLParser#updateStatement.
    def exitUpdateStatement(self, ctx:SQLParser.UpdateStatementContext):
        pass
    # Enter a parse tree produced by SQLParser#transactionOrLockingStatement.
    def enterTransactionOrLockingStatement(self, ctx:SQLParser.TransactionOrLockingStatementContext):
        pass
    # Exit a parse tree produced by SQLParser#transactionOrLockingStatement.
    def exitTransactionOrLockingStatement(self, ctx:SQLParser.TransactionOrLockingStatementContext):
        pass
    # Enter a parse tree produced by SQLParser#transactionStatement.
    def enterTransactionStatement(self, ctx:SQLParser.TransactionStatementContext):
        pass
    # Exit a parse tree produced by SQLParser#transactionStatement.
    def exitTransactionStatement(self, ctx:SQLParser.TransactionStatementContext):
        pass
    # Enter a parse tree produced by SQLParser#beginWork.
    def enterBeginWork(self, ctx:SQLParser.BeginWorkContext):
        pass
    # Exit a parse tree produced by SQLParser#beginWork.
    def exitBeginWork(self, ctx:SQLParser.BeginWorkContext):
        pass
    # Enter a parse tree produced by SQLParser#transactionCharacteristic.
    def enterTransactionCharacteristic(self, ctx:SQLParser.TransactionCharacteristicContext):
        pass
    # Exit a parse tree produced by SQLParser#transactionCharacteristic.
    def exitTransactionCharacteristic(self, ctx:SQLParser.TransactionCharacteristicContext):
        pass
    # Enter a parse tree produced by SQLParser#savepointStatement.
    def enterSavepointStatement(self, ctx:SQLParser.SavepointStatementContext):
        pass
    # Exit a parse tree produced by SQLParser#savepointStatement.
    def exitSavepointStatement(self, ctx:SQLParser.SavepointStatementContext):
        pass
    # Enter a parse tree produced by SQLParser#lockStatement.
    def enterLockStatement(self, ctx:SQLParser.LockStatementContext):
        pass
    # Exit a parse tree produced by SQLParser#lockStatement.
    def exitLockStatement(self, ctx:SQLParser.LockStatementContext):
        pass
    # Enter a parse tree produced by SQLParser#lockItem.
    def enterLockItem(self, ctx:SQLParser.LockItemContext):
        pass
    # Exit a parse tree produced by SQLParser#lockItem.
    def exitLockItem(self, ctx:SQLParser.LockItemContext):
        pass
# Enter a parse tree produced by SQLParser#lockOption.
def enterLockOption(self, ctx:SQLParser.LockOptionContext):
pass
# Exit a parse tree produced by SQLParser#lockOption.
def exitLockOption(self, ctx:SQLParser.LockOptionContext):
pass
# Enter a parse tree produced by SQLParser#xaStatement.
def enterXaStatement(self, ctx:SQLParser.XaStatementContext):
pass
# Exit a parse tree produced by SQLParser#xaStatement.
def exitXaStatement(self, ctx:SQLParser.XaStatementContext):
pass
# Enter a parse tree produced by SQLParser#xaConvert.
def enterXaConvert(self, ctx:SQLParser.XaConvertContext):
pass
# Exit a parse tree produced by SQLParser#xaConvert.
def exitXaConvert(self, ctx:SQLParser.XaConvertContext):
pass
# Enter a parse tree produced by SQLParser#xid.
def enterXid(self, ctx:SQLParser.XidContext):
pass
# Exit a parse tree produced by SQLParser#xid.
def exitXid(self, ctx:SQLParser.XidContext):
pass
# Enter a parse tree produced by SQLParser#replicationStatement.
def enterReplicationStatement(self, ctx:SQLParser.ReplicationStatementContext):
pass
# Exit a parse tree produced by SQLParser#replicationStatement.
def exitReplicationStatement(self, ctx:SQLParser.ReplicationStatementContext):
pass
# Enter a parse tree produced by SQLParser#resetOption.
def enterResetOption(self, ctx:SQLParser.ResetOptionContext):
pass
# Exit a parse tree produced by SQLParser#resetOption.
def exitResetOption(self, ctx:SQLParser.ResetOptionContext):
pass
# Enter a parse tree produced by SQLParser#masterResetOptions.
def enterMasterResetOptions(self, ctx:SQLParser.MasterResetOptionsContext):
pass
# Exit a parse tree produced by SQLParser#masterResetOptions.
def exitMasterResetOptions(self, ctx:SQLParser.MasterResetOptionsContext):
pass
# Enter a parse tree produced by SQLParser#replicationLoad.
def enterReplicationLoad(self, ctx:SQLParser.ReplicationLoadContext):
pass
# Exit a parse tree produced by SQLParser#replicationLoad.
def exitReplicationLoad(self, ctx:SQLParser.ReplicationLoadContext):
pass
# Enter a parse tree produced by SQLParser#changeMaster.
def enterChangeMaster(self, ctx:SQLParser.ChangeMasterContext):
pass
# Exit a parse tree produced by SQLParser#changeMaster.
def exitChangeMaster(self, ctx:SQLParser.ChangeMasterContext):
pass
# Enter a parse tree produced by SQLParser#changeMasterOptions.
def enterChangeMasterOptions(self, ctx:SQLParser.ChangeMasterOptionsContext):
pass
# Exit a parse tree produced by SQLParser#changeMasterOptions.
def exitChangeMasterOptions(self, ctx:SQLParser.ChangeMasterOptionsContext):
pass
# Enter a parse tree produced by SQLParser#masterOption.
def enterMasterOption(self, ctx:SQLParser.MasterOptionContext):
pass
# Exit a parse tree produced by SQLParser#masterOption.
def exitMasterOption(self, ctx:SQLParser.MasterOptionContext):
pass
# Enter a parse tree produced by SQLParser#privilegeCheckDef.
def enterPrivilegeCheckDef(self, ctx:SQLParser.PrivilegeCheckDefContext):
pass
# Exit a parse tree produced by SQLParser#privilegeCheckDef.
def exitPrivilegeCheckDef(self, ctx:SQLParser.PrivilegeCheckDefContext):
pass
# Enter a parse tree produced by SQLParser#tablePrimaryKeyCheckDef.
def enterTablePrimaryKeyCheckDef(self, ctx:SQLParser.TablePrimaryKeyCheckDefContext):
pass
# Exit a parse tree produced by SQLParser#tablePrimaryKeyCheckDef.
def exitTablePrimaryKeyCheckDef(self, ctx:SQLParser.TablePrimaryKeyCheckDefContext):
pass
# Enter a parse tree produced by SQLParser#masterTlsCiphersuitesDef.
def enterMasterTlsCiphersuitesDef(self, ctx:SQLParser.MasterTlsCiphersuitesDefContext):
pass
# Exit a parse tree produced by SQLParser#masterTlsCiphersuitesDef.
def exitMasterTlsCiphersuitesDef(self, ctx:SQLParser.MasterTlsCiphersuitesDefContext):
pass
# Enter a parse tree produced by SQLParser#masterFileDef.
def enterMasterFileDef(self, ctx:SQLParser.MasterFileDefContext):
pass
# Exit a parse tree produced by SQLParser#masterFileDef.
def exitMasterFileDef(self, ctx:SQLParser.MasterFileDefContext):
pass
# Enter a parse tree produced by SQLParser#serverIdList.
def enterServerIdList(self, ctx:SQLParser.ServerIdListContext):
pass
# Exit a parse tree produced by SQLParser#serverIdList.
def exitServerIdList(self, ctx:SQLParser.ServerIdListContext):
pass
# Enter a parse tree produced by SQLParser#changeReplication.
def enterChangeReplication(self, ctx:SQLParser.ChangeReplicationContext):
pass
# Exit a parse tree produced by SQLParser#changeReplication.
def exitChangeReplication(self, ctx:SQLParser.ChangeReplicationContext):
pass
# Enter a parse tree produced by SQLParser#filterDefinition.
def enterFilterDefinition(self, ctx:SQLParser.FilterDefinitionContext):
pass
# Exit a parse tree produced by SQLParser#filterDefinition.
def exitFilterDefinition(self, ctx:SQLParser.FilterDefinitionContext):
pass
# Enter a parse tree produced by SQLParser#filterDbList.
def enterFilterDbList(self, ctx:SQLParser.FilterDbListContext):
pass
# Exit a parse tree produced by SQLParser#filterDbList.
def exitFilterDbList(self, ctx:SQLParser.FilterDbListContext):
pass
# Enter a parse tree produced by SQLParser#filterTableList.
def enterFilterTableList(self, ctx:SQLParser.FilterTableListContext):
pass
# Exit a parse tree produced by SQLParser#filterTableList.
def exitFilterTableList(self, ctx:SQLParser.FilterTableListContext):
pass
# Enter a parse tree produced by SQLParser#filterStringList.
def enterFilterStringList(self, ctx:SQLParser.FilterStringListContext):
pass
# Exit a parse tree produced by SQLParser#filterStringList.
def exitFilterStringList(self, ctx:SQLParser.FilterStringListContext):
pass
# Enter a parse tree produced by SQLParser#filterWildDbTableString.
def enterFilterWildDbTableString(self, ctx:SQLParser.FilterWildDbTableStringContext):
pass
# Exit a parse tree produced by SQLParser#filterWildDbTableString.
def exitFilterWildDbTableString(self, ctx:SQLParser.FilterWildDbTableStringContext):
pass
# Enter a parse tree produced by SQLParser#filterDbPairList.
def enterFilterDbPairList(self, ctx:SQLParser.FilterDbPairListContext):
pass
# Exit a parse tree produced by SQLParser#filterDbPairList.
def exitFilterDbPairList(self, ctx:SQLParser.FilterDbPairListContext):
pass
# Enter a parse tree produced by SQLParser#slave.
def enterSlave(self, ctx:SQLParser.SlaveContext):
pass
# Exit a parse tree produced by SQLParser#slave.
def exitSlave(self, ctx:SQLParser.SlaveContext):
pass
# Enter a parse tree produced by SQLParser#slaveUntilOptions.
def enterSlaveUntilOptions(self, ctx:SQLParser.SlaveUntilOptionsContext):
pass
# Exit a parse tree produced by SQLParser#slaveUntilOptions.
def exitSlaveUntilOptions(self, ctx:SQLParser.SlaveUntilOptionsContext):
pass
# Enter a parse tree produced by SQLParser#slaveConnectionOptions.
def enterSlaveConnectionOptions(self, ctx:SQLParser.SlaveConnectionOptionsContext):
pass
# Exit a parse tree produced by SQLParser#slaveConnectionOptions.
def exitSlaveConnectionOptions(self, ctx:SQLParser.SlaveConnectionOptionsContext):
pass
# Enter a parse tree produced by SQLParser#slaveThreadOptions.
def enterSlaveThreadOptions(self, ctx:SQLParser.SlaveThreadOptionsContext):
pass
# Exit a parse tree produced by SQLParser#slaveThreadOptions.
def exitSlaveThreadOptions(self, ctx:SQLParser.SlaveThreadOptionsContext):
pass
# Enter a parse tree produced by SQLParser#slaveThreadOption.
def enterSlaveThreadOption(self, ctx:SQLParser.SlaveThreadOptionContext):
pass
# Exit a parse tree produced by SQLParser#slaveThreadOption.
def exitSlaveThreadOption(self, ctx:SQLParser.SlaveThreadOptionContext):
pass
# Enter a parse tree produced by SQLParser#groupReplication.
def enterGroupReplication(self, ctx:SQLParser.GroupReplicationContext):
pass
# Exit a parse tree produced by SQLParser#groupReplication.
def exitGroupReplication(self, ctx:SQLParser.GroupReplicationContext):
pass
# Enter a parse tree produced by SQLParser#preparedStatement.
def enterPreparedStatement(self, ctx:SQLParser.PreparedStatementContext):
pass
# Exit a parse tree produced by SQLParser#preparedStatement.
def exitPreparedStatement(self, ctx:SQLParser.PreparedStatementContext):
pass
# Enter a parse tree produced by SQLParser#executeStatement.
def enterExecuteStatement(self, ctx:SQLParser.ExecuteStatementContext):
pass
# Exit a parse tree produced by SQLParser#executeStatement.
def exitExecuteStatement(self, ctx:SQLParser.ExecuteStatementContext):
pass
# Enter a parse tree produced by SQLParser#executeVarList.
def enterExecuteVarList(self, ctx:SQLParser.ExecuteVarListContext):
pass
# Exit a parse tree produced by SQLParser#executeVarList.
def exitExecuteVarList(self, ctx:SQLParser.ExecuteVarListContext):
pass
# Enter a parse tree produced by SQLParser#cloneStatement.
def enterCloneStatement(self, ctx:SQLParser.CloneStatementContext):
pass
# Exit a parse tree produced by SQLParser#cloneStatement.
def exitCloneStatement(self, ctx:SQLParser.CloneStatementContext):
pass
# Enter a parse tree produced by SQLParser#dataDirSSL.
def enterDataDirSSL(self, ctx:SQLParser.DataDirSSLContext):
pass
# Exit a parse tree produced by SQLParser#dataDirSSL.
def exitDataDirSSL(self, ctx:SQLParser.DataDirSSLContext):
pass
# Enter a parse tree produced by SQLParser#ssl.
def enterSsl(self, ctx:SQLParser.SslContext):
pass
# Exit a parse tree produced by SQLParser#ssl.
def exitSsl(self, ctx:SQLParser.SslContext):
pass
# Enter a parse tree produced by SQLParser#accountManagementStatement.
def enterAccountManagementStatement(self, ctx:SQLParser.AccountManagementStatementContext):
pass
# Exit a parse tree produced by SQLParser#accountManagementStatement.
def exitAccountManagementStatement(self, ctx:SQLParser.AccountManagementStatementContext):
pass
# Enter a parse tree produced by SQLParser#alterUser.
def enterAlterUser(self, ctx:SQLParser.AlterUserContext):
pass
# Exit a parse tree produced by SQLParser#alterUser.
def exitAlterUser(self, ctx:SQLParser.AlterUserContext):
pass
# Enter a parse tree produced by SQLParser#alterUserTail.
def enterAlterUserTail(self, ctx:SQLParser.AlterUserTailContext):
pass
# Exit a parse tree produced by SQLParser#alterUserTail.
def exitAlterUserTail(self, ctx:SQLParser.AlterUserTailContext):
pass
# Enter a parse tree produced by SQLParser#userFunction.
def enterUserFunction(self, ctx:SQLParser.UserFunctionContext):
pass
# Exit a parse tree produced by SQLParser#userFunction.
def exitUserFunction(self, ctx:SQLParser.UserFunctionContext):
pass
# Enter a parse tree produced by SQLParser#createUser.
def enterCreateUser(self, ctx:SQLParser.CreateUserContext):
pass
# Exit a parse tree produced by SQLParser#createUser.
def exitCreateUser(self, ctx:SQLParser.CreateUserContext):
pass
# Enter a parse tree produced by SQLParser#createUserTail.
def enterCreateUserTail(self, ctx:SQLParser.CreateUserTailContext):
pass
# Exit a parse tree produced by SQLParser#createUserTail.
def exitCreateUserTail(self, ctx:SQLParser.CreateUserTailContext):
pass
# Enter a | |
def eval(self, env):
    """Evaluate both operands, then return their remainder (Python `%` semantics)."""
    dividend = self.left.eval(env)
    divisor = self.right.eval(env)
    return dividend % divisor
class ComparisonExpression:
    """AST node for a single binary comparison (e.g. `a < b`).

    The comparison operator `cond` is interpreted by the interpreter-wide
    eval_cond helper.
    """
    def __init__(self, left, right, cond):
        self.left = left
        self.right = right
        self.cond = cond
    def eval(self, env):
        """Evaluate both operands (left first) and delegate to eval_cond."""
        lhs = self.left.eval(env)
        rhs = self.right.eval(env)
        return eval_cond(lhs, self.cond, rhs)
class ChainedComparisonExpression:
    """AST node for a chained comparison such as `a < b <= c`.

    `comps` holds the individual pairwise comparison nodes; the chain is
    true only when every link is true.  Evaluation stops at the first
    false link (short-circuit), matching chained-comparison semantics.
    """
    def __init__(self, comps):
        self.comps = comps
    def eval(self, env):
        for link in self.comps:
            if not link.eval(env):
                return False
        return True
class ConditionalExpression:
    """AST node for a conditional expression: `left` if `cond` is true else `right`.

    Only the selected branch is evaluated.
    """
    def __init__(self, cond, left, right):
        self.cond = cond
        self.left = left
        self.right = right
    def eval(self, env):
        if self.cond.eval(env):
            return self.left.eval(env)
        return self.right.eval(env)
class TryExpression:
    """AST node for a TRY expression.

    Evaluates `expr`; on a NeonException, runs/evaluates the first catch
    clause whose exception list matches the raised exception's name.

    catches: iterable of (exceptions, name, statements) where
      - exceptions: candidate qualified-name sequences to match against,
      - name: identifier the exception would be bound to
        (NOTE(review): not actually bound here -- confirm intended),
      - statements: either a list of handler statements (the TRY then
        yields None implicitly) or a single expression whose value
        becomes the TRY's result.
    """
    def __init__(self, expr, catches):
        self.expr = expr
        self.catches = catches
    def eval(self, env):
        try:
            return self.expr.eval(env)
        except NeonException as x:
            # presumably x.name is a qualified-name sequence (module, name, ...);
            # verify against NeonException's definition.
            for exceptions, name, statements in self.catches:
                # Match either unqualified or module qualified name.
                if any(x.name[:len(h)] == h or x.name[:len(h)-1] == h[1:] for h in exceptions):
                    if isinstance(statements, list):
                        # Statement-form handler: declare all, then run all.
                        for s in statements:
                            s.declare(env)
                        for s in statements:
                            s.run(env)
                    else:
                        # Expression-form handler: its value is the result.
                        return statements.eval(env)
                    break
            else:
                # No clause matched: propagate the original exception.
                raise
class TypeTestExpression:
    """AST node for a type test (`value ISA Type`).

    Evaluates the left operand and checks it against `target`, which is a
    type node (TypeSimple, TypeParameterised, or a choice type).  Unhandled
    combinations hit an assert so missing cases surface loudly.
    """
    def __init__(self, left, target):
        self.left = left
        self.target = target
    def eval(self, env):
        v = self.left.eval(env)
        if isinstance(self.target, TypeSimple):
            if self.target.name == "Boolean":
                return isinstance(v, bool)
            if self.target.name == "Number":
                # bool is a subclass of int in Python, so exclude it explicitly.
                return isinstance(v, (int, float)) and not isinstance(v, bool)
            if self.target.name == "String":
                return isinstance(v, str)
            if self.target.name == "Bytes":
                return isinstance(v, bytes)
            if self.target.name == "Object":
                # Everything is an Object.
                return True
            assert False, "add type ISA support for target {}".format(self.target.name)
        if isinstance(self.target, TypeParameterised):
            if self.target.kind is ARRAY:
                if not isinstance(v, list):
                    return False
                if self.target.elementtype.name == "Number":
                    # NOTE(review): accepts only ints here, while the scalar
                    # Number test above also accepts floats -- confirm intended.
                    return all(isinstance(x, int) and not isinstance(x, bool) for x in v)
                if self.target.elementtype.name == "Object":
                    return True
            if self.target.kind is DICTIONARY:
                if not isinstance(v, dict):
                    return False
                if self.target.elementtype.name == "Number":
                    # Same int-only caveat as the Array case above.
                    return all(isinstance(x, int) and not isinstance(x, bool) for x in v.values())
                if self.target.elementtype.name == "Object":
                    return True
            # NOTE(review): other element types fall through to the choice
            # test / assert below rather than returning False -- confirm.
        if isinstance(v, ClassChoice.Instance):
            # Choice value: compare its active choice tag to the last
            # component of the target's qualified name.
            return v._choice == self.target.name[-1]
        assert False, "add type ISA support for target {}".format(self.target)
class MembershipExpression:
    """AST node for the IN operator: is the left value contained in the right?"""
    def __init__(self, left, right):
        self.left = left
        self.right = right
    def eval(self, env):
        needle = self.left.eval(env)
        haystack = self.right.eval(env)
        return needle in haystack
class ConjunctionExpression:
    """AST node for logical AND.

    Mirrors Python's `and`: returns the left value if it is falsy
    (right side not evaluated), otherwise the right value.
    """
    def __init__(self, left, right):
        self.left = left
        self.right = right
    def eval(self, env):
        lhs = self.left.eval(env)
        if not lhs:
            return lhs
        return self.right.eval(env)
class DisjunctionExpression:
    """AST node for logical OR.

    Mirrors Python's `or`: returns the left value if it is truthy
    (right side not evaluated), otherwise the right value.
    """
    def __init__(self, left, right):
        self.left = left
        self.right = right
    def eval(self, env):
        lhs = self.left.eval(env)
        if lhs:
            return lhs
        return self.right.eval(env)
class ValidPointerExpression:
    """AST node for a VALID pointer test.

    `tests` is a sequence of tuples whose first element is the pointer
    expression; the whole test is true only if every one evaluates truthy.
    """
    def __init__(self, tests):
        self.tests = tests
    def eval(self, env):
        for test in self.tests:
            if not test[0].eval(env):
                return False
        return True
class NativeFunction:
    """Declaration node for a function implemented natively in this module.

    The implementation is looked up in this module's globals under the
    naming convention `neon_<module>_<name>`.
    """
    def __init__(self, name, returntype, args, varargs):
        self.name = name
        self.returntype = returntype
        self.args = args
        self.varargs = varargs
    def declare(self, env):
        """Bind the native implementation into `env` under this name."""
        impl = globals()["neon_{}_{}".format(env.module(), self.name)]
        # Remember which parameters are OUT/INOUT so callers can copy
        # results back after the call.
        out_flags = [a.mode is not IN for a in self.args]
        if any(out_flags):
            impl._outs = out_flags
        rtype = self.returntype.resolve(env) if self.returntype else None
        env.declare(self.name, ClassFunction(rtype, self.args), impl)
    def run(self, env):
        # Declaration-only node; nothing to execute at run time.
        pass
class NativeVariable:
    """Declaration node for a module variable backed by the host runtime
    (command-line args, or one of the standard I/O streams)."""
    def __init__(self, name, type):
        self.name = name
        self.type = type
    def declare(self, env):
        if self.name == "args":
            # Program arguments, skipping the interpreter's own options.
            env.declare(self.name, self.type.resolve(env), sys.argv[g_arg_start:])
        elif self.name in ("stdin", "stdout", "stderr"):
            stream = getattr(sys, self.name)
            if env.module_name == "io":
                # The io module exposes the raw byte stream.
                env.declare(self.name, self.type.resolve(env), stream.buffer)
            elif env.module_name == "textio":
                # The textio module exposes the text wrapper.
                env.declare(self.name, self.type.resolve(env), stream)
            else:
                assert False
        else:
            assert False, self.name
    def run(self, env):
        # Declaration-only node; nothing to execute at run time.
        pass
class FunctionParameter:
    """Formal parameter of a function declaration.

    mode is the parameter-passing mode (e.g. IN/OUT); default is the
    default-value expression node, if any.
    """
    def __init__(self, name, type, mode, default):
        self.name, self.type = name, type
        self.mode, self.default = mode, default
class FunctionDeclaration:
    """Declaration node for a user-defined function or method.

    `declare` builds a Python closure that interprets the body statements
    and registers it either as a method on `self.type` (when set) or as a
    plain function in the environment.
    """
    def __init__(self, type, name, returntype, args, varargs, statements):
        self.type = type            # owning type name for methods, else None
        self.name = name
        self.returntype = returntype
        self.args = args            # list of FunctionParameter
        self.varargs = varargs
        self.statements = statements
    def declare(self, env):
        type = ClassFunction(self.returntype.resolve(env) if self.returntype else None, self.args)
        def func(env2, *a):
            # Fresh scope chained to the *declaring* environment (lexical scoping).
            e = Environment(env)
            # Bind parameters: OUT params get the type's default value;
            # missing trailing args fall back to the declared default expression.
            for i, arg in enumerate(self.args):
                e.declare(arg.name, None, self.args[i].type.resolve(e).default(e) if self.args[i].mode is OUT else a[i] if i < len(a) else arg.default.eval(e))
            for s in self.statements:
                s.declare(e)
            r = None
            try:
                for s in self.statements:
                    s.run(e)
            except ReturnException as x:
                # RETURN unwinds the body; its value becomes the result.
                r = x.expr
            # func._outs is attached below, after this def, but by call time
            # it is set; OUT results are returned as [r, out1, out2, ...].
            if hasattr(func, "_outs"):
                return [r] + [e.get_value(arg.name) for i, arg in enumerate(self.args) if func._outs[i]]
            else:
                return r
        outs = [x.mode is not IN for x in self.args]
        if any(outs):
            func._outs = outs
        func._varargs = self.varargs
        if self.type is not None:
            # Method: register on the owning type's method table.
            env.get_value(self.type).methods[self.name] = func
        else:
            env.declare(self.name, type, func)
    def run(self, env):
        # Declaration-only node; nothing to execute at run time.
        pass
class FunctionCallExpression:
    """AST node for a call: function, method, choice construction, or record
    construction, dispatched on what `self.func` evaluates to.

    args: list of (name, expression, spread) triples.
    """
    def __init__(self, func, args):
        self.func = func
        self.args = args
    def eval(self, env):
        args = [a[1].eval(env) for a in self.args]
        if self.args and self.args[-1][2]: # spread
            # Splice the final (already-evaluated, list-valued) argument.
            args = args[:-1] + args[-1]
        obj = None
        # Choice constructor: Choice.Tag(value) builds a tagged instance.
        if isinstance(self.func, DotExpression) and isinstance(self.func.expr, IdentifierExpression) and isinstance(env.get_value(self.func.expr.name), ClassChoice):
            r = ClassChoice.Instance(self.func.field, args[0])
            return r
        if isinstance(self.func, (DotExpression, ArrowExpression)):
            # Evaluate and save obj once so we don't evaluate it twice for one call.
            obj = self.func.expr.eval(env)
            f = self.func.eval_obj(env, obj)
        else:
            f = self.func.eval(env)
        if callable(f):
            # Functions execute in a fresh scope chained to the global env.
            e = env
            while e.parent is not None:
                e = e.parent
            funcenv = Environment(e)
            # Method call: prepend the receiver -- unless the dot is really
            # module.member access rather than a method on a value.
            if isinstance(self.func, DotExpression) and not (isinstance(self.func.expr, IdentifierExpression) and isinstance(env.get_type(self.func.expr.name), ClassModule)):
                args = [obj] + args
            elif isinstance(self.func, ArrowExpression):
                args = [obj] + args
            if hasattr(f, "_varargs") and f._varargs is not None:
                # Collapse trailing arguments into a single list parameter.
                args = args[:f._varargs] + [args[f._varargs:]]
            r = f(funcenv, *args)
            if hasattr(f, "_outs"):
                # OUT-parameter convention: r == [result, out1, out2, ...];
                # copy each OUT value back into the caller's argument slot.
                j = 1
                for i, out in enumerate(f._outs):
                    if out:
                        if i < len(self.args):
                            self.args[i][1].set(env, r[j])
                        j += 1
                r = r[0]
            return r
        elif isinstance(f, ClassRecord):
            # Record construction from (field-name, value) pairs.
            return f.make(env, [x[0] for x in self.args], args)
        assert False, (self.func, f)
class ExpressionStatement:
    """A bare expression used as a statement, evaluated for its side effects."""
    def __init__(self, expr):
        self.expr = expr
    def declare(self, env):
        # Introduces no names.
        pass
    def run(self, env):
        # The result is discarded.
        self.expr.eval(env)
class AssertStatement:
    """ASSERT statement: raises AssertionError with the evaluated report
    parts when the tested expression is falsy."""
    def __init__(self, expr, parts):
        self.expr = expr
        self.parts = parts
    def declare(self, env):
        pass
    def run(self, env):
        # The report parts are only evaluated if the assertion fails
        # (standard assert-message semantics).
        assert self.expr.eval(env), [part.eval(env) for part in self.parts]
class AssignmentStatement:
    """Assignment `var := rhs`.

    Assigns a deep copy so the target gets value semantics rather than
    aliasing the source; open file/stream handles keep their identity.
    """
    def __init__(self, var, rhs):
        self.var = var
        self.rhs = rhs
    def declare(self, env):
        pass
    def run(self, env):
        value = self.rhs.eval(env)
        import _io
        # Stream handles must stay shared; everything else is copied.
        if not isinstance(value, (_io.TextIOWrapper, _io.BufferedWriter)):
            value = copy.deepcopy(value)
        self.var.set(env, value)
    def eval(self, env):
        # This is used in the rewrite of a.append(b) to a := a & b.
        result = copy.deepcopy(self.rhs.eval(env))
        self.var.set(env, result)
        return result
class CaseStatement:
    """CASE statement: evaluates the subject once, then runs the first
    clause whose conditions match (a clause with conds None is the
    catch-all -- presumably WHEN OTHERS; confirm against the parser)."""
    class ComparisonWhenCondition:
        """WHEN <op> <value>: compare the subject to the clause value."""
        def __init__(self, op, value):
            self.op = op
            self.value = value
        def check(self, env, x):
            return eval_cond(x, self.op, self.value.eval(env))
    class RangeWhenCondition:
        """WHEN <low> TO <high>: inclusive range test on the subject."""
        def __init__(self, low, high):
            self.low = low
            self.high = high
        def check(self, env, x):
            return self.low.eval(env) <= x <= self.high.eval(env)
    class TypeTestWhenCondition:
        """WHEN ISA <type>: structural type test on the subject.

        NOTE(review): unlike TypeTestExpression above, the Number test here
        does not exclude bool, and unhandled type combinations fall through
        returning None (treated as no-match) instead of asserting -- confirm
        both are intended.
        """
        def __init__(self, target):
            self.target = target
        def check(self, env, x):
            if isinstance(self.target, TypeSimple):
                if self.target.name == "Boolean":
                    return isinstance(x, bool)
                if self.target.name == "Number":
                    return isinstance(x, (int, float))
                if self.target.name == "String":
                    return isinstance(x, str)
            if isinstance(self.target, TypeParameterised):
                if self.target.elementtype.name == "Number":
                    return all(isinstance(t, int) for t in x)
                if self.target.elementtype.name == "Object":
                    return True
            if isinstance(self.target, TypeCompound):
                # Choice value: compare the active tag to the target name.
                return x._choice == self.target.name[1]
    def __init__(self, expr, clauses):
        self.expr = expr
        self.clauses = clauses      # list of (conds-or-None, statements)
    def declare(self, env):
        pass
    def run(self, env):
        expr = self.expr.eval(env)
        for conds, statements in self.clauses:
            # None conds always matches; otherwise any one condition suffices.
            if conds is None or any(c.check(env, expr) for c in conds):
                for s in statements:
                    s.declare(env)
                for s in statements:
                    s.run(env)
                break
class ExitStatement:
    """EXIT statement: leave a labelled loop, the current function, or the
    whole process."""
    def __init__(self, label, arg):
        self.label = label
        self.arg = arg
    def declare(self, env):
        pass
    def run(self, env):
        if self.label == "FUNCTION":
            # Leave the enclosing function with no return value.
            raise ReturnException(None)
        if self.label == "PROCESS":
            # Exit status 1 on FAILURE, 0 otherwise (sys.exit maps True to 1).
            sys.exit(self.arg == "FAILURE")
        # Otherwise unwind to the loop carrying the matching label.
        raise ExitException(self.label)
class ForStatement:
    """FOR statement: counted loop from `start` to `end` by `step`
    (default 1), binding the counter as `var` in a fresh scope.

    NEXT with a matching label skips to the next iteration; EXIT with a
    matching label terminates the loop; either with a different label is
    re-raised to an outer loop.
    """
    def __init__(self, var, start, end, step, label, statements):
        self.var = var
        self.start = start
        self.end = end
        self.step = step
        self.label = label
        self.statements = statements
    def declare(self, env):
        pass
    def run(self, env):
        # Fresh scope so the counter and body declarations don't leak out.
        env = Environment(env)
        for s in self.statements:
            s.declare(env)
        i = self.start.eval(env)
        end = self.end.eval(env)
        step = self.step.eval(env) if self.step else 1
        env.declare(self.var, ClassNumber(), i)
        try:
            # Note: a step of exactly 0 makes both disjuncts false, so the
            # body never runs (avoids an infinite loop).
            while (step > 0 and i <= end) or (step < 0 and i >= end):
                env.set(self.var, i)
                try:
                    for s in self.statements:
                        s.run(env)
                except NextException as x:
                    # NEXT for an outer loop propagates; ours just continues.
                    if x.label != self.label:
                        raise
                i += step
        except ExitException as x:
            # EXIT for an outer loop propagates; ours ends the loop.
            if x.label != self.label:
                raise
        # NOTE(review): rebinding the local `env` here has no effect after
        # return -- presumably vestigial; confirm it can be removed.
        env = env.parent
class ForeachStatement:
def __init__(self, var, array, index, label, statements):
self.var = var
self.array = array
self.index = index
self.label = label
self.statements = statements
def declare(self, env):
pass
def run(self, env):
env = Environment(env)
for s in self.statements:
s.declare(env)
env.declare(self.var, None, None)
if self.index:
env.declare(self.index, ClassNumber(), None)
try:
for i, | |
key='AchievementItems.dat',
),
Field(
name='Flag0',
type='bool',
),
Field(
name='Unknown0',
type='ref|list|int',
),
),
),
'DelveCatchupDepths.dat': File(
fields=(
Field(
name='Unknown0',
type='int',
),
Field(
name='Unknown1',
type='int',
),
),
),
'DelveCraftingModifierDescriptions.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='Description',
type='ref|string',
),
),
),
'DelveCraftingModifiers.dat': File(
fields=(
Field(
name='BaseItemTypesKey',
type='ulong',
key='BaseItemTypes.dat',
),
Field(
name='AddedModsKeys',
type='ref|list|ulong',
key='Mods.dat',
),
Field(
name='NegativeWeight_TagsKeys',
type='ref|list|ulong',
key='Tags.dat',
),
Field(
name='NegativeWeight_Values',
type='ref|list|int',
),
Field(
name='ForcedAddModsKeys',
type='ref|list|ulong',
key='Mods.dat',
),
Field(
name='ForbiddenDelveCraftingTagsKeys',
type='ref|list|ulong',
key='DelveCraftingTags.dat',
),
Field(
name='AllowedDelveCraftingTagsKeys',
type='ref|list|ulong',
key='DelveCraftingTags.dat',
),
Field(
name='CanMirrorItem',
type='bool',
),
Field(
name='CorruptedEssenceChance',
type='int',
),
Field(
name='CanImproveQuality',
type='bool',
),
Field(
name='CanRollEnchant',
type='bool',
),
Field(
name='HasLuckyRolls',
type='bool',
),
Field(
name='SellPrice_ModsKeys',
type='ref|list|ulong',
key='Mods.dat',
),
Field(
name='CanRollWhiteSockets',
type='bool',
),
Field(
name='Weight_TagsKeys',
type='ref|list|ulong',
key='Tags.dat',
),
Field(
name='Weight_Values',
type='ref|list|int',
),
Field(
name='DelveCraftingModifierDescriptionsKeys',
type='ref|list|ulong',
key='DelveCraftingModifierDescriptions.dat',
),
Field(
name='BlockedDelveCraftingModifierDescriptionsKeys',
type='ref|list|ulong',
key='DelveCraftingModifierDescriptions.dat',
),
),
),
'DelveCraftingTags.dat': File(
fields=(
Field(
name='TagsKey',
type='ulong',
key='Tags.dat',
),
Field(
name='ItemClass',
type='ref|string',
),
),
),
'DelveDynamite.dat': File(
fields=(
Field(
name='Unknown0',
type='int',
),
Field(
name='Flare_MiscObjectsKey',
type='ulong',
key='MiscObjects.dat',
),
Field(
name='Key1',
type='ulong',
),
Field(
name='Dynamite_MiscObjectsKey',
type='ulong',
key='MiscObjects.dat',
),
Field(
name='Unknown7',
type='int',
),
Field(
name='Unknown8',
type='int',
),
Field(
name='Unknown9',
type='int',
),
Field(
name='Unknown10',
type='int',
),
Field(
name='Unknown11',
type='int',
),
Field(
name='Unknown12',
type='int',
),
Field(
name='Unknown13',
type='int',
),
Field(
name='MiscAnimatedKey',
type='ulong',
key='MiscAnimated.dat',
),
Field(
name='Unknown16',
type='int',
),
),
),
'DelveFeatures.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='Name',
type='ref|string',
),
Field(
name='SpawnWeight',
type='ref|list|int',
),
Field(
name='WorldAreasKey',
type='ulong',
key='WorldAreas.dat',
),
Field(
name='Image',
type='ref|string',
file_path=True,
),
Field(
name='AchievementItemsKeys',
type='ref|list|ulong',
key='AchievementItems.dat'
),
# Not entirely sure
Field(
name='MinTier',
type='int',
),
Field(
name='Type',
type='ref|string',
),
Field(
name='MinDepth',
type='ref|list|int',
),
Field(
name='Description',
type='ref|string',
),
Field(
name='Unknown0',
type='int',
),
Field(
name='Data1',
type='ref|list|int',
),
Field(
name='Data2',
type='ref|list|int',
),
# 3.8
Field(
name='Data3',
type='ref|list|int',
),
),
),
'DelveFlares.dat': File(
fields=(
Field(
name='Unknown0',
type='int',
),
Field(
name='Unknown1',
type='int',
),
Field(
name='Unknown2',
type='int',
),
Field(
name='Unknown3',
type='int',
),
Field(
name='Unknown4',
type='int',
),
Field(
name='Unknown5',
type='int',
),
Field(
name='Unknown6',
type='int',
),
Field(
name='Unknown7',
type='int',
),
Field(
name='Unknown8',
type='int',
),
Field(
name='Unknown9',
type='int',
),
),
),
'DelveLevelScaling.dat': File(
fields=(
Field(
name='Depth',
type='int',
),
Field(
name='MonsterLevel',
type='int',
),
Field(
name='Unknown1',
type='int',
),
Field(
name='SulphiteCost',
type='int',
),
Field(
name='MonsterLevel2',
type='int',
),
# Probably monster HP/DMG
Field(
name='MoreMonsterLife',
type='int',
),
Field(
name='MoreMonsterDamage',
type='int',
),
Field(
name='DarknessResistance',
type='int',
),
Field(
name='LightRadius',
type='int',
),
Field(
name='Unknown4',
type='int',
),
Field(
name='Unknown5',
type='int',
),
Field(
name='Unknown6',
type='int',
),
Field(
name='Unknown7',
type='int',
),
Field(
name='Unknown8',
type='int',
),
),
),
'DelveMonsterSpawners.dat': File(
fields=(
Field(
name='BaseMetadata',
type='ref|string',
),
Field(
name='Unknown1',
type='int',
),
Field(
name='Unknown2',
type='ref|list|ulong',
),
Field(
name='Unknown4',
type='int',
),
Field(
name='Unknown5',
type='int',
),
Field(
name='Unknown6',
type='int',
),
Field(
name='Unknown7',
type='int',
),
Field(
name='Unknown8',
type='int',
),
Field(
name='Unknown9',
type='int',
),
Field(
name='Unknown10',
type='int',
),
Field(
name='Unknown11',
type='int',
),
Field(
name='Flag0',
type='byte',
),
Field(
name='Flag1',
type='byte',
),
Field(
name='Flag2',
type='byte',
),
Field(
name='Flag3',
type='byte',
),
Field(
name='Flag4',
type='byte',
),
Field(
name='Flag5',
type='byte',
),
Field(
name='Flag6',
type='byte',
),
Field(
name='Flag7',
type='byte',
),
Field(
name='Flag8',
type='byte',
),
Field(
name='Flag9',
type='byte',
),
Field(
name='Unknown14',
type='int',
),
Field(
name='Unknown15',
type='int',
),
Field(
name='Unknown16',
type='int',
),
Field(
name='Unknown17',
type='int',
),
Field(
name='Unknown18',
type='int',
),
Field(
name='Flag10',
type='byte',
),
Field(
name='Flag11',
type='byte',
),
Field(
name='Unknown19',
type='int',
),
Field(
name='Script',
type='ref|string',
),
Field(
name='Flag12',
type='byte',
),
),
),
'DelveResourcePerLevel.dat': File(
fields=(
Field(
name='AreaLevel',
type='int',
),
Field(
name='Sulphite',
type='int',
),
),
),
'DelveRooms.dat': File(
fields=(
Field(
name='DelveBiomesKey',
type='ulong',
key='DelveBiomes.dat',
),
Field(
name='DelveFeaturesKey',
type='ulong',
key='DelveFeatures.dat',
),
Field(
name='ARMFile',
type='ref|string',
file_path=True,
file_ext='.arm',
),
),
),
'DelveStashTabLayout.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='BaseItemTypesKey',
type='ulong',
key='BaseItemTypes.dat',
),
Field(
name='X',
type='int',
),
Field(
name='Y',
type='int',
),
Field(
name='IntId',
type='int',
unique=True,
),
Field(
name='Width',
type='int',
),
Field(
name='Height',
type='int',
),
Field(
name='StackSize',
type='int',
),
),
),
'DelveUpgradeType.dat': File(
fields=(
),
),
'DelveUpgrades.dat': File(
fields=(
Field(
name='DelveUpgradeTypeKey',
type='int',
# key='DelveUpgradeType.dat',
enum="DELVE_UPGRADE_TYPE",
),
Field(
name='UpgradeLevel',
type='int',
),
Field(
name='StatsKeys',
type='ref|list|ulong',
key='Stats.dat',
),
Field(
name='StatValues',
type='ref|list|int',
),
Field(
name='Cost',
type='int',
),
Field(
name='Unknown2',
type='int',
),
Field(
name='AchievementItemsKey',
type='ulong',
key='AchievementItems.dat'
),
Field(
name='Unknown3',
type='int',
),
),
virtual_fields=(
VirtualField(
name='Stats',
fields=('StatsKeys', 'StatValues'),
zip=True,
),
),
),
'DescentStarterChest.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='CharactersKey',
type='ulong',
key='Characters.dat',
),
Field(
name='BaseItemTypesKey',
type='ulong',
key='BaseItemTypes.dat',
),
Field(
name='SocketColours',
# TODO Virtual for constants.SOCKET_COLOUR
type='ref|string',
),
Field(
name='WorldAreasKey',
type='ulong',
key='WorldAreas.dat',
),
),
),
'DialogueEvent.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='Timer',
type='int',
),
),
),
'Directions.dat': File(
fields=(
),
),
'DisplayMinionMonsterType.dat': File(
fields=(
Field(
name='Id',
type='int',
unique=True,
),
Field(
name='MonsterVarietiesKey',
type='ulong',
key='MonsterVarieties.dat',
),
),
),
'DivinationCardArt.dat': File(
fields=(
Field(
name='BaseItemTypesKey',
type='ulong',
key='BaseItemTypes.dat',
),
Field(
name='VirtualFile',
type='ref|string',
file_path=True,
),
#3.11
Field(
name='Unknown2',
type='ref|list|int',
),
),
),
'DivinationCardStashTabLayout.dat': File(
fields=(
Field(
name='BaseItemTypesKey',
type='ulong',
key='BaseItemTypes.dat',
unique=True,
),
Field(
name='IsEnabled',
type='bool',
),
),
),
'Doors.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='Flag0',
type='byte',
),
),
),
'DropEffects.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique='True',
),
Field(
name='AOFile',
type='ref|string',
file_path=True,
file_ext='.ao',
),
),
),
'DropPool.dat': File(
fields=(
Field(
name='Group',
type='ref|string',
unique=True,
),
Field(
name='Weight',
type='int',
),
Field(
name='Data0',
type='ref|list|int',
),
),
),
'EclipseMods.dat': File(
fields=(
Field(
name='Key',
type='ref|string',
unique=True,
),
Field(
name='SpawnWeight_TagsKeys',
type='ref|list|ulong',
key='Tags.dat',
),
Field(
name='SpawnWeight_Values',
type='ref|list|int',
),
Field(
name='ModsKey',
type='ulong',
key='Mods.dat',
),
Field(
name='MinLevel',
type='int',
),
Field(
name='MaxLevel',
type='int',
),
Field(
name='IsPrefix',
type='bool',
),
),
),
'EffectDrivenSkill.dat': File(
fields=(
Field(
name='Unknown0',
type='int',
),
Field(
name='Unknown1',
type='ref|list|ulong',
),
Field(
name='Unknown2',
type='int',
),
Field(
name='Unknown3',
type='int',
),
Field(
name='Unknown4',
type='int',
),
Field(
name='Unknown5',
type='int',
),
Field(
name='Flag0',
type='byte',
),
Field(
name='Flag1',
type='byte',
),
Field(
name='Unknown6',
type='int',
),
Field(
name='Flag2',
type='byte',
),
Field(
name='Unknown7',
type='int',
),
Field(
name='Unknown8',
type='int',
),
Field(
name='Unknown9',
type='int',
),
Field(
name='Unknown10',
type='int',
),
Field(
name='Unknown11',
type='int',
),
Field(
name='Unknown12',
type='int',
),
Field(
name='Flag3',
type='byte',
),
Field(
name='Unknown13',
type='int',
),
Field(
name='Flag4',
type='byte',
),
Field(
name='Flag5',
type='byte',
),
Field(
name='Unknown14',
type='int',
),
Field(
name='Unknown15',
type='int',
),
Field(
name='Flag6',
type='byte',
),
Field(
name='Flag7',
type='byte',
),
Field(
name='Flag8',
type='byte',
),
Field(
name='Unknown16',
type='int',
),
Field(
name='Flag9',
type='byte',
),
# 3.11
Field(
name='Flag10',
type='byte',
),
),
),
'Effectiveness.dat': File(
fields=(
),
),
'EffectivenessCostConstants.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='Multiplier',
type='float',
description='Rounded',
display_type='{0:.6f}',
),
),
),
'EinharMissions.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
),
Field(
name='Unknown1',
type='int',
),
Field(
name='Unknown2',
type='int',
),
Field(
name='Unknown3',
type='int',
),
Field(
name='Unknown4',
type='int',
),
Field(
name='Unknown5',
type='int',
),
Field(
name='Unknown6',
type='int',
),
Field(
name='Unknown7',
type='int',
),
),
),
'EinharPackFallback.dat': File(
fields=(
Field(
name='Key0',
type='ulong',
),
Field(
name='Keys0',
type='ref|list|ulong',
),
),
),
'ElderBossArenas.dat': File(
fields=(
Field(
name='WorldAreasKey',
type='ulong',
key='WorldAreas.dat',
unique=True,
),
Field(
name='Unknown1',
type='int',
),
Field(
name='AchievementItemsKeys',
type='ref|list|ulong',
key='AchievementItems.dat',
),
),
),
'ElderMapBossOverride.dat': File(
fields=(
Field(
name='WorldAreasKey',
type='ulong',
key='WorldAreas.dat',
unique=True,
),
Field(
name='MonsterVarietiesKeys',
type='ref|list|ulong',
key='MonsterVarieties.dat',
),
Field(
name='TerrainMetadata',
type='ref|string',
file_path=True,
),
),
),
'EndlessLedgeChests.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='WorldAreasKey',
type='ulong',
key='WorldAreas.dat',
),
Field(
name='BaseItemTypesKeys',
type='ref|list|ulong',
key='BaseItemTypes.dat',
),
Field(
name='SocketColour',
# TODO Virtual constants.SOCKET_COLOUR
type='ref|string',
),
),
),
'EnvironmentTransitions.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
),
Field(
name='OTFiles',
type='ref|list|ref|string',
file_path=True,
file_ext='.ot',
),
Field(
name='Data0',
type='ref|list|ref|string',
),
),
),
'Environments.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='Base_ENVFile',
type='ref|string',
file_path=True,
file_ext='.env',
),
Field(
name='Corrupted_ENVFile',
type='ref|string',
file_path=True,
file_ext='.env',
),
Field(
name='Unknown1',
type='int',
),
Field(
name='Unknown2',
type='int',
),
Field(
name='Unknown3',
type='int',
),
Field(
name='Unknown4',
type='int',
),
Field(
name='EnvironmentTransitionsKey',
type='ulong',
key='EnvironmentTransitions.dat',
),
# 3.7
Field(
name='Key0',
type='ulong',
),
),
),
'EssenceStashTabLayout.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='BaseItemTypesKey',
type='ulong',
key='BaseItemTypes.dat',
),
Field(
name='X',
type='int',
),
Field(
name='Y',
type='int',
),
Field(
name='IntId',
type='int',
unique=True,
),
Field(
name='SlotWidth',
type='int',
),
Field(
name='SlotHeight',
type='int',
),
Field(
name='IsUpgradableEssenceSlot',
type='bool',
),
),
),
'EssenceType.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
),
Field(
name='EssenceType',
type='int',
),
Field(
name='IsCorruptedEssence',
type='bool',
),
Field(
name='WordsKey',
type='ulong',
key='Words.dat',
),
),
),
'Essences.dat': File(
fields=(
Field(
name='BaseItemTypesKey',
type='ulong',
key='BaseItemTypes.dat',
unique=True,
),
Field(
name='Unknown1',
type='ulong',
),
Field(
name='Unknown2',
type='ulong',
),
Field(
name='Unknown3',
type='ulong',
),
Field(
name='Unknown4',
type='ulong',
),
Field(
name='Unknown5',
type='ulong',
),
Field(
name='Unknown6',
type='ulong',
),
Field(
name='Unknown7',
type='ulong',
),
Field(
name='Unknown8',
type='ulong',
),
Field(
name='Unknown9',
type='ulong',
),
Field(
name='Unknown10',
type='ulong',
),
Field(
name='Unknown11',
type='ulong',
),
Field(
name='Display_Wand_ModsKey',
type='ulong',
key='Mods.dat',
),
Field(
name='Display_Bow_ModsKey',
type='ulong',
key='Mods.dat',
),
Field(
name='Display_Quiver_ModsKey',
| |
np.log(series / (1 - series))
else:
return np.exp(series) / (1 + np.exp(series))
if self.link == "Identity":
return series
def extract_params(self):
""" Returns the summary statistics from the statsmodel GLM.
"""
summary = pd.read_html(
self.results.summary().__dict__["tables"][1].as_html(), header=0
)[0].iloc[:, 0]
out = pd.DataFrame()
out["field"] = summary.str.split(",").str[0].str.replace("C\(", "")
out["value"] = summary.str.split(".").str[1].astype(str).str.replace("]", "")
out["param"] = self.results.__dict__["_results"].__dict__["params"]
# Assumes normal distribution of the parameters
out["CI offset"] = (
self.results.__dict__["_results"].__dict__["_cache"]["bse"]
* 1.95996398454005
)
return out
def score_detail(self, data, key_column):
""" Gets score detail for factor transparency
DO NOT USE for anything other than Log Link
Also not tested on Interactions"""
source_fields = self.formula["source_fields"]
intercept_index = (
self.base_dict[source_fields[0]].replace("'", "")
if type(self.base_dict[source_fields[0]]) is str
else self.base_dict[source_fields[0]]
)
intercept = self._link_transform(self.extract_params()[self.extract_params()['field']=='Intercept']['param'].values,
transform_to='predicted value')
out_data = pd.DataFrame()
out_data[key_column] = data[key_column]
for item in source_fields:
out_data[item + " value"] = data[item]
out_data[item + " model"] = np.round(
self._link_transform(data[item].map(dict(self.PDP[item]["Model"])),
transform_to='predicted value') / intercept , 4
)
out_data["Total model"] = np.round(
np.product(
out_data[[item for item in out_data.columns if item[-5:] == "model"]],
axis=1,
),
4,
)
return out_data
def create_variate(self, name, source, degree, dictionary={}):
"""method to create a variate, i.e. a polynomial smoothing of a simple factor
Parameters:
name : str
A unique name for the variate being created
source : str
The column in `data` to which you want to apply polynomial smoothing
degree : int
The number of degrees you want in the polynomial
dictionary: dict
A mapping of the original column values to variate arguments. The
values of the dictionary must be numeric.
"""
sub_dict = {}
Z, norm2, alpha = self._ortho_poly_fit(
x=self.data[source], degree=degree, dictionary=dictionary
)
Z = pd.DataFrame(Z)
Z.columns = [name + "_p" + str(idx) for idx in range(degree + 1)]
sub_dict["Z"] = Z
sub_dict["norm2"] = norm2
sub_dict["alpha"] = alpha
sub_dict["source"] = source
sub_dict["degree"] = degree
sub_dict["dictionary"] = dictionary
self.variates[name] = sub_dict
def create_custom(self, name, source, dictionary):
"""method to bin levels of a simple factor to a more aggregate level
Parameters:
name : str
A unique name for the custom feature being created
source : str
The column in `data` to which you want to apply custom feature
dictionary: dict
A mapping of the original column values to custom binning."""
temp = self.data[source].map(dictionary).rename(name)
self.base_dict[name] = self._set_base_level(temp,{})
self.customs[name] = {"source": source, "Z": temp, "dictionary": dictionary}
def fit(self, simple=[], customs=[], variates=[], interactions=[], offsets=[]):
"""Method to fit the GLM using the statsmodels packageself.
Parameters:
simple : list
A list of all of the simple features you want fitted in the model.
customs : list
A list of all of the custom features you want fitted in the model.
variates : list
A list of all of the variates you want fitted in the model.
interactions : list
A list of all of the interactions you want fitted in the model.
offsets : list
A list of all of the offsets you want fitted in the model.
"""
link_dict = {
"Identity": sm.families.links.identity,
"Log": sm.families.links.log,
"Logit": sm.families.links.logit,
"Square": sm.families.links.sqrt,
"Probit": sm.families.links.probit,
"Cauchy": sm.families.links.cauchy,
"Cloglog": sm.families.links.cloglog,
"Inverse": sm.families.links.inverse_power,
}
link = link_dict[self.link]
if self.family == "Poisson":
error_term = sm.families.Poisson(link)
elif self.family == "Binomial":
error_term = sm.families.Binomial(link)
elif self.family == "Normal":
error_term = sm.families.Gaussian(link)
elif self.family == "Gaussian":
error_term = sm.families.Gaussian(link)
elif self.family == "Gamma":
error_term = sm.families.Gamma(link)
elif self.family == "Tweedie":
error_term = sm.families.Tweedie(link, self.tweedie_var_power)
self.set_formula(
simple=simple,
customs=customs,
variates=variates,
interactions=interactions,
offsets=offsets,
)
self.transformed_data = self.transform_data()
self.model = sm.GLM.from_formula(
formula=self.formula["formula"],
data=self.transformed_data,
family=error_term,
freq_weights=self.transformed_data[self.weight],
offset=self.transformed_data["offset"],
)
self.results = self.model.fit(scale=self.scale)
fitted = (
self.results.predict(
self.transformed_data, offset=self.transformed_data["offset"]
)
* self.transformed_data[self.weight]
)
fitted.name = "Fitted Avg"
self.transformed_data = pd.concat((self.transformed_data, fitted), axis=1)
self.fitted_factors = {
"simple": simple,
"customs": customs,
"variates": variates,
"interactions": interactions,
"offsets": offsets,
}
self._set_PDP()
def set_formula(
self, simple=[], customs=[], variates=[], interactions=[], offsets=[]
):
"""
Sets the Patsy Formula for the GLM.
Todo:
Custom factors need a base level
"""
simple_str = " + ".join(
["C(" + item + ", Treatment(reference="
+ str(self.base_dict[item]) + "))"
for item in simple])
variate_str = " + ".join(
[" + ".join(self.variates[item]["Z"].columns[1:])
for item in variates])
custom_str = " + ".join(
["C(" + item + ", Treatment(reference=" + str(self.base_dict[item])
+ "))" for item in customs])
interaction_str = " + ".join([self.interactions[item] for item in interactions])
if simple_str != "" and variate_str != "":
variate_str = " + " + variate_str
if simple_str + variate_str != "" and custom_str != "":
custom_str = " + " + custom_str
# Only works for simple factors
if simple_str + variate_str + custom_str != "" and interaction_str != "":
interaction_str = " + " + interaction_str
self.formula["simple"] = simple
self.formula["customs"] = customs
self.formula["variates"] = variates
self.formula["interactions"] = interactions
self.formula["offsets"] = offsets
self.formula["formula"] = (
"_response ~ " + simple_str + variate_str + custom_str + interaction_str
)
# Intercept only model
if simple_str + variate_str + custom_str + interaction_str == "":
self.formula["formula"] = self.formula["formula"] + "1"
self.formula["source_fields"] = list(
set(
self.formula["simple"]
+ [self.customs[item]["source"] for item in self.formula["customs"]]
+ [self.variates[item]["source"] for item in self.formula["variates"]]
+ [self.offsets[item]["source"] for item in self.formula["offsets"]]
)
)
def transform_data(self, data=None):
"""Method to add any customs, variates, interactions, and offsets to a
generic dataset so that it can be used in the GLM object"""
if data is None:
# Used for training dataset
transformed_data = self.data[
self.independent + [self.weight] + [self.dependent]
]
transformed_data = copy.deepcopy(transformed_data)
transformed_data["_response"] = (
transformed_data[self.dependent] / transformed_data[self.weight]
)
for i in range(len(self.formula["variates"])):
transformed_data = pd.concat(
(transformed_data, self.variates[self.formula["variates"][i]]["Z"]),
axis=1,
)
for i in range(len(self.formula["customs"])):
transformed_data = pd.concat(
(transformed_data, self.customs[self.formula["customs"][i]]["Z"]),
axis=1,
)
transformed_data["offset"] = 0
if len(self.formula["offsets"]) > 0:
offset = self.offsets[self.formula["offsets"][0]][
"Z"
] # This works for train, but need to apply to test
for i in range(len(self.formula["offsets"]) - 1):
offset = (
offset + self.offsets[self.formula["offsets"][i + 1]]["Z"]
) # This works for train, but need to apply to test
transformed_data["offset"] = offset
else:
transformed_data = data[
list(
set(data.columns).intersection(
self.independent + [self.weight] + [self.dependent]
)
)
]
for i in range(len(self.formula["variates"])):
name = self.formula["variates"][i]
temp = pd.DataFrame(
self._ortho_poly_predict(
x=data[self.variates[name]["source"]].str.replace("'","").map(
self.variates[name]["dictionary"]
),
variate=name,
),
columns=[
name + "_p" + str(idx)
for idx in range(self.variates[name]["degree"] + 1)
],
)
transformed_data = pd.concat((transformed_data, temp), axis=1)
for i in range(len(self.formula["customs"])):
name = self.formula["customs"][i]
temp = data[self.customs[name]["source"]].str.replace("'","").map(
self.customs[name]["dictionary"]
)
temp.name = name
transformed_data = pd.concat((transformed_data, temp), axis=1)
transformed_data = transformed_data.copy()
transformed_data["offset"] = 0
if len(self.formula["offsets"]) > 0:
temp = data[self.offsets[self.formula["offsets"][0]]["source"]].str.replace("'","").map(
self.offsets[self.formula["offsets"][0]]["dictionary"]
)
temp = self._link_transform(temp)
for i in range(len(self.formula["offsets"]) - 1):
try:
offset = data[
self.offsets[self.formula["offsets"][i + 1]]["source"].str.replace("'","")
].map(
self.offsets[self.formula["offsets"][i + 1]]["dictionary"]
) # This works for train, but need to apply to test
except:
offset = data[
self.offsets[self.formula["offsets"][i + 1]]["source"]
].map(
self.offsets[self.formula["offsets"][i + 1]]["dictionary"]
) # This works for train, but need to apply to test
temp = temp + self._link_transform(offset)
transformed_data["offset"] = temp
return transformed_data
def predict(self, data=None):
"""Makes predicitons off of the fitted GLM"""
if isinstance(data, pd.Series):
data = data.to_frame().T
data = self.transform_data(data)
fitted = self.results.predict(data, offset=data["offset"])
fitted.name = "Fitted Avg"
return pd.concat((data, fitted), axis=1)
# User callable
def create_interaction(self, name, interaction):
""" Creates an interaction term to be fit in the GLM"""
temp = {
**{item: "simple" for item in self.independent},
**{item: "variate" for item in self.variates.keys()},
**{item: "custom" for item in self.customs.keys()},
}
interaction_type = [temp.get(item) for item in interaction]
transformed_interaction = copy.deepcopy(interaction)
for i in range(len(interaction)):
if interaction_type[i] == "variate":
transformed_interaction[i] = list(
self.variates[interaction[i]]["Z"].columns
)
elif interaction_type[i] == "custom":
transformed_interaction[i] = list(
self.customs[interaction[i]]["Z"].columns
)
else:
transformed_interaction[i] = [interaction[i]]
# Only designed to work with 2-way interaction
self.interactions[name] = " + ".join(
[
val1 + ":" + val2
for val1 in transformed_interaction[0]
for val2 in transformed_interaction[1]
]
)
def create_offset(self, name, source, dictionary):
""" Creates an offset term to be fit in the GLM"""
self.data
temp = self.data[source].map(dictionary)
rescale = sum(self.data[self.weight] * temp) / sum(self.data[self.weight])
temp = temp / rescale
# This assumes that offset values are put in on real terms and not on linear predictor terms
# We may make the choice of linear predictor and predicted value as a future argument
temp | |
State.SCHEDULED
ti1_3.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
session=session)
self.assertEqual(1, len(res))
def test_change_state_for_executable_task_instances_no_tis(self):
scheduler = SchedulerJob()
session = settings.Session()
res = scheduler._change_state_for_executable_task_instances(
[], session)
self.assertEqual(0, len(res))
def test_change_state_for_executable_task_instances_no_tis_with_state(self):
dag_id = 'SchedulerJobTest.test_change_state_for__no_tis_with_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dr1 = dag_file_processor.create_dag_run(dag)
dr2 = dag_file_processor.create_dag_run(dag)
dr3 = dag_file_processor.create_dag_run(dag)
ti1 = TaskInstance(task1, dr1.execution_date)
ti2 = TaskInstance(task1, dr2.execution_date)
ti3 = TaskInstance(task1, dr3.execution_date)
ti1.state = State.RUNNING
ti2.state = State.RUNNING
ti3.state = State.RUNNING
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._change_state_for_executable_task_instances(
[ti1, ti2, ti3],
session)
self.assertEqual(0, len(res))
def test_enqueue_task_instances_with_queued_state(self):
dag_id = 'SchedulerJobTest.test_enqueue_task_instances_with_queued_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dr1 = dag_file_processor.create_dag_run(dag)
ti1 = TaskInstance(task1, dr1.execution_date)
session.merge(ti1)
session.commit()
with patch.object(BaseExecutor, 'queue_command') as mock_queue_command:
scheduler._enqueue_task_instances_with_queued_state(dagbag, [ti1])
assert mock_queue_command.called
def test_execute_task_instances_nothing(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_nothing'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = SimpleDagBag([])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dr1 = dag_file_processor.create_dag_run(dag)
ti1 = TaskInstance(task1, dr1.execution_date)
ti1.state = State.SCHEDULED
session.merge(ti1)
session.commit()
self.assertEqual(0, scheduler._execute_task_instances(dagbag))
    def test_execute_task_instances(self):
        """Scheduler respects DAG concurrency: with concurrency=3 and two TIs
        already RUNNING, only one of the two SCHEDULED TIs gets queued."""
        dag_id = 'SchedulerJobTest.test_execute_task_instances'
        task_id_1 = 'dummy_task'
        task_id_2 = 'dummy_task_nonexistent_queue'
        # important that len(tasks) is less than concurrency
        # because before scheduler._execute_task_instances would only
        # check the num tasks once so if concurrency was 3,
        # we could execute arbitrarily many tasks in the second run
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
        task1 = DummyOperator(dag=dag, task_id=task_id_1)
        task2 = DummyOperator(dag=dag, task_id=task_id_2)
        dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
        dagbag = self._make_simple_dag_bag([dag])
        dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
        scheduler = SchedulerJob()
        session = settings.Session()
        # First dag run: both task instances forced to RUNNING, consuming
        # 2 of the 3 concurrency slots.
        dr1 = dag_file_processor.create_dag_run(dag)
        ti1 = TaskInstance(task1, dr1.execution_date)
        ti2 = TaskInstance(task2, dr1.execution_date)
        ti1.refresh_from_db()
        ti2.refresh_from_db()
        ti1.state = State.RUNNING
        ti2.state = State.RUNNING
        session.merge(ti1)
        session.merge(ti2)
        session.commit()
        self.assertEqual(State.RUNNING, dr1.state)
        self.assertEqual(
            2,
            DAG.get_num_task_instances(
                dag_id, dag.task_ids, states=[State.RUNNING], session=session
            )
        )
        # create second dag run
        dr2 = dag_file_processor.create_dag_run(dag)
        ti3 = TaskInstance(task1, dr2.execution_date)
        ti4 = TaskInstance(task2, dr2.execution_date)
        ti3.refresh_from_db()
        ti4.refresh_from_db()
        # manually set to scheduled so we can pick them up
        ti3.state = State.SCHEDULED
        ti4.state = State.SCHEDULED
        session.merge(ti3)
        session.merge(ti4)
        session.commit()
        self.assertEqual(State.RUNNING, dr2.state)
        res = scheduler._execute_task_instances(dagbag)
        # check that concurrency is respected
        ti1.refresh_from_db()
        ti2.refresh_from_db()
        ti3.refresh_from_db()
        ti4.refresh_from_db()
        # Only 3 TIs may be RUNNING/QUEUED in total (2 running + 1 queued).
        self.assertEqual(
            3,
            DAG.get_num_task_instances(
                dag_id, dag.task_ids, states=[State.RUNNING, State.QUEUED], session=session
            )
        )
        self.assertEqual(State.RUNNING, ti1.state)
        self.assertEqual(State.RUNNING, ti2.state)
        # Exactly one of ti3/ti4 was queued; the other stays SCHEDULED.
        six.assertCountEqual(self, [State.QUEUED, State.SCHEDULED], [ti3.state, ti4.state])
        self.assertEqual(1, res)
def test_execute_task_instances_limit(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_limit'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_2'
# important that len(tasks) is less than concurrency
# because before scheduler._execute_task_instances would only
# check the num tasks once so if concurrency was 3,
# we could execute arbitrarily many tasks in the second run
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
scheduler.max_tis_per_query = 3
session = settings.Session()
tis = []
for _ in range(0, 4):
dr = dag_file_processor.create_dag_run(dag)
ti1 = TaskInstance(task1, dr.execution_date)
ti2 = TaskInstance(task2, dr.execution_date)
tis.append(ti1)
tis.append(ti2)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.commit()
res = scheduler._execute_task_instances(dagbag)
self.assertEqual(8, res)
for ti in tis:
ti.refresh_from_db()
self.assertEqual(State.QUEUED, ti.state)
    @pytest.mark.quarantined
    @pytest.mark.xfail(condition=True, reason="The test is flaky with nondeterministic result")
    def test_change_state_for_tis_without_dagrun(self):
        """TIs whose dag run is RUNNING keep their state; TIs with no dag run
        (dag3) or a FAILED dag run are reset to the new state (NONE)."""
        dag1 = DAG(dag_id='test_change_state_for_tis_without_dagrun', start_date=DEFAULT_DATE)
        DummyOperator(task_id='dummy', dag=dag1, owner='airflow')
        DummyOperator(task_id='dummy_b', dag=dag1, owner='airflow')
        dag1 = SerializedDAG.from_dict(SerializedDAG.to_dict(dag1))
        dag2 = DAG(dag_id='test_change_state_for_tis_without_dagrun_dont_change', start_date=DEFAULT_DATE)
        DummyOperator(task_id='dummy', dag=dag2, owner='airflow')
        dag2 = SerializedDAG.from_dict(SerializedDAG.to_dict(dag2))
        # dag3 deliberately gets no dag run created below.
        dag3 = DAG(dag_id='test_change_state_for_tis_without_dagrun_no_dagrun', start_date=DEFAULT_DATE)
        DummyOperator(task_id='dummy', dag=dag3, owner='airflow')
        dag3 = SerializedDAG.from_dict(SerializedDAG.to_dict(dag3))
        session = settings.Session()
        dr1 = dag1.create_dagrun(run_type=DagRunType.SCHEDULED,
                                 state=State.RUNNING,
                                 execution_date=DEFAULT_DATE,
                                 start_date=DEFAULT_DATE,
                                 session=session)
        dr2 = dag2.create_dagrun(run_type=DagRunType.SCHEDULED,
                                 state=State.RUNNING,
                                 execution_date=DEFAULT_DATE,
                                 start_date=DEFAULT_DATE,
                                 session=session)
        ti1a = dr1.get_task_instance(task_id='dummy', session=session)
        ti1a.state = State.SCHEDULED
        ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
        ti1b.state = State.SUCCESS
        session.commit()
        ti2 = dr2.get_task_instance(task_id='dummy', session=session)
        ti2.state = State.SCHEDULED
        session.commit()
        # ti3 has no dag run at all.
        ti3 = TaskInstance(dag3.get_task('dummy'), DEFAULT_DATE)
        ti3.state = State.SCHEDULED
        session.merge(ti3)
        session.commit()
        dagbag = self._make_simple_dag_bag([dag1, dag2, dag3])
        scheduler = SchedulerJob(num_runs=0)
        scheduler._change_state_for_tis_without_dagrun(
            simple_dag_bag=dagbag,
            old_states=[State.SCHEDULED, State.QUEUED],
            new_state=State.NONE,
            session=session)
        # dr1 is RUNNING, so its TIs are untouched.
        ti1a = dr1.get_task_instance(task_id='dummy', session=session)
        ti1a.refresh_from_db(session=session)
        self.assertEqual(ti1a.state, State.SCHEDULED)
        ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
        ti1b.refresh_from_db(session=session)
        self.assertEqual(ti1b.state, State.SUCCESS)
        ti2 = dr2.get_task_instance(task_id='dummy', session=session)
        ti2.refresh_from_db(session=session)
        self.assertEqual(ti2.state, State.SCHEDULED)
        # ti3 has no dag run and was SCHEDULED -> reset to NONE.
        ti3.refresh_from_db(session=session)
        self.assertEqual(ti3.state, State.NONE)
        # Now fail dr1 and re-run: a FAILED dag run counts as "without dagrun".
        dr1.refresh_from_db(session=session)
        dr1.state = State.FAILED
        # why o why
        session.merge(dr1)
        session.commit()
        scheduler._change_state_for_tis_without_dagrun(
            simple_dag_bag=dagbag,
            old_states=[State.SCHEDULED, State.QUEUED],
            new_state=State.NONE,
            session=session)
        ti1a.refresh_from_db(session=session)
        self.assertEqual(ti1a.state, State.SCHEDULED)
        # don't touch ti1b
        ti1b.refresh_from_db(session=session)
        self.assertEqual(ti1b.state, State.SUCCESS)
        # don't touch ti2
        ti2.refresh_from_db(session=session)
        self.assertEqual(ti2.state, State.SCHEDULED)
    def test_change_state_for_tasks_failed_to_execute(self):
        """TIs left behind in executor.queued_tasks are reset from QUEUED back
        to SCHEDULED; TIs in any other state (e.g. RUNNING) are untouched."""
        dag = DAG(
            dag_id='dag_id',
            start_date=DEFAULT_DATE)
        task = DummyOperator(
            task_id='task_id',
            dag=dag,
            owner='airflow')
        dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
        # If there's no left over task in executor.queued_tasks, nothing happens
        session = settings.Session()
        scheduler_job = SchedulerJob()
        mock_logger = mock.MagicMock()
        # do_update=False keeps the mock executor from draining its queue.
        test_executor = MockExecutor(do_update=False)
        scheduler_job.executor = test_executor
        scheduler_job._logger = mock_logger
        scheduler_job._change_state_for_tasks_failed_to_execute()  # pylint: disable=no-value-for-parameter
        mock_logger.info.assert_not_called()
        # Tasks failed to execute with QUEUED state will be set to SCHEDULED state.
        session.query(TaskInstance).delete()
        session.commit()
        # Executor keys are (dag_id, task_id, execution_date, try_number).
        key = 'dag_id', 'task_id', DEFAULT_DATE, 1
        test_executor.queued_tasks[key] = 'value'
        ti = TaskInstance(task, DEFAULT_DATE)
        ti.state = State.QUEUED
        session.merge(ti)  # pylint: disable=no-value-for-parameter
        session.commit()
        scheduler_job._change_state_for_tasks_failed_to_execute()  # pylint: disable=no-value-for-parameter
        ti.refresh_from_db()
        self.assertEqual(State.SCHEDULED, ti.state)
        # Tasks failed to execute with RUNNING state will not be set to SCHEDULED state.
        session.query(TaskInstance).delete()
        session.commit()
        ti.state = State.RUNNING
        session.merge(ti)
        session.commit()
        scheduler_job._change_state_for_tasks_failed_to_execute()  # pylint: disable=no-value-for-parameter
        ti.refresh_from_db()
        self.assertEqual(State.RUNNING, ti.state)
    def test_reset_state_for_orphaned_tasks(self):
        """Orphaned SCHEDULED TIs in scheduled runs are reset to NONE, but TIs
        belonging to backfill runs are left alone."""
        session = settings.Session()
        dag = DAG(
            'test_execute_helper_reset_orphaned_tasks',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})
        with dag:
            op1 = DummyOperator(task_id='op1')
        dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
        dag.clear()
        # One scheduled run and one backfill run, both RUNNING.
        dr = dag.create_dagrun(run_type=DagRunType.SCHEDULED,
                               state=State.RUNNING,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE,
                               session=session)
        dr2 = dag.create_dagrun(run_type=DagRunType.BACKFILL_JOB,
                                state=State.RUNNING,
                                execution_date=DEFAULT_DATE + datetime.timedelta(1),
                                start_date=DEFAULT_DATE,
                                session=session)
        ti = dr.get_task_instance(task_id=op1.task_id, session=session)
        ti.state = State.SCHEDULED
        ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
        ti2.state = State.SCHEDULED
        session.commit()
        processor = mock.MagicMock()
        scheduler = SchedulerJob(num_runs=0)
        scheduler.processor_agent = processor
        scheduler.reset_state_for_orphaned_tasks()
        # Scheduled-run TI is orphaned and reset; backfill-run TI is not.
        ti = dr.get_task_instance(task_id=op1.task_id, session=session)
        self.assertEqual(ti.state, State.NONE)
        ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
        self.assertEqual(ti2.state, State.SCHEDULED)
@parameterized.expand([
[State.UP_FOR_RETRY, State.FAILED],
[State.QUEUED, State.NONE],
[State.SCHEDULED, State.NONE],
[State.UP_FOR_RESCHEDULE, State.NONE],
])
def test_scheduler_loop_should_change_state_for_tis_without_dagrun(self,
initial_task_state,
expected_task_state):
session = settings.Session()
dag = DAG(
'test_execute_helper_should_change_state_for_tis_without_dagrun',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
# Create DAG run with FAILED state
dag.clear()
dr = dag.create_dagrun(run_type=DagRunType.SCHEDULED,
state=State.FAILED,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = initial_task_state
session.commit()
# Create scheduler and mock calls to processor. Run duration is set
# to a high value to ensure loop is entered. Poll interval is 0 to
# avoid sleep. Done flag is set to true to exist the loop immediately.
scheduler = SchedulerJob(num_runs=0, processor_poll_interval=0)
executor = MockExecutor(do_update=False)
executor.queued_tasks
scheduler.executor = executor
processor = mock.MagicMock()
processor.harvest_simple_dags.return_value = [dag]
processor.done = True
scheduler.processor_agent = processor
scheduler._run_scheduler_loop()
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti.state, expected_task_state)
    @provide_session
    def evaluate_dagrun(
            self,
            dag_id,
            expected_task_states,  # dict of task_id: state
            dagrun_state,
            run_kwargs=None,
            advance_execution_date=False,
            session=None):  # pylint: disable=unused-argument
        """
        Helper for testing DagRun states with simple two-task DAGS.
        This is hackish: a dag run is created but its tasks are
        run by a backfill.

        Parameters:
            dag_id: dag under test (looked up in self.dagbag).
            expected_task_states: dict of task_id -> expected terminal State;
                tasks expected to be FAILED are forced to fail via the
                null executor's mock_task_fail.
            dagrun_state: expected final State of the dag run itself.
            run_kwargs: extra kwargs forwarded to dag.run().
            advance_execution_date: create a second dag run so the evaluated
                run falls after the dag's start_date.
        """
        if run_kwargs is None:
            run_kwargs = {}
        dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
        dag = self.dagbag.get_dag(dag_id)
        dr = dag_file_processor.create_dag_run(dag)
        if advance_execution_date:
            # run a second time to schedule a dagrun after the start_date
            dr = dag_file_processor.create_dag_run(dag)
        ex_date = dr.execution_date
        # Pre-register the forced failures with the mock executor.
        for tid, state in expected_task_states.items():
            if state != State.FAILED:
                continue
            self.null_exec.mock_task_fail(dag_id, tid, ex_date)
        try:
            # This needs a _REAL_ dag, not the serialized version
            dag.run(start_date=ex_date, end_date=ex_date, executor=self.null_exec, **run_kwargs)
        except AirflowException:
            pass
        # test tasks
        for task_id, expected_state in expected_task_states.items():
            task = dag.get_task(task_id)
            ti = TaskInstance(task, ex_date)
            ti.refresh_from_db()
            self.assertEqual(ti.state, expected_state)
        # load dagrun
        dr = DagRun.find(dag_id=dag_id, execution_date=ex_date)
        dr = dr[0]
        dr.dag = dag
        self.assertEqual(dr.state, dagrun_state)
def test_dagrun_fail(self):
"""
DagRuns with one failed and one incomplete root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_fail',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.UPSTREAM_FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_success(self):
"""
DagRuns with one failed and one successful root task -> SUCCESS
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_success',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.SUCCESS,
},
dagrun_state=State.SUCCESS)
def test_dagrun_root_fail(self):
"""
DagRuns with one successful and one failed root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_root_fail',
expected_task_states={
'test_dagrun_succeed': State.SUCCESS,
'test_dagrun_fail': State.FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_root_fail_unfinished(self):
"""
DagRuns with one unfinished and one failed root task -> RUNNING
"""
# TODO: this should live in test_dagrun.py
# Run both the failed and successful tasks
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag_id = 'test_dagrun_states_root_fail_unfinished'
dag = self.dagbag.get_dag(dag_id)
dr = dag_file_processor.create_dag_run(dag)
self.null_exec.mock_task_fail(dag_id, 'test_dagrun_fail', DEFAULT_DATE)
with self.assertRaises(AirflowException):
dag.run(start_date=dr.execution_date, end_date=dr.execution_date, executor=self.null_exec)
# Mark the successful task as | |
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.compute.models import StorageAccountTypes
from azure.mgmt.compute.models import SnapshotStorageAccountTypes
from azure.storage.blob import BlockBlobService
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.compute.models import DiskCreateOption
from azure.mgmt.compute.models import DiskCreateOptionTypes
import glob
from .BaseClient import BaseClient
from ..models.Snapshot import Snapshot
from ..models.Volume import Volume
from ..models.Attachment import Attachment
class AzureClient(BaseClient):
    def __init__(self, operation_name, configuration, directory_persistent, directory_work_list, poll_delay_time,
                 poll_maximum_time):
        """
        Initialise the Azure backup/restore client.

        :param operation_name: name of the operation being performed; the value
            'blob_operation' skips all compute-related initialisation below
        :param configuration: dict holding Azure credentials and settings; when
            configuration['credhub_url'] is None the credentials are read from
            this dict directly, otherwise they are fetched from CredHub
        :param directory_persistent: passed through to BaseClient
        :param directory_work_list: passed through to BaseClient
        :param poll_delay_time: passed through to BaseClient
        :param poll_maximum_time: passed through to BaseClient
        :raises Exception: if the blob container is missing or inaccessible,
            the instance location cannot be determined, or the SCSI host number
            of the data volumes cannot be found
        """
        super(AzureClient, self).__init__(operation_name, configuration, directory_persistent, directory_work_list,
                                          poll_delay_time, poll_maximum_time)
        if configuration['credhub_url'] is None:
            # Credentials are supplied inline in the configuration.
            self.__setCredentials(
                configuration['client_id'], configuration['client_secret'], configuration['tenant_id'])
            self.resource_group = configuration['resource_group']
            self.storage_account_name = configuration['storageAccount']
            self.storage_account_key = configuration['storageAccessKey']
            self.subscription_id = configuration['subscription_id']
        else:
            # Credentials must be fetched from CredHub first.
            self.logger.info('fetching creds from credhub')
            azure_config = self._get_credentials_from_credhub(configuration)
            self.__setCredentials(
                azure_config['client_id'], azure_config['client_secret'], azure_config['tenant_id'])
            self.resource_group = azure_config['resource_group']
            self.storage_account_name = azure_config['storageAccount']
            self.storage_account_key = azure_config['storageAccessKey']
            self.subscription_id = azure_config['subscription_id']
        self.block_blob_service = BlockBlobService(
            account_name=self.storage_account_name, account_key=self.storage_account_key)
        # +-> Check whether the given container exists and accessible
        if (not self.get_container()) or (not self.access_container()):
            msg = 'Could not find or access the given container.'
            self.last_operation(msg, 'failed')
            raise Exception(msg)
        # skipping some actions for blob operation
        if operation_name != 'blob_operation':
            self.compute_client = ComputeManagementClient(
                self.__azureCredentials, self.subscription_id)
            self.instance_location = self.get_instance_location(
                configuration['instance_id'])
            if not self.instance_location:
                msg = 'Could not retrieve the location of the instance.'
                self.last_operation(msg, 'failed')
                raise Exception(msg)
            # scsi_host_number would be used to determine lun to device mapping
            # scsi_host_number would be same for all data volumes/disks
            self.scsi_host_number = self.get_host_number_of_data_volumes()
            if not self.scsi_host_number:
                msg = 'Could not determine SCSI host number for data volume'
                self.last_operation(msg, 'failed')
                raise Exception(msg)
            self.availability_zones = self._get_availability_zone_of_server(configuration['instance_id'])
        # maximum block size (bytes) used for blob uploads
        self.max_block_size = 100 * 1024 * 1024
        # list of regions where ZRS is supported
        self.zrs_supported_regions = ['westeurope', 'centralus','southeastasia', 'eastus2', 'northeurope', 'francecentral']
def __setCredentials(self, client_id, client_secret, tenant_id):
self.__azureCredentials = ServicePrincipalCredentials(
client_id=client_id,
secret=client_secret,
tenant=tenant_id
)
def get_container(self):
try:
container_props = self.block_blob_service.get_container_properties(
self.CONTAINER)
return container_props
except Exception as error:
self.logger.error('[Azure] [STORAGE] ERROR: Unable to find container {}.\n{}'.format(
self.CONTAINER, error))
return None
def access_container(self):
# Test if the container is accessible
try:
key = '{}/{}'.format(self.BLOB_PREFIX,
'AccessTestByServiceFabrikPythonLibrary')
self.block_blob_service.create_blob_from_text(
self.CONTAINER,
key,
'This is a sample text'
)
self.block_blob_service.delete_blob(
self.CONTAINER,
key
)
return True
except Exception as error:
self.logger.error('[Azure] [STORAGE] ERROR: Unable to access container {}.\n{}'.format(
self.CONTAINER, error))
return False
def _get_availability_zone_of_server(self, instance_id):
try:
instance = self.compute_client.virtual_machines.get(self.resource_group, instance_id)
return instance.zones
except Exception as error:
self.logger.error('[Azure] ERROR: Unable to find or access attached volume for instance_id {}.{}'.format(instance_id, error))
return None
def _get_snapshot(self, snapshot_name):
try:
snapshot = self.compute_client.snapshots.get(
self.resource_group, snapshot_name)
return Snapshot(snapshot.name, snapshot.disk_size_gb, snapshot.time_created, snapshot.provisioning_state)
except Exception as error:
self.logger.error(
'[Azure] ERROR: Unable to find or access snapshot {}.\n{}'.format(
snapshot_name, error))
return None
def _get_volume(self, volume_name):
try:
volume = self.compute_client.disks.get(
self.resource_group, volume_name)
return Volume(volume.name, volume.provisioning_state, volume.disk_size_gb)
except Exception as error:
self.logger.error(
'[Azure] ERROR: Unable to find or access volume/disk {}.\n{}'.format(
volume_name, error))
return None
    def get_host_number_of_data_volumes(self):
        '''
        This particular function is specific to Azure.
        It determines the SCSI host number for the persistent disk; the host
        number together with the lun is later used to resolve devices in a
        deterministic way.

        :return: the SCSI host number as a string, or None if it could not be
            determined
        '''
        host_number = None
        try:
            # First mount-table field for the persistent directory looks like
            # '/dev/<device><partition>'; [5:-1] drops the '/dev/' prefix and the
            # trailing partition character, leaving the bare device name.
            # NOTE(review): assumes a single-character partition suffix — confirm.
            device_persistent_volume = self.shell(
                'cat {} | grep {}'
                .format(self.FILE_MOUNTS, self.DIRECTORY_PERSISTENT)).split(' ')[0][5:-1]
            # Each SCSI device appears as /sys/bus/scsi/devices/h:c:t:l/block/<device>.
            device_paths = glob.glob(
                '/sys/bus/scsi/devices/*:*:*:*/block/{}'.format(device_persistent_volume))
            if len(device_paths) > 1:
                raise Exception('Invalid device paths for device {}'.format(
                    device_persistent_volume))
            # Success: got exactly one device path. [22:...] strips the fixed
            # 22-character '/sys/bus/scsi/devices/' prefix; the leading 'h' of
            # 'h:c:t:l' is the SCSI host number.
            # NOTE(review): an empty device_paths list raises IndexError here,
            # which is swallowed below and yields a None return.
            host_number = device_paths[0][22:-len(
                '/block/{}'.format(device_persistent_volume))].split(':')[0]
        except Exception as error:
            self.logger.error(
                '[ERROR] [SCSI HOST NUMBER] [DATA VOLUME] Error while determining SCSI host number'
                'of persistent volume directory {}.{}'.format(self.DIRECTORY_PERSISTENT, error))
        return host_number
def get_instance_location(self, instance_id):
try:
instance = self.compute_client.virtual_machines.get(
self.resource_group, instance_id)
return instance.location
except Exception as error:
self.logger.error(
'[Azure] ERROR: Unable to get location for instance_id {}.{}'.format(
instance_id, error))
return None
def get_attached_volumes_for_instance(self, instance_id):
try:
instance = self.compute_client.virtual_machines.get(
self.resource_group, instance_id)
self.availability_zones = instance.zones
volume_list = []
for disk in instance.storage_profile.data_disks:
device = None
device_path = glob.glob(
self.DEVICE_PATH_TEMPLATE.format(self.scsi_host_number, disk.lun))
if len(device_path) != 1:
raise Exception(
'Expected number of device path not matching 1 != {} fo lun {}'.format(
len(device_path), disk.lun))
device = '/dev/{}'.format(self.shell(
'ls {}'.format(device_path[0])).rstrip())
volume_list.append(
Volume(disk.name, 'none', disk.disk_size_gb, device))
return volume_list
except Exception as error:
self.logger.error(
'[Azure] ERROR: Unable to find or access attached volume for instance_id {}.{}'.format(
instance_id, error))
return []
def get_persistent_volume_for_instance(self, instance_id):
try:
device = self.shell(
'cat {} | grep {}'.format(self.FILE_MOUNTS, self.DIRECTORY_PERSISTENT)).split(' ')[0][:8]
for volume in self.get_attached_volumes_for_instance(instance_id):
if volume.device == device:
self._add_volume_device(volume.id, device)
return volume
return None
except Exception as error:
self.logger.error(
'[ERROR] [GET PRESISTENT VOLUME] Unable to find persistent volume for instance {}.{}'.format(
instance_id, error))
return None
def location_supports_zrs(self, location):
return location in self.zrs_supported_regions
def _create_snapshot(self, volume_id):
log_prefix = '[SNAPSHOT] [CREATE]'
snapshot = None
self.logger.info(
'{} START for volume id {} with tags {}'.format(log_prefix, volume_id, self.tags))
try:
disk_info = self.compute_client.disks.get(
self.resource_group, volume_id)
snapshot_name = self.generate_name_by_prefix(self.SNAPSHOT_PREFIX)
if self.location_supports_zrs(disk_info.location):
snapshot_creation_operation = self.compute_client.snapshots.create_or_update(
self.resource_group,
snapshot_name,
{
'location': disk_info.location,
'tags': self.tags,
'creation_data': {
'create_option': DiskCreateOption.copy,
'source_uri': disk_info.id
},
'sku': {
'name': 'Standard_ZRS'
}
}
)
else:
snapshot_creation_operation = self.compute_client.snapshots.create_or_update(
self.resource_group,
snapshot_name,
{
'location': disk_info.location,
'tags': self.tags,
'creation_data': {
'create_option': DiskCreateOption.copy,
'source_uri': disk_info.id
},
'sku': {
'name': 'Standard_LRS'
}
}
)
self._wait('Waiting for snapshot {} to get ready...'.format(snapshot_name),
lambda operation: operation.done() is True,
None,
snapshot_creation_operation)
snapshot_info = snapshot_creation_operation.result()
self.logger.info(
'Snapshot creation response: {}'.format(snapshot_info))
snapshot = Snapshot(
snapshot_info.name, snapshot_info.disk_size_gb, snapshot_info.time_created, snapshot_info.provisioning_state)
self._add_snapshot(snapshot.id)
self.logger.info(
'{} SUCCESS: snapshot-id={}, volume-id={} , tags={} '.format(log_prefix, snapshot.id, volume_id, self.tags))
self.output_json['snapshotId'] = snapshot.id
except Exception as error:
message = '{} ERROR: volume-id={}\n{}'.format(
log_prefix, volume_id, error)
self.logger.error(message)
if snapshot:
self.delete_snapshot(snapshot.id)
snapshot = None
raise Exception(message)
return snapshot
def _copy_snapshot(self, snapshot_id):
return self._get_snapshot(snapshot_id)
def _delete_snapshot(self, snapshot_id):
log_prefix = '[SNAPSHOT] [DELETE]'
try:
snapshot_deletion_operation = self.compute_client.snapshots.delete(
self.resource_group, snapshot_id)
# TODO: can be implemented the following wait as 'operation.done() is True'
self._wait('Waiting for snapshot {} to be deleted...'.format(snapshot_id),
lambda id: not self._get_snapshot(id),
None,
snapshot_id)
snapshot_delete_response = snapshot_deletion_operation.result()
self._remove_snapshot(snapshot_id)
self.logger.info(
'{} SUCCESS: snapshot-id={}\n{}'.format(
log_prefix, snapshot_id, snapshot_delete_response))
return True
except Exception as error:
message = '{} ERROR: snapshot_id={}\n{}'.format(
log_prefix, snapshot_id, error)
if error.status_code == 404:
self.logger.info(message)
self.logger.info('ignoring this error for delete operation..')
return True
self.logger.error(message)
raise Exception(message)
    def _create_volume(self, size, snapshot_id=None):
        """
        Create a managed disk, either empty or restored from a snapshot.

        When snapshot_id is given the disk is created as a copy of that
        snapshot (this path does not pass 'size'); otherwise an empty
        Standard_LRS disk of the given size is created. On failure any
        partially created disk is deleted before the error is re-raised.

        :param size: size of the new disk in GB (only used when no snapshot is given)
        :param snapshot_id: optional name of the snapshot to restore from
        :return: Volume object describing the created disk
        :raises Exception: if the disk could not be created
        """
        log_prefix = '[VOLUME] [CREATE]'
        volume = None
        try:
            disk_creation_operation = None
            disk_name = None
            if snapshot_id is not None:
                # Restore path: copy the snapshot into a new disk.
                snapshot = self.compute_client.snapshots.get(
                    self.resource_group, snapshot_id)
                disk_name = self.generate_name_by_prefix(self.DISK_PREFIX)
                disk_creation_operation = self.compute_client.disks.create_or_update(
                    self.resource_group,
                    disk_name,
                    {
                        'location': self.instance_location,
                        'tags': self.tags,
                        'creation_data': {
                            'create_option': DiskCreateOption.copy,
                            'source_uri': snapshot.id
                        },
                        'zones': self.availability_zones
                    }
                )
            else:
                # Fresh path: create an empty disk of the requested size.
                disk_name = self.generate_name_by_prefix(self.DISK_PREFIX)
                disk_creation_operation = self.compute_client.disks.create_or_update(
                    self.resource_group,
                    disk_name,
                    {
                        'location': self.instance_location,
                        'tags': self.tags,
                        'disk_size_gb': size,
                        'creation_data': {
                            'create_option': DiskCreateOption.empty
                        },
                        'account_type': StorageAccountTypes.standard_lrs,
                        'zones': self.availability_zones
                    }
                )
            self._wait('Waiting for volume {} to get ready...'.format(disk_name),
                       lambda operation: operation.done() is True,
                       None,
                       disk_creation_operation)
            disk = disk_creation_operation.result()
            volume = Volume(disk.name, 'none', disk.disk_size_gb)
            self._add_volume(volume.id)
            self.logger.info(
                '{} SUCCESS: volume-id={} with tags={} '.format(log_prefix, volume.id, self.tags))
        except Exception as error:
            message = '{} ERROR: size={}\n{}'.format(log_prefix, size, error)
            self.logger.error(message)
            if volume:
                # Best-effort clean-up of a disk created before the failure.
                self.delete_volume(volume.id)
                volume = None
            raise Exception(message)
        return volume
def _delete_volume(self, volume_id):
log_prefix = '[VOLUME] [DELETE]'
try:
disk_deletion_operation = self.compute_client.disks.delete(
self.resource_group, volume_id)
self._wait('Waiting for volume {} to be deleted...'.format(volume_id),
lambda operation: operation.done() is True,
None,
disk_deletion_operation)
delete_response = disk_deletion_operation.result()
self._remove_volume(volume_id)
self.logger.info(
'{} SUCCESS: volume-id={} with tags={}\n{}'.format(
log_prefix, volume_id, self.tags, delete_response))
return True
except Exception as error:
message = '{} ERROR: volume-id={}\n{}'.format(
log_prefix, volume_id, error)
if error.status_code == 404:
self.logger.info(message)
self.logger.info('ignoring this error for delete operation..')
return True
self.logger.error(message)
raise Exception(message)
def _create_attachment(self, volume_id, instance_id):
log_prefix = '[ATTACHMENT] [CREATE]'
attachment = None
try:
virtual_machine = self.compute_client.virtual_machines.get(
self.resource_group,
instance_id
)
volume = self.compute_client.disks.get(
self.resource_group, volume_id)
all_data_disks = virtual_machine.storage_profile.data_disks
# traversing through all disks and finding next balnk lun
next_lun = 0
for disk in all_data_disks:
if disk.lun == next_lun:
next_lun += 1
existing_devices_path = glob.glob(
self.DEVICE_PATH_TEMPLATE.format(self.scsi_host_number, next_lun))
virtual_machine.storage_profile.data_disks.append({
'lun': next_lun,
'name': volume.name,
'create_option': DiskCreateOptionTypes.attach,
'managed_disk': {
'id': volume.id
}
})
disk_attach_operation = self.compute_client.virtual_machines.create_or_update(
self.resource_group,
virtual_machine.name,
virtual_machine
)
self._wait('Waiting for attachment of volume {} to get ready...'.format(volume_id),
lambda operation: operation.done() is True,
None,
disk_attach_operation)
updated_vm = disk_attach_operation.result()
all_devices_path = glob.glob(
self.DEVICE_PATH_TEMPLATE.format(self.scsi_host_number, next_lun))
new_devices_path = list(set(all_devices_path) -
set(existing_devices_path))
if len(new_devices_path) > 1:
raise Exception(
'Found more than one new devices while attaching volume!')
device = '/dev/{}'.format(self.shell(
'ls {}'.format(new_devices_path[0])).rstrip())
self._add_volume_device(volume_id, device)
attachment = Attachment(0, volume_id, instance_id)
self._add_attachment(volume_id, instance_id)
self.logger.info(
'{} SUCCESS: volume-id={}, instance-id={}\n Updated vm:{}'.format(
log_prefix, volume_id, instance_id, updated_vm))
except Exception as error:
message = '{} ERROR: volume-id={}, instance-id={}\n{}'.format(
log_prefix, volume_id, instance_id, error)
self.logger.error(message)
# The following lines are a | |
# geological_toolbox/requests.py
"""
This module hosts the class Requests, which provides functionality for special (geo-)database requests.
"""
import sqlalchemy as sq
from sqlalchemy.orm.session import Session
from typing import List, Tuple
from geological_toolbox.exceptions import DatabaseException, DatabaseRequestException, FaultException, \
ListOrderException
from geological_toolbox.geometries import GeoPoint
from geological_toolbox.properties import Property, PropertyTypes
from geological_toolbox.stratigraphy import StratigraphicObject
from geological_toolbox.wells import WellMarker
class Requests:
"""
The class Requests, which provides functionality for special (geo-)database requests.
"""
def __init__(self):
# type: () -> None
"""
Initialise the class
-> Currently nothing to do...
"""
pass
@staticmethod
def check_extent(extent: List[float]) -> None:
"""
checks, if the given extent has the right format
:param extent: value to be checked
:type extent:
:return: Nothing
:raises TypeError: if extent is not a list
:raises ValueError: if on list element is not compatible to float or number of elements is not 4
:raises ListOrderException: if the ordering of the extent list [min_easting, max_easting, min_northing,
max_northing] is wrong.
"""
if extent is None:
return
if not isinstance(extent, List):
raise TypeError("extent is not an instance of List()!")
if len(extent) != 4:
raise ValueError("Number of extension list elements is not 4!")
for i in range(len(extent)):
try:
extent[i] = float(extent[i])
except ValueError as e:
raise ValueError("At least on extent element cannot be converted to float!\n{}".format(e))
if extent[0] > extent[1]:
raise ListOrderException("min easting > max easting")
if extent[2] > extent[3]:
raise ListOrderException("min northing > max northing")
    @staticmethod
    def create_thickness_point(sorted_dict: dict, well_id: int, marker_1: int, marker_2: int, session: Session,
                               use_faulted: bool = False, fault_name: str = "",
                               add_properties: Tuple = tuple()) -> GeoPoint:
        """
        Generate a new GeoPoint with thickness property from 2 well marker

        :param sorted_dict: dictionary containing well_id / WellMarker data
        :param well_id: current well_id
        :param marker_1: list index of marker 1 inside sorted_dict[well_id]
        :param marker_2: list index of marker 2 inside sorted_dict[well_id]
        :param session: current SQLAlchemy session
        :param use_faulted: should faulted sequence be included?
        :param fault_name: name of fault stratigraphic unit (default: empty string)
        :param add_properties: Adds the properties to the GeoPoint. Format for each property: (value, type, name, unit)
        :return: new GeoPoint Object
        :raises FaultException: if a fault is inside the section and use_faulted is False
        :raises ValueError: if a property in the add_property tuple has fewer than 4 entries
        """
        min_depth = sorted_dict[well_id][marker_1].depth
        max_depth = sorted_dict[well_id][marker_2].depth
        # all fault markers of this well; depth restriction is applied below
        faults = session.query(WellMarker).join(StratigraphicObject). \
            filter(WellMarker.horizon_id == StratigraphicObject.id). \
            filter(StratigraphicObject.unit_name == fault_name). \
            filter(WellMarker.well_id == well_id)
        if min_depth > max_depth:
            faults = faults.filter(sq.between(WellMarker.drill_depth, max_depth, min_depth))
        else:
            faults = faults.filter(sq.between(WellMarker.drill_depth, min_depth, max_depth))
        if (faults.count() > 0) and (use_faulted is False):
            raise FaultException("Fault inside section")
        point = sorted_dict[well_id][marker_1].to_geopoint()
        # NOTE(review): thickness is max_depth - min_depth and therefore negative
        # when the markers are in reversed depth order — confirm this is intended.
        thickness = Property(max_depth - min_depth, PropertyTypes.FLOAT, "thickness", "m", session)
        point.add_property(thickness)
        if use_faulted:
            # record how many fault markers lie inside the interval
            faulted = Property(faults.count(), PropertyTypes.INT, "faulted", "count", session)
            point.add_property(faulted)
        for prop in add_properties:
            if len(prop) < 4:
                raise ValueError("property tuple has less than 4 entries!")
            new_property = Property(prop[0], PropertyTypes[prop[1]], prop[2], prop[3], session)
            point.add_property(new_property)
        return point
    @staticmethod
    def well_markers_to_thickness(session: Session, marker_1: str, marker_2: str, summarise_multiple: bool = False,
                                  extent: Tuple[int, int, int, int] or None = None, use_faulted: bool = False,
                                  fault_name: str = "Fault") -> List[GeoPoint]:
        """
        Static method for generating a point set including a thickness property derived from the committed well marker

        :param session: The SQLAlchemy session connected to the database storing the geodata
        :param marker_1: First WellMarker unit name
        :param marker_2: Second WellMarker unit name
        :param summarise_multiple: Summarise multiple occurrences of marker_1 and marker_2 to a maximum thickness.
               If this parameter is False (default value) create multiple points.
        :param extent: extension rectangle as list which borders the well distribution. The list has the following
               order: (min easting, max easting, min northing, max northing)
        :param use_faulted: if True, also sections with faults between marker_1 and marker_2 are returned
        :param fault_name: unit name of fault marker (default: "Fault")
        :return: A list of GeoPoints each with a thickness property
        :raises AttributeError: if marker_1 and marker_2 are equal
        :raises DatabaseException: if the database query results in less than 2 marker of a well_id
        :raises DatabaseRequestException: if an unexpected query result occurs
        :raises TypeError: if session is not an instance of SQLAlchemy session
        :raises ValueError: if a parameter is not compatible with the required type

        for further raises see :meth:`Requests.check_extent`

        Query for selecting markers:

        .. code-block:: sql
            :linenos:

            SELECT wm1.* FROM well_marker wm1
            JOIN stratigraphy st1
            ON wm1.horizon_id = st1.id
            WHERE st1.unit_name IN ("mu", "so")
            AND EXISTS
            (
                SELECT 1 FROM well_marker wm2
                JOIN stratigraphy st2
                ON wm2.horizon_id = st2.id
                WHERE st2.unit_name IN ("mu", "so")
                AND wm1.well_id = wm2.well_id
                AND st1.unit_name <> st2.unit_name
            )
            ORDER BY wm1.well_id,wm1.drill_depth
        """
        if not isinstance(session, Session):
            raise TypeError("session is not of type SQLAlchemy Session")
        if marker_1 == marker_2:
            raise AttributeError("marker_1 and marker_2 cannot be equal!")
        if extent is not None:
            Requests.check_extent(extent)
        result = session.query(WellMarker)
        # Select all markers of wells that contain both unit names; the second
        # statement additionally restricts the wells to the extent rectangle.
        if extent is None:
            statement = sq.text("SELECT wm1.* FROM well_marker wm1 " +
                                "JOIN stratigraphy st1 ON wm1.horizon_id = st1.id " +
                                "WHERE st1.unit_name IN (:marker1, :marker2) " +
                                "AND EXISTS " +
                                "( SELECT 1 FROM well_marker wm2 JOIN stratigraphy st2 ON wm2.horizon_id = st2.id " +
                                "WHERE st2.unit_name IN (:marker1, :marker2) AND wm1.well_id = wm2.well_id " +
                                "AND st1.unit_name <> st2.unit_name) ORDER BY wm1.well_id,wm1.drill_depth")
            result = result.from_statement(statement). \
                params(marker1=marker_1, marker2=marker_2). \
                all()
        else:
            statement = sq.text("SELECT wm1.* FROM well_marker wm1 " +
                                "JOIN wells ON wm1.well_id = wells.id " +
                                "JOIN stratigraphy st1 ON wm1.horizon_id = st1.id " +
                                "WHERE wells.east BETWEEN :east_min AND :east_max " +
                                "AND wells.north BETWEEN :north_min AND :north_max " +
                                "AND st1.unit_name IN (:marker1, :marker2)" +
                                "AND EXISTS " +
                                "( SELECT 1 FROM well_marker wm2 JOIN stratigraphy st2 ON wm2.horizon_id = st2.id " +
                                "WHERE st2.unit_name IN (:marker1, :marker2) AND wm1.well_id = wm2.well_id " +
                                "AND st1.unit_name <> st2.unit_name) ORDER BY wm1.well_id,wm1.drill_depth")
            result = result.from_statement(statement). \
                params(marker1=marker_1, marker2=marker_2, east_min=extent[0], east_max=extent[1], north_min=extent[2],
                       north_max=extent[3]). \
                all()

        # first: sort by well_id for simpler multiple marker check
        sorted_dict = dict()
        for i in range(len(result)):
            if result[i].well_id not in sorted_dict:
                sorted_dict[result[i].well_id] = list()
            sorted_dict[result[i].well_id].append(result[i])
        del result

        # generate the resulting list of GeoPoints
        geopoints = list()
        for well_id in sorted_dict:
            if len(sorted_dict[well_id]) < 2:
                raise DatabaseException("Not enough well marker in dictionary")
            # simple case: exactly one marker pair in this well
            if len(sorted_dict[well_id]) == 2:
                sorted_dict[well_id][0].session = session
                try:
                    if summarise_multiple:
                        point = Requests.create_thickness_point(
                            sorted_dict, well_id, 0, 1, session, use_faulted, fault_name,
                            ((0, "INT", "summarised", "bool"),))
                        geopoints.append(point)
                    else:
                        point = Requests.create_thickness_point(
                            sorted_dict, well_id, 0, 1, session, use_faulted, fault_name,
                            ((0, "INT", "multiple marker", "bool"),))
                        geopoints.append(point)
                # FaultException -> do nothing except catching the exception
                except FaultException:
                    continue
                # don't test anything else for this well_id
                continue
            # last case: more than 2 values found:
            if summarise_multiple:
                # use the first occurrence of marker_1 and the last of marker_2
                # to produce one maximum-thickness point per well
                first_index = -1
                last_index = -1
                for i in range(len(sorted_dict[well_id])):
                    if sorted_dict[well_id][i].horizon.unit_name == marker_1:
                        first_index = i
                        break
                for j in reversed(range(len(sorted_dict[well_id]))):
                    if sorted_dict[well_id][j].horizon.unit_name == marker_2:
                        last_index = j
                        break
                if (first_index == -1) or (last_index == -1):
                    raise DatabaseRequestException("Didn't find two different markers. Shouldn't be possible. " +
                                                   "Please excuse this error and forward it to me.")
                # wrong order -> nothing to do...
                if last_index < first_index:
                    continue
                try:
                    sorted_dict[well_id][first_index].session = session
                    point = Requests.create_thickness_point(
                        sorted_dict, well_id, first_index, last_index, session, use_faulted, fault_name,
                        ((1, "INT", "summarised", "bool"),))
                    geopoints.append(point)
                # FaultException -> do nothing except catching the exception
                except FaultException:
                    continue
                # finished summarise section -> continue to avoid second round without summarise
                continue
            # don't summarise: emit one point per marker_1 -> marker_2 sequence
            first_index = -1
            for index in range(len(sorted_dict[well_id])):
                if sorted_dict[well_id][index].horizon.unit_name == marker_1:
                    first_index = index
                elif (first_index != -1) and (sorted_dict[well_id][index].horizon.unit_name == marker_2):
                    try:
                        sorted_dict[well_id][first_index].session = session
                        point = Requests.create_thickness_point(
                            sorted_dict, well_id, first_index, index, session, use_faulted, fault_name,
                            ((1, "INT", "multiple marker", "bool"),))
                        geopoints.append(point)
                    # FaultException -> do nothing except catching the exception
                    except FaultException:
                        continue
                    finally:
                        first_index = -1
        return geopoints
@staticmethod
def interpolate_geopoints(points: GeoPoint, property_name: str, method: str) -> None:
"""
Interpolate the property values of the given GeoPoints using the interpolation method "method"
.. todo:: - Integrate functionality
- define interpolation methods
- define return statement
:param points: List of GeoPoints as interpolation base
:param property_name: Name of the property to interpolate
:param method: Interpolation method
:return: Currently | |
tf.squeeze(
tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)],
axis=axis
)
tf_layers_dict[layer_id] = extrapolation_of_layers(
wr_config[layer_id],
inp
)
else:
tf_layers_dict[layer_id] = tf.squeeze(
tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)],
axis=axis
)
except:
if wr_config and layer_id in wr_config and format_version >= 2:
if wr_config[layer_id]['replace_mode'] == 'insert_before':
inp = extrapolation_of_layers(
wr_config[layer_id],
tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]
)
tf_layers_dict[layer_id] = tf.squeeze(
inp,
axis=-1
)
elif wr_config[layer_id]['replace_mode'] == 'insert_after':
inp = tf.squeeze(
tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)],
axis=-1
)
tf_layers_dict[layer_id] = extrapolation_of_layers(
wr_config[layer_id],
inp
)
else:
tf_layers_dict[layer_id] = tf.squeeze(
tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)],
axis=-1
)
        ### Gather
        elif layer.attrib['type'] == 'Gather':
            # Port 0: params tensor, port 1: indices, port 2: axis (scalar).
            axis = int(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 2)])
            temp = tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)]
            input_shape = tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)].shape[0]
            output_shape = [int(v.text) for v in layer.find("output").find("port")]
            batch_dims = 0
            if not data is None and 'batch_dims' in data.attrib:
                batch_dims = int(data.attrib['batch_dims'])
            if batch_dims == 0:
                # None doubles as the marker that enables the post-gather
                # squeeze heuristics below.
                batch_dims = None
            # Normalise the indices input to either a Python list or a tensor.
            indices = []
            if type(temp) == np.ndarray:
                if temp.ndim == 1:
                    for idx, dim in enumerate(temp):
                        indices.append(dim)
                else:
                    indices = temp
            else:
                # TODO
                if len(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)].shape) < len(temp.shape):
                    for idx, dim in enumerate(temp):
                        indices.append(dim)
                elif len(temp.shape) == 1:
                    indices = temp
                else:
                    # NOTE(review): heuristic index remapping for dynamic-shape
                    # indices — rationale not evident from this code; verify.
                    shape = tf.shape(temp)
                    for idx, dim in enumerate(shape):
                        if idx == 0:
                            indices.append(0)
                        elif idx == input_shape - 1:
                            indices.append(1)
                        else:
                            indices.append(dim + 1)
            if wr_config and layer_id in wr_config and format_version >= 2:
                if wr_config[layer_id]['replace_mode'] == 'insert_before':
                    # Apply the user-supplied replacement layer to the input first.
                    inp = extrapolation_of_layers(
                        wr_config[layer_id],
                        tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]
                    )
                    if isinstance(inp, tf.Tensor):
                        tf_layers_dict[layer_id] = tf.gather(
                            inp,
                            indices,
                            axis=axis,
                            batch_dims=batch_dims
                        )
                    else:
                        try:
                            if indices == [0] and axis == 0:
                                # Gathering element 0 on axis 0 degenerates to a squeeze.
                                try:
                                    tmp = tf.squeeze(
                                        inp,
                                        axis=axis
                                    )
                                    if tmp.type_spec.shape == []:
                                        tf_layers_dict[layer_id] = tf.expand_dims(tmp, axis=0)
                                    # NOTE(review): when tmp is not scalar-shaped,
                                    # tf_layers_dict[layer_id] is never assigned on
                                    # this path — likely a bug; confirm.
                                except:
                                    tf_layers_dict[layer_id] = tf.gather(
                                        inp,
                                        indices,
                                        axis=axis,
                                        batch_dims=batch_dims
                                    )
                            else:
                                tf_layers_dict[layer_id] = tf.gather(
                                    inp,
                                    indices,
                                    axis=axis,
                                    batch_dims=batch_dims
                                )
                        except:
                            tf_layers_dict[layer_id] = tf.gather(
                                inp,
                                indices,
                                axis=axis,
                                batch_dims=batch_dims
                            )
                            # Drop a degenerate leading/trailing dimension of size 1.
                            if batch_dims is None and axis == 0 and tf_layers_dict[layer_id].shape[0] == 1:
                                tf_layers_dict[layer_id] = tf_layers_dict[layer_id][0]
                            elif batch_dims is None and (axis == -1 or axis == (len(tf_layers_dict[layer_id].shape) - 1)) and tf_layers_dict[layer_id].shape[-1] == 1:
                                tf_layers_dict[layer_id] = tf.squeeze(tf_layers_dict[layer_id], axis=axis)
                elif wr_config[layer_id]['replace_mode'] == 'insert_after':
                    # Gather first, then apply the user-supplied replacement layer.
                    if isinstance(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)], tf.Tensor):
                        inp = tf.gather(
                            tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)],
                            indices,
                            axis=axis,
                            batch_dims=batch_dims
                        )
                    else:
                        try:
                            if indices == [0] and axis == 0:
                                try:
                                    inp = tf.squeeze(
                                        tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)],
                                        axis=axis
                                    )
                                    # NOTE(review): tf_layers_dict[layer_id] is read
                                    # here before being assigned in this branch —
                                    # probably 'inp' was intended; confirm.
                                    if tf_layers_dict[layer_id].type_spec.shape == []:
                                        inp = tf.expand_dims(tf_layers_dict[layer_id], axis=0)
                                except:
                                    inp = tf.gather(
                                        tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)],
                                        indices,
                                        axis=axis,
                                        batch_dims=batch_dims
                                    )
                            else:
                                inp = tf.gather(
                                    tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)],
                                    indices,
                                    axis=axis,
                                    batch_dims=batch_dims
                                )
                        except:
                            inp = tf.gather(
                                tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)],
                                indices,
                                axis=axis,
                                batch_dims=batch_dims
                            )
                            # Drop a degenerate leading/trailing dimension of size 1.
                            if batch_dims is None and axis == 0 and inp.shape[0] == 1:
                                inp = inp[0]
                            elif batch_dims is None and (axis == -1 or axis == (len(inp.shape) - 1)) and inp.shape[-1] == 1:
                                inp = tf.squeeze(inp, axis=axis)
                    tf_layers_dict[layer_id] = extrapolation_of_layers(
                        wr_config[layer_id],
                        inp
                    )
                else:
                    tf_layers_dict[layer_id] = tf.gather(
                        tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)],
                        indices,
                        axis=axis,
                        batch_dims=batch_dims
                    )
            else:
                # No weight-replacement config: plain gather with fallbacks.
                if isinstance(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)], tf.Tensor):
                    tf_layers_dict[layer_id] = tf.gather(
                        tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)],
                        indices,
                        axis=axis,
                        batch_dims=batch_dims
                    )
                else:
                    try:
                        if indices == [0] and axis == 0:
                            try:
                                tf_layers_dict[layer_id] = tf.squeeze(
                                    tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)],
                                    axis=axis
                                )
                                if tf_layers_dict[layer_id].type_spec.shape == []:
                                    tf_layers_dict[layer_id] = tf.expand_dims(tf_layers_dict[layer_id], axis=0)
                            except:
                                tf_layers_dict[layer_id] = tf.gather(
                                    tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)],
                                    indices,
                                    axis=axis,
                                    batch_dims=batch_dims
                                )
                        else:
                            tf_layers_dict[layer_id] = tf.gather(
                                tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)],
                                indices,
                                axis=axis,
                                batch_dims=batch_dims
                            )
                            # Drop a degenerate leading/trailing dimension of size 1.
                            if batch_dims is None and axis == 0 and tf_layers_dict[layer_id].shape[0] == 1:
                                tf_layers_dict[layer_id] = tf_layers_dict[layer_id][0]
                            elif batch_dims is None and (axis == -1 or axis == (len(tf_layers_dict[layer_id].shape) - 1)) and tf_layers_dict[layer_id].shape[-1] == 1:
                                tf_layers_dict[layer_id] = tf.squeeze(tf_layers_dict[layer_id], axis=axis)
                    except:
                        tf_layers_dict[layer_id] = tf.gather(
                            tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)],
                            indices,
                            axis=axis,
                            batch_dims=batch_dims
                        )
                        # Drop a degenerate leading/trailing dimension of size 1.
                        if batch_dims is None and axis == 0 and tf_layers_dict[layer_id].shape[0] == 1:
                            tf_layers_dict[layer_id] = tf_layers_dict[layer_id][0]
                        elif batch_dims is None and (axis == -1 or axis == (len(tf_layers_dict[layer_id].shape) - 1)) and tf_layers_dict[layer_id].shape[-1] == 1:
                            tf_layers_dict[layer_id] = tf.squeeze(tf_layers_dict[layer_id], axis=axis)
        ### GatherND
        elif layer.attrib['type'] == 'GatherND':
            batch_dims = int(data.attrib['batch_dims'])
            params = tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]
            indices_tmp = tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)]
            # tf.gather_nd requires integer indices; cast down from float if needed.
            if indices_tmp.dtype is tf.float32 or indices_tmp.dtype is tf.float64:
                indices = tf.cast(indices_tmp, tf.int32)
            else:
                indices = indices_tmp
            def barracuda_gather_nd(params, indices):
                """Emulate tf.gather_nd (batch_dims=0) using only ops supported by
                Unity Barracuda: flatten params, convert each N-D index into a flat
                offset via cumulative axis strides, gather, then reshape back."""
                idx_shape = indices.shape
                params_shape = params.shape
                idx_dims = idx_shape[-1]
                gather_shape = params_shape[idx_dims:]
                params_flat = tf.reshape(params, tf.concat([[-1], gather_shape], axis=0))
                axis_step = tf.math.cumprod(params_shape[:idx_dims], exclusive=True, reverse=True)
                mul = tf.math.multiply(indices, axis_step)
                indices_flat = tf.reduce_sum(mul, axis=-1)
                result_flat = tf.gather(params_flat, indices_flat)
                return tf.reshape(result_flat, tf.concat([idx_shape[:-1], gather_shape], axis=0))
            # The Barracuda emulation only supports batch_dims == 0.
            if optimizing_barracuda and batch_dims > 0:
                print(f'{Color.RED}ERROR:{Color.RESET} When optimize_barracuda = True, batch_dims > 0 is not supported. layer_id: {layer_id}')
                sys.exit(-1)
            if wr_config and layer_id in wr_config and format_version >= 2:
                if wr_config[layer_id]['replace_mode'] == 'insert_before':
                    # Apply the user-supplied replacement layer to the input first.
                    inp = extrapolation_of_layers(
                        wr_config[layer_id],
                        tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]
                    )
                    if not optimizing_barracuda:
                        tf_layers_dict[layer_id] = tf.gather_nd(inp, indices, batch_dims=batch_dims)
                    else:
                        tf_layers_dict[layer_id] = barracuda_gather_nd(inp, indices)
                elif wr_config[layer_id]['replace_mode'] == 'insert_after':
                    # Gather first, then apply the user-supplied replacement layer.
                    inp = None
                    if not optimizing_barracuda:
                        inp = tf.gather_nd(params, indices, batch_dims=batch_dims)
                    else:
                        inp = barracuda_gather_nd(params, indices)
                    tf_layers_dict[layer_id] = extrapolation_of_layers(
                        wr_config[layer_id],
                        inp
                    )
                else:
                    if not optimizing_barracuda:
                        tf_layers_dict[layer_id] = tf.gather_nd(params, indices, batch_dims=batch_dims)
                    else:
                        tf_layers_dict[layer_id] = barracuda_gather_nd(params, indices)
### ReduceMean, ReduceMax, ReduceMin, ReduceSum, ReduceProd, ReduceL2 - TODO
elif layer.attrib['type'] == 'ReduceMean' or layer.attrib['type'] == 'ReduceMax' or layer.attrib['type'] == 'ReduceMin' or \
        layer.attrib['type'] == 'ReduceSum' or layer.attrib['type'] == 'ReduceProd' or \
        layer.attrib['type'] == 'ReduceL1' or layer.attrib['type'] == 'ReduceL2':
    keep_dims = True if (data.attrib['keep_dims'] == "True" or data.attrib['keep_dims'] == "true") else False
    axis = None
    # Port 1 supplies the reduction axes. They arrive in NCHW order and are
    # remapped to NHWC: channel axis 1 -> -1, spatial axes >= 2 shift down by 1.
    if type(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)]) == np.ndarray and len(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)]) == 1:
        axis = tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)].astype(np.int32)
        if axis == 1:
            axis = -1
        elif axis >= 2:
            axis -= 1
    elif type(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)]) != np.ndarray and len(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)].shape) == 1:
        try:
            # Axes [1, 2] (H, W in NCHW) are kept as-is; anything else is shifted by -1.
            if (tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)].numpy() == [1, 2]).all():
                # NOTE(review): `type(x).dtype` inspects the class object, not the
                # tensor instance; this probably should be `x.dtype` — confirm upstream.
                if type(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)]).dtype != tf.int32:
                    axis = tf.cast(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)], tf.int32)
                else:
                    axis = tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)]
            else:
                if type(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)]).dtype != tf.int32:
                    axis = tf.cast(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)] - 1, tf.int32)
                else:
                    axis = tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)] - 1
        except:
            # .numpy() unavailable (graph tensor): fall back to the shift-by-one mapping.
            if type(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)]).dtype != tf.int32:
                axis = tf.cast(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)] - 1, tf.int32)
            else:
                axis = tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)] - 1
    else:
        # Multi-axis case: remap each axis entry in place.
        for idx, part_axis in enumerate(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)]):
            if part_axis == 1:
                tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)][idx] = -1
            elif part_axis >= 2:
                tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)][idx] -= 1
        if type(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)]).dtype != tf.int32:
            axis = tf.cast(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)], tf.int32)
        else:
            axis = tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)]
    # Resolve the data input, honoring any weight-replacement insertion.
    inp = None
    if wr_config and layer_id in wr_config and format_version >= 2:
        if wr_config[layer_id]['replace_mode'] == 'insert_before':
            inp = extrapolation_of_layers(
                wr_config[layer_id],
                tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]
            )
        elif wr_config[layer_id]['replace_mode'] == 'insert_after':
            inp = tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]
    else:
        inp = tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]
    # Apply the requested reduction.
    if layer.attrib['type'] == 'ReduceMean':
        inp = tf.math.reduce_mean(
            inp,
            axis=axis,
            keepdims=keep_dims
        )
    elif layer.attrib['type'] == 'ReduceMax':
        inp = tf.math.reduce_max(
            inp,
            axis=axis,
            keepdims=keep_dims
        )
    elif layer.attrib['type'] == 'ReduceMin':
        inp = tf.math.reduce_min(
            inp,
            axis=axis,
            keepdims=keep_dims
        )
    elif layer.attrib['type'] == 'ReduceSum':
        inp = tf.math.reduce_sum(
            inp,
            axis=axis,
            keepdims=keep_dims
        )
    elif layer.attrib['type'] == 'ReduceProd':
        inp = tf.math.reduce_prod(
            inp,
            axis=axis,
            keepdims=keep_dims
        )
    elif layer.attrib['type'] == 'ReduceL1':
        # L1 = sum(|x|)
        reduceL1_abs = tf.math.abs(
            inp
        )
        inp = tf.math.reduce_sum(
            reduceL1_abs,
            axis=axis,
            keepdims=keep_dims
        )
    elif layer.attrib['type'] == 'ReduceL2':
        # L2 = sqrt(sum(x^2))
        reduceL2_square = tf.math.square(
            inp
        )
        reduceL2_sum = tf.math.reduce_sum(
            reduceL2_square,
            axis=axis,
            keepdims=keep_dims
        )
        inp = tf.math.sqrt(reduceL2_sum)
    # Store the result, applying an 'insert_after' replacement if configured.
    if wr_config and layer_id in wr_config and format_version >= 2:
        if wr_config[layer_id]['replace_mode'] == 'insert_before':
            tf_layers_dict[layer_id] = inp
        elif wr_config[layer_id]['replace_mode'] == 'insert_after':
            tf_layers_dict[layer_id] = extrapolation_of_layers(
                wr_config[layer_id],
                inp
            )
    else:
        tf_layers_dict[layer_id] = inp
### MatMul
elif layer.attrib['type'] == 'MatMul':
    # Transpose flags arrive either as legacy 'a'/'b' ints or as
    # 'transpose_a'/'transpose_b' (int or bool-string).
    # NOTE(review): if none of these attributes is present, transpose_a /
    # transpose_b remain unbound and the matmul below would raise NameError —
    # confirm the IR always carries at least one form.
    if not data is None and 'a' in data.attrib:
        transpose_a = True if int(data.attrib['a']) == 1 else False
    if not data is None and 'b' in data.attrib:
        transpose_b = True if int(data.attrib['b']) == 1 else False
    if not data is None and 'transpose_a' in data.attrib:
        try:
            transpose_a = True if int(data.attrib['transpose_a']) == 1 else False
        except:
            # Value was a boolean string rather than an int.
            transpose_a = True if (data.attrib['transpose_a'] == 'True' or data.attrib['transpose_a'] == 'true') else False
    if not data is None and 'transpose_b' in data.attrib:
        try:
            transpose_b = True if int(data.attrib['transpose_b']) == 1 else False
        except:
            transpose_b = True if (data.attrib['transpose_b'] == 'True'or data.attrib['transpose_b'] == 'true') else False
if wr_config and layer_id in wr_config and format_version >= 2:
if wr_config[layer_id]['replace_mode'] == 'insert_before':
inp = extrapolation_of_layers(
wr_config[layer_id],
tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]
)
tf_layers_dict[layer_id] = tf.linalg.matmul(
inp,
tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)],
transpose_a,
transpose_b
)
elif wr_config[layer_id]['replace_mode'] == 'insert_after':
| |
<filename>txmsgpackrpc/protocol.py
# http://github.com/donalm/txMsgpack
# Copyright (c) 2013 <NAME>
# https://github.com/jakm/txmsgpackrpc
# Copyright (c) 2015 <NAME>
from __future__ import print_function
import logging
import msgpack
import sys
from collections import defaultdict, deque, namedtuple
from twisted.internet import defer, protocol
from twisted.protocols import policies
from twisted.python import failure, log
from txmsgpackrpc.error import (ConnectionError, ResponseError, InvalidRequest,
InvalidResponse, InvalidData, TimeoutError,
SerializationError)
MSGTYPE_REQUEST=0
MSGTYPE_RESPONSE=1
MSGTYPE_NOTIFICATION=2
Context = namedtuple('Context', ['peer'])
class MsgpackBaseProtocol(object):
    """
    msgpack rpc client/server protocol - base implementation.

    Subclasses provide the transport specifics by implementing
    C{isConnected}, C{writeRawData}, C{getRemoteMethod} and
    C{getClientContext}.
    """
    def __init__(self, sendErrors=False, packerEncoding="utf-8", unpackerEncoding="utf-8", useList=True):
        """
        @param sendErrors: forward any uncaught Exception details to remote peer.
        @type sendErrors: C{bool}.
        @param packerEncoding: encoding used to encode Python str and unicode. Default is 'utf-8'.
        @type packerEncoding: C{str}
        @param unpackerEncoding: encoding used for decoding msgpack bytes. Default is 'utf-8'.
        @type unpackerEncoding: C{str}.
        @param useList: If true, unpack msgpack array to Python list. Otherwise, unpack to Python tuple.
        @type useList: C{bool}.
        """
        self._sendErrors = sendErrors
        # msgid -> (deferred result, context) for requests we are serving.
        self._incoming_requests = {}
        # msgid -> Deferred for requests we sent and are awaiting answers to.
        self._outgoing_requests = {}
        self._next_msgid = 0
        self._packer = msgpack.Packer(encoding=packerEncoding)
        self._unpacker = msgpack.Unpacker(encoding=unpackerEncoding, unicode_errors='strict', use_list=useList)
    def isConnected(self):
        """Return True when the transport can carry messages."""
        raise NotImplementedError('Must be implemented in descendant')
    def writeRawData(self, message, context):
        """Write already-packed bytes to the transport."""
        raise NotImplementedError('Must be implemented in descendant')
    def getRemoteMethod(self, protocol, methodName):
        """Look up the callable serving C{methodName}."""
        raise NotImplementedError('Must be implemented in descendant')
    def getClientContext(self):
        """Return the peer context used when writing outgoing messages."""
        raise NotImplementedError('Must be implemented in descendant')
    def createRequest(self, method, params):
        """
        Create new RPC request. If protocol is not connected, errback with
        C{ConnectionError} will be called.
        Possible exceptions:
        * C{error.ConnectionError}: all connection attempts failed
        * C{error.ResponseError}: remote method returned error value
        * C{error.TimeoutError}: waitTimeout expired during request processing
        * C{t.i.e.ConnectionClosed}: connection closed during request processing
        @param method: RPC method name
        @type method: C{str}
        @param params: RPC method parameters
        @type params: C{tuple} or C{list}
        @return Returns Deferred that callbacks with result of RPC method or
                errbacks with C{error.MsgpackError}.
        @rtype C{t.i.d.Deferred}
        """
        if not self.isConnected():
            raise ConnectionError("Not connected")
        msgid = self.getNextMsgid()
        message = (MSGTYPE_REQUEST, msgid, method, params)
        ctx = self.getClientContext()
        self.writeMessage(message, ctx)
        # Register the Deferred after the write so a synchronous write failure
        # propagates to the caller instead of leaving a dangling entry.
        df = defer.Deferred()
        self._outgoing_requests[msgid] = df
        return df
    def createNotification(self, method, params):
        """
        Create new RPC notification. If protocol is not connected, errback with
        C{ConnectionError} will be called.
        Possible exceptions:
        * C{error.ConnectionError}: all connection attempts failed
        * C{t.i.e.ConnectionClosed}: connection closed during request processing
        @param method: RPC method name
        @type method: C{str}
        @param params: RPC method parameters
        @type params: C{tuple} or C{list}
        @return Returns Deferred that callbacks with result of RPC method or
                errbacks with C{error.MsgpackError}.
        @rtype C{t.i.d.Deferred}
        """
        if not self.isConnected():
            raise ConnectionError("Not connected")
        if not type(params) in (list, tuple):
            params = (params,)
        # Notifications carry no msgid and never receive a response.
        message = (MSGTYPE_NOTIFICATION, method, params)
        ctx = self.getClientContext()
        self.writeMessage(message, ctx)
    def getNextMsgid(self):
        """Return a fresh, monotonically increasing message id."""
        self._next_msgid += 1
        return self._next_msgid
    def rawDataReceived(self, data, context=None):
        """Feed transport bytes to the streaming unpacker and dispatch
        every complete message it yields."""
        try:
            self._unpacker.feed(data)
            for message in self._unpacker:
                self.messageReceived(message, context)
        except Exception:
            log.err()
    def messageReceived(self, message, context):
        """Dispatch a decoded message by its leading type tag."""
        if message[0] == MSGTYPE_REQUEST:
            return self.requestReceived(message, context)
        if message[0] == MSGTYPE_RESPONSE:
            return self.responseReceived(message)
        if message[0] == MSGTYPE_NOTIFICATION:
            return self.notificationReceived(message)
        return self.undefinedMessageReceived(message)
    def requestReceived(self, message, context):
        """Serve an incoming request and arrange for a response to be sent."""
        try:
            (msgType, msgid, methodName, params) = message
        except ValueError:
            if self._sendErrors:
                raise
            if not len(message) == 4:
                raise InvalidData("Incorrect message length. Expected 4; received %s" % len(message))
            raise InvalidData("Failed to unpack request.")
        except Exception:
            if self._sendErrors:
                raise
            raise InvalidData("Unexpected error. Failed to unpack request.")
        if msgid in self._incoming_requests:
            raise InvalidRequest("Request with msgid '%s' already exists" % msgid)
        result = defer.maybeDeferred(self.callRemoteMethod, msgid, methodName, params)
        # Register before adding callbacks so respondCallback can find the context.
        self._incoming_requests[msgid] = (result, context)
        result.addCallback(self.respondCallback, msgid)
        result.addErrback(self.respondErrback, msgid)
        result.addBoth(self.endRequest, msgid)
        return result
    def callRemoteMethod(self, msgid, methodName, params):
        """Resolve and invoke the local handler for C{methodName}."""
        try:
            method = self.getRemoteMethod(self, methodName)
        except Exception:
            if self._sendErrors:
                raise
            raise InvalidRequest("Client attempted to call unimplemented method: remote_%s" % methodName)
        send_msgid = False
        try:
            # If the remote_method has a keyword argment called msgid, then pass
            # it the msgid as a keyword argument. 'params' is always a list.
            if sys.version_info.major == 2:
                method_arguments = method.func_code.co_varnames
            elif sys.version_info.major == 3:
                method_arguments = method.__code__.co_varnames
            else:
                raise NotImplementedError('Unsupported Python version %s' % sys.version_info.major)
            if 'msgid' in method_arguments:
                send_msgid = True
        except Exception:
            # Builtins / callables without introspectable code objects: no msgid.
            pass
        try:
            if send_msgid:
                result = method(*params, msgid=msgid)
            else:
                result = method(*params)
        except TypeError:
            if self._sendErrors:
                raise
            raise InvalidRequest("Wrong number of arguments for %s" % methodName)
        return result
    def endRequest(self, result, msgid):
        """Drop the bookkeeping entry for a finished request; pass result through."""
        if msgid in self._incoming_requests:
            del self._incoming_requests[msgid]
        return result
    def responseReceived(self, message):
        """Match a response to its pending request and fire the Deferred."""
        try:
            (msgType, msgid, error, result) = message
        except Exception as e:
            if self._sendErrors:
                raise
            raise InvalidResponse("Failed to unpack response: %s" % e)
        try:
            df = self._outgoing_requests.pop(msgid)
        except KeyError:
            # Response can be delivered after timeout, code below is for debugging
            # if self._sendErrors:
            #     raise
            # raise InvalidResponse("Failed to find dispatched request with msgid %s to match incoming repsonse" % msgid)
            return
        if error is not None:
            # The remote host returned an error, so we need to create a Failure
            # object to pass into the errback chain. The Failure object in turn
            # requires an Exception
            ex = ResponseError(error)
            df.errback(failure.Failure(exc_value=ex))
        else:
            df.callback(result)
    def respondCallback(self, result, msgid):
        """Send a success response for C{msgid} back to the requesting peer."""
        try:
            _, ctx = self._incoming_requests[msgid]
        except KeyError:
            ctx = None
        error = None
        response = (MSGTYPE_RESPONSE, msgid, error, result)
        return self.writeMessage(response, ctx)
    def respondErrback(self, f, msgid):
        """Send a failure response; expose the traceback only when configured."""
        result = None
        if self._sendErrors:
            error = f.getBriefTraceback()
        else:
            error = f.getErrorMessage()
        self.respondError(msgid, error, result)
    def respondError(self, msgid, error, result=None):
        """Write an error response for C{msgid} to the stored request context."""
        try:
            _, ctx = self._incoming_requests[msgid]
        except KeyError:
            ctx = None
        response = (MSGTYPE_RESPONSE, msgid, error, result)
        self.writeMessage(response, ctx)
    def writeMessage(self, message, context):
        """Pack C{message} and hand the bytes to the transport."""
        try:
            message = self._packer.pack(message)
        except Exception:
            # Reset the packer so a poisoned buffer cannot corrupt later messages.
            self._packer.reset()
            if self._sendErrors:
                raise
            raise SerializationError("ERROR: Failed to write message: %s" % message)
        self.writeRawData(message, context)
    def notificationReceived(self, message):
        """Invoke the handler for a notification; nothing is sent back."""
        # Notifications don't expect a return value, so they don't supply a msgid
        msgid = None
        try:
            (msgType, methodName, params) = message
        except Exception:
            # Log the error - there's no way to return it for a notification
            log.err()
            # BUGFIX: previously execution fell through and referenced the
            # unbound methodName/params below, raising NameError. Bail out.
            return None
        try:
            result = defer.maybeDeferred(self.callRemoteMethod, msgid, methodName, params)
            result.addBoth(self.notificationCallback)
        except Exception:
            # Log the error - there's no way to return it for a notification
            log.err()
        return None
    def notificationCallback(self, result):
        # Log the result if required
        pass
    def undefinedMessageReceived(self, message):
        raise NotImplementedError("Msgpack received a message of type '%s', " \
                                  "and no method has been specified to " \
                                  "handle this." % message[0])
    def callbackOutgoingRequests(self, func):
        """Drain all pending outgoing requests, applying C{func} to each Deferred
        (used to errback everything on disconnect/timeout)."""
        while self._outgoing_requests:
            msgid, d = self._outgoing_requests.popitem()
            func(d)
class MsgpackStreamProtocol(protocol.Protocol, policies.TimeoutMixin, MsgpackBaseProtocol):
    """
    msgpack rpc client/server stream protocol
    @ivar factory: The L{MsgpackClientFactory} or L{MsgpackServerFactory} which created this L{Msgpack}.
    """
    def __init__(self, factory, sendErrors=False, timeout=None, packerEncoding="utf-8", unpackerEncoding="utf-8", useList=True):
        """
        @param factory: factory which created this protocol.
        @type factory: C{protocol.Factory}.
        @param sendErrors: forward any uncaught Exception details to remote peer.
        @type sendErrors: C{bool}.
        @param timeout: idle timeout in seconds before connection will be closed.
        @type timeout: C{int}
        @param packerEncoding: encoding used to encode Python str and unicode. Default is 'utf-8'.
        @type packerEncoding: C{str}
        @param unpackerEncoding: encoding used for decoding msgpack bytes. Default is 'utf-8'.
        @type unpackerEncoding: C{str}.
        @param useList: If true, unpack msgpack array to Python list. Otherwise, unpack to Python tuple.
        @type useList: C{bool}.
        """
        super(MsgpackStreamProtocol, self).__init__(sendErrors, packerEncoding, unpackerEncoding, useList)
        self.factory = factory
        self.setTimeout(timeout)
        # 0/1 connection flag, toggled by connectionMade/connectionLost.
        self.connected = 0
    def isConnected(self):
        return self.connected == 1
    def writeRawData(self, message, context):
        # transport.write returns None
        self.transport.write(message)
    def getRemoteMethod(self, protocol, methodName):
        # Method lookup is delegated to the factory shared by all connections.
        return self.factory.getRemoteMethod(self, methodName)
    def getClientContext(self):
        # Stream transport: the connection itself identifies the peer.
        return None
    def dataReceived(self, data):
        # Any inbound traffic counts as activity for the idle timeout.
        self.resetTimeout()
        self.rawDataReceived(data)
    def connectionMade(self):
        # log.msg("connectionMade", logLevel=logging.DEBUG)
        self.connected = 1
        self.factory.addConnection(self)
    def connectionLost(self, reason=protocol.connectionDone):
        # log.msg("connectionLost", logLevel=logging.DEBUG)
        self.connected = 0
        self.factory.delConnection(self)
        # Fail every request still waiting for a response.
        self.callbackOutgoingRequests(lambda d: d.errback(reason))
    def timeoutConnection(self):
        # log.msg("timeoutConnection", logLevel=logging.DEBUG)
        # Errback pending requests first, then let the mixin drop the transport.
        self.callbackOutgoingRequests(lambda d: d.errback(TimeoutError("Request timed out")))
        policies.TimeoutMixin.timeoutConnection(self)
    def closeConnection(self):
        self.transport.loseConnection()
class MsgpackDatagramProtocol(protocol.DatagramProtocol, MsgpackBaseProtocol):
"""
msgpack rpc client/server datagram protocol
"""
def __init__(self, address=None, handler=None, sendErrors=False, timeout=None, packerEncoding="utf-8",
unpackerEncoding="utf-8", useList=True):
"""
@param address: tuple(host,port) containing address of client where protocol will connect to.
@type address: C{tuple}.
@param handler: object of RPC server that will process requests and notifications.
@type handler: C{server.MsgpackRPCServer}
@param sendErrors: forward any uncaught Exception details to remote peer.
@type sendErrors: C{bool}.
@param timeout: idle timeout in seconds before connection will be closed.
@type timeout: C{int}
@param packerEncoding: encoding used to encode Python str and unicode. Default is 'utf-8'.
@type packerEncoding: C{str}
@param unpackerEncoding: encoding used for decoding msgpack bytes. Default is 'utf-8'.
@type unpackerEncoding: C{str}.
@param useList: If true, unpack msgpack array to Python list. Otherwise, unpack to Python tuple.
@type useList: C{bool}.
"""
super(MsgpackDatagramProtocol, self).__init__(sendErrors, packerEncoding, unpackerEncoding, useList)
if address:
if | |
u=300)
_default_initializer = {'states': {'i_sq': 0.0, 'i_sd': 0.0, 'epsilon': 0.0},
'interval': None,
'random_init': None,
'random_params': (None, None)}
IO_VOLTAGES = ['u_a', 'u_b', 'u_c', 'u_sd', 'u_sq']
IO_CURRENTS = ['i_a', 'i_b', 'i_c', 'i_sd', 'i_sq']
def _update_model(self):
    # Docstring of superclass
    """Assemble the constant coefficient matrix of the linear PMSM model.

    Rows correspond to d/dt [i_sd, i_sq, epsilon]; columns to the feature
    vector [omega, i_d, i_q, u_d, u_q, omega*i_d, omega*i_q].
    """
    mp = self._motor_parameter
    self._model_constants = np.array([
        # omega, i_d, i_q, u_d, u_q, omega * i_d, omega * i_q
        [ 0, -mp['r_s'], 0, 1, 0, 0, mp['l_q'] * mp['p']],
        [-mp['psi_p'] * mp['p'], 0, -mp['r_s'], 0, 1, -mp['l_d'] * mp['p'], 0],
        [ mp['p'], 0, 0, 0, 0, 0, 0],
    ])
    # Current derivative rows are scaled by 1/L to express di/dt explicitly.
    self._model_constants[self.I_SD_IDX] = self._model_constants[self.I_SD_IDX] / mp['l_d']
    self._model_constants[self.I_SQ_IDX] = self._model_constants[self.I_SQ_IDX] / mp['l_q']
def _torque_limit(self):
    # Docstring of superclass
    """Return the maximum torque reachable within the current limit.

    For l_d == l_q (non-salient machine) the optimum is i_sd = 0; otherwise
    the MTPA (maximum torque per ampere) current pair is computed from the
    quadratic in i_d.
    """
    mp = self._motor_parameter
    if mp['l_d'] == mp['l_q']:
        return self.torque([0, self._limits['i_sq'], 0])
    else:
        i_n = self.nominal_values['i']
        # Coefficients of i_d^2 + _p * i_d + _q = 0 (MTPA condition).
        _p = mp['psi_p'] / (2 * (mp['l_d'] - mp['l_q']))
        _q = - i_n ** 2 / 2
        i_d_opt = - _p / 2 - np.sqrt( (_p / 2) ** 2 - _q)
        # Remaining current budget goes to the torque-producing q-axis.
        i_q_opt = np.sqrt(i_n ** 2 - i_d_opt ** 2)
    return self.torque([i_d_opt, i_q_opt, 0])
def torque(self, currents):
    """Inner torque T = 1.5 * p * (psi_p + (l_d - l_q) * i_sd) * i_sq."""
    params = self._motor_parameter
    i_sd = currents[self.I_SD_IDX]
    i_sq = currents[self.I_SQ_IDX]
    flux_term = params['psi_p'] + (params['l_d'] - params['l_q']) * i_sd
    return 1.5 * params['p'] * flux_term * i_sq
def electrical_jacobian(self, state, u_in, omega, *args):
    """Return the Jacobians of the electrical ODE and the torque.

    Returns a tuple of
      * d(state')/d(state)  for state [i_sd, i_sq, epsilon],
      * d(state')/d(omega),
      * d(torque)/d(state).
    """
    mp = self._motor_parameter
    return (
        np.array([ # dx'/dx
            [-mp['r_s'] / mp['l_d'], mp['l_q']/mp['l_d'] * omega * mp['p'], 0],
            [-mp['l_d'] / mp['l_q'] * omega * mp['p'], - mp['r_s'] / mp['l_q'], 0],
            [0, 0, 0]
        ]),
        np.array([ # dx'/dw
            mp['p'] * mp['l_q'] / mp['l_d'] * state[self.I_SQ_IDX],
            - mp['p'] * mp['l_d'] / mp['l_q'] * state[self.I_SD_IDX] - mp['p'] * mp['psi_p'] / mp['l_q'],
            mp['p']
        ]),
        np.array([ # dT/dx
            1.5 * mp['p'] * (mp['l_d'] - mp['l_q']) * state[self.I_SQ_IDX],
            1.5 * mp['p'] * (mp['psi_p'] + (mp['l_d'] - mp['l_q']) * state[self.I_SD_IDX]),
            0
        ])
    )
class InductionMotor(ThreePhaseMotor):
"""
The InductionMotor and its subclasses implement the technical system of a three phase induction motor.
This includes the system equations, the motor parameters of the equivalent circuit diagram,
as well as limits and bandwidth.
===================== ========== ============= ===========================================
Motor Parameter Unit Default Value Description
===================== ========== ============= ===========================================
r_s Ohm 2.9338 Stator resistance
r_r Ohm 1.355 Rotor resistance
l_m H 143.75e-3 Main inductance
l_sigs H 5.87e-3 Stator-side stray inductance
l_sigr H 5.87e-3 Rotor-side stray inductance
p 1 2 Pole pair number
j_rotor kg/m^2 0.0011 Moment of inertia of the rotor
===================== ========== ============= ===========================================
=============== ====== =============================================
Motor Currents Unit Description
=============== ====== =============================================
i_sd A Direct axis current
i_sq A Quadrature axis current
i_sa A Current through branch a
i_sb A Current through branch b
i_sc A Current through branch c
i_salpha A Current in alpha axis
i_sbeta A Current in beta axis
=============== ====== =============================================
=============== ====== =============================================
Motor Voltages Unit Description
=============== ====== =============================================
u_sd V Direct axis voltage
u_sq V Quadrature axis voltage
u_sa V Voltage through branch a
u_sb V Voltage through branch b
u_sc V Voltage through branch c
u_salpha V Voltage in alpha axis
u_sbeta V Voltage in beta axis
=============== ====== =============================================
======== ===========================================================
Limits / Nominal Value Dictionary Entries:
-------- -----------------------------------------------------------
Entry Description
======== ===========================================================
i General current limit / nominal value
i_sa Current in phase a
i_sb Current in phase b
i_sc Current in phase c
i_salpha Current in alpha axis
i_sbeta Current in beta axis
i_sd Current in direct axis
i_sq Current in quadrature axis
omega Mechanical angular Velocity
torque Motor generated torque
u_sa Voltage in phase a
u_sb Voltage in phase b
u_sc Voltage in phase c
u_salpha Voltage in alpha axis
u_sbeta Voltage in beta axis
u_sd Voltage in direct axis
u_sq Voltage in quadrature axis
======== ===========================================================
Note:
The voltage limits should be the amplitude of the phase voltage (:math:`\hat{u}_S`).
Typically the rms value for the line voltage (:math:`U_L`) is given.
:math:`\hat{u}_S=\sqrt{2/3}~U_L`
The current limits should be the amplitude of the phase current (:math:`\hat{i}_S`).
Typically the rms value for the phase current (:math:`I_S`) is given.
:math:`\hat{i}_S = \sqrt{2}~I_S`
If not specified, nominal values are equal to their corresponding limit values.
Furthermore, if specific limits/nominal values (e.g. i_a) are not specified they are inferred from
the general limits/nominal values (e.g. i)
"""
I_SALPHA_IDX = 0
I_SBETA_IDX = 1
PSI_RALPHA_IDX = 2
PSI_RBETA_IDX = 3
EPSILON_IDX = 4
CURRENTS_IDX = [0, 1]
FLUX_IDX = [2, 3]
CURRENTS = ['i_salpha', 'i_sbeta']
FLUXES = ['psi_ralpha', 'psi_rbeta']
STATOR_VOLTAGES = ['u_salpha', 'u_sbeta']
IO_VOLTAGES = ['u_sa', 'u_sb', 'u_sc', 'u_salpha', 'u_sbeta', 'u_sd',
'u_sq']
IO_CURRENTS = ['i_sa', 'i_sb', 'i_sc', 'i_salpha', 'i_sbeta', 'i_sd',
'i_sq']
HAS_JACOBIAN = True
#### Parameters taken from DOI: 10.1109/EPEPEMC.2018.8522008 (<NAME>, <NAME>, <NAME>)
_default_motor_parameter = {
'p': 2,
'l_m': 143.75e-3,
'l_sigs': 5.87e-3,
'l_sigr': 5.87e-3,
'j_rotor': 1.1e-3,
'r_s': 2.9338,
'r_r': 1.355,
}
_default_limits = dict(omega=4e3 * np.pi / 30, torque=0.0, i=5.5, epsilon=math.pi, u=560)
_default_nominal_values = dict(omega=3e3 * np.pi / 30, torque=0.0, i=3.9, epsilon=math.pi, u=560)
_model_constants = None
_default_initializer = {'states': {'i_salpha': 0.0, 'i_sbeta': 0.0,
'psi_ralpha': 0.0, 'psi_rbeta': 0.0,
'epsilon': 0.0},
'interval': None,
'random_init': None,
'random_params': (None, None)}
_initializer = None
@property
def motor_parameter(self):
    """dict: Motor parameters of the equivalent circuit diagram."""
    # Docstring of superclass
    return self._motor_parameter
@property
def initializer(self):
    """dict: State-initializer configuration (states/interval/random settings)."""
    # Docstring of superclass
    return self._initializer
def __init__(self, motor_parameter=None, nominal_values=None,
             limit_values=None, motor_initializer=None, initial_limits=None,
             **__):
    """Create the induction motor model.

    Expands the scalar 'u'/'i' placeholders of the default nominal/limit
    dictionaries to the concrete IO voltage/current quantities before
    applying any user-supplied overrides.
    """
    # Docstring of superclass
    nominal = self._default_nominal_values.copy()
    for quantity in self.IO_VOLTAGES:
        nominal[quantity] = nominal['u']
    for quantity in self.IO_CURRENTS:
        nominal[quantity] = nominal['i']
    del nominal['u'], nominal['i']
    nominal.update(nominal_values or {})
    # Same expansion for the hard limits.
    limits = self._default_limits.copy()
    for quantity in self.IO_VOLTAGES:
        limits[quantity] = limits['u']
    for quantity in self.IO_CURRENTS:
        limits[quantity] = limits['i']
    del limits['u'], limits['i']
    limits.update(limit_values or {})
    super().__init__(motor_parameter, nominal_values,
                     limit_values, motor_initializer, initial_limits)
    self._update_model()
    self._update_limits(limits, nominal)
def reset(self,
          state_space,
          state_positions,
          omega=None):
    """Reset the motor state.

    With a configured initializer the initial limits are refreshed and the
    configured initial states are returned; otherwise an all-zero state
    vector (currents + fluxes + epsilon) is returned.
    """
    # Docstring of superclass
    initializer = self._initializer
    if not (initializer and initializer['states']):
        # No initializer configured: start from the zero state.
        return np.zeros(len(self.CURRENTS) + len(self.FLUXES) + 1)
    self._update_initial_limits(omega=omega)
    self.initialize(state_space, state_positions)
    return np.asarray(list(self._initial_states.values()))
def electrical_ode(self, state, u_sr_alphabeta, omega, *args):
    """
    The differential equation of the Induction Motor.
    Args:
        state: The momentary state of the motor. [i_salpha, i_sbeta, psi_ralpha, psi_rbeta, epsilon]
        omega: The mechanical load
        u_sr_alphabeta: The input voltages [u_salpha, u_sbeta, u_ralpha, u_rbeta]
    Returns:
        The derivatives of the state vector d/dt( [i_salpha, i_sbeta, psi_ralpha, psi_rbeta, epsilon])
    """
    # Linear model: derivative = model_constants @ feature_vector. The feature
    # order below must match the column order built in _update_model.
    return np.matmul(self._model_constants, np.array([
        # omega, i_alpha, i_beta, psi_ralpha, psi_rbeta, omega * psi_ralpha, omega * psi_rbeta, u_salpha, u_sbeta, u_ralpha, u_rbeta,
        omega,
        state[self.I_SALPHA_IDX],
        state[self.I_SBETA_IDX],
        state[self.PSI_RALPHA_IDX],
        state[self.PSI_RBETA_IDX],
        omega * state[self.PSI_RALPHA_IDX],
        omega * state[self.PSI_RBETA_IDX],
        # u_sr_alphabeta is a 2x2 array: row 0 stator, row 1 rotor voltages.
        u_sr_alphabeta[0, 0],
        u_sr_alphabeta[0, 1],
        u_sr_alphabeta[1, 0],
        u_sr_alphabeta[1, 1],
    ]))
def i_in(self, state):
    """Return the stator current components of the state vector."""
    current_indices = self.CURRENTS_IDX
    return state[current_indices]
def _torque_limit(self):
# Docstring of superclass
mp = self._motor_parameter
return 1.5 * mp['p'] * mp['l_m'] ** 2/(mp['l_m']+mp['l_sigr']) * self._limits['i_sd'] * self._limits['i_sq'] / 2
def torque(self, states):
    """Inner torque from rotor flux and stator currents (alpha/beta frame)."""
    # Docstring of superclass
    mp = self._motor_parameter
    flux_current_cross = (states[self.PSI_RALPHA_IDX] * states[self.I_SBETA_IDX]
                          - states[self.PSI_RBETA_IDX] * states[self.I_SALPHA_IDX])
    return 1.5 * mp['p'] * mp['l_m'] / (mp['l_m'] + mp['l_sigr']) * flux_current_cross
def _flux_limit(self, omega=0, eps_mag=0, u_q_max=0.0, u_rq_max=0.0):
    """
    Calculate Flux limits for given current and magnetic-field angle
    Args:
        omega(float): speed given by mechanical load
        eps_mag(float): magnetic field angle
        u_q_max(float): maximal strator voltage in q-system
        u_rq_max(float): maximal rotor voltage in q-system
    returns:
        maximal flux values(list) in alpha-beta-system
    """
    mp = self.motor_parameter
    l_s = mp['l_m'] + mp['l_sigs']
    l_r = mp['l_m'] + mp['l_sigr']
    l_mr = mp['l_m'] / l_r
    # total leakage coefficient of the equivalent circuit
    sigma = (l_s * l_r - mp['l_m'] ** 2) / (l_s * l_r)
    # limiting flux for a low omega
    if omega == 0:
        psi_d_max = mp['l_m'] * self._nominal_values['i_sd']
    else:
        # rotate the initial stator currents into the d/q frame
        i_d, i_q = self.q_inv([self._initial_states['i_salpha'],
                               self._initial_states['i_sbeta']],
                              eps_mag)
        # steady-state q-axis stator voltage equation solved for the d-flux
        psi_d_max = mp['p'] * omega * sigma * l_s * i_d + \
            (mp['r_s'] + mp['r_r'] * l_mr ** 2) * i_q + \
            u_q_max + \
            l_mr * u_rq_max
        psi_d_max /= - mp['p'] * omega * l_mr
        # clipping flux and setting nominal limit
        # NOTE(review): `i_d` is only bound in this branch, so the clip must
        # live inside the else; outside it would raise NameError when
        # omega == 0 — confirm against upstream source.
        psi_d_max = 0.9 * np.clip(psi_d_max, a_min=0, a_max=np.abs(mp['l_m'] * i_d))
    # returning flux in alpha, beta system
    return self.q([psi_d_max, 0], eps_mag)
def _update_model(self):
# Docstring of superclass
mp = self._motor_parameter
l_s = mp['l_m']+mp['l_sigs']
l_r = mp['l_m']+mp['l_sigr']
sigma = (l_s*l_r-mp['l_m']**2) /(l_s*l_r)
tau_r = l_r / mp['r_r']
tau_sig = sigma * l_s / (
mp['r_s'] + mp['r_r'] * (mp['l_m'] ** 2) / (l_r ** 2))
self._model_constants = np.array([
# omega, i_alpha, i_beta, psi_ralpha, psi_rbeta, omega * psi_ralpha, | |
isinstance(read_only_, bool):
raise Exception("Expected read_only_ to be a bool, received: {}".format(type(read_only_)))
self.bus_address = bus_address_
self.device_link = device_link_
self.device_name = device_name_
self.plan_info = plan_info_
self.read_only = read_only_
self.unknown_fields = unknown_fields
class VolumeAttachmentParams(Type):
    """Juju API wire type for volume attachment parameters."""
    _toSchema = {'instance_id': 'instance-id', 'machine_tag': 'machine-tag', 'provider': 'provider', 'read_only': 'read-only', 'volume_id': 'volume-id', 'volume_tag': 'volume-tag'}
    _toPy = {'instance-id': 'instance_id', 'machine-tag': 'machine_tag', 'provider': 'provider', 'read-only': 'read_only', 'volume-id': 'volume_id', 'volume-tag': 'volume_tag'}
    def __init__(self, instance_id=None, machine_tag=None, provider=None, read_only=None, volume_id=None, volume_tag=None, **unknown_fields):
        '''
        instance_id : str
        machine_tag : str
        provider : str
        read_only : bool
        volume_id : str
        volume_tag : str
        '''
        # Validate arguments against known Juju API types.
        if instance_id is not None and not isinstance(instance_id, (bytes, str)):
            raise Exception("Expected instance_id_ to be a str, received: {}".format(type(instance_id)))
        if machine_tag is not None and not isinstance(machine_tag, (bytes, str)):
            raise Exception("Expected machine_tag_ to be a str, received: {}".format(type(machine_tag)))
        if provider is not None and not isinstance(provider, (bytes, str)):
            raise Exception("Expected provider_ to be a str, received: {}".format(type(provider)))
        if read_only is not None and not isinstance(read_only, bool):
            raise Exception("Expected read_only_ to be a bool, received: {}".format(type(read_only)))
        if volume_id is not None and not isinstance(volume_id, (bytes, str)):
            raise Exception("Expected volume_id_ to be a str, received: {}".format(type(volume_id)))
        if volume_tag is not None and not isinstance(volume_tag, (bytes, str)):
            raise Exception("Expected volume_tag_ to be a str, received: {}".format(type(volume_tag)))
        self.instance_id = instance_id
        self.machine_tag = machine_tag
        self.provider = provider
        self.read_only = read_only
        self.volume_id = volume_id
        self.volume_tag = volume_tag
        self.unknown_fields = unknown_fields
class VolumeAttachmentParamsResult(Type):
    """Juju API wire type: a single VolumeAttachmentParams result or an error."""
    _toSchema = {'error': 'error', 'result': 'result'}
    _toPy = {'error': 'error', 'result': 'result'}
    def __init__(self, error=None, result=None, **unknown_fields):
        '''
        error : Error
        result : VolumeAttachmentParams
        '''
        # Nested wire types are decoded from their JSON dict representation.
        error_ = Error.from_json(error) if error else None
        result_ = VolumeAttachmentParams.from_json(result) if result else None
        # Validate arguments against known Juju API types.
        if error_ is not None and not isinstance(error_, (dict, Error)):
            raise Exception("Expected error_ to be a Error, received: {}".format(type(error_)))
        if result_ is not None and not isinstance(result_, (dict, VolumeAttachmentParams)):
            raise Exception("Expected result_ to be a VolumeAttachmentParams, received: {}".format(type(result_)))
        self.error = error_
        self.result = result_
        self.unknown_fields = unknown_fields
class VolumeAttachmentParamsResults(Type):
    _toSchema = {'results': 'results'}
    _toPy = {'results': 'results'}
    def __init__(self, results=None, **unknown_fields):
        '''
        results : typing.Sequence[~VolumeAttachmentParamsResult]
        '''
        # Decode each entry; a missing sequence becomes an empty list.
        decoded = [VolumeAttachmentParamsResult.from_json(entry) for entry in results or []]
        # Validate arguments against known Juju API types.
        if not (decoded is None or isinstance(decoded, (bytes, str, list))):
            raise Exception("Expected results_ to be a Sequence, received: {}".format(type(decoded)))
        self.results = decoded
        self.unknown_fields = unknown_fields
class VolumeAttachmentPlan(Type):
    _toSchema = {'block_device': 'block-device', 'life': 'life', 'machine_tag': 'machine-tag', 'plan_info': 'plan-info', 'volume_tag': 'volume-tag'}
    _toPy = {'block-device': 'block_device', 'life': 'life', 'machine-tag': 'machine_tag', 'plan-info': 'plan_info', 'volume-tag': 'volume_tag'}
    def __init__(self, block_device=None, life=None, machine_tag=None, plan_info=None, volume_tag=None, **unknown_fields):
        '''
        block_device : BlockDevice
        life : str
        machine_tag : str
        plan_info : VolumeAttachmentPlanInfo
        volume_tag : str
        '''
        # Decode nested structures; plain string fields pass straight through.
        device = BlockDevice.from_json(block_device) if block_device else None
        life_val = life
        machine = machine_tag
        info = VolumeAttachmentPlanInfo.from_json(plan_info) if plan_info else None
        volume = volume_tag
        # Validate arguments against known Juju API types.
        if not (device is None or isinstance(device, (dict, BlockDevice))):
            raise Exception("Expected block_device_ to be a BlockDevice, received: {}".format(type(device)))
        if not (life_val is None or isinstance(life_val, (bytes, str))):
            raise Exception("Expected life_ to be a str, received: {}".format(type(life_val)))
        if not (machine is None or isinstance(machine, (bytes, str))):
            raise Exception("Expected machine_tag_ to be a str, received: {}".format(type(machine)))
        if not (info is None or isinstance(info, (dict, VolumeAttachmentPlanInfo))):
            raise Exception("Expected plan_info_ to be a VolumeAttachmentPlanInfo, received: {}".format(type(info)))
        if not (volume is None or isinstance(volume, (bytes, str))):
            raise Exception("Expected volume_tag_ to be a str, received: {}".format(type(volume)))
        self.block_device = device
        self.life = life_val
        self.machine_tag = machine
        self.plan_info = info
        self.volume_tag = volume
        self.unknown_fields = unknown_fields
class VolumeAttachmentPlanInfo(Type):
    _toSchema = {'device_attributes': 'device-attributes', 'device_type': 'device-type'}
    _toPy = {'device-attributes': 'device_attributes', 'device-type': 'device_type'}
    def __init__(self, device_attributes=None, device_type=None, **unknown_fields):
        '''
        device_attributes : typing.Mapping[str, str]
        device_type : str
        '''
        attrs = device_attributes
        dev_type = device_type
        # Validate arguments against known Juju API types.
        if not (attrs is None or isinstance(attrs, dict)):
            raise Exception("Expected device_attributes_ to be a Mapping, received: {}".format(type(attrs)))
        if not (dev_type is None or isinstance(dev_type, (bytes, str))):
            raise Exception("Expected device_type_ to be a str, received: {}".format(type(dev_type)))
        self.device_attributes = attrs
        self.device_type = dev_type
        self.unknown_fields = unknown_fields
class VolumeAttachmentPlanResult(Type):
    _toSchema = {'error': 'error', 'result': 'result'}
    _toPy = {'error': 'error', 'result': 'result'}
    def __init__(self, error=None, result=None, **unknown_fields):
        '''
        error : Error
        result : VolumeAttachmentPlan
        '''
        # Decode nested wire payloads; falsy values pass through as None.
        err = Error.from_json(error) if error else None
        plan = VolumeAttachmentPlan.from_json(result) if result else None
        # Validate arguments against known Juju API types.
        if not (err is None or isinstance(err, (dict, Error))):
            raise Exception("Expected error_ to be a Error, received: {}".format(type(err)))
        if not (plan is None or isinstance(plan, (dict, VolumeAttachmentPlan))):
            raise Exception("Expected result_ to be a VolumeAttachmentPlan, received: {}".format(type(plan)))
        self.error = err
        self.result = plan
        self.unknown_fields = unknown_fields
class VolumeAttachmentPlanResults(Type):
    _toSchema = {'results': 'results'}
    _toPy = {'results': 'results'}
    def __init__(self, results=None, **unknown_fields):
        '''
        results : typing.Sequence[~VolumeAttachmentPlanResult]
        '''
        # Decode each entry; a missing sequence becomes an empty list.
        decoded = [VolumeAttachmentPlanResult.from_json(entry) for entry in results or []]
        # Validate arguments against known Juju API types.
        if not (decoded is None or isinstance(decoded, (bytes, str, list))):
            raise Exception("Expected results_ to be a Sequence, received: {}".format(type(decoded)))
        self.results = decoded
        self.unknown_fields = unknown_fields
class VolumeAttachmentPlans(Type):
    _toSchema = {'volume_plans': 'volume-plans'}
    _toPy = {'volume-plans': 'volume_plans'}
    def __init__(self, volume_plans=None, **unknown_fields):
        '''
        volume_plans : typing.Sequence[~VolumeAttachmentPlan]
        '''
        # Decode each entry; a missing sequence becomes an empty list.
        plans = [VolumeAttachmentPlan.from_json(entry) for entry in volume_plans or []]
        # Validate arguments against known Juju API types.
        if not (plans is None or isinstance(plans, (bytes, str, list))):
            raise Exception("Expected volume_plans_ to be a Sequence, received: {}".format(type(plans)))
        self.volume_plans = plans
        self.unknown_fields = unknown_fields
class VolumeAttachmentResult(Type):
    _toSchema = {'error': 'error', 'result': 'result'}
    _toPy = {'error': 'error', 'result': 'result'}
    def __init__(self, error=None, result=None, **unknown_fields):
        '''
        error : Error
        result : VolumeAttachment
        '''
        # Decode nested wire payloads; falsy values pass through as None.
        err = Error.from_json(error) if error else None
        attachment = VolumeAttachment.from_json(result) if result else None
        # Validate arguments against known Juju API types.
        if not (err is None or isinstance(err, (dict, Error))):
            raise Exception("Expected error_ to be a Error, received: {}".format(type(err)))
        if not (attachment is None or isinstance(attachment, (dict, VolumeAttachment))):
            raise Exception("Expected result_ to be a VolumeAttachment, received: {}".format(type(attachment)))
        self.error = err
        self.result = attachment
        self.unknown_fields = unknown_fields
class VolumeAttachmentResults(Type):
    _toSchema = {'results': 'results'}
    _toPy = {'results': 'results'}
    def __init__(self, results=None, **unknown_fields):
        '''
        results : typing.Sequence[~VolumeAttachmentResult]
        '''
        # Decode each entry; a missing sequence becomes an empty list.
        decoded = [VolumeAttachmentResult.from_json(entry) for entry in results or []]
        # Validate arguments against known Juju API types.
        if not (decoded is None or isinstance(decoded, (bytes, str, list))):
            raise Exception("Expected results_ to be a Sequence, received: {}".format(type(decoded)))
        self.results = decoded
        self.unknown_fields = unknown_fields
class VolumeAttachments(Type):
    _toSchema = {'volume_attachments': 'volume-attachments'}
    _toPy = {'volume-attachments': 'volume_attachments'}
    def __init__(self, volume_attachments=None, **unknown_fields):
        '''
        volume_attachments : typing.Sequence[~VolumeAttachment]
        '''
        # Decode each entry; a missing sequence becomes an empty list.
        attachments = [VolumeAttachment.from_json(entry) for entry in volume_attachments or []]
        # Validate arguments against known Juju API types.
        if not (attachments is None or isinstance(attachments, (bytes, str, list))):
            raise Exception("Expected volume_attachments_ to be a Sequence, received: {}".format(type(attachments)))
        self.volume_attachments = attachments
        self.unknown_fields = unknown_fields
class VolumeDetails(Type):
_toSchema = {'info': 'info', 'life': 'life', 'machine_attachments': 'machine-attachments', 'status': 'status', 'storage': 'storage', 'unit_attachments': 'unit-attachments', 'volume_tag': 'volume-tag'}
_toPy = {'info': 'info', 'life': 'life', 'machine-attachments': 'machine_attachments', 'status': 'status', 'storage': 'storage', 'unit-attachments': 'unit_attachments', 'volume-tag': 'volume_tag'}
def __init__(self, info=None, life=None, machine_attachments=None, status=None, storage=None, unit_attachments=None, volume_tag=None, **unknown_fields):
'''
info : VolumeInfo
life : str
machine_attachments : typing.Mapping[str, ~VolumeAttachmentDetails]
status : EntityStatus
storage : StorageDetails
unit_attachments : typing.Mapping[str, ~VolumeAttachmentDetails]
volume_tag : str
'''
info_ = VolumeInfo.from_json(info) if info else None
life_ = life
machine_attachments_ = machine_attachments
status_ = EntityStatus.from_json(status) if status else None
storage_ = StorageDetails.from_json(storage) if storage else None
unit_attachments_ = unit_attachments
volume_tag_ = volume_tag
# Validate arguments against known Juju API types.
if info_ is not None and not isinstance(info_, (dict, VolumeInfo)):
raise Exception("Expected info_ to be a VolumeInfo, received: {}".format(type(info_)))
if life_ is not None and not isinstance(life_, (bytes, str)):
raise Exception("Expected life_ to be a str, received: {}".format(type(life_)))
if machine_attachments_ is not None and not isinstance(machine_attachments_, dict):
raise Exception("Expected machine_attachments_ to be a | |
# src/sims4communitylib/enums/tags_enum.py
"""
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from sims4communitylib.enums.enumtypes.common_int import CommonInt
class CommonGameTag(CommonInt):
"""Identifiers for vanilla game tags (These have been gathered dynamically from the :class:`.Tag` enum).
"""
INVALID: 'CommonGameTag' = 0
AGE_APPROPRIATE_ADULT: 'CommonGameTag' = 84
AGE_APPROPRIATE_CHILD: 'CommonGameTag' = 85
AGE_APPROPRIATE_ELDER: 'CommonGameTag' = 72
AGE_APPROPRIATE_TEEN: 'CommonGameTag' = 291
AGE_APPROPRIATE_TODDLER: 'CommonGameTag' = 1657
AGE_APPROPRIATE_YOUNG_ADULT: 'CommonGameTag' = 71
APPEARANCE_MODIFIER_HAIR_MAKEUP_CHAIR_MAKEUP: 'CommonGameTag' = 61609
APPEARANCE_MODIFIER_HAIR_MAKEUP_CHAIR_HAIR_STYLE: 'CommonGameTag' = 61494
APPROPRIATENESS_BARTENDING: 'CommonGameTag' = 406
APPROPRIATENESS_BATHING: 'CommonGameTag' = 402
APPROPRIATENESS_CAKE: 'CommonGameTag' = 605
APPROPRIATENESS_CALL_TO_MEAL: 'CommonGameTag' = 1170
APPROPRIATENESS_CLEANING: 'CommonGameTag' = 404
APPROPRIATENESS_COMPUTER: 'CommonGameTag' = 1373
APPROPRIATENESS_COOKING: 'CommonGameTag' = 405
APPROPRIATENESS_DANCING: 'CommonGameTag' = 603
APPROPRIATENESS_EATING: 'CommonGameTag' = 604
APPROPRIATENESS_FRONT_DESK: 'CommonGameTag' = 12413
APPROPRIATENESS_GRAB_SNACK: 'CommonGameTag' = 939
APPROPRIATENESS_GUEST: 'CommonGameTag' = 367
APPROPRIATENESS_HIRED_WORKER: 'CommonGameTag' = 368
APPROPRIATENESS_HOST: 'CommonGameTag' = 370
APPROPRIATENESS_NOT_DURING_WORK: 'CommonGameTag' = 1274
APPROPRIATENESS_NOT_DURING_WORK_LUNCH: 'CommonGameTag' = 1275
APPROPRIATENESS_PHONE: 'CommonGameTag' = 1594
APPROPRIATENESS_PHONE_GAME: 'CommonGameTag' = 1626
APPROPRIATENESS_PLAY_INSTRUMENT: 'CommonGameTag' = 2156
APPROPRIATENESS_PLAYING: 'CommonGameTag' = 1539
APPROPRIATENESS_READ_BOOKS: 'CommonGameTag' = 1276
APPROPRIATENESS_SERVICE_NPC: 'CommonGameTag' = 369
APPROPRIATENESS_SHOWER: 'CommonGameTag' = 352
APPROPRIATENESS_SINGING: 'CommonGameTag' = 55385
APPROPRIATENESS_SLEEPING: 'CommonGameTag' = 403
APPROPRIATENESS_SNOW_SHOVELING: 'CommonGameTag' = 69706
APPROPRIATENESS_SOCIAL_PICKER: 'CommonGameTag' = 1645
APPROPRIATENESS_STEREO: 'CommonGameTag' = 530
APPROPRIATENESS_TIP: 'CommonGameTag' = 2155
APPROPRIATENESS_TOUCHING: 'CommonGameTag' = 1526
APPROPRIATENESS_TRASH: 'CommonGameTag' = 12423
APPROPRIATENESS_TV_WATCHING: 'CommonGameTag' = 1273
APPROPRIATENESS_VIEW: 'CommonGameTag' = 12428
APPROPRIATENESS_VISITOR: 'CommonGameTag' = 1497
APPROPRIATENESS_WORK_SCIENTIST: 'CommonGameTag' = 12297
APPROPRIATENESS_WORKOUT: 'CommonGameTag' = 1277
ARCHETYPE_AFRICAN: 'CommonGameTag' = 73
ARCHETYPE_ASIAN: 'CommonGameTag' = 75
ARCHETYPE_CAUCASIAN: 'CommonGameTag' = 76
ARCHETYPE_ISLAND: 'CommonGameTag' = 2206
ARCHETYPE_LATIN: 'CommonGameTag' = 312
ARCHETYPE_MIDDLE_EASTERN: 'CommonGameTag' = 74
ARCHETYPE_NORTH_AMERICAN: 'CommonGameTag' = 89
ARCHETYPE_SOUTH_ASIAN: 'CommonGameTag' = 88
AT_PO_BEACH: 'CommonGameTag' = 2194
AT_PO_BEACH_WALKBY: 'CommonGameTag' = 2204
AT_PO_BLOSSOM_GURU: 'CommonGameTag' = 55386
AT_PO_BUSKER: 'CommonGameTag' = 1571
AT_PO_DYNAMIC_SPAWN_POINT: 'CommonGameTag' = 1915
AT_PO_FIREWORKS: 'CommonGameTag' = 55399
AT_PO_FLEA_MARKET_VENDOR: 'CommonGameTag' = 55334
AT_PO_GO_FOR_WALK: 'CommonGameTag' = 1916
AT_PO_GO_FOR_WALK_LONG: 'CommonGameTag' = 57394
AT_PO_GO_FOR_WALK_LONG_02: 'CommonGameTag' = 57432
AT_PO_GO_FOR_WALK_LONG_03: 'CommonGameTag' = 57433
AT_PO_GO_FOR_WALK_MED_02: 'CommonGameTag' = 57436
AT_PO_GO_FOR_WALK_MED_03: 'CommonGameTag' = 57437
AT_PO_GO_FOR_WALK_MEDIUM: 'CommonGameTag' = 57393
AT_PO_GO_FOR_WALK_SHORT: 'CommonGameTag' = 57389
AT_PO_GO_FOR_WALK_SHORT_02: 'CommonGameTag' = 57434
AT_PO_GO_FOR_WALK_SHORT_03: 'CommonGameTag' = 57435
AT_PO_GUITAR: 'CommonGameTag' = 2158
AT_PO_MAGIC_DUELING: 'CommonGameTag' = 2222
AT_PO_PROTESTER: 'CommonGameTag' = 1582
AT_PO_TOURIST: 'CommonGameTag' = 1570
AT_PO_UNIVERSITY_QUAD: 'CommonGameTag' = 2230
BOTTOM_BIKINI: 'CommonGameTag' = 1235
BOTTOM_CROPPED: 'CommonGameTag' = 945
BOTTOM_JEANS: 'CommonGameTag' = 382
BOTTOM_LEGGINGS: 'CommonGameTag' = 381
BOTTOM_PANTS: 'CommonGameTag' = 152
BOTTOM_SHORTS: 'CommonGameTag' = 154
BOTTOM_SKIRT: 'CommonGameTag' = 153
BOTTOM_SWIMSHORT: 'CommonGameTag' = 1238
BOTTOM_SWIMWEAR: 'CommonGameTag' = 1544
BOTTOM_UNDERWEAR: 'CommonGameTag' = 1543
BOTTOM_UNDERWEAR_FEMALE: 'CommonGameTag' = 946
BOTTOM_UNDERWEAR_MALE: 'CommonGameTag' = 1040
BREED_CAT_ABYSSINIAN: 'CommonGameTag' = 1830
BREED_CAT_AMERICAN_BOBTAIL: 'CommonGameTag' = 1831
BREED_CAT_AMERICAN_LONGHAIR: 'CommonGameTag' = 1931
BREED_CAT_AMERICAN_SHORTHAIR: 'CommonGameTag' = 1833
BREED_CAT_AMERICAN_WIREHAIR: 'CommonGameTag' = 1834
BREED_CAT_BALINESE: 'CommonGameTag' = 1835
BREED_CAT_BENGAL: 'CommonGameTag' = 1836
BREED_CAT_BIRMAN: 'CommonGameTag' = 1837
BREED_CAT_BLACK_CAT: 'CommonGameTag' = 1838
BREED_CAT_BOMBAY: 'CommonGameTag' = 1839
BREED_CAT_BRITISH_LONGHAIR: 'CommonGameTag' = 1840
BREED_CAT_BRITISH_SHORTHAIR: 'CommonGameTag' = 1841
BREED_CAT_BURMESE: 'CommonGameTag' = 1842
BREED_CAT_CALICO: 'CommonGameTag' = 1843
BREED_CAT_CHARTREUX: 'CommonGameTag' = 1844
BREED_CAT_COLORPOINT_SHORTHAIR: 'CommonGameTag' = 1845
BREED_CAT_CORNISH_REX: 'CommonGameTag' = 1832
BREED_CAT_DEVON_REX: 'CommonGameTag' = 1846
BREED_CAT_EGYPTIAN_MAU: 'CommonGameTag' = 1847
BREED_CAT_GERMAN_REX: 'CommonGameTag' = 1848
BREED_CAT_HAVANA_BROWN: 'CommonGameTag' = 1849
BREED_CAT_HIMALAYAN: 'CommonGameTag' = 1850
BREED_CAT_JAPANESE_BOBTAIL: 'CommonGameTag' = 1851
BREED_CAT_JAVANESE: 'CommonGameTag' = 1852
BREED_CAT_KORAT: 'CommonGameTag' = 1853
BREED_CAT_KURILIAN_BOBTAIL: 'CommonGameTag' = 1854
BREED_CAT_LA_PERM: 'CommonGameTag' = 1855
BREED_CAT_LYKOI: 'CommonGameTag' = 1975
BREED_CAT_MAINE_COON: 'CommonGameTag' = 1856
BREED_CAT_MANX: 'CommonGameTag' = 1857
BREED_CAT_MIXED: 'CommonGameTag' = 1926
BREED_CAT_NORWEGIAN_FOREST: 'CommonGameTag' = 1858
BREED_CAT_OCICAT: 'CommonGameTag' = 1859
BREED_CAT_ORIENTAL: 'CommonGameTag' = 1860
BREED_CAT_ORIENTAL_SHORTHAIR: 'CommonGameTag' = 1861
BREED_CAT_PERSIAN: 'CommonGameTag' = 1862
BREED_CAT_RACCOON: 'CommonGameTag' = 1974
BREED_CAT_RAGDOLL: 'CommonGameTag' = 1863
BREED_CAT_RUSSIAN_BLUE: 'CommonGameTag' = 1864
BREED_CAT_SAVANNAH: 'CommonGameTag' = 1865
BREED_CAT_SCOTTISH_FOLD: 'CommonGameTag' = 1866
BREED_CAT_SHORTHAIR_TABBY: 'CommonGameTag' = 1867
BREED_CAT_SIAMESE: 'CommonGameTag' = 1868
BREED_CAT_SIBERIAN: 'CommonGameTag' = 1869
BREED_CAT_SINGAPURA: 'CommonGameTag' = 1870
BREED_CAT_SOMALI: 'CommonGameTag' = 1871
BREED_CAT_SPHYNX: 'CommonGameTag' = 1886
BREED_CAT_TONKINESE: 'CommonGameTag' = 1872
BREED_CAT_TURKISH_ANGORA: 'CommonGameTag' = 1873
BREED_CAT_TUXEDO_CAT: 'CommonGameTag' = 1874
BREED_GROUP_HERDING: 'CommonGameTag' = 1893
BREED_GROUP_HOUND: 'CommonGameTag' = 1894
BREED_GROUP_NON_SPORTING: 'CommonGameTag' = 1911
BREED_GROUP_SPORTING: 'CommonGameTag' = 1895
BREED_GROUP_TERRIER: 'CommonGameTag' = 1896
BREED_GROUP_TOY: 'CommonGameTag' = 1897
BREED_GROUP_WORKING: 'CommonGameTag' = 1898
BREED_LARGE_DOG_AFGHAN_HOUND: 'CommonGameTag' = 1814
BREED_LARGE_DOG_AIREDALE_TERRIER: 'CommonGameTag' = 1745
BREED_LARGE_DOG_AKITA: 'CommonGameTag' = 1746
BREED_LARGE_DOG_ALASKAN_MALAMUTE: 'CommonGameTag' = 1747
BREED_LARGE_DOG_AMERICAN_ESKIMO: 'CommonGameTag' = 1748
BREED_LARGE_DOG_AMERICAN_FOXHOUND: 'CommonGameTag' = 1797
BREED_LARGE_DOG_AUSTRALIAN_CATTLE_DOG: 'CommonGameTag' = 1750
BREED_LARGE_DOG_AUSTRALIAN_SHEPHERD: 'CommonGameTag' = 1735
BREED_LARGE_DOG_BEDLINGTON_TERRIER: 'CommonGameTag' = 1950
BREED_LARGE_DOG_BERNESE_MOUNTAIN_DOG: 'CommonGameTag' = 1751
BREED_LARGE_DOG_BLACK_AND_TAN_COONHOUND: 'CommonGameTag' = 1798
BREED_LARGE_DOG_BLACK_RUSSIAN_TERRIER: 'CommonGameTag' = 1961
BREED_LARGE_DOG_BLOODHOUND: 'CommonGameTag' = 1753
BREED_LARGE_DOG_BLUETICK_COONHOUND: 'CommonGameTag' = 1796
BREED_LARGE_DOG_BORDER_COLLIE: 'CommonGameTag' = 1736
BREED_LARGE_DOG_BORZOI: 'CommonGameTag' = 1826
BREED_LARGE_DOG_BOXER: 'CommonGameTag' = 1755
BREED_LARGE_DOG_BRITTANY: 'CommonGameTag' = 1816
BREED_LARGE_DOG_BULLMASTIFF: 'CommonGameTag' = 1951
BREED_LARGE_DOG_CANAAN: 'CommonGameTag' = 1952
BREED_LARGE_DOG_CHESAPEAKE_BAY_RETRIEVER: 'CommonGameTag' = 1795
BREED_LARGE_DOG_CHOW_CHOW: 'CommonGameTag' = 1759
BREED_LARGE_DOG_CHOW_LAB_MIX: 'CommonGameTag' = 1953
BREED_LARGE_DOG_COLLIE: 'CommonGameTag' = 1740
BREED_LARGE_DOG_CURLY_COATED_RETRIEVER: 'CommonGameTag' = 1794
BREED_LARGE_DOG_DALMATIAN: 'CommonGameTag' = 1741
BREED_LARGE_DOG_DINGO: 'CommonGameTag' = 1954
BREED_LARGE_DOG_DOBERMAN: 'CommonGameTag' = 1742
BREED_LARGE_DOG_DOBERMAN_PINSCHER: 'CommonGameTag' = 1761
BREED_LARGE_DOG_ENGLISH_FOXHOUND: 'CommonGameTag' = 1821
BREED_LARGE_DOG_ENGLISH_SETTER: 'CommonGameTag' = 1819
BREED_LARGE_DOG_ENGLISH_SPRINGER_SPANIEL: 'CommonGameTag' = 1762
BREED_LARGE_DOG_FIELD_SPANIEL: 'CommonGameTag' = 1801
BREED_LARGE_DOG_GERMAN_POINTER: 'CommonGameTag' = 1737
BREED_LARGE_DOG_GERMAN_SHEPHERD: 'CommonGameTag' = 1743
BREED_LARGE_DOG_GIANT_SCHNAUZER: 'CommonGameTag' = 1792
BREED_LARGE_DOG_GOLDEN_DOODLE: 'CommonGameTag' = 1800
BREED_LARGE_DOG_GOLDEN_RETRIEVER: 'CommonGameTag' = 1731
BREED_LARGE_DOG_GREAT_DANE: 'CommonGameTag' = 1734
BREED_LARGE_DOG_GREAT_PYRANEES: 'CommonGameTag' = 1955
BREED_LARGE_DOG_GREYHOUND: 'CommonGameTag' = 1764
BREED_LARGE_DOG_HUSKY: 'CommonGameTag' = 1744
BREED_LARGE_DOG_IBIZAN: 'CommonGameTag' = 1738
BREED_LARGE_DOG_IRISH_RED_AND_WHITE_SETTER: 'CommonGameTag' = 1802
BREED_LARGE_DOG_IRISH_SETTER: 'CommonGameTag' = 1803
BREED_LARGE_DOG_IRISH_TERRIER: 'CommonGameTag' = 1828
BREED_LARGE_DOG_IRISH_WOLFHOUND: 'CommonGameTag' = 1827
BREED_LARGE_DOG_KEESHOND: 'CommonGameTag' = 1767
BREED_LARGE_DOG_KERRY_BLUE_TERRIER: 'CommonGameTag' = 1956
BREED_LARGE_DOG_LABRADOODLE: 'CommonGameTag' = 1957
BREED_LARGE_DOG_LABRADOR_RETRIEVER: 'CommonGameTag' = 1768
BREED_LARGE_DOG_MASTIFF: 'CommonGameTag' = 1804
BREED_LARGE_DOG_MIXED: 'CommonGameTag' = 1928
BREED_LARGE_DOG_NEWFOUNDLAND: 'CommonGameTag' = 1769
BREED_LARGE_DOG_NORWEGIAN_ELK_SHEPHERD: 'CommonGameTag' = 1958
BREED_LARGE_DOG_OLD_ENGLISH_SHEEPDOG: 'CommonGameTag' = 1771
BREED_LARGE_DOG_OTTERHOUND: 'CommonGameTag' = 1772
BREED_LARGE_DOG_PHARAOH_HOUND: 'CommonGameTag' = 1774
BREED_LARGE_DOG_PIT_BULL: 'CommonGameTag' = 1749
BREED_LARGE_DOG_POINTER: 'CommonGameTag' = 1775
BREED_LARGE_DOG_POLISH_LOWLAND_SHEEPDOG: 'CommonGameTag' = 1807
BREED_LARGE_DOG_POODLE: 'CommonGameTag' = 1777
BREED_LARGE_DOG_PORTUGUESE_WATER_DOG: 'CommonGameTag' = 1791
BREED_LARGE_DOG_REDBONE_COONHOUND: 'CommonGameTag' = 1810
BREED_LARGE_DOG_RHODESIAN_RIDGEBACK: 'CommonGameTag' = 1815
BREED_LARGE_DOG_ROTTWEILER: 'CommonGameTag' = 1779
BREED_LARGE_DOG_SAINT_BERNARD: 'CommonGameTag' = 1780
BREED_LARGE_DOG_SAMOYED: 'CommonGameTag' = 1781
BREED_LARGE_DOG_SCHNAUZER: 'CommonGameTag' = 1732
BREED_LARGE_DOG_SHAR_PEI: 'CommonGameTag' = 1959
BREED_LARGE_DOG_SIBERIAN_HUSKY: 'CommonGameTag' = 1812
BREED_LARGE_DOG_TIBETAN_MASTIFF: 'CommonGameTag' = 1960
BREED_LARGE_DOG_VIZSLA: 'CommonGameTag' = 1809
BREED_LARGE_DOG_WEIMARANER: 'CommonGameTag' = 1788
BREED_LARGE_DOG_WELSH_SPRINGER_SPANIEL: 'CommonGameTag' = 1808
BREED_LARGE_DOG_WHEATENS_TERRIER: 'CommonGameTag' = 1962
BREED_NONE: 'CommonGameTag' = 1733
BREED_SMALL_DOG_BASENJI: 'CommonGameTag' = 1817
BREED_SMALL_DOG_BEAGLE: 'CommonGameTag' = 1739
BREED_SMALL_DOG_BICHON_FRISE: 'CommonGameTag' = 1752
BREED_SMALL_DOG_BOCKER: 'CommonGameTag' = 1963
BREED_SMALL_DOG_BOSTON_TERRIER: 'CommonGameTag' = 1754
BREED_SMALL_DOG_BULL_TERRIER: 'CommonGameTag' = 1829
BREED_SMALL_DOG_BULLDOG: 'CommonGameTag' = 1756
BREED_SMALL_DOG_CARDIGAN_WELSH_CORGI: 'CommonGameTag' = 1964
BREED_SMALL_DOG_CAVALIER_KING_CHARLES_SPANIEL: 'CommonGameTag' = 1757
BREED_SMALL_DOG_CHIHUAHUA: 'CommonGameTag' = 1758
BREED_SMALL_DOG_COCKER_SPANIEL: 'CommonGameTag' = 1760
BREED_SMALL_DOG_COCKAPOO: 'CommonGameTag' = 1965
BREED_SMALL_DOG_DASCHUND: 'CommonGameTag' = 1966
BREED_SMALL_DOG_ENGLISH_COCKER_SPANIEL: 'CommonGameTag' = 1818
BREED_SMALL_DOG_ENGLISH_TOY_SPANIEL: 'CommonGameTag' = 1967
BREED_SMALL_DOG_FOX: 'CommonGameTag' = 1968
BREED_SMALL_DOG_FRENCH_BULLDOG: 'CommonGameTag' = 1763
BREED_SMALL_DOG_HAVANESE: 'CommonGameTag' = 1793
BREED_SMALL_DOG_ICELANDIC_SHEEP_DOG: 'CommonGameTag' = 1993
BREED_SMALL_DOG_ITALIAN_GREYHOUND: 'CommonGameTag' = 1825
BREED_SMALL_DOG_JACK_RUSSEL_TERRIER: 'CommonGameTag' = 1766
BREED_SMALL_DOG_LHASA_APSO: 'CommonGameTag' = 1823
BREED_SMALL_DOG_MALTESE: 'CommonGameTag' = 1943
BREED_SMALL_DOG_MINIATURE_PINSCHER: 'CommonGameTag' = 1805
BREED_SMALL_DOG_MINIATURE_POODLE: 'CommonGameTag' = 1969
BREED_SMALL_DOG_MINIATURE_SCHNAUZER: 'CommonGameTag' = 1806
BREED_SMALL_DOG_MIXED: 'CommonGameTag' = 1927
BREED_SMALL_DOG_NORWEGIAN_BUHUND: 'CommonGameTag' = 1992
BREED_SMALL_DOG_PAPILLON: 'CommonGameTag' = 1773
BREED_SMALL_DOG_PARSON_RUSSEL_TERRIER: 'CommonGameTag' = 1970
BREED_SMALL_DOG_PEKINGESE: 'CommonGameTag' = 1770
BREED_SMALL_DOG_PEMBROKE_WELSH_CORGI: 'CommonGameTag' = 1971
BREED_SMALL_DOG_POMERANIAN: 'CommonGameTag' = 1776
BREED_SMALL_DOG_PUG: 'CommonGameTag' = 1778
BREED_SMALL_DOG_PUGGLE: 'CommonGameTag' = 1820
BREED_SMALL_DOG_SCHIPPERKE: 'CommonGameTag' = 1782
BREED_SMALL_DOG_SCHNOODLE: 'CommonGameTag' = 1972
BREED_SMALL_DOG_SCOTTISH_TERRIER: 'CommonGameTag' = 1783
BREED_SMALL_DOG_SHETLAND_SHEEPDOG: 'CommonGameTag' = 1811
BREED_SMALL_DOG_SHIBA_INU: 'CommonGameTag' = 1784
BREED_SMALL_DOG_SHIH_TZU: 'CommonGameTag' = 1785
BREED_SMALL_DOG_SILKY_TERRIER: 'CommonGameTag' = 1973
BREED_SMALL_DOG_SMOOTH_FOX_TERRIER: 'CommonGameTag' = 1813
BREED_SMALL_DOG_SPITZ: 'CommonGameTag' = 1991
BREED_SMALL_DOG_STAFFORDSHIRE_BULL_TERRIER: 'CommonGameTag' = 1824
BREED_SMALL_DOG_STANDARD_SCHNAUZER: 'CommonGameTag' = 1786
BREED_SMALL_DOG_TOY_FOX_TERRIER: 'CommonGameTag' = 1787
BREED_SMALL_DOG_WEST_HIGHLAND_WHITE_TERRIER: 'CommonGameTag' = 1822
BREED_SMALL_DOG_WHIPPET: 'CommonGameTag' = 1799
BREED_SMALL_DOG_WIRE_FOX_TERRIER: 'CommonGameTag' = 1789
BREED_SMALL_DOG_YORKSHIRE_TERRIER: 'CommonGameTag' = 1790
BUFF_APPEARANCE_MODIFIER_MAKEUP: 'CommonGameTag' = 2154
BUFF_BUSINESS_CUSTOMER_STAR_RATING: 'CommonGameTag' = 1551
BUFF_BUSINESS_EMPLOYEE_TRAINING: 'CommonGameTag' = 1548
BUFF_CAULDRON_POTION_MAKE_GLOWY_FAILURE_VFX: 'CommonGameTag' = 49168
BUFF_DAY_NIGHT_TRACKING: 'CommonGameTag' = 1678
BUFF_HUMANOID_ROBOT_MOOD_VFX: 'CommonGameTag' = 65653
BUFF_MYSTICAL_RELIC_CURSE: 'CommonGameTag' = 45079
BUFF_OWNABLE_RESTAURANT_CUSTOMER: 'CommonGameTag' = 2150
BUFF_POSSESSED_BUFFS: 'CommonGameTag' = 47139
BUFF_POSSESSED_BUFFS_NO_ANIMATE: 'CommonGameTag' = 47148
BUFF_SPELLS_CASTING_SPELL: 'CommonGameTag' = 49157
BUFF_TEMPERATURE: 'CommonGameTag' = 2481
BUFF_VAMPIRE_SUNLIGHT: 'CommonGameTag' = 40989
BUFF_WEATHER: 'CommonGameTag' = 59431
BUILD_ARCH: 'CommonGameTag' = 561
BUILD_BB_GAMEPLAY_EFFECT_COLUMNS_BILLS_DECREASE: 'CommonGameTag' = 2419
BUILD_BB_GAMEPLAY_EFFECT_COLUMNS_BILLS_INCREASE: 'CommonGameTag' = 2420
BUILD_BB_GAMEPLAY_EFFECT_COLUMNS_ECO_FOOTPRINT_MINUS1: 'CommonGameTag' = 2413
BUILD_BB_GAMEPLAY_EFFECT_COLUMNS_ECO_FOOTPRINT_MINUS2: 'CommonGameTag' = 2414
BUILD_BB_GAMEPLAY_EFFECT_COLUMNS_ECO_FOOTPRINT_PLUS1: 'CommonGameTag' = 2411
BUILD_BB_GAMEPLAY_EFFECT_COLUMNS_ECO_FOOTPRINT_PLUS2: 'CommonGameTag' = 2412
BUILD_BB_GAMEPLAY_EFFECT_COLUMNS_ENVIRONMENT_SCORE_MINUS1: 'CommonGameTag' = 2417
BUILD_BB_GAMEPLAY_EFFECT_COLUMNS_ENVIRONMENT_SCORE_MINUS2: 'CommonGameTag' = 2418
BUILD_BB_GAMEPLAY_EFFECT_COLUMNS_ENVIRONMENT_SCORE_PLUS1: 'CommonGameTag' = 2415
BUILD_BB_GAMEPLAY_EFFECT_COLUMNS_ENVIRONMENT_SCORE_PLUS2: 'CommonGameTag' = 2416
BUILD_BB_GAMEPLAY_EFFECT_FENCES_BILLS_DECREASE: 'CommonGameTag' = 2409
BUILD_BB_GAMEPLAY_EFFECT_FENCES_BILLS_INCREASE: 'CommonGameTag' = 2410
BUILD_BB_GAMEPLAY_EFFECT_FENCES_ECO_FOOTPRINT_MINUS1: 'CommonGameTag' = 2403
BUILD_BB_GAMEPLAY_EFFECT_FENCES_ECO_FOOTPRINT_MINUS2: 'CommonGameTag' = 2404
BUILD_BB_GAMEPLAY_EFFECT_FENCES_ECO_FOOTPRINT_PLUS1: 'CommonGameTag' = 2401
BUILD_BB_GAMEPLAY_EFFECT_FENCES_ECO_FOOTPRINT_PLUS2: 'CommonGameTag' = 2402
BUILD_BB_GAMEPLAY_EFFECT_FENCES_ENVIRONMENT_SCORE_MINUS1: 'CommonGameTag' = 2407
BUILD_BB_GAMEPLAY_EFFECT_FENCES_ENVIRONMENT_SCORE_MINUS2: 'CommonGameTag' = 2408
BUILD_BB_GAMEPLAY_EFFECT_FENCES_ENVIRONMENT_SCORE_PLUS1: 'CommonGameTag' = 2405
BUILD_BB_GAMEPLAY_EFFECT_FENCES_ENVIRONMENT_SCORE_PLUS2: 'CommonGameTag' = 2406
BUILD_BB_GAMEPLAY_EFFECT_FLOOR_PATTERN_DECREASE_BILLS: 'CommonGameTag' = 2329
BUILD_BB_GAMEPLAY_EFFECT_FLOOR_PATTERN_ECO_FOOTPRINT_MINUS1: 'CommonGameTag' = 2308
BUILD_BB_GAMEPLAY_EFFECT_FLOOR_PATTERN_ECO_FOOTPRINT_MINUS2: 'CommonGameTag' = 2309
| |
import math
import gym
import cv2
import matplotlib.pyplot as plt
import time
import json
import random
import numpy as np
import networkx as nx
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import matplotlib
matplotlib.use('Agg')
from collections import deque
# --- Runtime configuration for the Pong DQN experiment ---
# NOTE: DEVICE used to be assigned twice (first the string "cpu", then
# torch.device('cpu')); only the single effective definition is kept.
DEVICE = torch.device('cpu')  # use torch.device("cuda" if torch.cuda.is_available() else "cpu") for GPU
ENVIRONMENT = "PongDeterministic-v4"

SAVE_MODELS = False  # Save models to file so you can test later
MODEL_PATH = "./models/pong-cnn-"  # Models path for saving or loading
SAVE_MODEL_INTERVAL = 10  # Save models at every X epoch
TRAIN_MODEL = False  # Train model while playing (Make it False when testing a model)
LOAD_MODEL_FROM_FILE = True  # Load model from file
LOAD_FILE_EPISODE = 900  # Load Xth episode from file

BATCH_SIZE = 64  # Minibatch size that select randomly from mem for train nets
MAX_EPISODE = 100000  # Max episode
MAX_STEP = 100000  # Max step size for one episode
MAX_MEMORY_LEN = 50000  # Max memory len
MIN_MEMORY_LEN = 40000  # Min memory len before start train

GAMMA = 0.97  # Discount rate
ALPHA = 0.00025  # Learning rate
EPSILON_DECAY = 0.99  # Epsilon decay rate by step

RENDER_GAME_WINDOW = True  # Opens a new window to render the game (Won't work on colab default)
def KLDivergence(p, q):
    """Kullback-Leibler divergence KL(p || q).

    p and q are 1-D tensors of positive probabilities, each summing to 1.
    Returns sum_i p_i * (log p_i - log q_i) as a 0-dim tensor.
    """
    log_ratio = torch.log(p) - torch.log(q)
    return torch.dot(p, log_ratio)
"""
ENVIRONMENT = "PongDeterministic-v4"
SAVE_MODELS = False # Save models to file so you can test later
MODEL_PATH = "./models/pong-cnn-" # Models path for saving or loading
SAVE_MODEL_INTERVAL = 10 # Save models at every X epoch
TRAIN_MODEL = False # Train model while playing (Make it False when testing a model)
MINVALUE=0.1
LOAD_MODEL_FROM_FILE = True # Load model from file
LOAD_FILE_EPISODE = 900 # Load Xth episode from file
BATCH_SIZE = 64 # Minibatch size that select randomly from mem for train nets
MAX_EPISODE = 100000 # Max episode
MAX_STEP = 100000 # Max step size for one episode
MAX_MEMORY_LEN = 50000 # Max memory len
MIN_MEMORY_LEN = 40000 # Min memory len before start train
GAMMA = 0.97 # Discount rate
ALPHA = 0.00025 # Learning rate
EPSILON_DECAY = 0.99 # Epsilon decay rate by step
RENDER_GAME_WINDOW = True # Opens a new window to render the game (Won't work on colab default)
"""
class DuelCNN(nn.Module):
"""
CNN with Duel Algo. https://arxiv.org/abs/1511.06581
"""
def __init__(self, h, w, output_size):
super(DuelCNN, self).__init__()
#These will be used to reference logits before the last summing up layer of the network
#to compute gradients w.r.t. the inputs
self.advantageEstimation = torch.empty(0, device=DEVICE, dtype=torch.float)
self.valueEstimation = torch.empty(0, device=DEVICE, dtype=torch.float)
self.conv1 = nn.Conv2d(in_channels=4, out_channels=32, kernel_size=8, stride=4)
self.bn1 = nn.BatchNorm2d(32)
convw, convh = self.conv2d_size_calc(w, h, kernel_size=8, stride=4)
self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2)
self.bn2 = nn.BatchNorm2d(64)
convw, convh = self.conv2d_size_calc(convw, convh, kernel_size=4, stride=2)
self.conv3 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1)
self.bn3 = nn.BatchNorm2d(64)
convw, convh = self.conv2d_size_calc(convw, convh, kernel_size=3, stride=1)
linear_input_size = convw * convh * 64 # Last conv layer's out sizes
# Action layer
self.Alinear1 = nn.Linear(in_features=linear_input_size, out_features=128)
self.Alrelu = nn.LeakyReLU() # Linear 1 activation funct
self.Alinear2 = nn.Linear(in_features=128, out_features=output_size)
# State Value layer
self.Vlinear1 = nn.Linear(in_features=linear_input_size, out_features=128)
self.Vlrelu = nn.LeakyReLU() # Linear 1 activation funct
self.Vlinear2 = nn.Linear(in_features=128, out_features=1) # Only 1 node
def conv2d_size_calc(self, w, h, kernel_size=5, stride=2):
"""
Calcs conv layers output image sizes
"""
next_w = (w - (kernel_size - 1) - 1) // stride + 1
next_h = (h - (kernel_size - 1) - 1) // stride + 1
return next_w, next_h
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = x.view(x.size(0), -1) # Flatten every batch
Ax = self.Alrelu(self.Alinear1(x))
Ax = self.Alinear2(Ax) # No activation on last layer
#Get reference to advantage estimation
self.advantageEstimation = Ax.clone()
Vx = self.Vlrelu(self.Vlinear1(x))
Vx = self.Vlinear2(Vx) # No activation on last layer
# Get reference for the value estimation
self.valueEstimation = Vx.clone()
q = Vx + (Ax - Ax.mean())
return q
    # Separates the network computation into 4 sub-graphs:
    # x -> postConv1=self.preReLU1 -> postConv2=self.preReLU2 -> postConv3=self.preReLU3 -> y
    # Guided backpropagation (getGuidedBP) then back-propagates through each
    # sub-graph in turn, applying ReLU to the gradients between graphs.
    def guidedforward(self, x):
        # Each preReLUi is a detached copy with requires_grad, so backward()
        # stops at that tensor and its .grad can be filtered before being
        # fed into the previous sub-graph.
        self.postConv1 = self.bn1(self.conv1(x))
        self.preReLU1 = self.postConv1.detach().clone().requires_grad_(True)
        self.postConv2 = self.bn2(self.conv2(F.relu(self.preReLU1)))
        self.preReLU2 = self.postConv2.detach().clone().requires_grad_(True)
        self.postConv3 = self.bn3(self.conv3(F.relu(self.preReLU2)))
        self.preReLU3 = self.postConv3.detach().clone().requires_grad_(True)
        x = F.relu(self.preReLU3)
        x = x.view(x.size(0), -1) # Flatten every batch
        # Heads are identical to forward(); clones of the raw logits are kept
        # so getGuidedBP can call backward() on them.
        Ax = self.Alrelu(self.Alinear1(x))
        Ax = self.Alinear2(Ax) # No activation on last layer
        self.advantageEstimation = Ax.clone()
        Vx = self.Vlrelu(self.Vlinear1(x))
        Vx = self.Vlinear2(Vx) # No activation on last layer
        self.valueEstimation = Vx.clone()
        # Dueling aggregation: Q = V + (A - mean(A)).
        q = Vx + (Ax - Ax.mean())
        return q
    # Inputs:
    #   x: 4 channel input to the neural network
    #   mode: 'value' or 'advantage'
    #     If 'value': return the saliency map associated with the value estimate.
    #     If 'advantage': 'action' must be set to the index of the desired
    #       action to compute the saliency map for.
    # Returns a 4xHxW 4-channel saliency map, normalised into [-1, 1] as a
    # whole (the entry of largest magnitude becomes 1 or -1).
    def getGuidedBP(self, x, mode='value', action=None):
        # guidedforward has split the computation into 4 sub-graphs:
        # x->postConv1=self.preReLU1->self.postConv2=self.preReLU2->self.postConv3=self.preReLU3->y
        # Below, backward() is run on each sub-graph in turn, clamping
        # negative gradients to zero between graphs (guided backprop).
        if mode != 'value':
            if mode != 'advantage':
                raise ValueError("mode needs to be 'value' or 'advantage'!")
            else:
                # NOTE(review): 'action == None' would be more idiomatic as
                # 'action is None'; behavior is the same for these inputs.
                if action == None or action < 0:
                    raise ValueError("If mode=='advantage', set non-negative action index to input 'action'.")
        self.zero_grad()
        inputs = torch.tensor(x, requires_grad=True, device=DEVICE, dtype=torch.float)
        self.guidedforward(inputs.unsqueeze(0))
        # Backward through the last sub-graph from the requested scalar.
        if mode == 'value':
            self.valueEstimation.backward()
        else:
            self.advantageEstimation[0][action].backward()
        # For the remaining sub-graphs, F.threshold(grad, 0.0, 0.0) zeroes
        # negative gradient entries (the "guided" ReLU on gradients) before
        # propagating them into the previous sub-graph.
        self.postConv3.backward(gradient=F.threshold(self.preReLU3.grad, 0.0, 0.0))
        self.postConv2.backward(gradient=F.threshold(self.preReLU2.grad, 0.0, 0.0))
        self.postConv1.backward(gradient=F.threshold(self.preReLU1.grad, 0.0, 0.0))
        # Normalise by the largest absolute entry so the map lies in [-1, 1].
        # NOTE(review): divides by zero if the input gradient is all zeros.
        saliency = inputs.grad.clone()
        AbsSaliency = torch.abs(saliency.clone())
        saliency = saliency / torch.max(AbsSaliency)
        return saliency
def getSaliencyMap(self, x, mode='value', action=None):
    """Compute a vanilla-gradient saliency map for input ``x``.

    Parameters
    ----------
    x : array-like
        4-channel input to the network.
    mode : str
        'value'     -> saliency of the value estimate V(s)
        'advantage' -> saliency of one action's advantage; ``action`` must
                       then be a non-negative action index.
    action : int or None
        Action index, only used when mode == 'advantage'.

    Returns
    -------
    torch.Tensor
        4xHxW saliency map normalised in [-1, 1] as a whole.

    Raises
    ------
    ValueError
        If ``mode`` is invalid, or mode == 'advantage' without a valid action.
    """
    # Flat validation (the original nested the checks); use ``is None``
    # identity comparison instead of the buggy-idiom ``== None``.
    if mode not in ('value', 'advantage'):
        raise ValueError("mode needs to be 'value' or 'advantage'!")
    if mode == 'advantage' and (action is None or action < 0):
        raise ValueError("If mode=='advantage', set non-negative action index to input 'action'.")

    self.zero_grad()
    inputs = torch.tensor(x, requires_grad=True, device=DEVICE, dtype=torch.float)
    self.forward(inputs.unsqueeze(0))

    # Backprop the requested scalar to the input pixels.
    if mode == 'value':
        self.valueEstimation.backward()
    else:
        self.advantageEstimation[0][action].backward()

    saliency = inputs.grad.clone()
    # NOTE(review): all-zero gradients would divide by zero here (NaNs) --
    # behavior kept from the original.
    saliency = saliency / torch.max(torch.abs(saliency))
    return saliency
class Agent:
def __init__(self, environment):
"""
Hyperparameters definition for Agent
"""
# State size for breakout env. SS images (210, 160, 3). Used as input size in network
self.state_size_h = environment.observation_space.shape[0]
self.state_size_w = environment.observation_space.shape[1]
self.state_size_c = environment.observation_space.shape[2]
# Activation size for breakout env. Used as output size in network
self.action_size = environment.action_space.n
# Image pre process params
self.original_h = 210
self.original_w = 160
self.target_h = 80 # Height after process
self.target_w = 64 # Widht after process
self.crop_dim = [20, self.state_size_h, 0,
self.state_size_w] # Cut 20 px from top to get rid of the score table
# Trust rate to our experiences
#self.gamma = GAMMA # Discount coef for future predictions
#self.alpha = ALPHA # Learning Rate
# After many experinces epsilon will be 0.05
# So we will do less Explore more Exploit
self.epsilon = 1 # Explore or Exploit
# self.epsilon_decay = EPSILON_DECAY # Adaptive Epsilon Decay Rate
self.epsilon_minimum = 0.05 # Minimum for Explore
# Deque holds replay mem.
# self.memory = deque(maxlen=MAX_MEMORY_LEN)
# Create two model for DDQN algorithm
self.online_model = DuelCNN(h=self.target_h, w=self.target_w, output_size=self.action_size).to(DEVICE)
self.target_model = DuelCNN(h=self.target_h, w=self.target_w, output_size=self.action_size).to(DEVICE)
self.target_model.load_state_dict(self.online_model.state_dict())
self.target_model.eval()
# Adam used as optimizer
#self.optimizer = optim.Adam(self.online_model.parameters(), lr=self.alpha)
# When we apply methods to get maps on the pixels in the order the agent takes them,
# We need to transpose it to normal size, this is done by Agent.postProcess
#But, in order to test that function, or do occlusion, we need to | |
(L*~R*L) * S
if not rel.is_one():
verbose("Failed relation A1")
return False
rel = ~S*R*S*R**(-25)
if not rel.is_one():
verbose("Failed relation A2")
return False
rel = (S*R**5*L*~R*L)**3 * ~(L * ~R * L)**2
if not rel.is_one():
verbose("Failed relation A3")
return False
return True
else:
# e>1, m>1
onehalf = ZZ(2).inverse_mod(m) # i.e. 2^(-1) mod m
onefifth = ZZ(5).inverse_mod(e) # i.e. 5^(-1) mod e
c,d = arith.CRT_basis([m, e])
# c=0 mod e, c=1 mod m; d=1 mod e, d=0 mod m
a = L**c
b = R**c
l = L**d
r = R**d
s = l**20 * r**onefifth * l**(-4) * ~r
#Congruence if the seven permutations below are trivial:
rel =~a*~r*a*r
if not rel.is_one():
verbose("Failed relation B1")
return False
rel = (a*~b*a)**4
if not rel.is_one():
verbose("Failed relation B2")
return False
rel = (a*~b*a)**2*(~a*b)**3
if not rel.is_one():
verbose("Failed relation B3")
return False
rel = (a*~b*a)**2*(b*b*a**(-onehalf))**(-3)
if not rel.is_one():
verbose("Failed relation B4")
return False
rel = (~l*r*~l)*s*(l*~r*l)*s
if not rel.is_one():
verbose("Failed relation B5")
return False
rel = ~s*r*s*r**(-25)
if not rel.is_one():
verbose("Failed relation B6")
return False
rel = (l*~r*l)**2*(s*r**5*l*~r*l)**(-3)
if not rel.is_one():
verbose("Failed relation B7")
return False
return True
def surgroups(self):
    r"""
    Iterate through the non-trivial intermediate groups between
    `SL(2,\ZZ)` and this finite index group.

    Each block system of the coset permutation group gives rise to one
    intermediate group, obtained from the induced action on the orbit of
    the block.

    EXAMPLES::

        sage: G = ArithmeticSubgroup_Permutation(S2="(1,2)(3,4)(5,6)", S3="(1,2,3)(4,5,6)")
        sage: H = next(G.surgroups())
        sage: H
        Arithmetic subgroup with permutations of right cosets
         S2=(1,2)
         S3=(1,2,3)
         L=(1,3)
         R=(2,3)
        sage: G.is_subgroup(H)
        True

    The principal congruence group `\Gamma(3)` has thirteen surgroups::

        sage: G = Gamma(3).as_permutation_group()
        sage: G.index()
        24
        sage: l = []
        sage: for H in G.surgroups():
        ....:     l.append(H.index())
        ....:     assert G.is_subgroup(H) and H.is_congruence()
        sage: l
        [6, 3, 4, 8, 4, 8, 4, 12, 4, 6, 6, 8, 8]
    """
    from sage.interfaces.gap import gap

    gap_group = self.perm_group()._gap_()
    # One intermediate group per block system of the coset action.
    for block in gap_group.AllBlocks():
        block_orbit = gap_group.Orbit(block, gap.OnSets)
        induced = gap_group.Action(block_orbit, gap.OnSets)
        s2, s3, ell, r = induced.GeneratorsOfGroup()
        yield ArithmeticSubgroup_Permutation(S2=s2, S3=s3, L=ell, R=r, check=False)
class OddArithmeticSubgroup_Permutation(ArithmeticSubgroup_Permutation_class):
    r"""
    An arithmetic subgroup of `{\rm SL}(2, \ZZ)` not containing `-1`,
    represented in terms of the right action of `{\rm SL}(2, \ZZ)` on its
    cosets.

    EXAMPLES::

        sage: G = ArithmeticSubgroup_Permutation(S2="(1,2,3,4)",S3="(1,3)(2,4)")
        sage: G
        Arithmetic subgroup with permutations of right cosets
         S2=(1,2,3,4)
         S3=(1,3)(2,4)
         L=(1,2,3,4)
         R=(1,4,3,2)
        sage: type(G)
        <class 'sage.modular.arithgroup.arithgroup_perm.OddArithmeticSubgroup_Permutation_with_category'>
    """
    def __init__(self, S2, S3, L, R, canonical_labels=False):
        r"""
        TESTS::

            sage: G = ArithmeticSubgroup_Permutation(S2="(1,2,3,4)",S3="(1,3)(2,4)")
            sage: G
            Arithmetic subgroup with permutations of right cosets
             S2=(1,2,3,4)
             S3=(1,3)(2,4)
             L=(1,2,3,4)
             R=(1,4,3,2)
            sage: TestSuite(G).run()
        """
        # The four permutations describing the coset action of the
        # generators S2, S3, L, R of SL(2,Z).
        self._S2 = S2
        self._S3 = S3
        self._L = L
        self._R = R
        # A canonically-labelled group is its own canonical representative.
        if canonical_labels:
            self._canonical_label_group = self
        ArithmeticSubgroup_Permutation_class.__init__(self)

    def __reduce__(self):
        r"""
        Return the data used to construct self. Used in pickling.

        TESTS::

            sage: G = ArithmeticSubgroup_Permutation(S2="(1,2,3,4)",S3="(1,3)(2,4)")
            sage: G == loads(dumps(G)) #indirect doctest
            True
            sage: G = ArithmeticSubgroup_Permutation(S2="(1,2,3,4)",S3="(1,3)(2,4)",relabel=True)
            sage: GG = loads(dumps(G))
            sage: GG == G #indirect doctest
            True
            sage: GG.relabel(inplace=False) is GG
            True
        """
        # Preserve the canonical-label flag: it is set exactly when this
        # group is its own canonical representative.
        if hasattr(self,'_canonical_label_group'):
            canonical_labels = (self is self._canonical_label_group)
        else:
            canonical_labels = False
        return (OddArithmeticSubgroup_Permutation,
                (self._S2,self._S3,self._L,self._R,canonical_labels))

    def is_odd(self):
        r"""
        Test whether the group is odd.

        EXAMPLES::

            sage: G = ArithmeticSubgroup_Permutation(S2="(1,6,4,3)(2,7,5,8)",S3="(1,2,3,4,5,6)(7,8)")
            sage: G.is_odd()
            True
        """
        # By construction instances of this class never contain -Id.
        return True

    def is_even(self):
        r"""
        Test whether the group is even.

        EXAMPLES::

            sage: G = ArithmeticSubgroup_Permutation(S2="(1,6,4,3)(2,7,5,8)",S3="(1,2,3,4,5,6)(7,8)")
            sage: G.is_even()
            False
        """
        return False

    def to_even_subgroup(self,relabel=True):
        r"""
        Returns the group with `-Id` added in it.

        EXAMPLES::

            sage: G = Gamma1(3).as_permutation_group()
            sage: G.to_even_subgroup()
            Arithmetic subgroup with permutations of right cosets
             S2=(1,3)(2,4)
             S3=(1,2,3)
             L=(2,3,4)
             R=(1,4,2)

            sage: H = ArithmeticSubgroup_Permutation(S2 = '(1,4,11,14)(2,7,12,17)(3,5,13,15)(6,9,16,19)(8,10,18,20)', S3 = '(1,2,3,11,12,13)(4,5,6,14,15,16)(7,8,9,17,18,19)(10,20)')
            sage: G = H.to_even_subgroup(relabel=False); G
            Arithmetic subgroup with permutations of right cosets
             S2=(1,4)(2,7)(3,5)(6,9)(8,10)
             S3=(1,2,3)(4,5,6)(7,8,9)
             L=(1,5)(2,4,9,10,8)(3,7,6)
             R=(1,7,10,8,6)(2,5,9)(3,4)
            sage: H.is_subgroup(G)
            True
        """
        N = self.index()
        # Pair up cosets exchanged by -Id, which acts on cosets as S2^2.
        # NOTE(review): since the group is odd, this action is presumably
        # fixed-point free, so the N cosets fall into N/2 pairs -- the code
        # below relies on i != s2[s2[i]] for every i.
        s2 = self._S2
        e = []
        e2i = [None]*N   # e2i[coset] -> index of its pair in e
        for i in range(N):
            j = s2[s2[i]]
            if i < j:
                e2i[i] = e2i[j] = len(e)
                e.append((i,j))
        # Induce the action of S2, S3, L, R on the pairs: this is the coset
        # action of the even group generated by self and -Id.
        ss2 = [None]*(N//2)
        ss3 = [None]*(N//2)
        ll = [None]*(N//2)
        rr = [None]*(N//2)
        s3 = self._S3
        l = self._L
        r = self._R
        for (j0,j1) in e:
            ss2[e2i[j0]] = e2i[s2[j0]]
            ss3[e2i[j0]] = e2i[s3[j0]]
            ll[e2i[j0]] = e2i[l[j0]]
            rr[e2i[j0]] = e2i[r[j0]]
        G = EvenArithmeticSubgroup_Permutation(ss2,ss3,ll,rr)
        if relabel:
            G.relabel()
        return G

    def nu2(self):
        r"""
        Return the number of elliptic points of order 2.

        EXAMPLES::

            sage: G = ArithmeticSubgroup_Permutation(S2="(1,2,3,4)",S3="(1,3)(2,4)")
            sage: G.nu2()
            0
            sage: G = Gamma1(2).as_permutation_group()
            sage: G.nu2()
            1
        """
        # Each 2-cycle of S2 corresponds to one elliptic point of order 2.
        return sum(1 for c in self.S2().cycle_tuples() if len(c) == 2)

    def nu3(self):
        r"""
        Return the number of elliptic points of order 3.

        EXAMPLES::

            sage: G = ArithmeticSubgroup_Permutation(S2="(1,2,3,4)",S3="(1,3)(2,4)")
            sage: G.nu3()
            2
            sage: G = Gamma1(3).as_permutation_group()
            sage: G.nu3()
            1
        """
        # Each 2-cycle of S3 corresponds to one elliptic point of order 3.
        return sum(1 for c in self.S3().cycle_tuples() if len(c) == 2)

    def nirregcusps(self):
        r"""
        Return the number of irregular cusps.

        The cusps are associated to cycles of the permutations `L` or `R`.
        The irregular cusps are the one which are stabilised by `-Id`.

        EXAMPLES::

            sage: S2 = "(1,3,2,4)(5,7,6,8)(9,11,10,12)"
            sage: S3 = "(1,3,5,2,4,6)(7,9,11,8,10,12)"
            sage: G = ArithmeticSubgroup_Permutation(S2=S2,S3=S3)
            sage: G.nirregcusps()
            3
        """
        # -Id acts on cosets as S2^2; a cusp cycle of L is irregular when
        # it is mapped to itself by that involution.
        inv = self.S2()**2
        n = 0
        for c in self.L().cycle_tuples(singletons=True):
            if inv(c[0]) in c:
                n += 1
        return n

    def nregcusps(self):
        r"""
        Return the number of regular cusps of the group.

        The cusps are associated to cycles of `L` or `R`. The irregular cusps
        correspond to the ones which are not stabilised by `-Id`.

        EXAMPLES::

            sage: G = Gamma1(3).as_permutation_group()
            sage: G.nregcusps()
            2
        """
        inv = self.S2()**2
        n = 0
        for c in self.L().cycle_tuples(singletons=True):
            if inv(c[0]) not in c:
                n += 1
        # The cycles not stabilised by -Id come in pairs, one cusp per pair.
        return n//2

    def cusp_widths(self,exp=False):
        r"""
        Return the list of cusp widths.

        INPUT:

        ``exp`` - boolean (default: False) - if True, return a dictionary with
        keys the possible widths and with values the number of cusp with that
        width.

        EXAMPLES::

            sage: G = Gamma1(5).as_permutation_group()
            sage: G.cusp_widths()
            [1, 1, 5, 5]
            sage: G.cusp_widths(exp=True)
            {1: 2, 5: 2}
        """
        inv = self.S2()**2
        L = self.L()
        # One representative (minimal element) per cycle of L.
        cusps = set(c[0] for c in L.cycle_tuples(singletons=True))
        if exp:
            widths = {}
        else:
            widths = []
        while cusps:
            c0 = cusps.pop()
            c = L.orbit(c0)
            if inv(c0) not in c:
                # The cycle and its image under -Id are distinct; they merge
                # into one cusp of width the full cycle length.  Remove the
                # partner cycle so it is not counted twice.
                c1 = min(L.orbit(inv(c0)))
                cusps.remove(c1)
                if exp:
                    if not len(c) in widths:
                        widths[len(c)] = 0
                    widths[len(c)] += 1
                else:
                    widths.append(len(c))
            else:
                # Cycle stabilised by -Id: single cusp of half the length.
                c2 = len(c) // 2
                if exp:
                    if not c2 in widths:
                        widths[c2] = 0
                    widths[c2] += 1
                else:
                    widths.append(c2)
        if exp:
            return widths
        return sorted(widths)

    def ncusps(self):
        r"""
        Returns the number of cusps.

        EXAMPLES::

            sage: G = ArithmeticSubgroup_Permutation(S2="(1,2,3,4)",S3="(1,3)(2,4)")
            sage: G.ncusps()
            1
            sage: G = Gamma1(3).as_permutation_group()
            sage: G.ncusps()
            2
        """
        inv = self.S2()**2
        # n: cycles of L stabilised by -Id (one cusp each);
        # m: the remaining cycles, which pair up under -Id (one cusp per pair).
        n = 0
        m = 0
        for c in self.L().cycle_tuples(singletons=True):
            if inv(c[0]) in c:
                n += 1
            else:
                m += 1
        return n + m//2
class EvenArithmeticSubgroup_Permutation(ArithmeticSubgroup_Permutation_class):
r"""
An arithmetic subgroup of `{\rm SL}(2, \ZZ)` containing `-1`, represented
in terms of the right action of `{\rm SL}(2, \ZZ)` on its cosets.
EXAMPLES:
Construct a noncongruence subgroup of index 7 (the smallest possible)::
sage: a2 = SymmetricGroup(7)([(1,2),(3,4),(6,7)]); a3 = SymmetricGroup(7)([(1,2,3),(4,5,6)])
sage: G = ArithmeticSubgroup_Permutation(S2=a2, S3=a3); G
Arithmetic subgroup with permutations of right cosets
S2=(1,2)(3,4)(6,7)
S3=(1,2,3)(4,5,6)
L=(1,4,7,6,5,3)
R=(2,4,5,7,6,3)
sage: G.index()
7
sage: G.dimension_cusp_forms(4)
1
sage: G.is_congruence()
False
Convert some standard congruence subgroups into permutation form::
sage: G = Gamma0(8).as_permutation_group()
sage: G.index()
12
sage: G.is_congruence()
True
sage: G = Gamma0(12).as_permutation_group()
sage: G
Arithmetic subgroup of index 24
sage: G.is_congruence()
True
The following is the unique index 2 even subgroup of `{\rm SL}_2(\ZZ)`::
sage: w = SymmetricGroup(2)([2,1])
sage: G = ArithmeticSubgroup_Permutation(L=w, R=w)
sage: G.dimension_cusp_forms(6)
1
sage: G.genus()
0
"""
def __init__(self, S2, S3, L, R, canonical_labels=False):
    r"""
    TESTS::

        sage: G = ArithmeticSubgroup_Permutation(S2="(1,2)(3,4)(5,6)",S3="(1,2,3)(4,5,6)")
        sage: G == loads(dumps(G))
        True
        sage: G is loads(dumps(G))
        False
    """
    # Store the four permutations describing the right coset action of the
    # generators S2, S3, L, R of SL(2,Z).
    self._S2, self._S3, self._L, self._R = S2, S3, L, R
    # A canonically-labelled group is its own canonical representative.
    if canonical_labels:
        self._canonical_label_group = self
    ArithmeticSubgroup_Permutation_class.__init__(self)
def __reduce__(self):
    r"""
    Data for pickling.

    TESTS::

        sage: G = ArithmeticSubgroup_Permutation(S2="(1,2)(3,4)",S3="(1,2,4)")
        sage: G == loads(dumps(G)) #indirect doctest
        True
        sage: G = ArithmeticSubgroup_Permutation(S2="(1,2)(3,4)",S3="(1,2,4)",relabel=True)
        sage: GG = loads(dumps(G))
        sage: G == GG #indirect doctest
        True
        sage: GG.relabel(inplace=False) is GG
        True
    """
    # The group pickles as its defining permutations plus a flag recording
    # whether it is its own canonical (relabelled) representative.
    canonical_labels = getattr(self, '_canonical_label_group', None) is self
    return (EvenArithmeticSubgroup_Permutation,
            (self._S2, self._S3, self._L, self._R, canonical_labels))
def is_odd(self):
r"""
Returns True if this subgroup does not contain the matrix | |
dimension nsg_e, regarding idxsg_e, lsg_e.')
if is_empty(constraints.usg_e):
constraints.usg_e = np.zeros((nsg_e,))
elif constraints.usg_e.shape[0] != nsg_e:
raise Exception('inconsistent dimension nsg_e, regarding idxsg_e, usg_e.')
dims.nsg_e = nsg_e
nsphi_e = constraints.idxsphi_e.shape[0]
if is_empty(constraints.lsphi_e):
constraints.lsphi_e = np.zeros((nsphi_e,))
elif constraints.lsphi_e.shape[0] != nsphi_e:
raise Exception('inconsistent dimension nsphi_e, regarding idxsphi_e, lsphi_e.')
if is_empty(constraints.usphi_e):
constraints.usphi_e = np.zeros((nsphi_e,))
elif constraints.usphi_e.shape[0] != nsphi_e:
raise Exception('inconsistent dimension nsphi_e, regarding idxsphi_e, usphi_e.')
dims.nsphi_e = nsphi_e
# terminal
ns_e = nsbx_e + nsh_e + nsg_e + nsphi_e
wrong_field = ""
if cost.Zl_e.shape[0] != ns_e:
wrong_field = "Zl_e"
dim = cost.Zl_e.shape[0]
elif cost.Zu_e.shape[0] != ns_e:
wrong_field = "Zu_e"
dim = cost.Zu_e.shape[0]
elif cost.zl_e.shape[0] != ns_e:
wrong_field = "zl_e"
dim = cost.zl_e.shape[0]
elif cost.zu_e.shape[0] != ns_e:
wrong_field = "zu_e"
dim = cost.zu_e.shape[0]
if wrong_field != "":
raise Exception(f'Inconsistent size for field {wrong_field}, with dimension {dim}, \n\t'\
+ f'Detected ns_e = {ns_e} = nsbx_e + nsg_e + nsh_e + nsphi_e.\n\t'\
+ f'With nsbx_e = {nsbx_e}, nsg_e = {nsg_e}, nsh_e = {nsh_e}, nsphi_e = {nsphi_e}')
dims.ns_e = ns_e
# discretization
if is_empty(opts.time_steps) and is_empty(opts.shooting_nodes):
# uniform discretization
opts.time_steps = opts.tf / dims.N * np.ones((dims.N,))
elif not is_empty(opts.shooting_nodes):
if np.shape(opts.shooting_nodes)[0] != dims.N+1:
raise Exception('inconsistent dimension N, regarding shooting_nodes.')
time_steps = np.zeros((dims.N,))
for i in range(dims.N):
time_steps[i] = opts.shooting_nodes[i+1] - opts.shooting_nodes[i]
opts.time_steps = time_steps
elif (not is_empty(opts.time_steps)) and (not is_empty(opts.shooting_nodes)):
Exception('Please provide either time_steps or shooting_nodes for nonuniform discretization')
tf = np.sum(opts.time_steps)
if (tf - opts.tf) / tf > 1e-15:
raise Exception(f'Inconsistent discretization: {opts.tf}'\
f' = tf != sum(opts.time_steps) = {tf}.')
def get_ocp_nlp_layout():
    """Load and return the acados OCP-NLP layout description.

    The layout is read from ``acados_layout.json`` shipped alongside this
    module.
    """
    module_dir = os.path.dirname(sys.modules[__name__].__file__)
    with open(os.path.join(module_dir, 'acados_layout.json'), 'r') as layout_file:
        return json.load(layout_file)
def ocp_formulation_json_dump(acados_ocp, json_file='acados_ocp_nlp.json'):
    """Serialize an AcadosOcp formulation to ``json_file``.

    The object's structured attributes (dims, cost, constraints, ...) are
    flattened to plain dicts, CasADi symbolics are stripped from the model
    and numpy arrays are converted to lists before dumping.
    """
    ocp_layout = get_ocp_nlp_layout()
    # Work on a deep copy so the caller's object is never mutated.
    ocp_nlp_dict = dict(deepcopy(acados_ocp).__dict__)
    # Replace every structured attribute by a plain dict of its fields.
    for attr_name, attr_layout in ocp_layout.items():
        if not isinstance(attr_layout, dict):
            continue  # scalar layout entries need no conversion
        ocp_nlp_dict[attr_name] = dict(getattr(acados_ocp, attr_name).__dict__)
    ocp_nlp_dict = format_class_dict(ocp_nlp_dict)
    # CasADi symbolics are not JSON-serializable.
    ocp_nlp_dict['model'] = acados_model_strip_casadi_symbolics(ocp_nlp_dict['model'])
    # shooting_nodes are presumably already folded into time_steps upstream;
    # drop them from the dump.
    ocp_nlp_dict['solver_options'].pop('shooting_nodes', None)
    ocp_check_against_layout(ocp_nlp_dict, acados_class2dict(acados_ocp.dims))
    with open(json_file, 'w') as f:
        json.dump(ocp_nlp_dict, f, default=np_array_to_list, indent=4, sort_keys=True)
def ocp_formulation_json_load(json_file='acados_ocp_nlp.json'):
    """Deserialize an AcadosOcp formulation from ``json_file`` and return it."""
    ocp_layout = get_ocp_nlp_layout()
    with open(json_file, 'r') as f:
        raw_json = json.load(f)
    ocp_nlp_dict = json2dict(raw_json, raw_json['dims'])
    # Rebuild the AcadosOcp object from the flat dictionary.
    acados_ocp = AcadosOcp()
    acados_ocp.__dict__ = ocp_nlp_dict
    # Rehydrate the structured attributes (dims, cost, constraints, ...).
    for attr_name, attr_layout in ocp_layout.items():
        if not isinstance(attr_layout, dict):
            continue  # scalar layout entries need no conversion
        attr_obj = getattr(acados_ocp, attr_name)
        attr_obj.__dict__ = ocp_nlp_dict[attr_name]
        setattr(acados_ocp, attr_name, attr_obj)
    return acados_ocp
def ocp_generate_external_functions(acados_ocp, model):
    """Generate the external C functions for dynamics, constraints and cost."""
    model = make_model_consistent(model)

    # Dynamics: one code generator per integrator type.
    integrator = acados_ocp.solver_options.integrator_type
    if integrator == 'ERK':
        generate_c_code_explicit_ode(model)
    elif integrator == 'IRK':
        generate_c_code_implicit_ode(model, dict(generate_hess=1))
    elif integrator == 'GNSF':
        generate_c_code_gnsf(model)
    else:
        raise Exception("ocp_generate_external_functions: unknown integrator type.")

    # Exact-Hessian solvers need Hessian code for the constraints.
    if acados_ocp.solver_options.hessian_approx == 'EXACT':
        opts = dict(generate_hess=1)
    else:
        opts = dict(generate_hess=0)

    dims = acados_ocp.dims
    if dims.nphi > 0 or dims.nh > 0:
        generate_c_code_constraint(model, model.name, False, opts)
    if dims.nphi_e > 0 or dims.nh_e > 0:
        generate_c_code_constraint(model, model.name, True, opts)

    # Non linear-LS cost types leave Vx/Vu unused; fill with dummy matrices.
    cost = acados_ocp.cost
    if cost.cost_type != 'LINEAR_LS':
        cost.Vx = np.zeros((dims.ny, dims.nx))
        cost.Vu = np.zeros((dims.ny, dims.nu))
    if cost.cost_type_e != 'LINEAR_LS':
        cost.Vx_e = np.zeros((dims.ny_e, dims.nx))

    # Cost code generation, path and terminal.
    if cost.cost_type == 'NONLINEAR_LS':
        generate_c_code_nls_cost(model, model.name, False)
    elif cost.cost_type == 'EXTERNAL':
        generate_c_code_external_cost(model, False)
    if cost.cost_type_e == 'NONLINEAR_LS':
        generate_c_code_nls_cost(model, model.name, True)
    elif cost.cost_type_e == 'EXTERNAL':
        generate_c_code_external_cost(model, True)
def ocp_render_templates(acados_ocp, json_file):
    """Render all C code templates for the OCP into ``c_generated_code/``.

    Parameters
    ----------
    acados_ocp : AcadosOcp
        Fully consistent OCP description (model name selects file names).
    json_file : str
        Path (relative to the current directory) of the dumped formulation.

    Raises
    ------
    Exception
        If ``json_file`` does not exist in the current working directory.

    The original implementation repeated the same three-line render stanza
    nineteen times; this version collects (template_dir, in_file, out_file)
    jobs in a list and renders them in one loop, preserving the order.
    """
    name = acados_ocp.model.name

    json_path = os.path.join(os.getcwd(), json_file)
    if not os.path.exists(json_path):
        raise Exception('{} not found!'.format(json_path))

    main_dir = 'c_generated_code/'
    # Unconditional templates: solver, simulator, Makefile, S-function glue.
    jobs = [
        (main_dir, 'main.in.c', 'main_{}.c'.format(name)),
        (main_dir, 'acados_solver.in.c', 'acados_solver_{}.c'.format(name)),
        (main_dir, 'acados_solver.in.h', 'acados_solver_{}.h'.format(name)),
        (main_dir, 'Makefile.in', 'Makefile'),
        (main_dir, 'acados_solver_sfun.in.c', 'acados_solver_sfunction_{}.c'.format(name)),
        (main_dir, 'make_sfun.in.m', 'make_sfun.m'),
        (main_dir, 'acados_sim_solver.in.c', 'acados_sim_solver_{}.c'.format(name)),
        (main_dir, 'acados_sim_solver.in.h', 'acados_sim_solver_{}.h'.format(name)),
        ('c_generated_code/{}_model/'.format(name), 'model.in.h', '{}_model.h'.format(name)),
    ]

    constraints_dir = 'c_generated_code/{}_constraints/'.format(name)
    cost_dir = 'c_generated_code/{}_cost/'.format(name)
    dims = acados_ocp.dims
    constraints = acados_ocp.constraints
    cost = acados_ocp.cost

    # Convex-over-nonlinear constraints (path / terminal).
    if constraints.constr_type == 'BGP' and dims.nphi > 0:
        jobs.append((constraints_dir, 'phi_constraint.in.h', '{}_phi_constraint.h'.format(name)))
    if constraints.constr_type_e == 'BGP' and dims.nphi_e > 0:
        jobs.append((constraints_dir, 'phi_e_constraint.in.h', '{}_phi_e_constraint.h'.format(name)))
    # General nonlinear constraints (path / terminal).
    if constraints.constr_type == 'BGH' and dims.nh > 0:
        jobs.append((constraints_dir, 'h_constraint.in.h', '{}_h_constraint.h'.format(name)))
    if constraints.constr_type_e == 'BGH' and dims.nh_e > 0:
        jobs.append((constraints_dir, 'h_e_constraint.in.h', '{}_h_e_constraint.h'.format(name)))
    # Nonlinear-least-squares cost (path / terminal).
    if cost.cost_type == 'NONLINEAR_LS':
        jobs.append((cost_dir, 'cost_y_fun.in.h', '{}_cost_y_fun.h'.format(name)))
    if cost.cost_type_e == 'NONLINEAR_LS':
        jobs.append((cost_dir, 'cost_y_e_fun.in.h', '{}_cost_y_e_fun.h'.format(name)))
    # External cost (path / terminal).
    if cost.cost_type == 'EXTERNAL':
        jobs.append((cost_dir, 'external_cost.in.h', '{}_external_cost.h'.format(name)))
    if cost.cost_type_e == 'EXTERNAL':
        jobs.append((cost_dir, 'external_cost_e.in.h', '{}_external_cost_e.h'.format(name)))

    for template_dir, in_file, out_file in jobs:
        render_template(in_file, out_file, template_dir, json_path)
class AcadosOcpSolver:
"""
class to interact with the acados ocp solver C object
"""
def __init__(self, acados_ocp, json_file='acados_ocp_nlp.json'):
    """Generate, compile and load an acados OCP solver.

    Pipeline: make the OCP dimensions consistent, generate the external C
    functions, dump the formulation to ``json_file``, render the C templates,
    compile them with ``make`` and load the resulting shared library via
    ctypes.

    :param acados_ocp: AcadosOcp description of the optimal control problem
    :param json_file: name of the JSON file the formulation is dumped to
    """
    self.solver_created = False
    model = acados_ocp.model

    make_ocp_dims_consistent(acados_ocp)

    if acados_ocp.solver_options.integrator_type == 'GNSF':
        set_up_imported_gnsf_model(acados_ocp)

    # Integrator step defaults to the first shooting interval.
    acados_ocp.solver_options.Tsim = acados_ocp.solver_options.time_steps[0]

    ocp_generate_external_functions(acados_ocp, model)
    ocp_formulation_json_dump(acados_ocp, json_file)
    ocp_render_templates(acados_ocp, json_file)

    ## Compile solver.
    # BUG FIX: restore the working directory even if a step raises; the
    # original left the process chdir'd into c_generated_code on failure.
    os.chdir('c_generated_code')
    try:
        os.system('make clean_ocp_shared_lib')
        os.system('make ocp_shared_lib')
    finally:
        os.chdir('..')

    self.shared_lib_name = 'c_generated_code/libacados_ocp_solver_' + model.name + '.so'

    # Load the compiled solver and create its C-side data.
    self.shared_lib = CDLL(self.shared_lib_name)
    self.shared_lib.acados_create()
    self.solver_created = True

    # Cache the opaque C pointers (opts, dims, config, out, in, solver)
    # used by every subsequent call; the repeated restype/getter stanzas of
    # the original are folded into one loop.
    for suffix in ('opts', 'dims', 'config', 'out', 'in', 'solver'):
        getter = getattr(self.shared_lib, 'acados_get_nlp_' + suffix)
        getter.restype = c_void_p
        setattr(self, 'nlp_' + suffix, getter())

    self.acados_ocp = acados_ocp
def solve(self):
    """Run the generated C solver on the currently stored input.

    Returns the integer status code reported by ``acados_solve``.
    """
    return self.shared_lib.acados_solve()
def get(self, stage_, field_):
    """
    get the last solution of the solver:

        :param stage_: integer corresponding to shooting node
        :param field_: string in ['x', 'u', 'z', 'pi', 'lam', 't', 'sl', 'su',]

    .. note:: regarding lam, t: \n
            the inequalities are internally organized in the following order: \n
            [ lbu lbx lg lh lphi ubu ubx ug uh uphi; \n
              lsbu lsbx lsg lsh lsphi usbu usbx usg ush usphi]

    .. note:: pi: multipliers for dynamics equality constraints \n
          lam: multipliers for inequalities \n
          t: slack variables corresponding to evaluation of all inequalities (at the solution) \n
          sl: slack variables of soft lower inequality constraints \n
          su: slack variables of soft upper inequality constraints \n
    """
    out_fields = ['x', 'u', 'z', 'pi', 'lam', 't']
    mem_fields = ['sl', 'su']

    # Validate before touching the C library.
    if field_ not in out_fields + mem_fields:
        raise Exception('AcadosOcpSolver.get(): {} is an invalid argument.\
\n Possible values are {}. Exiting.'.format(field_, out_fields + mem_fields))

    field = field_.encode('utf-8')
    lib = self.shared_lib

    # Query the length of the requested vector at this stage.
    lib.ocp_nlp_dims_get_from_attr.argtypes = \
        [c_void_p, c_void_p, c_void_p, c_int, c_char_p]
    lib.ocp_nlp_dims_get_from_attr.restype = c_int
    dims = lib.ocp_nlp_dims_get_from_attr(self.nlp_config, \
        self.nlp_dims, self.nlp_out, stage_, field)

    # C-contiguous buffer the library writes into.
    out = np.ascontiguousarray(np.zeros((dims,)), dtype=np.float64)
    out_data = cast(out.ctypes.data, POINTER(c_double))

    if field_ in out_fields:
        lib.ocp_nlp_out_get.argtypes = \
            [c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
        lib.ocp_nlp_out_get(self.nlp_config, \
            self.nlp_dims, self.nlp_out, stage_, field, out_data)
    else:
        # mem_fields live in the solver memory rather than the output struct.
        lib.ocp_nlp_get_at_stage.argtypes = \
            [c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
        lib.ocp_nlp_get_at_stage(self.nlp_config, \
            self.nlp_dims, self.nlp_solver, stage_, field, out_data)

    return out
def print_statistics(self):
stat = self.get_stats("statistics")
if self.acados_ocp.solver_options.nlp_solver_type == 'SQP':
print('\niter\tres_stat\tres_eq\t\tres_ineq\tres_comp\tqp_stat\tqp_iter')
if stat.shape[0]>7:
print('\tqp_res_stat\tqp_res_eq\tqp_res_ineq\tqp_res_comp')
for jj in range(stat.shape[1]):
print('{:d}\t{:e}\t{:e}\t{:e}\t{:e}\t{:d}\t{:d}'.format( \
int(stat[0][jj]), stat[1][jj], stat[2][jj], \
stat[3][jj], stat[4][jj], int(stat[5][jj]), int(stat[6][jj])))
if stat.shape[0]>7:
print('\t{:e}\t{:e}\t{:e}\t{:e}'.format( \
stat[7][jj], stat[8][jj], stat[9][jj], stat[10][jj]))
print('\n')
elif self.acados_ocp.solver_options.nlp_solver_type == 'SQP_RTI':
print('\niter\tqp_stat\tqp_iter')
if stat.shape[0]>3:
print('\tqp_res_stat\tqp_res_eq\tqp_res_ineq\tqp_res_comp')
for jj in | |
from src.utils.db_utils import execute_sql,insert_query, save_rds_pandas
from src.models.save_model import save_upload, parse_filename
from datetime import date, datetime
from pyspark.sql import SparkSession
from pyspark.sql.types import IntegerType, DoubleType
from pyspark.sql.functions import monotonically_increasing_id, countDistinct, approxCountDistinct, when, lit
from pyspark.sql.functions import concat, col, lit
from pyspark.ml.feature import OneHotEncoder, StringIndexer, Imputer, VectorAssembler, StandardScaler, PCA
from pyspark.ml import Pipeline
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
from pyspark.ml.classification import LogisticRegression, DecisionTreeClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator, BinaryClassificationEvaluator
from pyspark.mllib.evaluation import MulticlassMetrics
from pyspark.sql import SparkSession
from pyspark.sql import functions as f
from pyspark.sql.functions import udf
from pyspark.sql.functions import col, lower, regexp_replace, split
from pyspark.sql.types import StructType, StructField, StringType, IntegerType, FloatType
from pyspark.sql import functions as f
from collections import defaultdict
import psycopg2 as pg
import pandas.io.sql as psql
import pandas as pd
import time
import json
from src import (
MY_USER ,
MY_PASS ,
MY_HOST ,
MY_PORT,
MY_DB
)
def get_data(test=False):
    """Read the semantic.rita sample from Postgres into a Spark DataFrame.

    Pulls 700 on-time ('0-1.5') rows and 1000 delayed rows, ordered by
    flightdate (ascending when *test* is True, descending otherwise).

    Args:
        test: when True, read the oldest flights instead of the newest.

    Returns:
        pyspark.sql.DataFrame with an explicit schema for every column.
    """
    config_psyco = "host='{0}' dbname='{1}' user='{2}' password='{3}'".format(
        MY_HOST, MY_DB, MY_USER, MY_PASS)
    connection = pg.connect(config_psyco)
    try:
        # The two original queries differed only by the sort direction.
        order = "" if test else " desc"
        query_select = (
            "(select * from semantic.rita where rangoatrasohoras = '0-1.5' "
            "order by flightdate{0} limit 700) union all "
            "(select * from semantic.rita where rangoatrasohoras != '0-1.5' "
            "order by flightdate{0} limit 1000);".format(order))
        pdf = pd.read_sql_query(query_select, con=connection)
    finally:
        # The original leaked the connection; always release it.
        connection.close()
    spark = SparkSession \
        .builder \
        .appName("Python Spark SQL basic example") \
        .config("spark.some.config.option", "some-value") \
        .getOrCreate()
    # (name, type) pairs for every semantic.rita column; building the schema
    # from this table avoids repeating StructField 61 times by hand.
    _I, _S, _F = IntegerType(), StringType(), FloatType()
    columns = [
        ('year', _I), ('quarter', _I), ('month', _I), ('dayofmonth', _I),
        ('dayofweek', _I), ('flightdate', _S), ('reporting_airline', _S),
        ('dot_id_reporting_airline', _I), ('iata_code_reporting_airline', _S),
        ('tail_number', _S), ('flight_number_reporting_airline', _I),
        ('originairportid', _I), ('originairportseqid', _I),
        ('origincitymarketid', _I), ('origin', _S), ('origincityname', _S),
        ('originstate', _S), ('originstatefips', _I), ('originstatename', _S),
        ('originwac', _I), ('destairportid', _I), ('destairportseqid', _I),
        ('destcitymarketid', _I), ('dest', _S), ('destcityname', _S),
        ('deststate', _S), ('deststatefips', _I), ('deststatename', _S),
        ('destwac', _I), ('crsdeptime', _S), ('deptime', _S),
        ('depdelay', _F), ('depdelayminutes', _F), ('depdel15', _F),
        ('departuredelaygroups', _F), ('deptimeblk', _S), ('taxiout', _F),
        ('wheelsoff', _F), ('wheelson', _F), ('taxiin', _F),
        ('crsarrtime', _I), ('arrtime', _F), ('arrdelay', _F),
        ('arrdelayminutes', _F), ('arrdel15', _F),
        ('arrivaldelaygroups', _F), ('arrtimeblk', _S), ('cancelled', _F),
        ('diverted', _F), ('crselapsedtime', _F), ('actualelapsedtime', _F),
        ('airtime', _F), ('flights', _F), ('distance', _F),
        ('distancegroup', _I), ('divairportlandings', _S),
        ('rangoatrasohoras', _S), ('findesemana', _I), ('quincena', _F),
        ('dephour', _F), ('seishoras', _F),
    ]
    schema = StructType([StructField(name, dtype, True)
                         for name, dtype in columns])
    df = spark.createDataFrame(pdf, schema=schema)
    print((df.count(), len(df.columns)))
    return df
def imputa_categoricos(df, ignore, data_types):
    """Fill missing values of the used string columns with "missing".

    Columns listed in *ignore* are left untouched.
    """
    fill_values = {
        column: "missing"
        for column in data_types["StringType"]
        if column not in ignore
    }
    return df.fillna(fill_values)
def ignore_list(df, data_types):
    """Return the string columns with more than 100 distinct values.

    High-cardinality categoricals are excluded from the model, so they are
    reported here as the columns to ignore.
    """
    string_cols = data_types["StringType"]
    summary = df.agg(
        *[countDistinct(name).alias(name) for name in string_cols]).toPandas()
    counts = pd.Series(summary.values.ravel(), index=summary.columns)
    sorted_counts = counts.sort_values(ascending=False)
    return list(sorted_counts[sorted_counts > 100].index)
def get_data_types(df):
    """Group column names by the string form of their Spark data type.

    Returns a defaultdict mapping e.g. "StringType" -> [column, ...], so
    callers can index types with no columns and get an empty list.
    """
    grouped = defaultdict(list)
    for field in df.schema.fields:
        type_name = str(field.dataType)
        grouped[type_name].append(field.name)
    return grouped
def create_pipeline(df, ignore):
    """Build the preprocessing stages (imputation, assembling, scaling).

    Integer columns are cast to double first (Imputer works on floating
    point inputs), so the re-cast df is returned alongside the stages.

    todo:
        1. Con más observaciones volver a usar el OneHotEncoder
           - Agregar a los stages
           - Agregar a los features del VectorAssembler

    Args:
        df: Spark DataFrame to preprocess.
        ignore: column names excluded from every stage.

    Returns:
        (stages, df): pipeline stages and the (possibly re-cast) DataFrame.
    """
    # Recomputed here so we can tweak the estimators'/transformers' inputs
    # against the current df schema.
    data_types = get_data_types(df)
    # -------------- STRING --------------
    strings_used = [var for var in data_types["StringType"]
                    if var not in ignore]
    # -------------- DOUBLE --------------
    numericals_double = [var for var in data_types["DoubleType"]
                         if var not in ignore]
    numericals_double_imputed = [var + "_imputed" for var in numericals_double]
    # -------------- INTEGERS --------------
    numericals_int = [var for var in data_types["IntegerType"]
                      if var not in ignore]
    for c in numericals_int:
        # One cast suffices (the original cast each column twice).
        df = df.withColumn(c, df[c].cast(DoubleType()))
    numericals_int_imputed = [var + "_imputed" for var in numericals_int]
    # ============= ONE HOT ENCODING =============
    # Currently unused (see todo): kept so re-enabling is a one-line change.
    stage_string = [StringIndexer(inputCol=c, outputCol=c + "_string_encoded")
                    for c in strings_used]
    stage_one_hot = [OneHotEncoder(inputCol=c + "_string_encoded",
                                   outputCol=c + "_one_hot")
                     for c in strings_used]
    # =============== IMPUTADORES ================
    stage_imputer_double = Imputer(inputCols=numericals_double,
                                   outputCols=numericals_double_imputed)
    stage_imputer_int = Imputer(inputCols=numericals_int,
                                outputCols=numericals_int_imputed)
    # ============= VECTOR ASSEMBLER =============
    # + [var + "_one_hot" for var in strings_used] once one-hot is re-enabled
    features = numericals_double_imputed + numericals_int_imputed
    stage_assembler = VectorAssembler(inputCols=features,
                                      outputCol="assem_features")
    # ================== SCALER ==================
    stage_scaler = StandardScaler(inputCol=stage_assembler.getOutputCol(),
                                  outputCol="scaled_features",
                                  withStd=True, withMean=True)
    # ================= PIPELINE =================
    # stage_string + stage_one_hot +  (categorical data, see todo)
    stages = [stage_imputer_double,
              stage_imputer_int,  # data imputation
              stage_assembler,    # assembling data
              stage_scaler]
    # df is returned because the integer columns were cast to double above.
    return stages, df
def get_models_params_dic():
    """Return the candidate models and their hyper-parameter grids.

    Returns:
        (model_dic, paramGrid_dic): both dicts keyed by "LR" and "DT".
    """
    pca_stage = PCA(k=15, inputCol="scaled_features", outputCol="features")
    logistic = LogisticRegression()
    tree = DecisionTreeClassifier()
    logistic_grid = (ParamGridBuilder()
                     .addGrid(pca_stage.k, [1])
                     .addGrid(logistic.maxIter, [1])
                     .build())
    tree_grid = (ParamGridBuilder()
                 .addGrid(pca_stage.k, [1])
                 .addGrid(tree.maxDepth, [2])
                 .build())
    return ({"LR": logistic, "DT": tree},
            {"LR": logistic_grid, "DT": tree_grid})
def prepare_data(df):
    """Split df into features/target, impute categoricals and build stages.

    Returns:
        (stages, df_junto): preprocessing stages plus the feature frame
        re-joined with the 'rangoatrasohoras' target column.
    """
    data_types = get_data_types(df)
    ignore = ignore_list(df, data_types)
    # Leakage guard: drop every delay-related column plus explicit extras.
    illegal = [name for name in df.columns if "del" in name]
    extra_illegal = ['cancelled', 'rangoatrasohoras']
    legal = [name for name in df.columns
             if name not in ignore
             and name not in illegal
             and name not in extra_illegal]
    lista_objetivos = df.select('rangoatrasohoras').distinct() \
        .rdd.map(lambda r: r[0]).collect()
    df = imputa_categoricos(df, ignore, data_types)
    df_legal = df[legal]
    y = df[['rangoatrasohoras']]
    # Shared synthetic id so features and target can be re-joined afterwards.
    df_legal = df_legal.withColumn("id", monotonically_increasing_id())
    y = y.withColumn("id", monotonically_increasing_id())
    stages, df_new = create_pipeline(df_legal, ignore)
    df_junto = df_new.join(y, "id", "outer").drop("id")
    return stages, df_junto
def run_model(objetivo, model_name, hyperparams, luigi= False, test_split = 0.2):
    """Train one model against a binary target and persist model + metadata.

    Args:
        objetivo: value of 'rangoatrasohoras' treated as the positive class.
        model_name: key into get_models_params_dic() ("LR" or "DT").
        hyperparams: dict with at least "iter" and "pca" entries.
        luigi: unused here; kept for interface compatibility with callers.
        test_split: fraction of rows held out for evaluation.

    Returns:
        (prediction, df_test): transformed test predictions and the raw
        held-out DataFrame.
    """
    df = get_data(False)
    # Drop constant columns: a single distinct value carries no signal.
    cnt = df.agg(*(f.countDistinct(c).alias(c) for c in df.columns)).first()
    df = df.drop(*[c for c in cnt.asDict() if cnt[c] == 1])
    first_stages, df = prepare_data(df)
    # Binary label: 1.0 for the target delay range, 0.0 otherwise.
    df = df.withColumn(
        "label", when(df.rangoatrasohoras == objetivo, 1.0).otherwise(0.0))
    # Selecciona el modelo
    model_dic, paramGrid_dic = get_models_params_dic()
    clr_model = model_dic[model_name]
    # Model-specific hyper-parameters.
    num_it = int(hyperparams["iter"])
    if num_it > 0 and model_name == "LR":
        clr_model.setMaxIter(num_it)
    # PCA stage: fall back to 8 components when "pca" is not positive
    # (single construction instead of the duplicated if/else branches).
    num_pca = int(hyperparams["pca"])
    stage_pca = PCA(k=num_pca if num_pca > 0 else 8,
                    inputCol="scaled_features", outputCol="features")
    # Checar que no se haya corrido este modelo
    print("Modelo evaluado: ", clr_model, "con params: ",
          clr_model.explainParams())
    # Creates the full pipeline: preprocessing + PCA + classifier.
    pipeline = Pipeline(stages=first_stages + [stage_pca, clr_model])
    df_train, df_test = df.randomSplit([(1 - test_split), test_split], 123)
    start = time.time()
    cvModel = pipeline.fit(df_train)
    end = time.time()
    prediction = cvModel.transform(df_test)
    log = evaluate(prediction)
    # Persist the fitted model to s3.
    save_upload(cvModel, objetivo, model_name, hyperparams)
    # Upload run metadata to RDS.
    train_time = end - start
    train_nrows = df_train.count()
    add_meta_data(objetivo, model_name, hyperparams, log, train_time,
                  test_split, train_nrows)
    # Store test predictions in RDS for fairness/bias analysis.
    save_train_predictions(prediction, objetivo, model_name, hyperparams)
    return prediction, df_test
def save_train_predictions(prediction, objetivo, model_name, hyperparams):
    """Persist test-set predictions to RDS (predictions.train) for bias checks.

    A 'fecha' column (YYYYMMDD, fixed to December 2019) is derived from
    'dayofmonth', and the s3 model name is attached to distinguish runs.
    """
    s3_name = parse_filename(objetivo, model_name, hyperparams)[2:]
    keep = ['dayofmonth', 'flight_number_reporting_airline', 'prediction',
            'originwac', 'label', 'distance']
    df_bias = prediction.select(
        [name for name in prediction.columns if name in keep])
    df_bias = df_bias.withColumnRenamed("prediction", "score") \
        .withColumnRenamed("label", "label_value")
    df_bias = df_bias.withColumn('s3_name', lit(s3_name))
    # Zero-pad single-digit days so the date is always eight characters.
    # Bug fix: the original used `< 9`, which left day 9 unpadded ("2019129").
    df_bias = df_bias.withColumn(
        'aux', f.when(f.col('dayofmonth') < 10, "0").otherwise(""))
    df_bias = df_bias.withColumn(
        'fecha', concat(lit("2019"), lit("12"), col('aux'), col('dayofmonth')))
    keep = ['flight_number_reporting_airline', 'prediction', 'originwac',
            'label_value', 'distance', 'score', 's3_name', 'fecha']
    df_bias = df_bias.select(
        [name for name in df_bias.columns if name in keep])
    save_rds_pandas(df_bias.toPandas(), "predictions.train")
def evaluate(predictionAndLabels):
log = {}
# Show Validation Score (AUROC)
evaluator = BinaryClassificationEvaluator(metricName='areaUnderROC')
log['AUROC'] = "%f" % evaluator.evaluate(predictionAndLabels)
print("Area under ROC = {}".format(log['AUROC']))
# Show Validation Score (AUPR)
evaluator = BinaryClassificationEvaluator(metricName='areaUnderPR')
log['AUPR'] = "%f" % evaluator.evaluate(predictionAndLabels)
print("Area under PR = {}".format(log['AUPR']))
# Metrics
predictionRDD = predictionAndLabels.select(['label', 'prediction']) \
.rdd.map(lambda line: (line[1], line[0]))
metrics = MulticlassMetrics(predictionRDD)
# Confusion Matrix
print(metrics.confusionMatrix().toArray())
# Overall statistics
log['precision'] = "%s" % metrics.precision()
log['recall'] = "%s" % metrics.recall()
log['F1 Measure'] = "%s" % metrics.fMeasure()
print("[Overall]\tprecision = %s | recall = %s | F1 Measure = %s" % \
(log['precision'], log['recall'], log['F1 Measure']))
# Statistics by class
labels = [0.0, 1.0]
for label in sorted(labels):
log[label] = {}
log[label]['precision'] = "%s" % metrics.precision(label)
log[label]['recall'] = "%s" % metrics.recall(label)
log[label]['F1 | |
self.percent_usage + 56
data[metric_param]['previous'] = self.percent_usage + 57
self.test_system_alerter._create_state_for_system(self.system_id)
self.test_system_alerter._process_results(data, meta_data,
data_for_alerting)
try:
eval(mock_param).assert_called_with(
self.system_name, data[metric_param]['current'],
self.critical, meta_data['last_monitored'], self.critical,
self.parent_id, self.system_id
)
self.assertEqual(2, len(data_for_alerting))
except AssertionError as e:
self.fail("Test failed: {}".format(e))
@mock.patch("src.alerter.alerters.system.SystemBackUpAgainAlert",
autospec=True)
def test_system_back_up_no_alert(self, mock_system_back_up) -> None:
data_for_alerting = []
data = self.data_received_initially_no_alert['result']['data']
meta_data = self.data_received_initially_no_alert['result']['meta_data']
self.test_system_alerter._create_state_for_system(self.system_id)
self.test_system_alerter._process_results(
data, meta_data, data_for_alerting)
try:
mock_system_back_up.assert_not_called()
except AssertionError as e:
self.fail("Test failed: {}".format(e))
@mock.patch("src.alerter.alerters.system.SystemBackUpAgainAlert",
autospec=True)
def test_system_back_up_alert(self, mock_system_back_up) -> None:
data_for_alerting = []
self.test_system_alerter._create_state_for_system(self.system_id)
self.test_system_alerter._system_initial_alert_sent[
self.system_id][
GroupedSystemAlertsMetricCode.SystemIsDown.value] = True
data = self.data_received_initially_no_alert['result']['data']
data['went_down_at']['previous'] = self.last_monitored
meta_data = self.data_received_initially_no_alert['result']['meta_data']
self.test_system_alerter._process_results(
data, meta_data, data_for_alerting)
try:
mock_system_back_up.assert_called_once_with(
self.system_name, self.info, self.last_monitored,
self.parent_id, self.system_id
)
# There are extra alerts due to initial start-up alerts
self.assertEqual(1, len(data_for_alerting))
except AssertionError as e:
self.fail("Test failed: {}".format(e))
@mock.patch("src.alerter.alerters.system.TimedTaskLimiter.reset",
autospec=True)
def test_system_back_up_timed_task_limiter_reset(self, mock_reset) -> None:
data_for_alerting = []
self.test_system_alerter._create_state_for_system(self.system_id)
# Set that the initial downtime alert was sent already
self.test_system_alerter._system_initial_alert_sent[
self.system_id][
GroupedSystemAlertsMetricCode.SystemIsDown.value] = True
data = self.data_received_initially_no_alert['result']['data']
data['went_down_at']['previous'] = self.last_monitored
meta_data = self.data_received_initially_no_alert['result']['meta_data']
self.test_system_alerter._process_results(
data, meta_data, data_for_alerting)
try:
mock_reset.assert_called_once()
except AssertionError as e:
self.fail("Test failed: {}".format(e))
@mock.patch("src.alerter.alerters.system.SystemWentDownAtAlert",
autospec=True)
def test_system_went_down_at_no_alert_below_warning_threshold(
self, mock_system_is_down) -> None:
data_for_alerting = []
data = self.data_received_error_data['error']
self.test_system_alerter._create_state_for_system(self.system_id)
self.test_system_alerter._process_errors(data, data_for_alerting)
try:
mock_system_is_down.assert_not_called()
self.assertEqual(0, len(data_for_alerting))
except AssertionError as e:
self.fail("Test failed: {}".format(e))
"""
These tests assume that critical_threshold_seconds >
warning_threshold_seconds
"""
@mock.patch("src.alerter.alerters.system.SystemWentDownAtAlert",
autospec=True)
def test_system_went_down_at_alert_above_warning_threshold(
self, mock_system_is_down) -> None:
data_for_alerting = []
data = self.data_received_error_data['error']
data['meta_data']['time'] = self.last_monitored + \
self.warning_threshold_seconds
self.test_system_alerter._create_state_for_system(self.system_id)
self.test_system_alerter._process_errors(data, data_for_alerting)
try:
mock_system_is_down.assert_called_once_with(
self.system_name, self.warning, data['meta_data']['time'],
self.parent_id, self.system_id
)
self.assertEqual(1, len(data_for_alerting))
except AssertionError as e:
self.fail("Test failed: {}".format(e))
@mock.patch("src.alerter.alerters.system.SystemWentDownAtAlert",
autospec=True)
def test_system_went_down_at_alert_above_critical_threshold(
self, mock_system_is_down) -> None:
data_for_alerting = []
data = self.data_received_error_data['error']
data['meta_data']['time'] = self.last_monitored + \
self.critical_threshold_seconds
self.test_system_alerter._create_state_for_system(self.system_id)
self.test_system_alerter._process_errors(data, data_for_alerting)
try:
mock_system_is_down.assert_called_once_with(
self.system_name, self.critical, data['meta_data']['time'],
self.parent_id, self.system_id
)
self.assertEqual(1, len(data_for_alerting))
except AssertionError as e:
self.fail("Test failed: {}".format(e))
    @mock.patch("src.alerter.alerters.system.SystemStillDownAlert",
                autospec=True)
    @mock.patch("src.alerter.alerters.system.SystemWentDownAtAlert",
                autospec=True)
    @mock.patch(
        "src.alerter.alerters.system.TimedTaskLimiter.last_time_that_did_task",
        autospec=True)
    def test_system_went_down_at_alert_above_warning_threshold_then_no_critical_repeat(
            self, mock_last_time_did_task, mock_system_is_down,
            mock_system_still_down) -> None:
        """Warning down alert, but no still-down repeat before the window ends."""
        data_for_alerting = []
        data = self.data_received_error_data['error']
        # Phase 1: downtime reaches exactly the warning threshold.
        past_warning_time = self.last_monitored + self.warning_threshold_seconds
        mock_last_time_did_task.return_value = past_warning_time
        data['meta_data']['time'] = past_warning_time
        self.test_system_alerter._create_state_for_system(self.system_id)
        self.test_system_alerter._process_errors(
            data, data_for_alerting)
        try:
            mock_system_is_down.assert_called_once_with(
                self.system_name, self.warning, past_warning_time,
                self.parent_id, self.system_id
            )
            self.assertEqual(1, len(data_for_alerting))
        except AssertionError as e:
            self.fail("Test failed: {}".format(e))
        # Phase 2: one second short of the critical repeat interval.
        data['meta_data'][
            'time'] = past_warning_time + self.critical_repeat_seconds - 1
        self.test_system_alerter._create_state_for_system(self.system_id)
        self.test_system_alerter._process_errors(
            data, data_for_alerting)
        try:
            # The warning alert is unchanged and no repeat alert was issued.
            mock_system_is_down.assert_called_once_with(
                self.system_name, self.warning, past_warning_time,
                self.parent_id, self.system_id
            )
            mock_system_still_down.assert_not_called()
            self.assertEqual(1, len(data_for_alerting))
        except AssertionError as e:
            self.fail("Test failed: {}".format(e))
    @mock.patch("src.alerter.alerters.system.SystemStillDownAlert",
                autospec=True)
    @mock.patch("src.alerter.alerters.system.SystemWentDownAtAlert",
                autospec=True)
    @mock.patch(
        "src.alerter.alerters.system.TimedTaskLimiter.last_time_that_did_task",
        autospec=True)
    def test_system_went_down_at_alert_above_warning_threshold_then_critical_repeat(
            self, mock_last_time_did_task, mock_system_is_down,
            mock_system_still_down) -> None:
        """Warning down alert, then SystemStillDownAlert once the repeat elapses."""
        data_for_alerting = []
        data = self.data_received_error_data['error']
        # Phase 1: downtime reaches the warning threshold.
        past_warning_time = self.last_monitored + self.warning_threshold_seconds
        mock_last_time_did_task.return_value = past_warning_time
        data['meta_data']['time'] = past_warning_time
        self.test_system_alerter._create_state_for_system(self.system_id)
        self.test_system_alerter._process_errors(data, data_for_alerting)
        try:
            mock_system_is_down.assert_called_once_with(
                self.system_name, self.warning, past_warning_time,
                self.parent_id, self.system_id
            )
            # NOTE(review): two entries are expected here while the analogous
            # critical-threshold tests expect one — confirm which extra alert
            # is appended on this path.
            self.assertEqual(2, len(data_for_alerting))
        except AssertionError as e:
            self.fail("Test failed: {}".format(e))
        # Phase 2: exactly one critical repeat interval later — repeat fires.
        data['meta_data'][
            'time'] = past_warning_time + self.critical_repeat_seconds
        downtime = int(data['meta_data']['time'] - self.last_monitored)
        self.test_system_alerter._create_state_for_system(self.system_id)
        self.test_system_alerter._process_errors(data, data_for_alerting)
        try:
            mock_system_is_down.assert_called_once_with(
                self.system_name, self.warning, past_warning_time,
                self.parent_id, self.system_id
            )
            mock_system_still_down.assert_called_once_with(
                self.system_name, downtime, self.critical,
                data['meta_data']['time'], self.parent_id,
                self.system_id
            )
            self.assertEqual(3, len(data_for_alerting))
        except AssertionError as e:
            self.fail("Test failed: {}".format(e))
    @mock.patch("src.alerter.alerters.system.SystemStillDownAlert",
                autospec=True)
    @mock.patch("src.alerter.alerters.system.SystemWentDownAtAlert",
                autospec=True)
    @mock.patch(
        "src.alerter.alerters.system.TimedTaskLimiter.last_time_that_did_task",
        autospec=True)
    def test_system_went_down_at_alert_above_critical_threshold_then_no_critical_repeat(
            self, mock_last_time_did_task, mock_system_is_down,
            mock_system_still_down) -> None:
        """Critical down alert, but no still-down repeat before the window ends."""
        data_for_alerting = []
        data = self.data_received_error_data['error']
        # Phase 1: downtime reaches the critical threshold.
        past_critical_time = self.last_monitored + \
                             self.critical_threshold_seconds
        mock_last_time_did_task.return_value = past_critical_time
        data['meta_data']['time'] = past_critical_time
        self.test_system_alerter._create_state_for_system(self.system_id)
        self.test_system_alerter._process_errors(data, data_for_alerting)
        try:
            mock_system_is_down.assert_called_once_with(
                self.system_name, self.critical, past_critical_time,
                self.parent_id, self.system_id
            )
            self.assertEqual(1, len(data_for_alerting))
        except AssertionError as e:
            self.fail("Test failed: {}".format(e))
        # Phase 2: one second short of the critical repeat interval.
        data['meta_data'][
            'time'] = past_critical_time + self.critical_repeat_seconds - 1
        self.test_system_alerter._create_state_for_system(self.system_id)
        self.test_system_alerter._process_errors(data, data_for_alerting)
        try:
            # The critical alert is unchanged and no repeat alert was issued.
            mock_system_is_down.assert_called_once_with(
                self.system_name, self.critical, past_critical_time,
                self.parent_id, self.system_id
            )
            mock_system_still_down.assert_not_called()
            self.assertEqual(1, len(data_for_alerting))
        except AssertionError as e:
            self.fail("Test failed: {}".format(e))
@mock.patch("src.alerter.alerters.system.SystemStillDownAlert",
autospec=True)
@mock.patch("src.alerter.alerters.system.SystemWentDownAtAlert",
autospec=True)
@mock.patch(
"src.alerter.alerters.system.TimedTaskLimiter.last_time_that_did_task",
autospec=True)
def test_system_went_down_at_alert_above_warning_threshold_then_critical_repeat(
self, mock_last_time_did_task, mock_system_is_down,
mock_system_still_down) -> None:
data_for_alerting = []
data = self.data_received_error_data['error']
past_critical_time = self.last_monitored + \
self.critical_threshold_seconds
mock_last_time_did_task.return_value = past_critical_time
data['meta_data']['time'] = past_critical_time
self.test_system_alerter._create_state_for_system(self.system_id)
self.test_system_alerter._process_errors(data, data_for_alerting)
try:
mock_system_is_down.assert_called_once_with(
self.system_name, self.critical, past_critical_time,
self.parent_id, self.system_id
)
self.assertEqual(1, len(data_for_alerting))
except AssertionError as e:
self.fail("Test failed: {}".format(e))
data['meta_data'][
'time'] = past_critical_time + self.critical_repeat_seconds
downtime = int(data['meta_data']['time'] - self.last_monitored)
self.test_system_alerter._create_state_for_system(self.system_id)
self.test_system_alerter._process_errors(
data, data_for_alerting)
try:
mock_system_is_down.assert_called_once_with(
self.system_name, self.critical, past_critical_time,
self.parent_id, self.system_id
)
mock_system_still_down.assert_called_once_with(
self.system_name, downtime, self.critical,
data['meta_data']['time'], self.parent_id,
self.system_id
)
self.assertEqual(2, len(data_for_alerting))
except AssertionError as e:
self.fail("Test failed: {}".format(e))
"""
Testing error alerts of MetricNotFound and InvalidURL
"""
@mock.patch("src.alerter.alerters.system.MetricNotFoundErrorAlert",
autospec=True)
def test_process_errors_metric_not_found_alert(self, mock_alert) -> None:
data_for_alerting = []
data = self.data_received_error_data['error']
data['code'] = 5003
self.test_system_alerter._create_state_for_system(self.system_id)
self.test_system_alerter._process_errors(data, data_for_alerting)
try:
mock_alert.assert_called_once_with(
self.system_name, data['message'], self.error,
data['meta_data']['time'], self.parent_id,
self.system_id
)
self.assertEqual(1, len(data_for_alerting))
except AssertionError as e:
self.fail("Test failed: {}".format(e))
    @mock.patch("src.alerter.alerters.system.MetricFoundAlert",
                autospec=True)
    @mock.patch("src.alerter.alerters.system.MetricNotFoundErrorAlert",
                autospec=True)
    def test_process_error_metric_not_found_alert_metric_found_alert(self,
                                                                     mock_alert_not_found,
                                                                     mock_alert_found) -> None:
        """Code 5003 raises MetricNotFoundErrorAlert; a later error payload with
        an unrecognised code then raises MetricFoundAlert (condition cleared)."""
        data_for_alerting = []
        data = self.data_received_error_data['error']
        data['code'] = 5003
        self.test_system_alerter._create_state_for_system(self.system_id)
        self.test_system_alerter._process_errors(data, data_for_alerting)
        try:
            mock_alert_not_found.assert_called_once_with(
                self.system_name, data['message'], self.error,
                data['meta_data']['time'], self.parent_id,
                self.system_id
            )
            self.assertEqual(1, len(data_for_alerting))
        except AssertionError as e:
            self.fail("Test failed: {}".format(e))
        # Re-process with a code the alerter does not recognise.
        data = self.data_received_error_data['error']
        data['code'] = 600000000 # This code doesn't exist
        self.test_system_alerter._process_errors(data, data_for_alerting)
        try:
            mock_alert_found.assert_called_once_with(
                self.system_name, "Metrics have been found!", self.info,
                data['meta_data']['time'], self.parent_id,
                self.system_id
            )
            self.assertEqual(2, len(data_for_alerting))
        except AssertionError as e:
            self.fail("Test failed: {}".format(e))
    @mock.patch("src.alerter.alerters.system.MetricFoundAlert",
                autospec=True)
    @mock.patch("src.alerter.alerters.system.MetricNotFoundErrorAlert",
                autospec=True)
    def test_process_error_metric_not_found_alert_process_result_metric_found_alert(
            self,
            mock_alert_not_found, mock_alert_found) -> None:
        """Code 5003 raises MetricNotFoundErrorAlert; a subsequent successful
        result payload raises MetricFoundAlert via _process_results."""
        data_for_alerting = []
        data = self.data_received_error_data['error']
        data['code'] = 5003
        self.test_system_alerter._create_state_for_system(self.system_id)
        self.test_system_alerter._process_errors(data, data_for_alerting)
        try:
            mock_alert_not_found.assert_called_once_with(
                self.system_name, data['message'], self.error,
                data['meta_data']['time'], self.parent_id,
                self.system_id
            )
            self.assertEqual(1, len(data_for_alerting))
        except AssertionError as e:
            self.fail("Test failed: {}".format(e))
        # A clean monitoring result resolves the metric-not-found condition.
        data = self.data_received_initially_no_alert['result']['data']
        meta_data = self.data_received_initially_no_alert['result']['meta_data']
        self.test_system_alerter._process_results(data, meta_data,
                                                  data_for_alerting)
        try:
            mock_alert_found.assert_called_once_with(
                self.system_name, "Metrics have been found!", self.info,
                meta_data['last_monitored'], self.parent_id,
                self.system_id
            )
            self.assertEqual(2, len(data_for_alerting))
        except AssertionError as e:
            self.fail("Test failed: {}".format(e))
@mock.patch("src.alerter.alerters.system.InvalidUrlAlert", autospec=True)
def test_process_errors_invalid_url_alert(self, mock_alert) -> None:
data_for_alerting = []
data = self.data_received_error_data['error']
data['code'] = 5009
self.test_system_alerter._create_state_for_system(self.system_id)
self.test_system_alerter._process_errors(data, data_for_alerting)
try:
mock_alert.assert_called_once_with(
self.system_name, data['message'], self.error,
data['meta_data']['time'], self.parent_id,
self.system_id
)
self.assertEqual(1, len(data_for_alerting))
except AssertionError as e:
self.fail("Test failed: {}".format(e))
    @mock.patch("src.alerter.alerters.system.ValidUrlAlert", autospec=True)
    @mock.patch("src.alerter.alerters.system.InvalidUrlAlert", autospec=True)
    def test_process_errors_invalid_url_alert_then_valid_url_alert(self,
                                                                   mock_alert_invalid,
                                                                   mock_alert_valid) -> None:
        """Code 5009 raises InvalidUrlAlert; a later error payload with an
        unrecognised code then raises ValidUrlAlert (condition cleared)."""
        data_for_alerting = []
        data = self.data_received_error_data['error']
        data['code'] = 5009
        self.test_system_alerter._create_state_for_system(self.system_id)
        self.test_system_alerter._process_errors(data, data_for_alerting)
        try:
            mock_alert_invalid.assert_called_once_with(
                self.system_name, data['message'], self.error,
                data['meta_data']['time'], self.parent_id,
                self.system_id
            )
            self.assertEqual(1, len(data_for_alerting))
        except AssertionError as e:
            self.fail("Test failed: {}".format(e))
        # Re-process with a code the alerter does not recognise.
        data = self.data_received_error_data['error']
        data['code'] = 600000000 # This code doesn't exist
        self.test_system_alerter._process_errors(data, data_for_alerting)
        try:
            mock_alert_valid.assert_called_once_with(
                self.system_name, "Url is valid!", self.info,
                data['meta_data']['time'], self.parent_id,
                self.system_id
            )
            self.assertEqual(2, len(data_for_alerting))
        except AssertionError as e:
            self.fail("Test failed: {}".format(e))
    @mock.patch("src.alerter.alerters.system.ValidUrlAlert", autospec=True)
    @mock.patch("src.alerter.alerters.system.InvalidUrlAlert", autospec=True)
    def test_process_errors_invalid_url_alert_then_process_results_valid_url_alert(
            self,
            mock_alert_invalid, mock_alert_valid) -> None:
        """Code 5009 raises InvalidUrlAlert; a subsequent successful result
        payload raises ValidUrlAlert via _process_results."""
        data_for_alerting = []
        data = self.data_received_error_data['error']
        data['code'] = 5009
        self.test_system_alerter._create_state_for_system(self.system_id)
        self.test_system_alerter._process_errors(data, data_for_alerting)
        try:
            mock_alert_invalid.assert_called_once_with(
                self.system_name, data['message'], self.error,
                data['meta_data']['time'], self.parent_id,
                self.system_id
            )
            self.assertEqual(1, len(data_for_alerting))
        except AssertionError as e:
            self.fail("Test failed: {}".format(e))
        # A clean monitoring result resolves the invalid-url condition.
        data = self.data_received_initially_no_alert['result']['data']
        meta_data = self.data_received_initially_no_alert['result']['meta_data']
        self.test_system_alerter._process_results(data, meta_data,
                                                  data_for_alerting)
        try:
            mock_alert_valid.assert_called_once_with(
                self.system_name, "Url is valid!", self.info,
                meta_data['last_monitored'], self.parent_id,
                self.system_id
            )
            self.assertEqual(2, len(data_for_alerting))
        except AssertionError as e:
            self.fail("Test failed: {}".format(e))
@mock.patch.object(SystemAlerter, "_classify_alert")
def test_alerts_warning_alerts_disabled_metric_above_warning_threshold(
self, mock_classify_alert) -> None:
data_for_alerting = []
data = self.data_received_initially_warning_alert['result']['data']
meta_data = self.data_received_initially_warning_alert['result'][
'meta_data']
self.test_system_alerter_warnings_disabled._create_state_for_system(
self.system_id)
self.test_system_alerter_warnings_disabled._process_results(
data, meta_data, data_for_alerting)
try:
self.assertEqual(4, mock_classify_alert.call_count)
self.assertEqual(0, len(data_for_alerting))
except AssertionError as e:
self.fail("Test failed: {}".format(e))
    @parameterized.expand([
        ('open_file_descriptors', 'mock_ofd_increase'),
        ('system_cpu_usage', 'mock_cpu_usage_increase'),
        ('system_ram_usage', 'mock_ram_usage_increase'),
        ('system_storage_usage', 'mock_storage_usage_increase'),
    ])
    @mock.patch(
        "src.alerter.alerters.system"
        ".OpenFileDescriptorsIncreasedAboveThresholdAlert",
        autospec=True)
    @mock.patch(
        "src.alerter.alerters.system"
        ".OpenFileDescriptorsDecreasedBelowThresholdAlert",
        autospec=True)
    @mock.patch(
        "src.alerter.alerters.system"
        ".SystemCPUUsageIncreasedAboveThresholdAlert",
        autospec=True)
    @mock.patch(
        "src.alerter.alerters.system"
        ".SystemCPUUsageDecreasedBelowThresholdAlert",
        autospec=True)
    @mock.patch(
        "src.alerter.alerters.system"
        ".SystemRAMUsageIncreasedAboveThresholdAlert",
        autospec=True)
    @mock.patch(
        "src.alerter.alerters.system"
        ".SystemRAMUsageDecreasedBelowThresholdAlert",
        autospec=True)
    @mock.patch(
        "src.alerter.alerters.system"
        ".SystemStorageUsageIncreasedAboveThresholdAlert",
        autospec=True)
    @mock.patch(
        "src.alerter.alerters.system"
        ".SystemStorageUsageDecreasedBelowThresholdAlert",
        autospec=True)
    def test_warning_alerts_disabled_increase_above_warning_threshold_no_alerts_occur(
            self, metric_param, mock_param, mock_storage_usage_decrease,
            mock_storage_usage_increase, mock_ram_usage_decrease,
            mock_ram_usage_increase, mock_cpu_usage_decrease,
            mock_cpu_usage_increase, mock_ofd_decrease,
            mock_ofd_increase) -> None:
        """With warning alerts disabled, pushing each metric above the warning
        threshold raises no alert at all.

        metric_param: metric key to bump in the result payload.
        mock_param: name of the local mock that must stay uncalled for it.
        """
        data_for_alerting = []
        data = self.data_received_initially_no_alert['result']['data']
        # Push the parametrised metric above the warning threshold.
        data[metric_param]['current'] = self.percent_usage + 46
        meta_data = self.data_received_initially_no_alert['result']['meta_data']
        self.test_system_alerter_warnings_disabled._create_state_for_system(
            self.system_id)
        self.test_system_alerter_warnings_disabled._process_results(
            data, meta_data, data_for_alerting)
        try:
            mock_storage_usage_decrease.assert_not_called()
            mock_ram_usage_decrease.assert_not_called()
            mock_cpu_usage_decrease.assert_not_called()
            mock_ofd_decrease.assert_not_called()
            # NOTE(review): eval() resolves the mock by name from the local
            # scope — a dict lookup would be safer than eval here.
            eval(mock_param).assert_not_called()
            self.assertEqual(0, len(data_for_alerting))
        except AssertionError as e:
            self.fail("Test failed: {}".format(e))
@mock.patch.object(SystemAlerter, "_classify_alert")
def test_alerts_critical_alerts_disabled_metric_above_critical_threshold(
        self, mock_classify_alert) -> None:
    """With critical alerts disabled, metrics are still classified but no
    alert data is appended to `data_for_alerting`.

    The fixture carries four monitored metrics, hence the expected four
    `_classify_alert` calls.
    """
    data_for_alerting = []
    data = self.data_received_initially_warning_alert['result']['data']
    meta_data = self.data_received_initially_warning_alert['result'][
        'meta_data']
    self.test_system_alerter_critical_disabled._create_state_for_system(
        self.system_id)
    self.test_system_alerter_critical_disabled._process_results(
        data, meta_data, data_for_alerting)
    try:
        # One classification per metric, but nothing raised as an alert.
        self.assertEqual(4, mock_classify_alert.call_count)
        self.assertEqual(0, len(data_for_alerting))
    except AssertionError as e:
        self.fail("Test failed: {}".format(e))
@parameterized.expand([
    ('open_file_descriptors', 'mock_ofd_increase'),
    ('system_cpu_usage', 'mock_cpu_usage_increase'),
    ('system_ram_usage', 'mock_ram_usage_increase'),
    ('system_storage_usage', 'mock_storage_usage_increase'),
])
@mock.patch(
    "src.alerter.alerters.system"
    ".OpenFileDescriptorsIncreasedAboveThresholdAlert",
    autospec=True)
@mock.patch(
    "src.alerter.alerters.system"
    ".OpenFileDescriptorsDecreasedBelowThresholdAlert",
    autospec=True)
@mock.patch(
    "src.alerter.alerters.system"
    ".SystemCPUUsageIncreasedAboveThresholdAlert",
    autospec=True)
@mock.patch(
    "src.alerter.alerters.system"
    ".SystemCPUUsageDecreasedBelowThresholdAlert",
    autospec=True)
@mock.patch(
    "src.alerter.alerters.system"
    ".SystemRAMUsageIncreasedAboveThresholdAlert",
    autospec=True)
@mock.patch(
    "src.alerter.alerters.system"
    ".SystemRAMUsageDecreasedBelowThresholdAlert",
    autospec=True)
@mock.patch(
    "src.alerter.alerters.system"
    ".SystemStorageUsageIncreasedAboveThresholdAlert",
    autospec=True)
@mock.patch(
    "src.alerter.alerters.system"
    ".SystemStorageUsageDecreasedBelowThresholdAlert",
    autospec=True)
def test_critical_alerts_disabled_increase_above_critical_threshold_warning_alert(
        self, metric_param, mock_param, mock_storage_usage_decrease,
        mock_storage_usage_increase, mock_ram_usage_decrease,
        mock_ram_usage_increase, mock_cpu_usage_decrease,
        mock_cpu_usage_increase, mock_ofd_decrease,
        mock_ofd_increase) -> None:
    """With critical alerts disabled, raising a metric above the critical
    threshold should still fire the *warning*-severity increase alert.
    """
    # Map the parameterized mock name to the actual injected mock object
    # instead of resolving it with eval(), which is fragile and a lint
    # (and security-review) red flag.
    increase_mocks = {
        'mock_ofd_increase': mock_ofd_increase,
        'mock_cpu_usage_increase': mock_cpu_usage_increase,
        'mock_ram_usage_increase': mock_ram_usage_increase,
        'mock_storage_usage_increase': mock_storage_usage_increase,
    }
    data_for_alerting = []
    data = self.data_received_initially_no_alert['result']['data']
    # Push the metric above the critical threshold.
    data[metric_param]['current'] = self.percent_usage + 56
    data[metric_param]['previous'] = self.percent_usage + 46
    meta_data = self.data_received_initially_no_alert['result']['meta_data']
    self.test_system_alerter_critical_disabled._create_state_for_system(
        self.system_id)
    self.test_system_alerter_critical_disabled._process_results(
        data, meta_data, data_for_alerting)
    try:
        increase_mocks[mock_param].assert_called_once_with(
            self.system_name, data[metric_param]['current'],
            self.warning, meta_data['last_monitored'],
            self.warning, self.parent_id, self.system_id
        )
    except AssertionError as e:
        self.fail("Test failed: {}".format(e))
@parameterized.expand([
('open_file_descriptors', 'mock_ofd_increase'),
('system_cpu_usage', 'mock_cpu_usage_increase'),
('system_ram_usage', 'mock_ram_usage_increase'),
('system_storage_usage', 'mock_storage_usage_increase'),
])
@mock.patch(
"src.alerter.alerters.system"
".OpenFileDescriptorsIncreasedAboveThresholdAlert",
autospec=True)
| |
import imgaug.augmenters as iaa
>>> crop = iaa.CenterCropToFixedSize(height=20, width=10)
Create an augmenter that takes ``20x10`` sized crops from the center of
images.
"""
# Added in 0.4.0.
def __init__(self, width, height,
             seed=None, name=None,
             random_state="deprecated", deterministic="deprecated"):
    """Create a CenterCropToFixedSize augmenter.

    Delegates to CropToFixedSize with the position fixed to "center",
    which spreads crop amounts evenly over all image sides.
    """
    super(CenterCropToFixedSize, self).__init__(
        width=width, height=height, position="center",
        seed=seed, name=name,
        random_state=random_state, deterministic=deterministic)
class CropToMultiplesOf(CropToFixedSize):
    """Crop images down until their height/width is a multiple of a value.

    .. note::

        For a given axis size ``A`` and multiple ``M``, if ``A`` is in the
        interval ``[0 .. M]``, the axis will not be changed.
        As a result, this augmenter can still produce axis sizes that are
        not multiples of the given values.

    Added in 0.4.0.

    **Supported dtypes**:

    See :class:`~imgaug.augmenters.size.CropToFixedSize`.

    Parameters
    ----------
    width_multiple : int or None
        Crop each image along the x-axis until its width is a multiple of
        this value. ``None`` leaves widths unchanged.

    height_multiple : int or None
        Crop each image along the y-axis until its height is a multiple of
        this value. ``None`` leaves heights unchanged.

    position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional
        See :func:`CropToFixedSize.__init__`.

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`.
        Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0.
        See method ``to_deterministic()`` for an alternative and for
        details about what the "deterministic mode" actually does.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.CropToMultiplesOf(height_multiple=10, width_multiple=6)

    Crop image heights down to multiples of ``10`` (i.e. 10, 20, 30, ...)
    and widths down to multiples of ``6`` (i.e. 6, 12, 18, ...). The
    removed rows/columns are distributed *randomly* between the opposite
    sides.

    """

    # Added in 0.4.0.
    def __init__(self, width_multiple, height_multiple, position="uniform",
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        super(CropToMultiplesOf, self).__init__(
            width=None, height=None, position=position,
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)
        self.width_multiple = width_multiple
        self.height_multiple = height_multiple

    # Added in 0.4.0.
    def _draw_samples(self, batch, random_state):
        _sizes, offset_xs, offset_ys = super(
            CropToMultiplesOf, self
        )._draw_samples(batch, random_state)

        def _cropped_size(shape):
            # Target size is (width, height), i.e. the reverse of the
            # (H, W) order used by shape tuples in CropToFixedSize.
            axis_h, axis_w = shape[0:2]
            crops = compute_croppings_to_reach_multiples_of(
                shape,
                height_multiple=self.height_multiple,
                width_multiple=self.width_multiple)
            return (axis_w - crops[1] - crops[3],
                    axis_h - crops[0] - crops[2])

        sizes = [_cropped_size(shape)
                 for shape in batch.get_rowwise_shapes()]
        return sizes, offset_xs, offset_ys

    # Added in 0.4.0.
    def get_parameters(self):
        """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
        return [self.width_multiple, self.height_multiple, self.position]
class CenterCropToMultiplesOf(CropToMultiplesOf):
    """Crop images equally on all sides until H/W are multiples of given values.

    Identical to :class:`~imgaug.augmenters.size.CropToMultiplesOf`,
    except that ``position="center"`` is always used, so the cropped
    rows/columns are spread evenly over the image sides instead of
    randomly.

    Added in 0.4.0.

    **Supported dtypes**:

    See :class:`~imgaug.augmenters.size.CropToFixedSize`.

    Parameters
    ----------
    width_multiple : int or None
        See :func:`CropToMultiplesOf.__init__`.

    height_multiple : int or None
        See :func:`CropToMultiplesOf.__init__`.

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`.
        Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0.
        See method ``to_deterministic()`` for an alternative and for
        details about what the "deterministic mode" actually does.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.CenterCropToMultiplesOf(height_multiple=10, width_multiple=6)

    Crop image heights down to multiples of ``10`` and widths down to
    multiples of ``6``, with the removed rows/columns spread *equally*
    over the opposite sides.

    """

    # Added in 0.4.0.
    def __init__(self, width_multiple, height_multiple,
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        super(CenterCropToMultiplesOf, self).__init__(
            width_multiple=width_multiple,
            height_multiple=height_multiple,
            position="center",
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)
class PadToMultiplesOf(PadToFixedSize):
    """Pad images until their height/width is a multiple of a value.

    Added in 0.4.0.

    **Supported dtypes**:

    See :class:`~imgaug.augmenters.size.PadToFixedSize`.

    Parameters
    ----------
    width_multiple : int or None
        Pad each image along the x-axis until its width is a multiple of
        this value. ``None`` leaves widths unchanged.

    height_multiple : int or None
        Pad each image along the y-axis until its height is a multiple of
        this value. ``None`` leaves heights unchanged.

    pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional
        See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.

    pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.

    position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional
        See :func:`PadToFixedSize.__init__`.

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`.
        Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0.
        See method ``to_deterministic()`` for an alternative and for
        details about what the "deterministic mode" actually does.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.PadToMultiplesOf(height_multiple=10, width_multiple=6)

    Pad image heights up to multiples of ``10`` (i.e. 10, 20, 30, ...)
    and widths up to multiples of ``6`` (i.e. 6, 12, 18, ...). The added
    rows/columns are distributed *randomly* between the opposite sides.

    """

    # Added in 0.4.0.
    def __init__(self, width_multiple, height_multiple,
                 pad_mode="constant", pad_cval=0,
                 position="uniform",
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        super(PadToMultiplesOf, self).__init__(
            width=None, height=None, pad_mode=pad_mode, pad_cval=pad_cval,
            position=position,
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)
        self.width_multiple = width_multiple
        self.height_multiple = height_multiple

    # Added in 0.4.0.
    def _draw_samples(self, batch, random_state):
        _sizes, pad_xs, pad_ys, pad_modes, pad_cvals = super(
            PadToMultiplesOf, self
        )._draw_samples(batch, random_state)

        def _padded_size(shape):
            # Target size is (width, height), i.e. the reverse of the
            # (H, W) order used by shape tuples in PadToFixedSize.
            axis_h, axis_w = shape[0:2]
            pads = compute_paddings_to_reach_multiples_of(
                shape,
                height_multiple=self.height_multiple,
                width_multiple=self.width_multiple)
            return (axis_w + pads[1] + pads[3],
                    axis_h + pads[0] + pads[2])

        sizes = [_padded_size(shape)
                 for shape in batch.get_rowwise_shapes()]
        return sizes, pad_xs, pad_ys, pad_modes, pad_cvals

    # Added in 0.4.0.
    def get_parameters(self):
        """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
        return [self.width_multiple, self.height_multiple,
                self.pad_mode, self.pad_cval,
                self.position]
class CenterPadToMultiplesOf(PadToMultiplesOf):
"""Pad images equally on all sides until H/W are multiples of given values.
This is the same as :class:`~imgaug.augmenters.size.PadToMultiplesOf`, but
uses ``position="center"`` by default, which spreads the pad amounts
equally over all image sides, while
:class:`~imgaug.augmenters.size.PadToMultiplesOf` by default spreads them
randomly.
Added in 0.4.0.
**Supported dtypes**:
See :class:`~imgaug.augmenters.size.PadToFixedSize`.
Parameters
----------
width_multiple : int or None
See :func:`PadToMultiplesOf.__init__`.
height_multiple : int or None
See :func:`PadToMultiplesOf.__init__`.
pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional
See :func:`~imgaug.augmenters.size.PadToMultiplesOf.__init__`.
pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
See :func:`~imgaug.augmenters.size.PadToMultiplesOf.__init__`.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
Old name for parameter `seed`.
Its usage will not yet cause a deprecation | |
@_int_property_decorator
def games_pinch_hitter(self):
    """Returns an ``int`` of the number of games in the lineup as a pinch hitter."""
    return self._games_pinch_hitter
@_int_property_decorator
def games_pinch_runner(self):
    """Returns an ``int`` of the number of games in the lineup as a pinch runner."""
    return self._games_pinch_runner
@_int_property_decorator
def wins(self):
    """Returns an ``int`` of the number of games won as a pitcher."""
    return self._wins
@_int_property_decorator
def losses(self):
    """Returns an ``int`` of the number of games lost as a pitcher."""
    return self._losses
@_float_property_decorator
def win_percentage(self):
    """Returns a ``float`` of the winning percentage as a pitcher (range 0-1)."""
    return self._win_percentage
@_float_property_decorator
def era(self):
    """Returns a ``float`` of the pitcher's Earned Run Average (ERA)."""
    return self._era
@_int_property_decorator
def games_finished(self):
    """Returns an ``int`` of the number of games finished as a pitcher."""
    return self._games_finished
@_int_property_decorator
def shutouts(self):
    """Returns an ``int`` of complete games pitched with no runs allowed."""
    return self._shutouts
@_int_property_decorator
def saves(self):
    """Returns an ``int`` of the number of saves made as a pitcher."""
    return self._saves
@_int_property_decorator
def hits_allowed(self):
    """Returns an ``int`` of the number of hits allowed as a pitcher."""
    return self._hits_allowed
@_int_property_decorator
def runs_allowed(self):
    """Returns an ``int`` of the number of runs allowed as a pitcher."""
    return self._runs_allowed
@_int_property_decorator
def earned_runs_allowed(self):
    """Returns an ``int`` of the number of earned runs allowed as a pitcher."""
    return self._earned_runs_allowed
@_int_property_decorator
def home_runs_allowed(self):
    """Returns an ``int`` of the number of home runs allowed as a pitcher."""
    return self._home_runs_allowed
@_int_property_decorator
def bases_on_balls_given(self):
    """Returns an ``int`` of the number of bases on balls (walks) issued as a pitcher."""
    return self._bases_on_balls_given
@_int_property_decorator
def intentional_bases_on_balls_given(self):
    """Returns an ``int`` of the number of intentional bases on balls issued as a pitcher."""
    return self._intentional_bases_on_balls_given
@_int_property_decorator
def strikeouts(self):
    """Returns an ``int`` of the number of strikeouts thrown as a pitcher."""
    return self._strikeouts
@_int_property_decorator
def times_hit_player(self):
    """Returns an ``int`` of the number of times the pitcher hit a batter with a pitch."""
    return self._times_hit_player
@_int_property_decorator
def balks(self):
    """Returns an ``int`` of the number of times the pitcher balked."""
    return self._balks
@_int_property_decorator
def wild_pitches(self):
    """Returns an ``int`` of the number of wild pitches thrown."""
    return self._wild_pitches
@_float_property_decorator
def era_plus(self):
    """Returns a ``float`` of the pitcher's ERA adjusted for the ballpark (ERA+)."""
    return self._era_plus
@_float_property_decorator
def fielding_independent_pitching(self):
    """Returns a ``float`` of the pitcher's FIP: effectiveness at preventing
    home runs, bases on balls, and hit batters while causing strikeouts.
    """
    return self._fielding_independent_pitching
@_float_property_decorator
def whip(self):
    """Returns a ``float`` of the pitcher's WHIP: (bases on balls + hits) / innings played."""
    return self._whip
@_float_property_decorator
def hits_against_per_nine_innings(self):
    """Returns a ``float`` of the number of hits given up per nine innings played."""
    return self._hits_against_per_nine_innings
@_float_property_decorator
def home_runs_against_per_nine_innings(self):
    """Returns a ``float`` of the number of home runs given up per nine innings played."""
    return self._home_runs_against_per_nine_innings
@_float_property_decorator
def bases_on_balls_given_per_nine_innings(self):
    """Returns a ``float`` of the number of bases on balls issued per nine innings played."""
    return self._bases_on_balls_given_per_nine_innings
@_float_property_decorator
def batters_struckout_per_nine_innings(self):
    """Returns a ``float`` of the number of batters struck out per nine innings played."""
    return self._batters_struckout_per_nine_innings
@_float_property_decorator
def strikeouts_thrown_per_walk(self):
    """Returns a ``float`` of the number of batters struck out per walk given."""
    return self._strikeouts_thrown_per_walk
class Roster:
"""
Get stats for all players on a roster.
Request a team's roster for a given season and create instances of the
Player class for each player, containing a detailed list of the players
statistics and information.
Parameters
----------
team : string
The team's abbreviation, such as 'HOU' for the Houston Astros.
year : string (optional)
The 4-digit year to pull the roster from, such as '2018'. If left
blank, defaults to the most recent season.
slim : boolean (optional)
Set to True to return a limited subset of player information including
the name and player ID for each player as opposed to all of their
respective stats which greatly reduces the time to return a response if
just the names and IDs are desired. Defaults to False.
"""
def __init__(self, team, year=None, slim=False):
    """Build the roster for the requested team and season.

    Slim mode keeps a {player_id: name} mapping; full mode keeps a list
    of Player instances.
    """
    self._team = team
    self._slim = slim
    self._players = {} if slim else []
    self._find_players(year)
def _pull_team_page(self, url):
    """
    Download the team page.

    Download the requested team's season page and create a PyQuery object.

    Parameters
    ----------
    url : string
        A string of the built URL for the requested team and season.

    Returns
    -------
    PyQuery object
        Returns a PyQuery object of the team's HTML page, or ``None``
        when the page could not be retrieved (HTTPError).
    """
    try:
        return pq(url)
    except HTTPError:
        # Treat a failed download as "no page"; the caller decides how
        # to report the error.
        return None
def _create_url(self, year):
    """
    Build the team URL.

    Build a URL given a team's abbreviation and the 4-digit year by
    filling the module-level ROSTER_URL template.

    Parameters
    ----------
    year : string
        The 4-digit string representing the year to pull the team's roster
        from.

    Returns
    -------
    string
        Returns a string of the team's season page for the requested team
        and year.
    """
    return ROSTER_URL % (self._team.upper(), year)
def _get_id(self, player):
"""
Parse the player ID.
Given a PyQuery object representing a single player on the team roster,
parse the player ID and return it as a string.
Parameters
----------
player : PyQuery object
A PyQuery object representing the player information from the
roster table.
Returns
-------
string
Returns a string of the player ID.
"""
name_tag = player('td[data-stat="player"] a')
name = re.sub(r'.*/players/./', '', str(name_tag))
return re.sub(r'\.shtml.*', '', name)
def _get_name(self, player):
"""
Parse the player's name.
Given a PyQuery object representing a single player on the team roster,
parse the player ID and return it as a string.
Parameters
----------
player : PyQuery object
A PyQuery object representing the player information from the
roster table.
Returns
-------
string
Returns a string of the player's name.
"""
name_tag = player('td[data-stat="player"] a')
return name_tag.text()
def _find_players(self, year):
    """
    Find all player IDs for the requested team.

    For the requested team and year (if applicable), pull the roster table
    and parse the player ID for all players on the roster and create an
    instance of the Player class for the player. All player instances are
    added to the 'players' property to get all stats for all players on a
    team.

    Parameters
    ----------
    year : string
        The 4-digit string representing the year to pull the team's roster
        from.

    Raises
    ------
    ValueError
        If the team page could not be downloaded.
    """
    if not year:
        year = utils._find_year_for_season('mlb')
        # If stats for the requested season do not exist yet (as is the
        # case right before a new season begins), attempt to pull the
        # previous year's stats. If it exists, use the previous year
        # instead.
        if not utils._url_exists(self._create_url(year)) and \
                utils._url_exists(self._create_url(str(int(year) - 1))):
            year = str(int(year) - 1)
    url = self._create_url(year)
    page = self._pull_team_page(url)
    if not page:
        output = ("Can't pull requested team page. Ensure the following "
                  "URL exists: %s" % url)
        raise ValueError(output)

    def _register(player):
        # Record a single roster row: store only the name in slim mode,
        # otherwise build a full Player instance. Returns the player ID.
        player_id = self._get_id(player)
        if self._slim:
            self._players[player_id] = self._get_name(player)
        else:
            self._players.append(Player(player_id))
        return player_id

    players_parsed = []
    for player in page('table#team_batting tbody tr').items():
        # Header rows inside the table body carry the 'thead' class.
        if 'class="thead"' in str(player):
            continue
        players_parsed.append(_register(player))
    for player in page('table#team_pitching tbody tr').items():
        if 'class="thead"' in str(player):
            continue
        # Skip players that showup in both batting and pitching tables, as
        # is often the case with National League pitchers.
        if self._get_id(player) in players_parsed:
            continue
        _register(player)
@property
def players(self):
"""
Returns a ``list`` of | |
not None and type(node.content) is not str:
msg = f'Node "{node.name}" content should be type "{TYPE_STR}", not "{type(node.content)}"'
if errs is None:
raise MetapypeRuleError(msg)
else:
errs.append(
(
ValidationError.CONTENT_EXPECTED_STRING,
msg,
node,
type(node.content),
)
)
if node.content is not None:
try:
node.content.encode(encoding="utf-8", errors="strict")
except UnicodeError as ex:
msg = f'Node "{node.name}" content contains non-unicode character(s)'
if errs is None:
raise StrContentUnicodeError(msg)
else:
errs.append(
(
ValidationError.CONTENT_EXPECTED_STRING,
msg,
node,
type(node.content),
)
)
@staticmethod
def _validate_time_content(node: Node, errs: list = None):
    """Validate that node content, when present, is a time string.

    Accepts "HH:MM:SS" or "HH:MM:SS.f" as judged by Rule.is_time. On
    failure, raises MetapypeRuleError when errs is None; otherwise
    appends a (CONTENT_EXPECTED_TIME_FORMAT, msg, node, content) tuple
    to errs.
    """
    val = node.content
    if val is not None and not Rule.is_time(val):
        msg = f'Node "{node.name}" format should be time ("HH:MM:SS" or "HH:MM:SS.f")'
        if errs is None:
            raise MetapypeRuleError(msg)
        else:
            errs.append(
                (
                    ValidationError.CONTENT_EXPECTED_TIME_FORMAT,
                    msg,
                    node,
                    node.content,
                )
            )
@staticmethod
def _validate_uri_content(node: Node, errs: list = None):
    """Validate that node content, when present, is a valid URI.

    Validity is judged by Rule.is_uri. On failure, raises
    ContentExpectedUriError when errs is None; otherwise appends a
    (CONTENT_EXPECTED_URI, msg, node, content) tuple to errs.
    """
    uri = node.content
    if uri is not None and not Rule.is_uri(uri):
        msg = f'Node "{node.name}" uri content "{uri}" is not valid'
        if errs is None:
            raise ContentExpectedUriError(msg)
        else:
            errs.append(
                (
                    ValidationError.CONTENT_EXPECTED_URI,
                    msg,
                    node,
                    node.content,
                )
            )
@staticmethod
def _validate_yeardate_content(node: Node, errs: list = None):
    """Validate that node content, when present, is a year or date string.

    Accepts "YYYY" or "YYYY-MM-DD" as judged by Rule.is_yeardate. On
    failure, raises MetapypeRuleError when errs is None; otherwise
    appends a (CONTENT_EXPECTED_YEAR_FORMAT, msg, node, content) tuple
    to errs.
    """
    val = node.content
    if val is not None and not Rule.is_yeardate(val):
        msg = f'Node "{node.name}" format should be year ("YYYY") or date ("YYYY-MM-DD")'
        if errs is None:
            raise MetapypeRuleError(msg)
        else:
            errs.append(
                (
                    ValidationError.CONTENT_EXPECTED_YEAR_FORMAT,
                    msg,
                    node,
                    node.content,
                )
            )
def _validate_attributes(self, node: Node, errs: list = None) -> None:
    """
    Validates node attributes for rule compliance.

    Iterates through the dict of attribute rules and validates whether
    the node instance complies with the rule.

    Args:
        node: Node instance to be validated
        errs: accumulator for validation-error tuples; when None, the
            first violation raises instead of being collected

    Returns:
        None

    Raises:
        MetapypeRuleError: Illegal attribute or missing required attribute
            (only raised when errs is None)
    """
    for attribute in self._attributes:
        # Rule entry layout: [required, allowed_value, allowed_value, ...]
        required = self._attributes[attribute][0]
        # Test for required attributes
        if required and attribute not in node.attributes:
            msg = f'"{attribute}" is a required attribute of node "{node.name}"'
            if errs is None:
                raise MetapypeRuleError(msg)
            else:
                errs.append(
                    (
                        ValidationError.ATTRIBUTE_REQUIRED,
                        msg,
                        node,
                        attribute,
                    )
                )
    for attribute in node.attributes:
        # Test for non-allowed attribute
        if attribute not in self._attributes:
            msg = f'"{attribute}" is not a recognized attribute of node "{node.name}"'
            if errs is None:
                raise MetapypeRuleError(msg)
            else:
                errs.append(
                    (
                        ValidationError.ATTRIBUTE_UNRECOGNIZED,
                        msg,
                        node,
                        attribute,
                    )
                )
        else:
            # Test for enumerated list of allowed values (entries beyond
            # the leading "required" flag, when present)
            if (
                len(self._attributes[attribute]) > 1
                and node.attributes[attribute]
                not in self._attributes[attribute][1:]
            ):
                msg = f'Node "{node.name}" attribute "{attribute}" must be one of the following: "{self._attributes[attribute][1:]}"'
                if errs is None:
                    raise MetapypeRuleError(msg)
                else:
                    errs.append(
                        (
                            ValidationError.ATTRIBUTE_EXPECTED_ENUM,
                            msg,
                            node,
                            attribute,
                            self._attributes[attribute][1:],
                        )
                    )
def _validate_children(self, node: Node, is_mixed_content: bool, errs: list = None) -> None:
    """
    Validates node children for rule compliance.

    1. Ignores validation of children if parent node is "metadata"
    2. Ensures children are valid for node
    3. Iterates through the list children rules and validates whether
       the node instance complies with the rules.

    Args:
        node: Node instance whose children are validated
        is_mixed_content: True when the node allows mixed content, which
            relaxes minimum-occurrence checks in choices
        errs: accumulator for validation-error tuples; when None, the
            first violation raises instead of being collected

    Returns:
        None

    Raises:
        MetapypeRuleError: Illegal child, bad sequence or choice, missing
        child, or wrong child cardinality (only when errs is None)
    """
    # Validation walks self._node_children_names with a cursor
    # (self._node_index) that the sequence/choice/rule-child helpers
    # advance as they consume matching children.
    self._node = node
    self._node_children_names = []
    self._node_index = 0
    if self._node.name == names.METADATA:
        # Metadata nodes may contain any type of child node, but only one such node
        if len(self._node.children) > 1:
            msg = f"Maximum occurrence of 1 child exceeded in parent '{names.METADATA}'"
            if errs is None:
                raise MaxOccurrenceExceededError(msg)
            else:
                errs.append((ValidationError.MAX_OCCURRENCE_EXCEEDED, msg, self._node))
    else:
        for node_child in self._node.children:
            self._node_children_names.append(node_child.name)
        # Test for non-valid children
        for node_child_name in self._node_children_names:
            if not self.is_allowed_child(node_child_name):
                msg = f"Child '{node_child_name}' not allowed in parent '{self._node.name}'"
                if errs is None:
                    raise ChildNotAllowedError(msg)
                else:
                    errs.append((ValidationError.CHILD_NOT_ALLOWED, msg, self._node, node_child_name))
        if len(self._children) > 0:
            # Begin validation of children
            modality = Rule._get_children_modality(self._children)
            if modality == "sequence":
                self._validate_sequence(self._children, is_mixed_content, errs)
            else:
                self._validate_choice(self._children, is_mixed_content, errs)
            # Any children left unconsumed by the helpers are out of
            # place for this rule.
            if self._node_index != len(self._node_children_names):
                msg = (
                    f"Child '{self._node_children_names[self._node_index]}' "
                    f"is not allowed in this position for parent '{self._node.name}'"
                )
                if errs is None:
                    raise ChildNotAllowedError(msg)
                else:
                    errs.append(
                        (
                            ValidationError.CHILD_NOT_ALLOWED,
                            msg,
                            self._node,
                            self._node_children_names[self._node_index],
                        )
                    )
def _validate_sequence(self, rule_children: list, is_mixed_content: bool, errs: list = None):
    """Validate an ordered sequence of child rules.

    Each entry is either a nested choice (delegated to _validate_choice)
    or a single child rule; child rules inside a sequence do not cap
    consumption early (limit_max=False).
    """
    for rule_child in rule_children:
        if Rule._get_children_modality(rule_child) == "choice":
            self._validate_choice(rule_child, is_mixed_content, errs)
        else:
            self._validate_rule_child(rule_child, False, errs)
def _validate_choice(self, rule_children: list, is_mixed_content: bool, errs: list = None):
    """Validate a choice group of child rules.

    Choice layout: [alt_1, ..., alt_n, min, max] where each alternative
    is a child rule, a nested sequence, or a nested choice. Consumes
    matching children from the cursor and checks the number of matched
    alternatives against [min, max]. When is_mixed_content is True, the
    minimum-occurrence check is skipped.
    """
    # min/max bounds occupy the last two slots of the rule list.
    choice_min = rule_children[-2]
    choice_max = rule_children[-1]
    choice_occurrence = 0
    while (
        self._node_index < len(self._node_children_names) and
        self._node_children_names[self._node_index] in self._get_rule_children_names(rule_children)
    ):
        for rule_child in rule_children[:-2]:
            if self._node_index == len(self._node_children_names):
                break
            modality = Rule._get_children_modality(rule_child)
            if modality == "sequence":
                if self._node_children_names[self._node_index] in self._get_rule_children_names(rule_child):
                    self._validate_sequence(rule_child, is_mixed_content, errs)
                    choice_occurrence += 1
            elif modality == "choice":
                if self._node_children_names[self._node_index] in self._get_rule_children_names(rule_child):
                    self._validate_choice(rule_child, is_mixed_content, errs)
                    choice_occurrence += 1
            else:
                if self._node_children_names[self._node_index] == rule_child[0]:
                    # limit_max=True: stop consuming once the child's own
                    # max is reached so sibling alternatives get a turn.
                    self._validate_rule_child(rule_child, True, errs)
                    choice_occurrence += 1
    if choice_max is not INFINITY and choice_occurrence > choice_max:
        msg = f"Maximum occurrence of '{choice_max}' exceeded for choice in parent '{self._node.name}'"
        if errs is None:
            raise MaxOccurrenceExceededError(msg)
        else:
            errs.append(
                (
                    ValidationError.MAX_CHOICE_EXCEEDED,
                    msg,
                    self._node,
                    self._node.name,
                    choice_max,
                )
            )
    if choice_occurrence < choice_min and not is_mixed_content:
        msg = f"Minimum occurrence of '{choice_min}' not met for choice in parent '{self._node.name}'"
        if errs is None:
            raise MinOccurrenceUnmetError(msg)
        else:
            errs.append(
                (
                    ValidationError.MIN_CHOICE_UNMET,
                    msg,
                    self._node,
                    self._node.name,
                    choice_min,
                )
            )
def _validate_rule_child(self, rule_child: list, limit_max: bool, errs: list = None):
    """Validate cardinality of a single child rule against the node.

    Consumes consecutive occurrences of the rule's child name starting at
    the cursor (self._node_index), then checks the observed count against
    the rule's min/max bounds.

    Args:
        rule_child: ["name", ..., min, max] rule entry
        limit_max: when True (choice context), stop consuming once the
            max occurrence count is reached instead of reporting an error
        errs: accumulator for validation-error tuples; when None, the
            first violation raises instead of being collected

    Raises:
        MaxOccurrenceExceededError, MinOccurrenceUnmetError: cardinality
            bound violated (only when errs is None)
    """
    rule_child_name = rule_child[0]
    rule_child_min = rule_child[-2]
    rule_child_max = rule_child[-1]
    occurrence = 0
    while (
        self._node_index < len(self._node_children_names) and
        rule_child_name == self._node_children_names[self._node_index]
    ):
        occurrence += 1
        self._node_index += 1
        if limit_max and occurrence == rule_child_max:
            return None
    if rule_child_max is not INFINITY and occurrence > rule_child_max:
        # Bug fix: build the message from rule_child_name rather than
        # indexing self._node_children_names -- after the loop above,
        # self._node_index may equal len(self._node_children_names), in
        # which case the previous f-string raised IndexError instead of
        # reporting the violation. (The offending child is, by
        # construction of the loop, rule_child_name.)
        msg = (
            f"Maximum occurrence of '{rule_child_max}' "
            f"exceeded for child '{rule_child_name}' in parent "
            f"'{self._node.name}'"
        )
        if errs is None:
            raise MaxOccurrenceExceededError(msg)
        else:
            errs.append(
                (
                    ValidationError.MAX_OCCURRENCE_EXCEEDED,
                    msg,
                    self._node,
                    None if self._node_index >= len(self._node_children_names)
                    else self._node_children_names[self._node_index],
                    rule_child_max,
                )
            )
    if occurrence < rule_child_min:
        msg = (
            f"Minimum occurrence of '{rule_child_min}' "
            f"not met for child '{rule_child_name}' in parent '{self._node.name}'"
        )
        if errs is None:
            raise MinOccurrenceUnmetError(msg)
        else:
            errs.append(
                (
                    ValidationError.MIN_OCCURRENCE_UNMET,
                    msg,
                    self._node,
                    rule_child_name,
                    rule_child_min,
                )
            )
@staticmethod
def _get_children_modality(rule_children: list) -> str:
if Rule._is_rule_child(rule_children):
mode = "child_rule"
elif Rule._is_sequence(rule_children):
mode = "sequence"
elif Rule._is_choice(rule_children):
mode = "choice"
else:
msg = f"Unknown modality for {rule_children}"
raise ValueError(msg)
return mode
@staticmethod
def _is_rule_child(rule_children: list) -> bool:
# of the form ["str", int, int|None]
is_rule_child = False
if isinstance(rule_children[0], str):
is_rule_child = True
return is_rule_child
@staticmethod
def _is_sequence(rule_children: list) -> bool:
# of the form [["str", int, int|None]...[]]]
is_sequence = False
if len(rule_children) >= 1 and isinstance(rule_children[-1], list):
is_sequence = True
return is_sequence
@staticmethod
def _is_choice(rule_children: list) -> bool:
# of the form [["str", int, int|None]...[], int, int|None]]
is_choice = False
if len(rule_children) >= 3 and (isinstance(rule_children[0], list) and isinstance(rule_children[-2], int)):
is_choice = True
return is_choice
@staticmethod
def _get_rule_children_names(children: list) -> list:
children_names = []
if len(children) > 0:
modality = Rule._get_children_modality(children)
if modality == "choice":
children_names += Rule._get_rule_children_names(children[:-2])
elif modality == "sequence":
for child in children:
children_names += Rule._get_rule_children_names(child)
else:
children_names.append(children[0])
return children_names
    @property
    def name(self):
        """The rule's name (read-only)."""
        return self._name

    @property
    def attributes(self):
        """The rule's declared attribute specification (read-only)."""
        return self._attributes

    @property
    def children(self):
        """The rule's children specification (read-only)."""
        return self._children

    @property
    def content_rules(self):
        """The list under ``self._content["content_rules"]`` (read-only)."""
        return self._content["content_rules"]

    @property
    def content_enum(self):
        """The allowed enumeration values for this rule's content, or an
        empty list when the rule has no enumerated content (as reported by
        ``has_enum_content()``).
        """
        if self.has_enum_content():
            return self._content["content_enum"]
        else:
            return []
# Named constants for EML metadata rules.
# Each constant maps a rule identifier used in code to the rule-name string
# looked up in the rule registry. Several distinct identifiers deliberately
# share one rule string (see the NOTE comments below).
RULE_ACCESS = "accessRule"
RULE_ACCURACY = "accuracyRule"
RULE_ADDITIONALMETADATA = "additionalMetadataRule"
RULE_ADDRESS = "addressRule"
RULE_ALLOW = "allowRule"
RULE_ALTERNATEIDENTIFIER = "alternateIdentifierRule"
RULE_ALTITUDEUNITS = "altitudeUnitsRule"
RULE_ANNOTATION = "annotationRule"
RULE_ANYINT = "anyIntRule"
RULE_ANYNAME = "anyNameRule"
RULE_ANYSTRING = "anyStringRule"
RULE_ANYURI = "anyURIRule"
RULE_ATTRIBUTE = "attributeRule"
RULE_ATTRIBUTELIST = "attributeListRule"
RULE_AUTHENTICATION = "authenticationRule"
RULE_AWARD = "awardRule"
RULE_BINARYRASTER_FORMAT = "binaryRasterFormatRule"
RULE_BOUNDINGALTITUDES = "boundingAltitudesRule"
RULE_BOUNDINGCOORDINATE_EW = "boundingCoordinateRule_EW"
RULE_BOUNDINGCOORDINATE_NS = "boundingCoordinateRule_NS"
RULE_BOUNDINGCOORDINATES = "boundingCoordinatesRule"
RULE_BOUNDS = "boundsRule"
RULE_CODEDEFINITION = "codeDefinitionRule"
RULE_COMPLEX = "complexRule"
RULE_COVERAGE = "coverageRule"
RULE_DATAFORMAT = "dataFormatRule"
RULE_DATASET = "datasetRule"
# NOTE(review): RULE_DATASOURCE maps to "datasetRule", the same rule as
# RULE_DATASET -- presumably dataSource elements are validated with the
# dataset rule; confirm this is intentional and not a typo for a dedicated
# "dataSourceRule".
RULE_DATASOURCE = "datasetRule"
RULE_DATATABLE = "dataTableRule"
RULE_DATETIME = "dateTimeRule"
RULE_DATETIMEDOMAIN = "dateTimeDomainRule"
RULE_DENY = "denyRule"
RULE_DESCRIPTOR = "descriptorRule"
RULE_DISTRIBUTION = "distributionRule"
RULE_EML = "emlRule"
RULE_ENTITYCODELIST = "entityCodeListRule"
RULE_ENUMERATEDDOMAIN = "enumeratedDomainRule"
RULE_EXTERNALCODESET = "externalCodeSetRule"
RULE_EXTERNALLYDEFINIEDFORMAT = "externallyDefinedFormatRule"
RULE_FUNDING = "fundingRule"
RULE_GEOGRAPHICCOVERAGE = "geographicCoverageRule"
RULE_INDIVIDUALNAME = "individualNameRule"
RULE_INTERVALRATIO = "intervalRatioRule"
RULE_KEYWORD = "keywordRule"
RULE_KEYWORDSET = "keywordSetRule"
RULE_KEYWORDTHESAURUS = "keywordThesaurusRule"
RULE_LICENSED = "licensedRule"
RULE_MAINTENANCE = "maintenanceRule"
RULE_MEASUREMENTSCALE = "measurementScaleRule"
RULE_METADATA = "metadataRule"
RULE_METHODS = "methodsRule"
RULE_METHODSTEP = "methodStepRule"
RULE_MINMAX = "minMaxRule"
RULE_MISSINGVALUECODE = "missingValueCodeRule"
RULE_MULTIBAND = "multiBandRule"
# Nominal and ordinal measurement scales share a single rule.
RULE_NOMINAL = "nominalOrdinalRule"
RULE_NONNEGATIVEFLOAT = "nonNegativeFloatRule"
RULE_NONNUMERICDOMAIN = "nonNumericDomainRule"
RULE_NUMERICDOMAIN = "numericDomainRule"
RULE_OFFLINE = "offlineRule"
RULE_ONLINE = "onlineRule"
RULE_ORDINAL = "nominalOrdinalRule"
RULE_OTHERENTITY = "otherEntityRule"
RULE_PERMISSION = "permissionRule"
RULE_PHONE = "phoneRule"
RULE_PHYSICAL = "physicalRule"
RULE_PRINCIPAL = "principalRule"
RULE_PROJECT = "projectRule"
RULE_PROPERTYURI = "propertyUriRule"
RULE_QUALITYCONTROL = "qualityControlRule"
RULE_QUANTITATIVEATTRIBUTEACCURACYASSESSMENT = (
    "quantitativeAttributeAccuracyAssessmentRule"
)
RULE_RANGEOFDATES = "rangeOfDatesRule"
RULE_RATIO = "ratioRule"
RULE_REFERENCES = "referencesRule"
RULE_RELATEDPROJECT = "relatedProjectRule"
RULE_RESPONSIBLEPARTY = "responsiblePartyRule"
RULE_RESPONSIBLEPARTY_WITH_ROLE = "responsiblePartyWithRoleRule"
RULE_ROLE = "roleRule"
RULE_ROWCOLUMN = "rowColumnRule"
RULE_SAMPLING = "samplingRule"
RULE_SECTION = "sectionRule"
RULE_SINGLEDATETIME = | |
# <gh_stars>10-100
import atexit
import csv
import ctypes.util
import importlib
import os
import platform
import re
import resource
import sys
import urllib.parse
import warnings
from collections.abc import Iterable
from hashlib import md5 as hashlib_md5
from marshal import dumps as marshal_dumps
from math import ceil as math_ceil
from os import getpid, listdir, mkdir
from os.path import abspath as _os_path_abspath
from os.path import dirname as _os_path_dirname
from os.path import expanduser as _os_path_expanduser
from os.path import expandvars as _os_path_expandvars
from os.path import join as _os_path_join
from os.path import relpath as _os_path_relpath
import cfdm
# import cPickle
import netCDF4
from numpy import __file__ as _numpy__file__
from numpy import __version__ as _numpy__version__
from numpy import all as _numpy_all
from numpy import allclose as _x_numpy_allclose
from numpy import array as _numpy_array
from numpy import ascontiguousarray as _numpy_ascontiguousarray
from numpy import integer as _numpy_integer
from numpy import isclose as _x_numpy_isclose
from numpy import ndim as _numpy_ndim
from numpy import shape as _numpy_shape
from numpy import sign as _numpy_sign
from numpy import size as _numpy_size
from numpy import take as _numpy_take
from numpy import tile as _numpy_tile
from numpy import where as _numpy_where
from numpy.ma import all as _numpy_ma_all
from numpy.ma import allclose as _numpy_ma_allclose
from numpy.ma import is_masked as _numpy_ma_is_masked
from numpy.ma import isMA as _numpy_ma_isMA
from numpy.ma import masked as _numpy_ma_masked
from numpy.ma import take as _numpy_ma_take
from psutil import Process, virtual_memory
from . import __file__, __version__, mpi_size
from .constants import (
CONSTANTS,
OperandBoundsCombination,
_file_to_fh,
_stash2standard_name,
)
from .docstring import _docstring_substitution_definitions
# Instruction to close /proc/mem at exit.
def _close_proc_meminfo():
try:
_meminfo_file.close()
except Exception:
pass
atexit.register(_close_proc_meminfo)
# --------------------------------------------------------------------
# Inherit classes from cfdm
# --------------------------------------------------------------------
class Constant(cfdm.Constant):
    """cf-specific subclass of `cfdm.Constant`.

    Provides the package's docstring substitutions and prefixes the repr
    with "CF ".
    """

    def __docstring_substitutions__(self):
        """Return the docstring substitution mapping used by cfdm."""
        return _docstring_substitution_definitions

    def __docstring_package_depth__(self):
        """Return the package depth for docstring substitutions (0 = top level)."""
        return 0

    def __repr__(self):
        """Called by the `repr` built-in function."""
        return super().__repr__().replace("<", "<CF ", 1)
class DeprecationError(Exception):
    """Raised when a deprecated keyword or behaviour is used."""

    pass
# Maps a deprecated keyword-argument name to the guidance text shown to the
# user when that keyword is encountered. (The lookup site is elsewhere in
# this module -- presumably deprecation-handling code; not visible in this
# chunk.)
KWARGS_MESSAGE_MAP = {
    "relaxed_identity": "Use keywords 'strict' or 'relaxed' instead.",
    "axes": "Use keyword 'axis' instead.",
    "traceback": "Use keyword 'verbose' instead.",
    "exact": "Use 're.compile' objects instead.",
    "i": (
        "Use keyword 'inplace' instead. Note that when inplace=True, "
        "None is returned."
    ),
    "info": (
        "Use keyword 'verbose' instead. Note the informational levels "
        "have been remapped: V = I + 1 maps info=I to verbose=V inputs, "
        "excluding I >= 3 which maps to V = -1 (and V = 0 disables messages)"
    ),
}
# Are we running on GNU/Linux?
_linux = platform.system() == "Linux"

if _linux:
    # ----------------------------------------------------------------
    # GNU/LINUX
    # ----------------------------------------------------------------
    # Opening /proc/meminfo once per PE here rather than in
    # _free_memory each time it is called works with MPI on
    # Debian-based systems, which otherwise throw an error that there
    # is no such file or directory when run on multiple PEs.
    # ----------------------------------------------------------------
    # The /proc/meminfo field labels whose values (in KiB) sum to the
    # available physical memory.
    _meminfo_fields = set(("SReclaimable:", "Cached:", "Buffers:", "MemFree:"))
    # Line-buffered handle, kept open for the life of the process and
    # closed at exit by _close_proc_meminfo (registered above).
    _meminfo_file = open("/proc/meminfo", "r", 1)

    def _free_memory():
        """The amount of available physical memory on GNU/Linux.

        This amount includes any memory which is still allocated but is
        no longer required.

        :Returns:

            `float`
                The amount of available physical memory in bytes.

        **Examples:**

        >>> _free_memory()
        96496240.0

        """
        # https://github.com/giampaolo/psutil/blob/master/psutil/_pslinux.py
        # ----------------------------------------------------------------
        # The available physical memory is the sum of the values of
        # the 'SReclaimable', 'Cached', 'Buffers' and 'MemFree'
        # entries in the /proc/meminfo file
        # (http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/Documentation/filesystems/proc.txt).
        # ----------------------------------------------------------------
        free_KiB = 0.0
        n = 0
        # Seeking the beginning of the file /proc/meminfo regenerates
        # the information contained in it.
        _meminfo_file.seek(0)
        for line in _meminfo_file:
            field_size = line.split()
            if field_size[0] in _meminfo_fields:
                free_KiB += float(field_size[1])
                n += 1
                # All four fields of interest found: stop scanning.
                if n > 3:
                    break
        free_bytes = free_KiB * 1024
        return free_bytes

else:
    # ----------------------------------------------------------------
    # NOT GNU/LINUX
    # ----------------------------------------------------------------
    def _free_memory():
        """The amount of available physical memory.

        :Returns:

            `float`
                The amount of available physical memory in bytes.

        **Examples:**

        >>> _free_memory()
        96496240.0

        """
        # Delegate to psutil on non-Linux platforms.
        return float(virtual_memory().available)

# --- End: if
# TODODASK - deprecate 'collapse_parallel_mode' when move to dask is complete
def configuration(
atol=None,
rtol=None,
tempdir=None,
of_fraction=None,
chunksize=None,
collapse_parallel_mode=None,
free_memory_factor=None,
log_level=None,
regrid_logging=None,
relaxed_identities=None,
bounds_combination_mode=None,
):
"""View or set any number of constants in the project-wide
configuration.
The full list of global constants that can be set in any
combination are:
* `atol`
* `rtol`
* `tempdir`
* `of_fraction`
* `chunksize`
* `collapse_parallel_mode`
* `free_memory_factor`
* `log_level`
* `regrid_logging`
* `relaxed_identities`
* `bounds_combination_mode`
These are all constants that apply throughout cf, except for in
specific functions only if overridden by the corresponding keyword
argument to that function.
The value of `None`, either taken by default or supplied as a
value, will result in the constant in question not being changed
from the current value. That is, it will have no effect.
Note that setting a constant using this function is equivalent to
setting it by means of a specific function of the same name,
e.g. via `cf.atol`, but in this case multiple constants can be set
at once.
.. versionadded:: 3.6.0
.. seealso:: `atol`, `rtol`, `tempdir`, `of_fraction`,
`chunksize`, `collapse_parallel_mode`,
`total_memory`, `free_memory_factor`, `fm_threshold`,
`min_total_memory`, `log_level`, `regrid_logging`,
`relaxed_identities`, `bounds_combination_mode`
:Parameters:
atol: `float` or `Constant`, optional
The new value of absolute tolerance. The default is to not
change the current value.
rtol: `float` or `Constant`, optional
The new value of relative tolerance. The default is to not
change the current value.
tempdir: `str` or `Constant`, optional
The new directory for temporary files. Tilde expansion (an
initial component of ``~`` or ``~user`` is replaced by
that *user*'s home directory) and environment variable
expansion (substrings of the form ``$name`` or ``${name}``
are replaced by the value of environment variable *name*)
are applied to the new directory name.
The default is to not change the directory.
of_fraction: `float` or `Constant`, optional
The new fraction (between 0.0 and 1.0). The default is to
not change the current behaviour.
chunksize: `float` or `Constant`, optional
The new chunksize in bytes. The default is to not change
the current behaviour.
collapse_parallel_mode: `int` or `Constant`, optional
The new value (0, 1 or 2).
bounds_combination_mode: `str` or `Constant`, optional
Determine how to deal with cell bounds in binary
operations. See `cf.bounds_combination_mode` for details.
free_memory_factor: `float` or `Constant`, optional
The new value of the fraction of memory kept free as a
temporary workspace. The default is to not change the
current behaviour.
log_level: `str` or `int` or `Constant`, optional
The new value of the minimal log severity level. This can
be specified either as a string equal (ignoring case) to
the named set of log levels or identifier ``'DISABLE'``,
or an integer code corresponding to each of these, namely:
* ``'DISABLE'`` (``0``);
* ``'WARNING'`` (``1``);
* ``'INFO'`` (``2``);
* ``'DETAIL'`` (``3``);
* ``'DEBUG'`` (``-1``).
regrid_logging: `bool` or `Constant`, optional
The new value (either True to enable logging or False to
disable it). The default is to not change the current
behaviour.
relaxed_identities: `bool` or `Constant`, optional
The new value; if True, use "relaxed" mode when getting a
construct identity. The default is to not change the
current value.
:Returns:
`Configuration`
The dictionary-like object containing the names and values
of the project-wide constants prior to the change, or the
current names and values if no new values are specified.
**Examples:**
>>> cf.configuration() # view full global configuration of constants
{'rtol': 2.220446049250313e-16,
'atol': 2.220446049250313e-16,
'tempdir': '/tmp',
'of_fraction': 0.5,
'free_memory_factor': 0.1,
'regrid_logging': False,
'collapse_parallel_mode': 0,
'relaxed_identities': False,
'log_level': 'WARNING',
'bounds_combination_mode': 'AND',
'chunksize': 82873466.88000001}
>>> cf.chunksize(7.5e7) # any change to one constant...
82873466.88000001
>>> cf.configuration()['chunksize'] # ...is reflected in the configuration
75000000.0
>>> cf.configuration(
... of_fraction=0.7, tempdir='/usr/tmp', log_level='INFO') # set items
{'rtol': 2.220446049250313e-16,
'atol': 2.220446049250313e-16,
'tempdir': '/tmp',
'of_fraction': 0.5,
'free_memory_factor': 0.1,
'regrid_logging': False,
'collapse_parallel_mode': 0,
'relaxed_identities': False,
'log_level': 'WARNING',
'bounds_combination_mode': 'AND',
'chunksize': 75000000.0}
>>> cf.configuration() # the items set have been updated accordingly
{'rtol': 2.220446049250313e-16,
'atol': 2.220446049250313e-16,
'tempdir': '/usr/tmp',
'of_fraction': 0.7,
'free_memory_factor': 0.1,
'regrid_logging': False,
'collapse_parallel_mode': 0,
'relaxed_identities': False,
'log_level': 'INFO',
'bounds_combination_mode': 'AND',
'chunksize': 75000000.0}
Use as a context manager:
>>> print(cf.configuration())
{'rtol': 2.220446049250313e-16,
'atol': 2.220446049250313e-16,
'tempdir': '/usr/tmp',
'of_fraction': 0.7,
'free_memory_factor': 0.1,
'regrid_logging': False,
'collapse_parallel_mode': 0,
'relaxed_identities': False,
'log_level': 'INFO',
'bounds_combination_mode': 'AND',
'chunksize': 75000000.0}
>>> with cf.configuration(atol=9, rtol=10):
... print(cf.configuration())
...
{'rtol': 9.0,
'atol': 10.0,
'tempdir': '/usr/tmp',
'of_fraction': 0.7,
'free_memory_factor': 0.1,
'regrid_logging': False,
'collapse_parallel_mode': 0,
'relaxed_identities': False,
'log_level': 'INFO',
'bounds_combination_mode': 'AND',
'chunksize': 75000000.0}
>>> print(cf.configuration())
{'rtol': 2.220446049250313e-16,
'atol': 2.220446049250313e-16,
'tempdir': '/usr/tmp',
'of_fraction': 0.7,
'free_memory_factor': 0.1,
'regrid_logging': False,
'collapse_parallel_mode': 0,
'relaxed_identities': False,
'log_level': 'INFO',
'bounds_combination_mode': 'AND',
'chunksize': 75000000.0}
"""
| |
import requests
import csv
import logging
from requests.auth import HTTPBasicAuth
import time
from primeapidata import PI_ADDRESS, USERNAME, PASSWORD
requests.packages.urllib3.disable_warnings()
'''
Call one of these from the main function, or uncomment one here; be careful of the different filenames.
This should probably be turned into a function with the filename passed as a parameter.
But then maybe the logging object would need to be passed around. Not sure. Will test.
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p',
                    filename='GetAllDevices.log', level=logging.INFO)
logging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S',
                    filename='GetAll_IPs.log', level=logging.INFO)
'''
# Define Global Variables - these should be included in a separate file named primeapidata.py
#USERNAME = "username"  # define REST API username
#PASSWORD = "password"  # define REST API password
#PI_ADDRESS = "ip_address"  # define IP Address of Prime Infrastructure Server
def getAPIResponse(apiurl):
    """Issue an authenticated GET to the Prime Infrastructure REST API and
    return the decoded JSON body.
    """
    resp = requests.get(
        apiurl,
        auth=HTTPBasicAuth(USERNAME, PASSWORD),
        verify=False,  # PI typically runs with a self-signed certificate
    )
    return resp.json()
# Beginning of Function
def getDeviceGroups():
    """Return the list of all device group names known to Prime Infrastructure."""
    logging.info("Getting all device groups")
    apiurl = f"https://{PI_ADDRESS}/webacs/api/v2/data/DeviceGroups.json?.full=true"
    r_json = getAPIResponse(apiurl)
    groups = []
    for entity in r_json['queryResponse']['entity']:
        name = entity["deviceGroupsDTO"]["groupName"]
        groups.append(name)
        logging.info(f" - added group {name}")
    logging.info("Initial groups ok... moving on")
    return groups
# End of Function
# Beginning of Function
def RemoveGeneric(Group_List):
    """Remove the generic/built-in Prime Infrastructure group names.

    The list is modified in place and also returned, matching the original
    call pattern ``groups = RemoveGeneric(groups)``. Only the first
    occurrence of each generic name is removed (list.remove semantics).

    Parameters
    ----------
    Group_List : list of str
        Group names, e.g. as returned by getDeviceGroups().

    Returns
    -------
    list of str
        The same list object with the generic entries removed.
    """
    # Built-in "Device Type" style groups that should not be queried
    # individually; kept as a single data-driven tuple instead of eight
    # copy-pasted if-blocks.
    generic_groups = (
        "Device Type",
        "Routers",
        "Security and VPN",
        "Switches and Hubs",
        "Unified AP",
        "Unsupported Cisco Device",
        "Wireless Controller",
        "Cisco 4400 Series Integrated Services Routers",
    )
    logging.info("Removing Generic Groups")
    for generic in generic_groups:
        if generic in Group_List:
            Group_List.remove(generic)
    logging.info("Final groups ok... moving on")
    return Group_List
# End of Function
# Beginning of Function
def getDevices_old(Group_List):
    """Legacy device collector: queries InventoryDetails per group and writes
    the rows straight to DeviceList_<timestamp>.csv.

    Superseded by getDevices() + writeDevices(); kept for reference.
    """
    timestr = time.strftime("%Y%m%d_%H%M")
    Device_List = "DeviceList_"+timestr+".csv"
    logging.info(" - Getting Device Info")
    i = 0
    DeviceFileList = []
    NumOfGroups = len(Group_List)
    # remove last / from controller url
    new_url = "https://"+PI_ADDRESS+"/webacs/api/v4/data/InventoryDetails"
    while i < NumOfGroups:
        group = Group_List[i]
        url = new_url + ".json?.full=true&.group=" + group + "&.maxResults=300"
        response = requests.get(url, auth=HTTPBasicAuth(USERNAME, PASSWORD), verify=False)
        r_json = response.json()
        # NOTE(review): "@count" comes back from .json() as a string (or ""
        # via the defaults), so the `count != 0` test below is always True
        # and the "no devices" branch never runs. Also, if "queryResponse"
        # is missing, the default "" has no .get() and this line raises
        # AttributeError. getDevices() fixed both with int() + try/except.
        count = (r_json.get("queryResponse", "")).get("@count", "")
        if count != 0: # no devices in group
            logging.info(f' - Getting devices in group {group}')
            for entity in r_json["queryResponse"]["entity"]:
                Info = entity["inventoryDetailsDTO"]
                DeviceName = Info.get("summary").get("deviceName", "")
                IP_Addr = Info.get("summary").get("ipAddress", "")
                Location = Info.get("summary", "").get("location", "")
                # "Unsupported Cisco Device" entries carry no chassis data.
                if group != ("Unsupported Cisco Device"):
                    Model = Info["chassis"]["chassis"][0]["modelNr"]
                    SN = Info["chassis"]["chassis"][0]["serialNr"]
                else:
                    Model = "-"
                    SN = "-"
                new_row = [DeviceName, IP_Addr, Location, Model, SN]
                DeviceFileList.append(new_row)
            # Move on to next group
            i += 1
        # no devices in group
        else:
            # move on to next group
            i += 1
    logging.info(" - All info has been collected.\nEND")
    # open a file for writing
    # NOTE(review): not closed on error and opened without newline="";
    # writeDevices() is the maintained replacement.
    DeviceList = open(Device_List, 'w')
    # create the csv writer object
    csvwriter = csv.writer(DeviceList)
    header = ["DeviceName", "IP_Address", "Location", "Type", "Serial Number"]
    csvwriter.writerow(header)
    for line in DeviceFileList:
        csvwriter.writerow(line)
    DeviceList.close()
    logging.info(" - All done.\nEND")
    return()
# End of function
# Beginning of function
def getDevices(Group_List):
    """Collect inventory rows for every device in every group.

    Devices in the "Unsupported Cisco Device" group expose no chassis
    data, so their model and serial are recorded as "-".

    Parameters
    ----------
    Group_List : list of str
        Device group names to query.

    Returns
    -------
    list of list of str
        Rows of [DeviceName, IP_Address, Location, Model, SerialNumber].
    """
    logging.info("Getting Device Info")
    # List of rows for the device file.
    DeviceList = []
    for group in Group_List:
        apiurl = "https://"+PI_ADDRESS+"/webacs/api/v4/data/InventoryDetails.json?.full=true&.maxResults=1000&.group=" + group
        r_json = getAPIResponse(apiurl)
        try:
            count = int(r_json["queryResponse"]["@count"])
            if count == 0:
                continue
            logging.info(f' - Getting devices in group {group} with count {count}')
            for entity in r_json["queryResponse"]["entity"]:
                Info = entity["inventoryDetailsDTO"]
                DeviceName = Info["summary"]["deviceName"]
                IP_Addr = Info["summary"]["ipAddress"]
                Location = Info["summary"]["location"]
                if group != "Unsupported Cisco Device":
                    SN = Info["chassis"]["chassis"][0]["serialNr"]
                    Model = Info["chassis"]["chassis"][0]["modelNr"]
                else:
                    SN = "-"
                    Model = "-"
                DeviceList.append([DeviceName, IP_Addr, Location, Model, SN])
        # Narrowed from a bare ``except:`` so that e.g. KeyboardInterrupt is
        # not swallowed; these are the errors a missing/malformed payload
        # raises while the row fields are extracted.
        except (KeyError, TypeError, ValueError, IndexError) as exc:
            logging.info(f" - Moving on to next group - due to error: {exc}")
            continue
    logging.info(" - All info has been collected.")
    return DeviceList
def getIPs_old(Group_List):
    """Legacy IP collector: gathers "address/prefix" strings per group and
    writes them straight to IP_<timestamp>.csv.

    Superseded by getIPs() + writeIPs(); kept for reference.
    """
    logging.info("Getting Device IPs")
    timestr = time.strftime("%Y%m%d_%H%M")
    IP_file = "IP_"+timestr+".csv"
    output = []
    controller_url = "https://"+PI_ADDRESS+"/webacs/api/v4/data/InventoryDetails/"
    for group in Group_List:
        url = controller_url + ".json?.full=true&.group=" + group + "&.maxResults=1000"
        response = requests.get(url, auth=HTTPBasicAuth(USERNAME, PASSWORD), verify=False)
        r_json = response.json()
        # NOTE(review): "@count" is a string (or "" via the defaults), so the
        # `count != 0` test below is always True; and if "queryResponse" is
        # missing, "".get raises AttributeError. getIPs() fixed both with
        # int() + try/except.
        count = (r_json.get("queryResponse", "")).get("@count", "")
        if count != 0:
            logging.info(f'Getting IPs for devices in group {group}')
            ''' This group has already been removed from the group list, the if clause is redundant'''
            if group != "Unsupported Cisco Device":
                for Info in r_json['queryResponse']['entity']:
                    intf_list = Info["inventoryDetailsDTO"]["ipInterfaces"]["ipInterface"]
                    for interface in intf_list:
                        ip = interface["ipAddress"].get("address", "")
                        mask = interface.get("prefixLength", "")
                        ip_and_mask = str(ip) + "/" + str(mask)
                        output.append(ip_and_mask)
            else:
                # Unsupported devices expose no interface list; fall back to
                # the management IP with an assumed /24.
                for Info in r_json['queryResponse']['entity']:
                    ip = Info["inventoryDetailsDTO"]["summary"]["ipAddress"]
                    mask = "24"
                    ip_and_mask = str(ip) + "/" + str(mask)
                    output.append(ip_and_mask)
        # Move on to next group
        # If no devices in Group then move on
        else:
            continue
    # for loop ends
    # open a file for writing
    # NOTE(review): not closed on error and opened without newline="";
    # writeIPs() is the maintained replacement.
    IPList = open(IP_file, 'w')
    # create the csv writer object
    csvwriter = csv.writer(IPList)
    csvwriter.writerows(zip(output))
    IPList.close()
    return()
# End of function
def getIPs(Group_List):
    """Collect "address/prefix" strings for every interface of every device.

    For the "Unsupported Cisco Device" group (normally filtered out by
    RemoveGeneric, so this branch is defensive) no interface list exists,
    and the management IP with an assumed /24 prefix is recorded instead.

    Parameters
    ----------
    Group_List : list of str
        Device group names to query.

    Returns
    -------
    list of str
        "ip/prefix" strings, one per interface (or per device for the
        unsupported group).
    """
    logging.info("Getting Device IPs")
    IPs_List = []
    for group in Group_List:
        apiurl = "https://"+PI_ADDRESS+"/webacs/api/v4/data/InventoryDetails.json?.full=true&.maxResults=1000&.group=" + group
        r_json = getAPIResponse(apiurl)
        try:
            # Number of devices in the group; skip empty groups.
            count = int(r_json["queryResponse"]["@count"])
            if count == 0:
                continue
            logging.info(f' - Getting IPs for devices in group {group}')
            if group != "Unsupported Cisco Device":
                for Info in r_json['queryResponse']['entity']:
                    intf_list = Info["inventoryDetailsDTO"]["ipInterfaces"]["ipInterface"]
                    for interface in intf_list:
                        ip = interface["ipAddress"].get("address", "")
                        mask = interface.get("prefixLength", "")
                        IPs_List.append(str(ip) + "/" + str(mask))
            else:
                for Info in r_json['queryResponse']['entity']:
                    ip = Info["inventoryDetailsDTO"]["summary"]["ipAddress"]
                    IPs_List.append(str(ip) + "/24")
        # Narrowed from a bare ``except:`` so that e.g. KeyboardInterrupt is
        # not swallowed. (A dead re-read of "@count" after the loop was also
        # removed -- it was unreachable/pointless.)
        except (KeyError, TypeError, ValueError) as exc:
            logging.info(f" - Moving on to next group - due to error: {exc}")
            continue
    return IPs_List
# for loop ends
'''
This Functions needs to be reviewed.
Would be nice if the filename is passed as a parameter.
Also if the list of fields is also parameter,
then we can have a single function for exporting
to csv format.
'''
def writeDevices(DeviceFileList):
    """Write device rows to DeviceList_<timestamp>.csv in the working directory.

    Parameters
    ----------
    DeviceFileList : list of list of str
        Rows of [DeviceName, IP_Address, Location, Type, SerialNumber],
        as produced by getDevices().

    Returns
    -------
    tuple
        An empty tuple (kept for backward compatibility).
    """
    logging.info("Writing data to file.")
    timestr = time.strftime("%Y%m%d_%H%M")
    Device_List = "DeviceList_"+timestr+".csv"
    # newline="" is required by the csv module to avoid blank lines on
    # Windows; the with-block guarantees the file is closed even on error.
    with open(Device_List, 'w', newline='') as DeviceList:
        csvwriter = csv.writer(DeviceList)
        header = ["DeviceName", "IP_Address", "Location", "Type", "Serial Number"]
        csvwriter.writerow(header)
        csvwriter.writerows(DeviceFileList)
    logging.info("All done.")
    return ()
# End of function
def writeIPs(IPs_List):
    """Write one IP/prefix string per line to IP_<timestamp>.csv.

    Parameters
    ----------
    IPs_List : list of str
        "address/prefix" strings as produced by getIPs().

    Returns
    -------
    tuple
        An empty tuple (kept for backward compatibility).
    """
    timestr = time.strftime("%Y%m%d_%H%M")
    IP_file = "IP_"+timestr+".csv"
    # newline="" is required by the csv module to avoid blank lines on
    # Windows; the with-block guarantees the file is closed even on error.
    with open(IP_file, 'w', newline='') as IPList:
        csvwriter = csv.writer(IPList)
        # One single-column row per address. (The original wrapped each
        # string with zip() for the same effect; an explicit per-row list
        # is clearer.)
        csvwriter.writerows([ip] for ip in IPs_List)
    return ()
# just get serials and type
def getDevicesSerials(Group_List):
    """Collect model code and serial number for every device in every group.

    Parameters
    ----------
    Group_List : list of str
        Device group names to query.

    Returns
    -------
    list of dict
        One ``{"code": modelNr, "serial": serialNr}`` dict per device.
    """
    logging.info(" - Getting Device Info")
    # List of per-device dicts for the device file.
    DeviceList = []
    for group in Group_List:
        # Unsupported devices carry no chassis data, so there is nothing to
        # collect for this group; skip it before querying (the original
        # fetched the group and then skipped every entity one by one).
        if group == "Unsupported Cisco Device":
            continue
        apiurl = "https://"+PI_ADDRESS+"/webacs/api/v4/data/InventoryDetails.json?.full=true&.maxResults=1000&.group=" + group
        r_json = getAPIResponse(apiurl)
        try:
            count = int(r_json["queryResponse"]["@count"])
            if count == 0:
                continue
            logging.info(f' - Getting devices in group {group} with count {count}')
            for entity in r_json["queryResponse"]["entity"]:
                chassis0 = entity["inventoryDetailsDTO"]["chassis"]["chassis"][0]
                DeviceDetails = dict()
                DeviceDetails["code"] = chassis0["modelNr"]
                DeviceDetails["serial"] = chassis0["serialNr"]
                DeviceList.append(DeviceDetails)
        # Narrowed from a bare ``except:`` so that e.g. KeyboardInterrupt is
        # not swallowed.
        except (KeyError, TypeError, ValueError, IndexError) as exc:
            logging.info(f" - Moving on to next group - due to error: {exc}")
            continue
    logging.info(" - All info has been collected.")
    return DeviceList
def getSimpleDeviceSerials():
logging.info(" - Getting Device Info")
#create a list of list of strings for the device file
apiurl = "https://"+PI_ADDRESS+"/webacs/api/v4/data/Devices.json?.full=true&.maxResults=1000"
resp = getAPIResponse(apiurl)
DeviceList = []
counter = 0
last_url = ""
try:
count = int(resp["queryResponse"]["@count"])
logging.info(f" - {count} Devices found")
if count > 0:
logging.info(" - Looping on Devices")
for entity in resp["queryResponse"]["entity"]:
dev_url = entity['@url']
if "devicesDTO" not in entity.keys():
logging.warning(f" - Device at url: {dev_url}.json is missing deviceDTO section")
continue
dev_info = entity["devicesDTO"]
if dev_info["adminStatus"] == "UNMANAGED":
logging.warning(f" - Device at url: {dev_url}.json is UNMANAGED")
continue
if "manufacturerPartNrs" not in dev_info.keys():
logging.warning(f" - Device at url: {dev_url}.json is unsupported and missing manufacturer section")
continue
if dev_info["managementStatus"].strip() not in ["MANAGED_AND_SYNCHRONIZED","INSERVICE_MAINTENANCE"]:
logging.warning(f" - Device at url: {dev_url}.json is {dev_info['managementStatus']}")
continue
if | |
import os
import ray
import time
import torch
import queue
import threading
from shutil import copy2
from copy import deepcopy
import pytorchrl as prl
from pytorchrl.scheme.base.worker import Worker as W
from pytorchrl.scheme.utils import ray_get_and_free, broadcast_message, pack, unpack
# Puts a limit on the allowed policy lag: presumably the maxsize of the
# collector input queue, so stale rollouts cannot accumulate unboundedly
# (the consuming code is further down this file) -- TODO confirm.
max_queue_size = 10
class GWorker(W):
"""
Worker class handling gradient computation.
This class wraps an actor instance, a storage class instance and a
worker set of remote data collection workers. It receives data from the
collection workers and computes gradients following a logic defined in
function self.step(), which will be called from the Learner class.
Parameters
----------
index_worker : int
Worker index.
col_workers_factory : func
A function that creates a set of data collection workers.
col_communication : str
Communication coordination pattern for data collection.
compress_grads_to_send : bool
Whether or not to compress gradients before sending then to the update worker.
col_execution : str
Execution patterns for data collection.
col_fraction_workers : float
Minimum fraction of samples required to stop if collection is
synchronously coordinated and most workers have finished their
collection task.
device : str
"cpu" or specific GPU "cuda:number`" to use for computation.
initial_weights : ray object ID
Initial model weights.
Attributes
----------
index_worker : int
Index assigned to this worker.
iter : int
Number of times gradients have been computed and sent.
col_communication : str
Communication coordination pattern for data collection.
compress_grads_to_send : bool
Whether or not to compress gradients before sending then to the update worker.
col_workers : CWorkerSet
A CWorkerSet class instance.
local_worker : CWorker
col_workers local worker.
remote_workers : List of CWorker's
col_workers remote data collection workers.
num_workers : int
Number of collection remote workers.
actor : Actor
An actor class instance.
algo : Algo
An algorithm class instance.
storage : Storage
A Storage class instance.
inqueue : queue.Queue
Input queue where incoming collected samples are placed.
collector : CollectorThread
Class handling data collection via col_workers and placing incoming
rollouts into the input queue `inqueue`.
"""
def __init__(self,
index_worker,
col_workers_factory,
col_communication=prl.SYNC,
compress_grads_to_send=False,
col_execution=prl.CENTRAL,
col_fraction_workers=1.0,
initial_weights=None,
device=None):
self.index_worker = index_worker
super(GWorker, self).__init__(index_worker)
# Define counters and other attributes
self.iter = 0
self.col_communication = col_communication
self.compress_grads_to_send = compress_grads_to_send
self.processing_time_start = None
# Computation device
dev = device or "cuda" if torch.cuda.is_available() else "cpu"
# Create CWorkerSet instance
self.col_workers = col_workers_factory(dev, initial_weights, index_worker)
self.local_worker = self.col_workers.local_worker()
self.remote_workers = self.col_workers.remote_workers()
self.num_remote_workers = len(self.remote_workers)
self.num_collection_workers = 1 if self.num_remote_workers == 0 else self.num_remote_workers
# Get Actor Critic instance
self.actor = self.local_worker.actor
# Get Algorithm instance
self.algo = self.local_worker.algo
# Get storage instance.
if col_communication == prl.ASYNC and self.local_worker.envs_train is not None:
# If async collection, c_worker and g_worker need different storage instances
# To avoid overwriting data. Define c_worker one with the minimum size required,
# to save memory
size1 = self.local_worker.storage.max_size
size2 = self.local_worker.algo.update_every
size2 = size2 * 2 if size2 is not None else float("Inf")
new_size = min(size1, size2)
if self.local_worker.envs_train is not None:
# Create a storage class deepcopy, but be careful with envs attribute,
# can not be deep-copied
envs = getattr(self.local_worker.storage, "envs")
self.local_worker.storage.envs = None
self.storage = deepcopy(self.local_worker.storage)
self.local_worker.storage.envs = envs
self.local_worker.update_storage_parameter("max_size", new_size)
else:
self.storage = self.local_worker.storage
for e in self.remote_workers:
e.update_storage_parameter.remote("max_size", new_size)
else:
self.storage = self.local_worker.storage
if len(self.remote_workers) > 0 or (len(self.remote_workers) == 0 and
self.local_worker.envs_train is not None):
# Create CollectorThread
self.collector = CollectorThread(
index_worker=index_worker,
local_worker=self.local_worker,
remote_workers=self.remote_workers,
col_communication=col_communication,
col_fraction_workers=col_fraction_workers,
col_execution=col_execution,
broadcast_interval=1)
# Print worker information
self.print_worker_info()
@property
def actor_version(self):
"""Number of times Actor has been updated."""
return self.local_worker.actor_version
def step(self, distribute_gradients=False):
"""
Pulls data from `self.collector.queue`, then perform a gradient computation step.
Parameters
----------
distribute_gradients : bool
If True, gradients will be directly shared across remote workers
and optimization steps will executed in a decentralised way.
Returns
-------
grads : list of tensors
List of actor gradients.
info : dict
Summary dict of relevant gradient operation information.
"""
self.get_data()
grads, info = self.get_grads(distribute_gradients)
if distribute_gradients:
self.apply_gradients()
# Encode data if self.compress_data_to_send is True
data_to_send = pack((grads, info)) if self.compress_grads_to_send else (grads, info)
return data_to_send
    def get_data(self):
        """
        Pulls data from `self.collector.queue` and prepares batches to
        compute gradients.

        Normally just advances `self.batches`; when the generator is
        exhausted (or does not exist yet, on the first call), fetches a new
        data slice from the collector queue, times the processing phase, and
        rebuilds the batch generator from storage.
        """
        try: # Try pulling next batch
            self.next_batch = self.batches.__next__()
            # Only use episode information once
            self.info.pop(prl.EPISODES, None)
            # Only record collected samples once
            self.info[prl.NUMSAMPLES] = 0
        # except (StopIteration, AttributeError):
        # NOTE(review): the broad `except Exception` also covers the first
        # call (AttributeError: `self.batches` not yet set) and generator
        # exhaustion (StopIteration), but it will silently mask any other
        # error raised above — confirm the commented narrower form can't be
        # restored.
        except Exception:
            # Under synchronous collection, explicitly trigger one
            # collection step before blocking on the queue.
            if self.col_communication == prl.SYNC:
                self.collector.step()
            data, self.info = self.collector.queue.get()
            # Time data processing and calculate collection-to-processing time ratio
            if not self.processing_time_start:
                # First slice ever: just start the clock.
                self.processing_time_start = time.time()
            else:
                self.info[prl.TIME][prl.PROCESSING] = time.time() - self.processing_time_start
                # Per-worker collection time divided by processing time.
                self.info[prl.TIME][prl.CPRATIO] = \
                    (self.info[prl.TIME][prl.COLLECTION] / self.num_collection_workers
                     ) / self.info[prl.TIME][prl.PROCESSING]
                self.processing_time_start = time.time()
            # Insert the fresh slice and rebuild the batch generator.
            self.storage.insert_data_slice(data)
            self.storage.before_gradients()
            self.batches = self.storage.generate_batches(
                self.algo.num_mini_batch, self.algo.mini_batch_size,
                self.algo.num_epochs)
            self.next_batch = self.batches.__next__()
def get_grads(self, distribute_gradients=False):
"""
Perform a gradient computation step.
Parameters
----------
distribute_gradients : bool
If True, gradients will be directly shared across remote workers
and optimization steps will executed in a decentralised way.
Returns
-------
grads : list of tensors
List of actor gradients.
info : dict
Summary dict of relevant gradient operation information.
"""
# Get gradients and algorithm-related information
t = time.time()
grads, algo_info = self.compute_gradients(self.next_batch, distribute_gradients)
compute_time = time.time() - t
# Add extra information to info dict
self.info[prl.ALGORITHM] = algo_info
self.info[prl.VERSION][prl.GRADIENT] = self.local_worker.actor_version
self.info[prl.TIME][prl.GRADIENT] = compute_time
# Run after gradients data process (if any)
info = self.storage.after_gradients(self.next_batch, self.info)
# Update iteration counter
self.iter += 1
return grads, info
def compute_gradients(self, batch, distribute_gradients):
"""
Calculate actor gradients and update networks.
Parameters
----------
batch : dict
data batch containing all required tensors to compute algo loss.
distribute_gradients : bool
If True, gradients will be directly shared across remote workers
and optimization steps will executed in a decentralised way.
Returns
-------
grads : list of tensors
List of actor gradients.
info : dict
Summary dict with relevant gradient-related information.
"""
grads, info = self.algo.compute_gradients(batch, grads_to_cpu=not distribute_gradients)
if distribute_gradients:
if torch.cuda.is_available():
for g in grads:
torch.distributed.all_reduce(g, op=torch.distributed.ReduceOp.SUM)
else:
torch.distributed.all_reduce_coalesced(grads, op=torch.distributed.ReduceOp.SUM)
for p in self.actor.parameters():
if p.grad is not None:
p.grad /= self.num_remote_workers
grads = None
return grads, info
def apply_gradients(self, gradients=None):
"""Update Actor Critic model"""
self.local_worker.actor_version += 1
self.algo.apply_gradients(gradients)
if self.col_communication == prl.SYNC and len(self.remote_workers) > 0:
self.collector.broadcast_new_weights()
def set_weights(self, actor_weights):
"""
Update the worker actor version with provided weights.
weights : dict of tensors
Dict containing actor weights to be set.
"""
self.local_worker.actor_version = actor_weights[prl.VERSION]
self.local_worker.algo.set_weights(actor_weights[prl.WEIGHTS])
def update_algorithm_parameter(self, parameter_name, new_parameter_value):
"""
If `parameter_name` is an attribute of Worker.algo, change its value to
`new_parameter_value value`.
Parameters
----------
parameter_name : str
Algorithm attribute name
"""
self.local_worker.update_algorithm_parameter(parameter_name, new_parameter_value)
for e in self.remote_workers:
e.update_algorithm_parameter.remote(parameter_name, new_parameter_value)
self.algo.update_algorithm_parameter(parameter_name, new_parameter_value)
for e in self.col_workers.remote_workers():
e.update_algorithm_parameter.remote(parameter_name, new_parameter_value)
def save_model(self, fname):
"""
Save current version of actor as a torch loadable checkpoint.
Parameters
----------
fname : str
Filename given to the checkpoint.
Returns
-------
save_name : str
Path to saved file.
"""
torch.save(self.local_worker.actor.state_dict(), fname + ".tmp")
os.rename(fname + '.tmp', fname)
save_name = fname + ".{}".format(self.local_worker.actor_version)
copy2(fname, save_name)
return save_name
def stop(self):
"""Stop collecting data."""
if hasattr(self, "collector"):
self.collector.stopped = True
for e in self.remote_workers:
e.terminate_worker.remote()
class CollectorThread(threading.Thread):
"""
This class receives data samples from the data collection workers and
queues them into the data input_queue.
Parameters
----------
index_worker : int
Index assigned to this worker.
input_queue : queue.Queue
Queue to store the data dicts received from data collection workers.
local_worker : Worker
Local worker that acts as a parameter server.
remote_workers : list of Workers
Set of workers collecting and sending rollouts.
col_fraction_workers : float
Minimum fraction of samples required to stop if collection is
synchronously coordinated and most workers have finished their
collection task.
col_communication : str
Communication coordination pattern for data collection.
col_execution : str
Execution patterns for data collection.
broadcast_interval : int
After how many central updates, model weights should be broadcasted to
remote collection workers.
Attributes
----------
stopped : bool
Whether or not the thread in running.
queue : queue.Queue
Queue to store the data dicts received from data collection workers.
index_worker : int
Index assigned to this worker.
local_worker : CWorker
col_workers | |
plot.xlog = True
plot.smoothplot(plot_used_original_data=1)
def skip_fields_and_check_accuracy(self):
sharpestpoint = self.find_sharpest_raw_point()
x = sharpestpoint.x
y = sharpestpoint.y
fields = self.fields
fieldnumbers = list(range(len(fields)))
skips = [1, 7]
sharps = []
sharps = []
numpoints = []
count = 1
print("Inc Start Points SFR SFR+- BestFocus Bestfocus+-")
for skip in skips:
sharplst = []
focusposlst = []
counts = []
for start in np.arange(skip):
usefields = fields[start::skip]
# Temporarily replace self.fields #naughty
self.fields = usefields
focusob = self.find_best_focus(x, y, axis=MERIDIONAL, plot=1)
sharplst.append(focusob.sharp)
focusposlst.append(focusob.focuspos)
counts.append(count)
count += 1
text = ""
if skip == 1 and start == 0:
baseline = focusob.sharp, focusob.focuspos
text = "** BASELINE ***"
delta = focusob.sharp - baseline[0]
percent = delta / baseline[0] * 100
bestfocus = (focusob.focuspos * skip) + start
bestfocusdelta = bestfocus - baseline[1]
print("{:3.0f} {:5.0f} {:6.0f} {:6.3f} {:7.3f} {:10.3f} {:10.3f} {}".format(skip,
start,
len(self.fields),
focusob.sharp,
delta,
bestfocus,
bestfocusdelta,
text))
# plt.plot(counts, sharplst, '.', color=COLOURS[skip])
# plt.plot([len(usefields)] * skip, sharplst, '.', color=COLOURS[skip])
numpoints.append(len(usefields))
self.fields = fields
print(count)
plt.legend(numpoints)
# plt.xlabel("Testrun number")
plt.xlabel("Number of images in sequence")
plt.ylabel("Peak Spacial frequency response")
plt.title("Peak detection vs number of images in sequence")
# plt.plot([0, count], [baseline[0], baseline[0]], '--', color='gray')
plt.plot([3, len(fields)], [baseline[0], baseline[0]], '--', color='gray')
plt.ylim(baseline[0]-0.1, baseline[0]+0.1)
plt.show()
def get_wavefront_data_path(self, seed=None):
path = os.path.join(self.rootpath, "wavefront_results/")
return path
def get_old_wavefront_data_path(self, seed=None):
path = os.path.join(self.rootpath, "wavefront_data.csv")
return path
def read_wavefront_data(self, overwrite, read_old=False, copy_old=True, read_autosave=True, x_loc=None, y_loc=None):
wfl = read_wavefront_data(self.get_wavefront_data_path(), read_autosave=read_autosave, x_loc=x_loc, y_loc=y_loc)
if overwrite:
self.wavefront_data = wfl
return wfl
def copy_old_wavefront_data_to_new(self):
self.read_wavefront_data(overwrite=True, copy_old=False, read_old=True)
for item in self.wavefront_data:
save_wafefront_data(self.get_wavefront_data_path(), [item], suffix="copied")
# @staticmethod
# def intepolate_value_helper(fieldnumber, x, y, freq, axis):
# return float(gfs2.fields[fieldnumber].interpolate_value(x, y, freq, axis))
# @classmethod
# def find_best_focus_helper(cls, args):
# if args[6] % 50 == 0:
# print("Finding best focus for location {} / {} in {}".format(args[6], args[7], args[5]))
# ob = globalfocusset.find_best_focus(*args[2:6])
#
# return args[0], args[1], float(ob.sharp), float(ob.focuspos)
def clear_numbered_autosaves(path):
    """
    Delete numbered autosave CSV files from `path`.

    A file is removed when its name contains both 'autosave' and 'csv'
    (case-insensitive) and at least one digit; un-numbered autosaves are
    kept.

    Parameters
    ----------
    path : str
        Directory to scan (not recursive).
    """
    # Close the scandir iterator deterministically instead of leaking it.
    with os.scandir(path) as entries:
        for entry in entries:
            lowered = entry.name.lower()
            if 'autosave' in lowered and 'csv' in lowered:
                # any() short-circuits; no need to build the digit list.
                if any(char.isdigit() for char in entry.name):
                    os.remove(entry.path)
def save_focus_jitter(rootpath, focus_values, code_version=0):
    """
    Write estimated focus positions for each .sfr file to focus_positions.csv.

    Parameters
    ----------
    rootpath : str
        Directory containing the .sfr files; the CSV is written there too.
    focus_values : sequence of float
        Estimated focus position per .sfr file (in numeric filename order).
    code_version : int
        Accepted for interface compatibility.
        NOTE(review): the file always records CURRENT_JITTER_CODE_VERSION,
        so this argument is currently ignored — confirm intent.
    """
    filepath = os.path.join(rootpath, "focus_positions.csv")
    sfr_filenames = []
    with os.scandir(rootpath) as it:
        for entry in it:
            if entry.name.lower().endswith(".sfr"):
                # Numeric part of the filename determines the sequence order.
                digits = "".join((char for char in entry.name if char.isdigit()))
                sfr_filenames.append((int(digits), entry.name, entry.path))
    sfr_filenames.sort()
    if len(sfr_filenames) != len(focus_values):
        # zip() below silently truncates to the shorter sequence.
        log.warning("Focus value array does not correspond to number of .sfr files in path")
    # newline='' is required for csv.writer file objects; without it the
    # writer's '\r\n' row terminator gets doubled on Windows.
    with open(filepath, 'w', newline='') as file:
        writer = csv.writer(file, delimiter=" ", quotechar='"')
        file.writelines(("# ID Filename Estimated_Focus_Position\n",))
        writer.writerow(["Code_version", CURRENT_JITTER_CODE_VERSION])
        for (num, name, path), focus_position in zip(sfr_filenames, focus_values):
            writer.writerow((num, name, focus_position))
def estimate_focus_jitter(path=None, data_in=None, plot=1):
    """
    Estimate per-image focus-position errors ("jitter") for a focus sequence.

    Either analyses a FocusSet loaded from `path` (sampling several image
    locations and both axes) or a single pre-built FocusSetData in `data_in`.
    For each strong low-frequency MTF curve, fits a polynomial through the
    valid region and optimises small per-sample focus offsets that make the
    raw data agree with the smooth fit; offsets are averaged across
    frequencies, then across sample locations (with min/max outliers
    dropped).

    Parameters
    ----------
    path : str, optional
        Root path of a focus set (used when `data_in` is None). Corrected
        focus values are written back via save_focus_jitter().
    data_in : FocusSetData, optional
        Pre-assembled data to correct in place.
    plot : int
        0/1 summary plots, >=2 adds detailed per-frequency plots.

    Returns
    -------
    ndarray or FocusSetData
        Corrected focus values (path mode) or `data_in` with corrected
        `focus_values` (data mode).
    """
    code_version = 2
    if data_in is not None:
        # Single synthetic iteration; x/y/axis are unused in data mode.
        # NOTE: `iter` shadows the builtin within this function.
        iter = [(None, None, None)]
        data = data_in
    else:
        focusset = FocusSet(rootpath=path, include_all=True, use_calibration=True, load_focus_data=False)
        xvals = np.linspace(IMAGE_WIDTH / 4, IMAGE_WIDTH * 5/8, 4)[:]
        yvals = np.linspace(IMAGE_HEIGHT / 4, IMAGE_HEIGHT * 5/8, 4)[:]
        axes = [SAGITTAL, MERIDIONAL]
        iter = zip(*(_.flatten() for _ in np.meshgrid(xvals, yvals, axes))) # Avoid nested loops
    all_errors = []
    # Low spatial frequencies (cycles/pixel) used for the estimation.
    freqs = np.arange(2, 19, 2) / 64
    for x, y, axis in iter:
        if data_in is None:
            # Sample along the image diagonal.
            y = IMAGE_HEIGHT / IMAGE_WIDTH * x
            data = FocusSetData()
            datalst = []
            for freq in freqs:
                pos = focusset.get_interpolation_fn_at_point(x, y, freq, axis)
                datalst.append(pos.sharp_data)
            data.merged_mtf_values = np.array(datalst)
            data.focus_values = pos.focus_data
        nfv = len(data.focus_values)
        if nfv < 8:
            log.warning("Focus jitter estimation may not be reliable with {} samples".format(nfv))
        if nfv < 6:
            raise ValueError("More focus samples needed")
        freq_ix = 0
        nom_focus_values = data.focus_values
        # Nominal spacing between consecutive focus positions.
        non_inc = nom_focus_values[1] - nom_focus_values[0]
        xmods = []
        grads = []
        # Minimum modulation for a sample to take part in the polynomial fit.
        lowcut = 0.2
        while freq_ix < (data.merged_mtf_values.shape[0]):
            low_freq_average = data.merged_mtf_values[freq_ix, :]
            # Stop once curves get too weak to be trustworthy.
            if low_freq_average.max() < 0.35:
                break
            print("Using frequency index {}".format(freq_ix))
            freq_ix += 1
            min_modulation = low_freq_average.min()
            print("Min modulation", min_modulation)
            print("Mean modulation", low_freq_average.mean())
            # NOTE(review): the first two branches pick the same order (4),
            # so the 0.4 threshold currently has no effect — confirm intent.
            if min_modulation < 0.4:
                maxpoly = 4
            elif min_modulation < 0.8:
                maxpoly = 4
            else:
                maxpoly = 2
            # Raw 4th-order fit supplies a gradient-based validity metric:
            # samples on flat parts of the curve carry no focus information.
            rawpoly = np.polyfit(nom_focus_values, low_freq_average, 4)
            rawpolygrad = np.polyder(rawpoly, 1)
            gradmetric = np.clip(np.abs(np.polyval(rawpolygrad, nom_focus_values)), 0, 0.5 / non_inc)
            valid = np.logical_and(low_freq_average > lowcut, gradmetric > 0.012 / non_inc)
            # print(gradmetric)
            # print(valid)
            valid_x = data.focus_values[valid]
            valid_y = low_freq_average[valid]
            if len(valid_x) < 5:
                continue
            poly_order = min(maxpoly, len(valid_x) - 2) # Ensure fit is overdetermined
            poly = np.polyfit(nom_focus_values[low_freq_average > lowcut], low_freq_average[low_freq_average > lowcut],
                              poly_order)
            def cost(xmod):
                # Mean squared residual between shifted samples and the fit.
                smoothline = np.polyval(poly, valid_x + xmod)
                # modcost = ((1.0 - cost_gradmetric)**2 * xmod**2).mean() * 10e-5
                modcost = 0
                return ((valid_y - smoothline) ** 2).mean() + modcost
            # Each per-sample shift is bounded to +/- 3 nominal increments.
            mul = 3.0
            bounds = [(-non_inc * mul, non_inc * mul)] * len(valid_x)
            opt = optimize.minimize(cost, np.zeros(valid_x.shape), method='L-BFGS-B', bounds=bounds)
            # Re-expand the optimised shifts to the full sample grid,
            # NaN-padding the samples that were excluded from the fit.
            xmod = []
            grad = []
            ix = 0
            # print(valid)
            # print(valid_x)
            # print(opt.x)
            for v, gradmetric_ in zip(valid, gradmetric):
                if v:
                    xmod.append(opt.x[ix])
                    grad.append(gradmetric_)
                    ix += 1
                else:
                    xmod.append(np.nan)
                    grad.append(0)
            xmod = np.array(xmod)
            xmods.append(xmod)
            grads.append(grad)
            # print("Freq focus errors", xmod)
            if plot >= 2 and np.abs(xmod[np.isfinite(xmod)]).sum() > 0:
                plt.plot(nom_focus_values, low_freq_average, label='Raw Data {}'.format(freq_ix))
                # plt.plot(nom_focus_values, validate_freq_average, label="Raw Data (2, validation)")
                plt.plot(nom_focus_values, np.polyval(rawpolygrad, nom_focus_values) * 10 * non_inc,
                         label="Raw Gradient")
                plt.plot(valid_x, np.polyval(poly, valid_x + opt.x) + 0.01, label="Polyfit after optimisation")
                # plt.plot(nom_focus_values, np.polyval(validate_poly, nom_focus_values + xmod)+0.01, label="Data 2 polyfit after optimisation")
                plt.plot(valid_x, np.polyval(poly, valid_x) + 0.01, label="Valid Polyfit")
                plt.plot(nom_focus_values, np.polyval(rawpoly, nom_focus_values) + 0.01, label="Raw Polyfit")
                # plt.plot(nom_focus_values, np.polyval(validate_poly, nom_focus_values), label="Data2 polyfit")
                plt.plot(nom_focus_values, gradmetric * 10 * non_inc, label="Grad valid metric")
                plt.plot(nom_focus_values, xmod, '-', marker='s', label="Estimated focus errors")
                if 'focus_errors' in data.hints:
                    plt.plot(nom_focus_values, data.hints['focus_errors'], '-', marker='v',
                             label="Hint data focus errors")
                plt.legend()
                plt.show()
        # Average the per-frequency shift estimates (NaNs ignored).
        # print(xmods)
        # print(grads)
        gradsum = np.sum(np.array(grads), axis=0)
        # xmodmean = np.nansum(np.array(xmods) * np.array(grads), axis=0) / gradsum
        xmodmean = np.nanmean(np.array(xmods), axis=0)
        if np.isfinite(xmodmean).sum() == 0:
            raise ValueError("No data! available")
        # xmodmean[gradsum < 0.] = 0
        # print(xmods)
        # print(xmodmean)
        if np.nansum(xmodmean) != 0:
            # A smooth quadratic trend in the "errors" suggests they are not
            # random jitter but a systematic effect.
            xmodmean_zeroed = xmodmean.copy()
            xmodmean_zeroed[~np.isfinite(xmodmean)] = 0
            error_poly = np.polyfit(nom_focus_values, xmodmean_zeroed, 2)
            roots = np.roots(np.polyder(error_poly, 1))
            peakerrors = np.polyval(error_poly, roots)
            polyerrormax = np.abs(peakerrors).max()
            if plot >= 2:
                plt.plot(nom_focus_values, np.polyval(error_poly, nom_focus_values), label="Error polyfit")
            # NOTE(review): any polyerrormax > 0.2 also matches the first
            # branch (> 0.06), so the `elif` raise below is unreachable; the
            # 0.2 check probably needs to come first — confirm intent.
            if polyerrormax > 0.06:
                log.warning("Warning errors may not be random")
            elif polyerrormax > 0.2:
                raise ValueError("Error result doesn't appear random, please use more samples")
        if plot >= 2:
            # plt.plot(nom_focus_values, low_freq_average, label='Raw Data')
            # plt.plot(nom_focus_values, validate_freq_average, label="Raw Data (2, validation)")
            # plt.plot(nom_focus_values, np.polyval(rawpolygrad, nom_focus_values) * 10, label="Raw Gradient")
            # plt.plot(valid_x, np.polyval(poly, valid_x + opt.x) + 0.01, label="Polyfit after optimisation")
            # plt.plot(nom_focus_values, np.polyval(validate_poly, nom_focus_values + xmod)+0.01, label="Data 2 polyfit after optimisation")
            # plt.plot(valid_x, np.polyval(poly, valid_x) + 0.01, label="Polyfit")
            # plt.plot(nom_focus_values, np.polyval(validate_poly, nom_focus_values), label="Data2 polyfit")
            # plt.plot(nom_focus_values, gradmetric, label="Grad valid metric")
            plt.plot(nom_focus_values, np.mean(data.merged_mtf_values, axis=0), label="AUC")
            plt.plot(nom_focus_values + xmodmean, np.mean(data.merged_mtf_values + 0.02, axis=0), label="AUCfix")
            plt.plot(nom_focus_values, xmodmean, '-', marker='s', label="Estimated focus errors")
            if 'focus_errors' in data.hints:
                plt.plot(nom_focus_values, data.hints['focus_errors'], '-', marker='v', label="Hint data focus errors")
            plt.legend()
            plt.show()
        all_errors.append(xmodmean)
    for errs in all_errors:
        # NOTE(review): `or 1` makes this condition always true — confirm
        # whether these summary plots should actually respect `plot`.
        if plot >= 1 or 1:
            plt.plot(data.focus_values, errs, marker='s')
            for focus, err in zip(data.focus_values, errs):
                print("Offset {:.3f} at position {}".format(err, focus))
    # allxmodmean = np.nanmean(all_errors, axis=0)
    # allxmodmean[~np.isfinite(allxmodmean)] = 0
    # Remove outliers and take mean
    all_errors_ay = np.array(all_errors)
    allxmodmean = np.zeros(all_errors_ay.shape[1])
    for ix in range(len(allxmodmean)):
        errors_at_position = all_errors_ay[np.isfinite(all_errors_ay[:, ix]), ix]
        if len(errors_at_position) >= 3:
            error_high = errors_at_position.max()
            error_low = errors_at_position.min()
            # NOTE(review): the strict inequalities drop every sample equal
            # to the extremes, not just one outlier at each end — when all
            # estimates coincide, nothing survives — confirm intent.
            no_outliers = errors_at_position[np.logical_and(errors_at_position > error_low,
                                                            errors_at_position < error_high)]
            if len(no_outliers) > 0:
                # `1+1` is a leftover no-op (likely a breakpoint anchor).
                1+1
                allxmodmean[ix] = no_outliers.mean()
        # NOTE(review): `0 > len(...)` can never be true, so this branch is
        # dead code; it likely meant `len(errors_at_position) >= 2`.
        elif 0 > len(errors_at_position) >= 2:
            allxmodmean[ix] = errors_at_position.mean()
    print(allxmodmean)
    # plt.show()
    # exit()
    new_focus_values = data.focus_values + allxmodmean
    if plot >= 1:
        plt.plot(data.focus_values, allxmodmean, color='black', marker='v')
        plt.show()
    if data_in is None:
        save_focus_jitter(rootpath=path, focus_values=new_focus_values, code_version=code_version)
        return new_focus_values
    else:
        data_in.focus_values = new_focus_values
        # datain.jittererr = (((data.hints['focus_errors'] - xmodmean))**2).mean()
        # datain.jittererrmax = (np.abs(data.hints['focus_errors'] - xmodmean)).max()
        # datain.hintjit = ((data.hints['focus_errors'])**2).max()
        return data_in
def scan_path(path, make_dir_if_absent=False, find_autosave=False, x_loc=None, y_loc=None):
if os.path.exists(path):
autosave_entry = None
existing_max = -1
highentry = None
if x_loc is not None:
xfindstr = "x{}".format(x_loc)
yfindstr = "y{}".format(y_loc)
else:
xfindstr, yfindstr = "", ""
for entry in os.scandir(path):
if entry.is_file:
if not xfindstr in entry.name or not yfindstr in entry.name:
continue
split = [_ for _ in entry.name.split(".") if len(_) > 1]
filenumber = -np.inf
for string in split:
if string[0].lower() == | |
# Repository: sandialabs/MPNN
#!/usr/bin/env python
import sys
import copy
import torch
import functools
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial.distance import cdist
from .utils_gen import rel_l2, linear_transform
from .utils_nn import MLPBase
## GLOBAL variables. Not elegant but will do.
kB = 8.6173e-5 # eV/K # OR 1.380649e-23 m2kg/s2/K
a_to_m = 1.6605402e-27
#m = um * a_to_m # CO = 4.6511705e-26, H = 1.6735575e-27 # kg
h = 6.62607004 * 1.e-34 # m2 kg / s # (Planck's constant in J*s)
ev_to_j = 1.60218e-19 # J/eV
def trans_kinetic_factor(TT, um):
    """Translational partition-function prefactor at temperature TT [K] for mass um [amu]."""
    mass_kg = um * a_to_m
    thermal = 2.0 * np.pi * mass_kg * kB * TT * ev_to_j / (h**2.0)
    # 10^-30 converts the m^-3 density prefactor to Angstrom^-3.
    return 10**(-30) * np.sqrt(thermal)**3.0
def rot_kinetic_factor(TT, um, momIn, ads=None):
    """
    Rotational partition-function prefactor at temperature TT [K].

    Parameters
    ----------
    TT : float
        Temperature in Kelvin.
    um : float
        Mass in amu; unused by the CO formula, kept for interface
        compatibility (the previous version computed `m` and never used it).
    momIn : float
        Moment of inertia.
    ads : str, optional
        Adsorbate name; only 'CO' is implemented.

    Raises
    ------
    NotImplementedError
        For any adsorbate other than 'CO'.
    """
    if ads == 'CO':
        # (2*pi*I*kB*T)/h^2, written as a product of two identical roots.
        return np.sqrt(2.0 * np.pi * momIn * kB * TT * ev_to_j) *\
            np.sqrt(2.0 * np.pi * momIn * kB * TT * ev_to_j) / (h**2)
    # Raise instead of print + sys.exit(): callers can handle the error and
    # library code should never kill the interpreter.
    raise NotImplementedError(f"Need to implement rotational kinetic factor for {ads}.")
def downselect(dists, rad_min, rad_max, yy, ymin):
    """
    Return sorted indices where rad_min < dists < rad_max and yy > ymin.

    Parameters
    ----------
    dists : ndarray
        Distance per sample.
    rad_min, rad_max : float
        Open interval of admissible distances.
    yy : ndarray
        Value per sample (same length as dists).
    ymin : float
        Lower bound on yy.

    Returns
    -------
    ndarray of int
        Indices satisfying all three conditions.
    """
    keep = (dists < rad_max) & (dists > rad_min) & (yy > ymin)
    return np.flatnonzero(keep)
class Rhombus():
    """
    Affine map between a rhombic periodic cell and the unit cube.

    The cell is spanned by the hexagonal-surface lattice vectors
    (delta_y = delta_x * sqrt(3)/2 with a delta_x/2 shear) plus a height
    delta_z. `toCube`/`fromCube` convert point coordinates; the `mpts_*`
    variants also transform stationary-point centers and Hessians.
    """
    def __init__(self, init, delta_x, delta_z, verbose=False):
        # init : (3,) coordinate of the cell's lower-left corner.
        # delta_x / delta_z : cell extent along x and z.
        self.setInit(init) # coordinate of lower left corner
        self.delta_x = delta_x
        self.delta_z = delta_z
        self.delta_y = delta_x * np.sqrt(3.) / 2 # 6.576879959372833 /3.
        # Transform matrix from cube to rhombus
        self.transform = np.diag(np.array([self.delta_x, self.delta_y, self.delta_z]))
        # Shear term of the hexagonal cell.
        self.transform[1, 0] = self.delta_x / 2.
        self.inv_transform = np.linalg.inv(self.transform)
        if verbose:
            self.printInfo()
        return
    def printInfo(self):
        """Print the cell geometry and both transform matrices."""
        print(f"Initial point: {self.init}")
        print("Transform matrix:")
        print(self.transform)
        print(f"Rhombus width(x) {self.delta_x}")
        print(f"Rhombus height(y) {self.delta_y}")
        print("Inv-Transform matrix:")
        print(self.inv_transform)
    def setInit(self, init):
        # Move the cell origin (lower-left corner).
        self.init = init
    def centerInit(self, x):
        # Re-anchor the cell so that point x sits at the cell center.
        cc = np.array([3.* self.delta_x / 4., np.sqrt(3.)* self.delta_x / 4.,self.delta_z/2.])
        self.init = (x[:3]-cc)
    def toCube(self, xyz, xyfold=False):
        """Map rhombus coordinates to cube coordinates; optionally fold x,y into [0,1)."""
        xyz_cube = linear_transform(xyz, inshift=self.init, matrix=self.inv_transform)
        if xyfold:
            for j in range(2):
                # Periodic fold of the in-plane coordinates.
                xyz_cube[:, j] -= np.floor(xyz_cube[:, j])
        return xyz_cube
    def fromCube(self, xyz):
        """Map cube coordinates back to rhombus coordinates."""
        return linear_transform(xyz, matrix=self.transform, outshift=self.init)
    def mpts_toCube(self, mpts, xyfold=False):
        """Return copies of MPoints with centers and Hessians mapped to cube space."""
        mpts_new = []
        for mpt in mpts:
            center_new = mpt.center.copy()
            # Only the first three (spatial) coordinates transform.
            center_new[:3] = linear_transform(mpt.center[:3].reshape(1, -1), inshift=self.init, matrix=self.inv_transform).reshape(-1,)
            if xyfold:
                for j in range(2):
                    center_new[j] -= np.floor(center_new[j])
            ndim = center_new.shape[0]
            transform_all = np.eye(ndim)
            transform_all[:3, :3] = self.transform
            # NOTE(review): centers map with inv_transform while Hessians use
            # transform @ H @ transform.T; for x' = A^-1 (x - init) the
            # congruence is usually written A.T @ H @ A — confirm convention.
            hess_new = transform_all @ mpt.hess @ transform_all.T
            mpts_new.append(MPoint(center_new, hess_new, mpt.yshift))
        return mpts_new
    def mpts_fromCube(self, mpts):
        """Inverse of mpts_toCube: map MPoints from cube back to rhombus space."""
        mpts_new = []
        for mpt in mpts:
            center_new = mpt.center.copy()
            center_new[:3] = linear_transform(mpt.center[:3].reshape(1, -1), matrix=self.transform, outshift=self.init).reshape(-1,)
            ndim = center_new.shape[0]
            transform_all = np.eye(ndim)
            transform_all[:3, :3] = self.inv_transform
            hess_new = transform_all @ mpt.hess @ transform_all.T
            mpts_new.append(MPoint(center_new, hess_new, mpt.yshift))
        return mpts_new
def multiply_traindata(xall_cube, yall, kx=0, ky=0):
    """
    Periodically replicate training data in cube coordinates.

    Each input point is copied once per integer shift in
    [-kx, kx] x [-ky, ky], with the shift added to its first two columns.

    Parameters
    ----------
    xall_cube : ndarray, shape (npt, ndim)
        Input coordinates in cube space (columns 0, 1 are x, y).
    yall : ndarray, shape (npt,)
        Target values, repeated unchanged for every image.
    kx, ky : int
        Number of periodic images on each side along x and y.

    Returns
    -------
    (ndarray, ndarray)
        Replicated coordinates and matching targets.
    """
    nx = 2 * kx + 1
    ny = 2 * ky + 1
    blocks = []
    for shift_x in range(-kx, kx + 1):
        for shift_y in range(-ky, ky + 1):
            block = xall_cube.copy()
            block[:, :2] += np.array([shift_x, shift_y])
            blocks.append(block)
    return np.vstack(blocks), np.tile(yall, (nx * ny,))
class Quadratic():
    """Quadratic form 0.5 * (x - center)^T H (x - center), evaluated row-wise."""

    def __init__(self, center, hess):
        # Stationary point and Hessian defining the form.
        self.center = center
        self.hess = hess

    def __call__(self, x):
        """Evaluate the form for every row of an (nsam, ndim) array."""
        return np.array([0.5 * np.dot(row - self.center,
                                      np.dot(self.hess, row - self.center))
                         for row in x])
class MPoint():
    """Stationary point of a surface: location, local Hessian and energy offset."""

    def __init__(self, center, hess, yshift):
        self.center = center   # coordinates of the stationary point
        self.hess = hess       # local Hessian at the point
        self.yshift = yshift   # energy value at the point

    def __repr__(self):
        return f"StatPoint({self.center})"
class SModel():
    """
    Surrogate model: quadratic factor around a stationary point times an
    exponentiated (or plain) NN correction, evaluated in the folded cube
    coordinates of a Rhombus cell.
    """
    def __init__(self, ptmodel, mpt, rhomb, expon=True):
        # ptmodel : torch model producing the (log-)correction factor.
        # mpt : MPoint with center/hess/yshift in real (rhombus) coordinates.
        # rhomb : Rhombus used to fold spatial coordinates into the cube.
        # expon : if True, the NN output is exponentiated before scaling.
        self.ptmodel = ptmodel
        self.mpt = mpt
        self.rhomb = rhomb
        self.expon = expon
        return
    def __call__(self, x):
        # Work on a copy; fold the spatial (first three) coordinates.
        x_ = x.copy()
        x_[:, :3] = self.rhomb.toCube(x[:, :3], xyfold=True)
        # Map the stationary point into the same folded cube frame.
        mpt_ = self.rhomb.mpts_toCube([self.mpt], xyfold=True)[0]
        quad = Quadratic(mpt_.center, mpt_.hess)
        # NN correction is evaluated relative to the stationary point.
        ypred = self.ptmodel(torch.from_numpy(x_ - mpt_.center).double()).detach().numpy().reshape(-1,)
        if self.expon:
            y = mpt_.yshift + quad(x_) * np.exp(ypred)
        else:
            y = mpt_.yshift + quad(x_) * ypred
        return y
class ZModel():
    """Pure quadratic surrogate anchored at a single stationary point."""

    def __init__(self, mpt):
        # mpt : MPoint supplying center, Hessian and energy offset.
        self.mpt = mpt

    def __call__(self, x):
        """Evaluate yshift + 0.5 (x - c)^T H (x - c) row-wise."""
        quad = Quadratic(self.mpt.center, self.mpt.hess)
        return self.mpt.yshift + quad(x)
class WFcn():
    """
    Softmax-like weight function over a set of stationary points.

    Weights decay exponentially with distance to each point's center
    (length scale `eps`) and are normalised to sum to one per sample.
    An optional Rhombus folds spatial coordinates into the cube first.
    """

    def __init__(self, mpts, eps, rhomb=None):
        self.mpts = mpts
        self.centers = np.array([mpt.center for mpt in self.mpts])
        self.eps = eps
        self.rhomb = rhomb

    def __call__(self, x):
        samples = x.copy()
        anchors = self.centers.copy()
        if self.rhomb is not None:
            # Measure distances in the folded periodic cube frame.
            samples[:, :3] = self.rhomb.toCube(x[:, :3], xyfold=True)
            anchors[:, :3] = self.rhomb.toCube(self.centers[:, :3], xyfold=True)
        weights = np.exp(-cdist(samples, anchors) / self.eps)
        return weights / np.sum(weights, axis=1).reshape(-1, 1)
class MultiModelTch(torch.nn.Module):
    """
    Blend of several models, either softly weighted or hard-partitioned.

    Exactly one of `wfcn` / `cfcn` must be given:
      * wfcn(x) -> (nsam, nmod) weights: output is the weighted average.
      * cfcn(x) -> (nsam,) integer labels: each sample is routed to the
        model with its label.
    """

    def __init__(self, models, wfcn=None, cfcn=None):
        super(MultiModelTch, self).__init__()
        self.models = models
        self.nmod = len(self.models)
        assert(wfcn is not None or cfcn is not None)
        if wfcn is not None:
            assert(cfcn is None)
            self.wflag = True
            self.wfcn = wfcn
        if cfcn is not None:
            assert(wfcn is None)
            self.wflag = False
            self.cfcn = cfcn

    def set_wfcn(self, wfcn):
        """Replace the weight function."""
        self.wfcn = wfcn
        return

    def forward(self, x):
        if self.wflag:
            # Evaluate the weight function once — the previous version
            # re-evaluated self.wfcn(x) twice per model, every call.
            weights = self.wfcn(x)
            val = weights[:, 0] * self.models[0](x).reshape(-1,)
            summ = weights[:, 0].copy()
            for j in range(1, self.nmod):
                val = val + weights[:, j] * self.models[j](x).reshape(-1,)
                summ = summ + weights[:, j]
            # summ re-normalises in case wfcn does not sum to one.
            return val / summ
        # Hard partition: evaluate the classifier once, then route samples.
        labels = self.cfcn(x)
        y = np.empty((x.shape[0]))
        for j in np.unique(labels):
            mask = labels == j
            y[mask] = self.models[j](x[mask, :]).reshape(-1,)
        return y
class MultiModel(object):
    """
    Numpy twin of MultiModelTch: blends several models either softly
    (weight function `wfcn`) or by hard partition (classifier `cfcn`).
    Exactly one of `wfcn` / `cfcn` must be given.
    """

    def __init__(self, models, wfcn=None, cfcn=None):
        super(MultiModel, self).__init__()
        self.models = models
        self.nmod = len(self.models)
        assert(wfcn is not None or cfcn is not None)
        if wfcn is not None:
            assert(cfcn is None)
            self.wflag = True
            self.wfcn = wfcn
        if cfcn is not None:
            assert(wfcn is None)
            self.wflag = False
            self.cfcn = cfcn

    def __call__(self, x):
        if self.wflag:
            # Evaluate the weight function once — the previous version
            # re-evaluated self.wfcn(x) twice per model, every call.
            weights = self.wfcn(x)
            val = weights[:, 0] * self.models[0](x).reshape(-1,)
            summ = weights[:, 0].copy()
            for j in range(1, self.nmod):
                val = val + weights[:, j] * self.models[j](x).reshape(-1,)
                summ = summ + weights[:, j]
            # summ re-normalises in case wfcn does not sum to one.
            return val / summ
        # Hard partition: evaluate the classifier once, then route samples.
        labels = self.cfcn(x)
        y = np.empty((x.shape[0]))
        for j in np.unique(labels):
            mask = labels == j
            y[mask] = self.models[j](x[mask, :]).reshape(-1,)
        return y
def ifcn(x):
    """
    Illustrative 1-D two-well blended model.

    Builds two quadratic surrogates (centers 3.5 and 7.5, curvatures 1 and
    9) and blends them with exponential distance weights (eps=0.5), then
    evaluates at x.
    """
    if len(x.shape) == 1:
        x = x.reshape(-1, 1)
    well_a = MPoint(np.array([3.5]), np.array([[1.0]]), 0.0)
    well_b = MPoint(np.array([7.5]), np.array([[9.0]]), 0.0)
    blend = MultiModel([ZModel(well_a), ZModel(well_b)],
                       wfcn=WFcn([well_a, well_b], 0.5))
    return blend(x)
##########################################################################################
##########################################################################################
##########################################################################################
class XYMap():
    """Abstract invertible (x, y) -> (x', y') data transform."""

    def __init__(self):
        pass

    def fwd(self, x, y):
        """Map raw (x, y) to transformed coordinates."""
        raise NotImplementedError("Base XYMap forward call not implemented")

    def inv(self, x, y):
        """Map transformed coordinates back to raw (x, y)."""
        raise NotImplementedError("Base XYMap inverse not implemented")
class Identity(XYMap):
    """XYMap that returns coordinates unchanged in both directions."""

    def __init__(self):
        super(Identity, self).__init__()

    def fwd(self, x, y):
        # No transformation applied.
        return x, y

    def inv(self, xn, yn):
        return xn, yn
class MPNNMap(XYMap):
    """
    Map that factors out a quadratic around a stationary point:
    y' = log((y - yshift) / q(x)), x' = x - center, where q is the quadratic
    form defined by (center, hess).
    """

    def __init__(self, mapparams):
        super(MPNNMap, self).__init__()
        # mapparams is a (center, hessian, yshift) triple.
        self.center, self.hess, self.yshift = mapparams

    def fwd(self, x, y):
        quad = Quadratic(self.center, self.hess)
        ratio = (y - self.yshift) / quad(x)
        return x - self.center, np.log(ratio)

    def inv(self, xnew, ynew):
        quad = Quadratic(self.center, self.hess)
        x = xnew + self.center
        return x, self.yshift + quad(x) * np.exp(ynew)
##########################################################################################
##########################################################################################
##########################################################################################
class MPNNet(MLPBase):
    """
    Minimum-preserving neural net surrogate:
    0.5 * exp(NN(x - c)) * |(x - c) U|^2 + yshift, with U derived from the
    Cholesky factor of the Hessian at the stationary point c.
    """
    def __init__(self, nnmodel, in_dim, center, hessian, yshift):
        # nnmodel : inner network producing the log-correction factor.
        # center / hessian / yshift : stationary-point parameters.
        super(MPNNet, self).__init__(in_dim, 1)
        self.center = center
        self.hessian = hessian
        self.chol = torch.linalg.cholesky(hessian).transpose(-2, -1).conj() # upper cholesky
        self.yshift = yshift
        self.nnmodel = nnmodel
    def forward(self, x):
        nx = x.shape[0]
        # NOTE(review): with H = L L^T and chol = L^T, |(x-c) @ chol|^2
        # equals (x-c)^T L^T L (x-c), which is NOT (x-c)^T H (x-c) unless L
        # is normal; the exact form (x-c)^T H (x-c) would need (x-c) @ L.
        # Confirm whether this is intentional (the NN factor may absorb it).
        Ux = torch.matmul(x - self.center, self.chol)
        factor2 = torch.sum(Ux**2, dim=1).view(-1, 1)
        #factor = torch.linalg.vector_norm(Ux, dim=1).view(-1,1)
        #factor2 = factor**2
        #print(x.shape, Ux.shape, factor.shape)
        # return 0.5 * torch.exp(torch.sum(x-self.center,1).view(-1,1)*self.nnmodel(x-self.center)) * factor2 +self.yshift
        return 0.5 * torch.exp(self.nnmodel(x - self.center)) * factor2 + self.yshift
def plot_integrand_surr(xtrn, ytrn, ytrn_pred, xtst, ytst, ytst_pred, figname=None, showtest=True):
    """
    Plot Boltzmann-integrand parity (exp(-E/kT) true vs. surrogate) for a
    range of temperatures, one subplot per temperature, and save the figure.

    Parameters
    ----------
    xtrn, ytrn, ytrn_pred : ndarray
        Training inputs, true energies and surrogate-predicted energies.
    xtst, ytst, ytst_pred : ndarray
        Test inputs, true energies and surrogate-predicted energies.
    figname : str, optional
        Output file name; defaults to 'fit_integrands_N<ntrn>.png'.
    showtest : bool
        Whether to overlay the test-set parity points.
    """
    Trange = np.arange(400, 1401, 200)
    plt.figure(figsize=(16, 10))
    ir = 2
    ic = 3
    cic = 1
    for T in Trange:
        beta = 1. / (kB * T)
        eytrn = np.exp(-beta * ytrn)
        eytrn_pred = np.exp(-beta * ytrn_pred)
        eytst = np.exp(-beta * ytst)
        eytst_pred = np.exp(-beta * ytst_pred)
        err = rel_l2(eytst_pred, eytst)
        ntr = xtrn.shape[0]
        print(ntr, T, err)
        nts = ytst.shape[0]
        plt.subplot(ir, ic, cic)
        plt.plot(eytrn, eytrn_pred, 'go', markeredgecolor='black',
                 label='Train N$_{trn}$ = ' + str(ntr))
        if showtest:
            plt.plot(eytst, eytst_pred, 'ro', markeredgecolor='black',
                     label='Test N$_{tst}$ = ' + str(nts))
        plt.gca().set_xlabel(r'$e^{-E/kT}$', fontsize=20)
        plt.gca().set_ylabel(r'$e^{-E_s/kT}$', fontsize=20)
        if cic == 1 and showtest:
            plt.gca().legend(fontsize=12)
        plt.gca().set_title('T=' + str(T) + ' Rel. RMSE=' + "{:0.3f}".format(err), fontsize=15)
        # Diagonal y=x guide line spanning all plotted data.
        ymin = min(np.min(eytst), np.min(eytst_pred), np.min(eytrn), np.min(eytrn_pred))
        # Bug fix: the first term previously used np.min(eytst), so the
        # guide line could stop short of the test data.
        ymax = max(np.max(eytst), np.max(eytst_pred), np.max(eytrn), np.max(eytrn_pred))
        plt.plot([ymin, ymax], [ymin, ymax], 'k--', lw=1)
        plt.gca().axis('equal')
        cic += 1
    if not showtest:
        plt.gcf().suptitle('N$_{trn}$ = ' + str(ntr), x=0.05, y=1.0, color='g', fontsize=15)
    plt.gcf().tight_layout(pad=1.5)
    if figname is None:
        plt.savefig('fit_integrands_N' + str(ntr) + '.png')
    else:
        plt.savefig(figname)
    plt.clf()
def plot_xdata(xall, mpts=None, pnames=None, every=1):
ndim = xall.shape[1]
if | |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterator,
Awaitable,
Callable,
Sequence,
Tuple,
Optional,
Iterator,
)
from google.cloud.channel_v1.types import channel_partner_links
from google.cloud.channel_v1.types import customers
from google.cloud.channel_v1.types import entitlements
from google.cloud.channel_v1.types import offers
from google.cloud.channel_v1.types import products
from google.cloud.channel_v1.types import service
class ListCustomersPager:
    """A pager for iterating through ``list_customers`` requests.

    Wraps an initial
    :class:`google.cloud.channel_v1.types.ListCustomersResponse` and exposes
    an ``__iter__`` method that walks the ``customers`` field, transparently
    issuing further ``ListCustomers`` requests as more pages are needed.

    All the usual :class:`google.cloud.channel_v1.types.ListCustomersResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """

    def __init__(
        self,
        method: Callable[..., service.ListCustomersResponse],
        request: service.ListCustomersRequest,
        response: service.ListCustomersResponse,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.channel_v1.types.ListCustomersRequest):
                The initial request object.
            response (google.cloud.channel_v1.types.ListCustomersResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = service.ListCustomersRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the latest response object.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[service.ListCustomersResponse]:
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            page = self._response
            yield page

    def __iter__(self) -> Iterator[customers.Customer]:
        for page in self.pages:
            yield from page.customers

    def __repr__(self) -> str:
        return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListCustomersAsyncPager:
    """Async-iterator-style pager for ``list_customers`` results.

    Wraps an initial
    :class:`google.cloud.channel_v1.types.ListCustomersResponse` and exposes
    its ``customers`` field through ``__aiter__``. Whenever the current
    response carries a ``next_page_token``, further ``ListCustomers`` calls
    are awaited transparently and iteration continues over the new pages.

    Attribute access falls through to the most recently fetched response,
    so the usual response attributes remain available on the pager itself.
    """
    def __init__(
            self,
            method: Callable[..., Awaitable[service.ListCustomersResponse]],
            request: service.ListCustomersRequest,
            response: service.ListCustomersResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Set up the pager.

        Args:
            method (Callable): The API method that produced ``response``;
                reused (awaited) to fetch subsequent pages.
            request (google.cloud.channel_v1.types.ListCustomersRequest):
                The initial request object.
            response (google.cloud.channel_v1.types.ListCustomersResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Extra metadata strings to
                send with each follow-up request.
        """
        self._request = service.ListCustomersRequest(request)
        self._response = response
        self._method = method
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the latest response object.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[service.ListCustomersResponse]:
        # The first page is the response the pager was constructed with.
        yield self._response
        while True:
            token = self._response.next_page_token
            if not token:
                break
            self._request.page_token = token
            self._response = await self._method(self._request, metadata=self._metadata)
            yield self._response

    def __aiter__(self) -> AsyncIterator[customers.Customer]:
        async def _flatten():
            async for page in self.pages:
                for customer in page.customers:
                    yield customer
        return _flatten()

    def __repr__(self) -> str:
        return "%s<%r>" % (self.__class__.__name__, self._response)
class ListEntitlementsPager:
    """Iterator-style pager for ``list_entitlements`` results.

    Wraps an initial
    :class:`google.cloud.channel_v1.types.ListEntitlementsResponse` and
    exposes its ``entitlements`` field through ``__iter__``. Whenever the
    current response carries a ``next_page_token``, further
    ``ListEntitlements`` calls are issued transparently and iteration
    continues over the new pages.

    Attribute access falls through to the most recently fetched response,
    so the usual response attributes remain available on the pager itself.
    """
    def __init__(
            self,
            method: Callable[..., service.ListEntitlementsResponse],
            request: service.ListEntitlementsRequest,
            response: service.ListEntitlementsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Set up the pager.

        Args:
            method (Callable): The API method that produced ``response``;
                reused to fetch subsequent pages.
            request (google.cloud.channel_v1.types.ListEntitlementsRequest):
                The initial request object.
            response (google.cloud.channel_v1.types.ListEntitlementsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Extra metadata strings to
                send with each follow-up request.
        """
        self._request = service.ListEntitlementsRequest(request)
        self._response = response
        self._method = method
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the latest response object.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[service.ListEntitlementsResponse]:
        # The first page is the response the pager was constructed with.
        yield self._response
        while True:
            token = self._response.next_page_token
            if not token:
                break
            self._request.page_token = token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterator[entitlements.Entitlement]:
        return (item for page in self.pages for item in page.entitlements)

    def __repr__(self) -> str:
        return "%s<%r>" % (self.__class__.__name__, self._response)
class ListEntitlementsAsyncPager:
    """Async-iterator-style pager for ``list_entitlements`` results.

    Wraps an initial
    :class:`google.cloud.channel_v1.types.ListEntitlementsResponse` and
    exposes its ``entitlements`` field through ``__aiter__``. Whenever the
    current response carries a ``next_page_token``, further
    ``ListEntitlements`` calls are awaited transparently and iteration
    continues over the new pages.

    Attribute access falls through to the most recently fetched response,
    so the usual response attributes remain available on the pager itself.
    """
    def __init__(
            self,
            method: Callable[..., Awaitable[service.ListEntitlementsResponse]],
            request: service.ListEntitlementsRequest,
            response: service.ListEntitlementsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Set up the pager.

        Args:
            method (Callable): The API method that produced ``response``;
                reused (awaited) to fetch subsequent pages.
            request (google.cloud.channel_v1.types.ListEntitlementsRequest):
                The initial request object.
            response (google.cloud.channel_v1.types.ListEntitlementsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Extra metadata strings to
                send with each follow-up request.
        """
        self._request = service.ListEntitlementsRequest(request)
        self._response = response
        self._method = method
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the latest response object.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[service.ListEntitlementsResponse]:
        # The first page is the response the pager was constructed with.
        yield self._response
        while True:
            token = self._response.next_page_token
            if not token:
                break
            self._request.page_token = token
            self._response = await self._method(self._request, metadata=self._metadata)
            yield self._response

    def __aiter__(self) -> AsyncIterator[entitlements.Entitlement]:
        async def _flatten():
            async for page in self.pages:
                for item in page.entitlements:
                    yield item
        return _flatten()

    def __repr__(self) -> str:
        return "%s<%r>" % (self.__class__.__name__, self._response)
class ListTransferableSkusPager:
    """Iterator-style pager for ``list_transferable_skus`` results.

    Wraps an initial
    :class:`google.cloud.channel_v1.types.ListTransferableSkusResponse` and
    exposes its ``transferable_skus`` field through ``__iter__``. Whenever
    the current response carries a ``next_page_token``, further
    ``ListTransferableSkus`` calls are issued transparently and iteration
    continues over the new pages.

    Attribute access falls through to the most recently fetched response,
    so the usual response attributes remain available on the pager itself.
    """
    def __init__(
            self,
            method: Callable[..., service.ListTransferableSkusResponse],
            request: service.ListTransferableSkusRequest,
            response: service.ListTransferableSkusResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Set up the pager.

        Args:
            method (Callable): The API method that produced ``response``;
                reused to fetch subsequent pages.
            request (google.cloud.channel_v1.types.ListTransferableSkusRequest):
                The initial request object.
            response (google.cloud.channel_v1.types.ListTransferableSkusResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Extra metadata strings to
                send with each follow-up request.
        """
        self._request = service.ListTransferableSkusRequest(request)
        self._response = response
        self._method = method
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the latest response object.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[service.ListTransferableSkusResponse]:
        # The first page is the response the pager was constructed with.
        yield self._response
        while True:
            token = self._response.next_page_token
            if not token:
                break
            self._request.page_token = token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterator[entitlements.TransferableSku]:
        return (sku for page in self.pages for sku in page.transferable_skus)

    def __repr__(self) -> str:
        return "%s<%r>" % (self.__class__.__name__, self._response)
class ListTransferableSkusAsyncPager:
"""A pager for iterating through ``list_transferable_skus`` requests.
This class thinly wraps an initial
:class:`google.cloud.channel_v1.types.ListTransferableSkusResponse` object, and
provides an ``__aiter__`` method to iterate through its
``transferable_skus`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListTransferableSkus`` requests and continue to iterate
through the ``transferable_skus`` field on the
corresponding responses.
All the usual :class:`google.cloud.channel_v1.types.ListTransferableSkusResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[service.ListTransferableSkusResponse]],
request: service.ListTransferableSkusRequest,
response: service.ListTransferableSkusResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.channel_v1.types.ListTransferableSkusRequest):
The initial request object.
response (google.cloud.channel_v1.types.ListTransferableSkusResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = | |
<gh_stars>0
# Copyright (c) 2019 Riverbed Technology, Inc.
#
# This software is licensed under the terms and conditions of the MIT License
# accompanying the software ("License"). This software is distributed "AS IS"
# as set forth in the License.
import time
import select
import logging
import paramiko
from steelscript.cmdline import channel
from steelscript.cmdline import exceptions
from steelscript.cmdline import sshprocess
# Default pseudo-terminal geometry: a conventional 80x24 text screen.
DEFAULT_TERM_WIDTH = 80
DEFAULT_TERM_HEIGHT = 24
# Default maximum time, in seconds, to wait for an expected pattern match.
DEFAULT_EXPECT_TIMEOUT = 60
class SSHChannel(channel.Channel):
"""
Two-way SSH channel that allows sending and receiving data.
:param hostname: hostname, fqdn, or ip address of the target
system.
:type hostname: string
:param port: optional port for the connection. Default is 22.
:param username: account to use for authentication
    :param password: password to authenticate with; optional when a
        private key is supplied
:param private_key_path: absolute system path to private key file
:param terminal: terminal emulation to use; defaults to 'console'
:param width: width (in characters) of the terminal screen;
defaults to 80
:param height: height (in characters) of the terminal screen;
defaults to 24
Both password and private_key_path may be passed, but private keys
will take precedence for authentication, with no fallback to password
attempt.
Additional arguments are accepted and ignored for compatibility
with other channel implementations.
"""
BASH_PROMPT = r'(^|\n|\r)\[\S+ \S+\]#'
DEFAULT_PORT = 22
def __init__(self, hostname, username, password=None,
private_key_path=None, port=DEFAULT_PORT,
terminal='console',
width=DEFAULT_TERM_WIDTH, height=DEFAULT_TERM_HEIGHT,
**kwargs):
self.conn_port = port
if password is None and private_key_path is None:
cause = 'Either password or path to private key must be included.'
raise exceptions.ConnectionError(cause=cause)
pkey = None
if private_key_path is not None:
with open(private_key_path, 'r') as f:
pkey = paramiko.rsakey.RSAKey.from_private_key(f)
self.sshprocess = sshprocess.SSHProcess(host=hostname,
user=username,
password=password,
private_key=pkey,
port=self.conn_port)
self._host = hostname
self._term = terminal
self._term_width = width
self._term_height = height
self.channel = None
def _verify_connected(self):
"""
Helper function that verifies the connection has been established
and that the transport object we are using is still connected.
:raises ConnectionError: if we are not connected
"""
if not self.channel:
raise exceptions.ConnectionError(
context='Channel has not been started')
if not self.sshprocess.is_connected():
raise exceptions.ConnectionError(
context='Host SSH shell has been disconnected')
def start(self, match_res=None, timeout=DEFAULT_EXPECT_TIMEOUT):
"""
Start an interactive ssh session and logs in.
:param match_res: Pattern(s) of prompts to look for.
May be a single regex string, or a list of them.
:param timeout: maximum time, in seconds, to wait for a regular
expression match. 0 to wait forever.
:return: Python :class:`re.MatchObject` containing data on
what was matched.
"""
if not match_res:
match_res = [self.BASH_PROMPT]
elif not isinstance(match_res, list) or isinstance(match_res, tuple):
match_res = [match_res, ]
if not self.sshprocess.is_connected():
# sshprocess.connect() handles the authentication / login.
self.sshprocess.connect()
# Start channel
self.channel = self.sshprocess.open_interactive_channel(
self._term, self._term_width, self._term_height)
logging.info('Interactive channel to "%s" started' % self._host)
return self.expect(match_res)[1]
def close(self):
if self.sshprocess.is_connected():
# This closes the paramiko channel's underlying transport,
# which according to the paramiko documentation closes
# all channels that were using the transport.
self.sshprocess.disconnect()
    def receive_all(self):
        """
        Flushes the receive buffer, returning all text that was in it.

        :return: the text that was present in the receive queue, if any,
            decoded to ``str``.
        """
        self._verify_connected()
        logging.debug('Receiving all data')
        # Going behind Paramiko's back here; the Channel object does not have a
        # function to do this, but the BufferedPipe object that it uses to
        # store incoming data does. Note that this assumes stderr is redirected
        # to the main recv queue.
        data = self.channel.in_buffer.empty()
        # Check whether need to send a window update.
        ack = self.channel._check_add_window(len(data))
        # The number of bytes we receive is larger than in_windows_threshold,
        # send a window update. Paramiko Channel only sends window updates
        # when received bytes exceed its threshold.
        if ack > 0:
            m = paramiko.Message()
            # NOTE(review): chr() returns str on Python 3, while paramiko's
            # Message.add_byte expects a bytes-like value there — confirm
            # against the paramiko version in use (byte_chr may be needed).
            m.add_byte(chr(paramiko.channel.MSG_CHANNEL_WINDOW_ADJUST))
            m.add_int(self.channel.remote_chanid)
            m.add_int(ack)
            self.channel.transport._send_user_message(m)
        # translate return data to string
        data = data.decode()
        return data
def send(self, text_to_send):
"""
Sends text to the channel immediately. Does not wait for any response.
:param text_to_send: Text to send, may be an empty string.
"""
self._verify_connected()
logging.debug('Sending "%s"' % self.safe_line_feeds(text_to_send))
bytes_sent = 0
bytes_to_send = text_to_send.encode()
while bytes_sent < len(bytes_to_send):
bytes_sent_this_time = self.channel.send(
bytes_to_send[bytes_sent:]
)
if bytes_sent_this_time == 0:
raise exceptions.ConnectionError(context='Channel is closed')
bytes_sent += bytes_sent_this_time
    def expect(self, match_res, timeout=DEFAULT_EXPECT_TIMEOUT):
        """
        Waits for text to be received that matches one or more regex patterns.

        Note that data may have been received before this call and is waiting
        in the buffer; you may want to call receive_all() to flush the receive
        buffer before calling send() and call this function to match the
        output from your send() only.

        :param match_res: Pattern(s) to look for to be considered successful.
            May be a single regex string, or a list of them.
            Currently cannot match multiple lines.
        :param timeout: maximum time, in seconds, to wait for a regular
            expression match. 0 to wait forever.

        :return: ``(output, match_object)`` where output is the output of
            the command (without the matched text), and match_object is a
            Python :class:`re.MatchObject` containing data on what was matched.
            You may use ``MatchObject.string[m.start():m.end()]`` to recover
            the actual matched text, which will be unicode.
            ``re.MatchObject.pattern`` will contain the pattern that matched,
            which will be one of the elements of match_res passed in.

        :raises CmdlineTimeout: if no match found before timeout.
        :raises ConnectionError: if the channel is closed.
        """
        # NOTE(review): safe_match_text is assigned but never used below.
        match_res, safe_match_text = self._expect_init(match_res)
        received_data = ''
        # Index into received_data marking the start of the first unprocessed
        # line.
        next_line_start = 0
        starttime = time.time()
        while True:
            # Use select to check whether channel is ready for read.
            # Reading on the channel directly would block until data is
            # ready, where select blocks at most 10 seconds which allows
            # us to check whether the specified timeout has been reached.
            # If channel is not ready for reading within 10 seconds,
            # select returns an empty list to 'readers'.
            (readers, w, x) = select.select([self.channel], [], [], 10)
            # Timeout if this is taking too long.
            if timeout and ((time.time() - starttime) > timeout):
                partial_output = repr(self.safe_line_feeds(received_data))
                raise exceptions.CmdlineTimeout(command=None,
                                                output=partial_output,
                                                timeout=timeout,
                                                failed_match=match_res)
            new_data = None
            # We did not find clear documentation in Paramiko on how to check
            # whether a channel is closed unexpectedly. Our current logic is
            # that a channel is closed if:
            # (1) read the channel and get 0 bytes, or
            # (2) channel is not ready for reading but exit_status_ready()
            # Our experiments have shown that this correctly handles detecting
            # if a channel has been unexpectedly closed.
            if len(readers) > 0:
                new_data = self.channel.recv(4096)
                if len(new_data) == 0:
                    # Channel closed
                    raise exceptions.ConnectionError(
                        failed_match=match_res,
                        context='Channel unexpectedly closed')
                # If we're still here, we have new data to process.
                received_data, new_lines = self._process_data(
                    new_data, received_data, next_line_start)
                output, match = self._match_lines(
                    received_data, next_line_start, new_lines, match_res)
                if (output, match) != (None, None):
                    return output, match
                # Update next_line_start to be the index of the last \n
                next_line_start = received_data.rfind('\n') + 1
            elif self.channel.exit_status_ready():
                raise exceptions.ConnectionError(
                    failed_match=match_res,
                    context='Channel unexpectedly closed')
def _process_data(self, new_data, received_data, next_line_start):
"""
Process the new data and return updated received_data and new lines.
:param bytes new_data: The newly read data in bytes
:param str received_data: All data received before new_data, str
:param next_line_start: Where to start splitting off the new lines.
:return: A tuple of the updated received_data followed by the list
of new lines.
"""
received_data += new_data.decode()
# The CLI does some odd things, sending multiple \r's or just a
# \r, sometimes \r\r\n. To make this look like typical input, all
# the \r characters with \n near them are stripped. To make
# prompt matching easier, any \r character that does not have
# a \n near is replaced with a \n.
received_data = received_data[:next_line_start] + \
self.fixup_carriage_returns(received_data[next_line_start:])
# Take the data from next_line_start to end and split it into
# lines so we can look for a match on each one
new_lines = received_data[next_line_start:].splitlines()
return received_data, new_lines
def _match_lines(self, received_data, next_line_start,
new_lines, match_res):
"""
Examine new lines for matches against our regular expressions.
:param received_data: All data received so far, including latest.
:param new_lines: Latest data split | |
<gh_stars>0
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Extracts translations to a .po file using django's makemessages script
and additional custom support for js- and python-formatted strings.
Also supports message descriptions and meanings provided by
specially-formatted comments directly above a message to be translated.
In javascript, comments look like:
// Some other comment, must not separate i18n comments from the code
//i18n: Label for an administrative division of a country
messages.DEPARTMENT = 'Department';
In python:
# Some other comment, must not separate i18n comments from the code
#i18n: Label for an administrative division of a country
dept = _('Department')
And in a Django template:
{% comment %}
#Some other comment, must not separate i18n comments from the code
#i18n: Label for an administrative division of a country
{% endcomment %}
<span>{% trans "Department" %}</span>
Warning: This code technically also supports an i18n_meaning tag to create
msgctxt lines in the .po file, but these are not supported by the current
django version used by appengine (if msgctxt lines appear, not only are they
ignored, but they prevent the correct translation from being returned),
so they are not used.
Instead of running this script directly, use the 'extract_messages' shell
script, which sets up the PYTHONPATH and other necessary environment variables.
NOTE: Although this can be run from any directory, the filenames on the
command line must be specified relative to the app/ directory.
Example:
../tools/extract_messages ../tools/setup.py static/locale.js
"""
import codecs
import os
import re
import sys
# Matches django's template comment terminator, e.g. "{% endcomment %}".
DJANGO_END_COMMENT_PATTERN = '{\% endcomment \%}'
# Captures the contents of a quoted string at the end of a .po file line.
DJANGO_STRING_PATTERN = '''['"](.*)['"]\s*$'''
# Captures a single- or double-quoted string literal, honoring backslash
# escapes; group 1 is the quote character, group 2 the contents.
STRING_LITERAL_PATTERN = r'''\s*(["'])((\\.|[^\\])*?)\1'''
# Location of django's management scripts. Requires APPENGINE_DIR to be set
# in the environment (a missing variable raises KeyError at import time).
DJANGO_BIN = os.environ['APPENGINE_DIR'] + '/lib/django/django/bin'
# Per-file-type regexes used to locate messages and their i18n comments;
# keyed by file extension ('js', 'py', 'html').
PATTERNS = {
    'js' : {
        'start': r'\s*(messages\.[A-Z_1-9]+)\s*=',
        'string': STRING_LITERAL_PATTERN,
        'end': r';\s*$',
        'description': r'^\s*//i18n:\s*(.*)',
        'meaning': r'\s*//i18n_meaning:\s*(.*)'
    },
    'py' : {
        'start': r'\s*[a-z]+_message\(', # precedes a message in setup.py
        'string': r'en\s*=' + STRING_LITERAL_PATTERN,
        'end': r'\),?\s*$',
        'description': r'^\s*#i18n:\s*(.*)',
        'meaning': r'^\s*#i18n_meaning:\s*(.*)'
    },
    # html files carry only i18n comments; makemessages finds the strings.
    'html': {
        'description': r'^\s*#i18n:\s*(.*)',
        'meaning': r'^\s*#i18n_meaning:\s*(.*)'
    },
}
class Message:
    """A translatable message with optional description, meaning, and
    translation (msgstr).

    Identity is defined by (msgid, meaning) only; the description and
    msgstr never affect equality or hashing.
    """
    def __init__(self, msgid, description='', meaning='', msgstr=''):
        self.msgid = msgid
        self.description = description
        self.meaning = meaning
        self.msgstr = msgstr

    def __eq__(self, other):
        """Only message and meaning factor into equality and hash."""
        return (isinstance(other, type(self)) and
                self.msgid == other.msgid and
                self.meaning == other.meaning)

    def __hash__(self):
        """Only message and meaning factor into equality and hash."""
        return hash(self.msgid) ^ hash(self.meaning)

    def __cmp__(self, other):
        """Compare based on msgid (Python 2 ordering hook)."""
        if type(other) is not type(self):
            return NotImplemented
        return cmp(self.msgid, other.msgid)
def django_makemessages():
    """Invoke django's make-messages.py script to extract messages from
    python and html files, aborting the program if it fails."""
    script = os.path.join(DJANGO_BIN, 'make-messages.py')
    status = os.system(script + ' -a')
    if status:
        raise SystemExit('make-messages.py failed')
def parse_django_po(po_filename):
    """Return the header from the django-generated .po file
    and a dict from Message to a set of file:line_num references where that
    Message was extracted"""
    # Holds the header at the top of the django po file
    header = ''
    # A sentinel to know when to stop considering lines part of the header
    header_done = False
    # The return dict of Message to code ref 'file:line_num'
    message_to_ref = {}
    # The current file:line_num ref, which occurs on a previous line to its
    # corresponding message
    current_ref = ''
    # The current Message
    current_msg = Message(None, None, None, None)
    for line in codecs.open(po_filename, encoding='utf-8'):
        # The first '#:' or '#.' line marks the end of the header block.
        if line.startswith('#:') or line.startswith('#.'):
            header_done = True
        if not header_done:
            if line.startswith('"POT-Creation-Date'):
                # The POT-Creation-Date line changes on every run to include
                # the current date and time, creating unnecessary changesets.
                # Skipping this line makes extract_messages idempotent.
                continue
            header += line
            continue
        line = line.strip()
        if not line.strip() and current_msg.msgid:
            # A blank line terminates the current entry: resolve its
            # description/meaning from source comments and record it.
            refs = current_ref.split(' ')
            if not current_msg.description and not current_msg.meaning:
                (desc, meaning) = find_description_meaning(refs)
                current_msg.description = desc
                current_msg.meaning = meaning
            if not current_msg.description:
                current_msg.description = ''
            if not current_msg.meaning:
                current_msg.meaning = ''
            message_to_ref[current_msg] = set(refs)
            current_ref = ''
            current_msg = Message(None, None, None, None)
        elif line.startswith('#:'):
            current_ref = line[3:]
        elif line.startswith('#.'):
            current_msg.description = line[3:]
        elif line.startswith('msgstr'):
            current_msg.msgstr = parse_po_tagline(line, 'msgstr')
        elif current_msg.msgstr is not None:
            # Bare quoted line following msgstr: multi-line continuation.
            current_msg.msgstr += parse_po_tagline(line)
        elif line.startswith('msgid'):
            current_msg.msgid = parse_po_tagline(line, 'msgid')
        elif current_msg.msgid is not None:
            # Bare quoted line following msgid: multi-line continuation.
            current_msg.msgid += parse_po_tagline(line)
        elif line.startswith('msgctxt'):
            current_msg.meaning = parse_po_tagline(line, 'msgctxt')
        elif current_msg.meaning is not None:
            current_msg.meaning += parse_po_tagline(line)
    if current_msg.msgid:
        # The file ended without a trailing blank line: flush the final
        # entry exactly as the blank-line branch above would have.
        refs = current_ref.split(' ')
        if not current_msg.description and not current_msg.meaning:
            (desc, meaning) = find_description_meaning(refs)
            current_msg.description = desc
            current_msg.meaning = meaning
        if not current_msg.description:
            current_msg.description = ''
        if not current_msg.meaning:
            current_msg.meaning = ''
        message_to_ref[current_msg] = set(refs)
    return (header, message_to_ref)
def parse_po_tagline(line, tag=''):
    """Parses a line consisting of the given tag followed by a quoted string.

    Returns the quoted string's contents, or '' when the line does not
    match. With an empty tag, the line is treated as a bare quoted
    continuation line.
    """
    prefix = (tag + ' ') if tag else ''
    match = re.match(prefix + DJANGO_STRING_PATTERN, line)
    # Ternary instead of the fragile `x and y or z` idiom (which falls
    # through to z whenever y is falsy, e.g. an empty captured string).
    return match.group(1) if match else ''
def find_description_meaning(refs):
    """Given a list of references (in the form "filename:line_num") to where a
    message occurs, find the description and meaning in comments preceding any
    occurrence of the message and returns a (description, meaning) pair.
    (Horribly inefficient, but needed because django makemessages doesn't
    parse them out for us.)"""
    description = meaning = ''
    for ref in refs:
        # NOTE: `file` shadows the Python 2 builtin of the same name.
        (file, line_num) = ref.split(':')
        line_num = int(line_num)
        # django makemessages hacks in support for html files by appending .py
        # to the end and treating them like py files. Remove that hack here.
        file = file.replace('.html.py', '.html')
        # Look for description/meaning patterns appropriate for the file type.
        patterns = PATTERNS[file.split('.')[-1]]
        # Hold the description and meaning, if we find them
        description_lines = []
        meaning_lines = []
        # Start at the line before the message and proceed backwards.
        # NOTE(review): the handle is never closed explicitly; this Python 2
        # script relies on refcounting to release it.
        lines = open(file).readlines()
        for line in reversed(lines[:line_num - 1]):
            match = re.match(patterns['description'], line)
            if match:
                # insert(0) keeps the lines in original (top-down) order
                # even though we are scanning upwards.
                description_lines.insert(0, match.group(1))
                continue
            match = re.match(patterns['meaning'], line)
            if match:
                meaning_lines.insert(0, match.group(1))
                continue
            # For html files, need to skip over the django end comment marker
            # to get to the meaning lines
            if re.search(DJANGO_END_COMMENT_PATTERN, line):
                continue
            # The line was not part of a message description or meaning
            # comment, so the comment block (if any) has ended.
            break
        # Keep the first non-empty description/meaning found across refs.
        description = description or ' '.join(description_lines)
        meaning = meaning or ' '.join(meaning_lines)
    return (description, meaning)
def parse_file(input_filename):
    """Parses the given file, extracting messages. Returns a list of pairs
    of 'input_filename:line_number' to a Message.

    The file's extension selects the regexes from PATTERNS used to find
    message starts/ends and their i18n description/meaning comments.
    """
    # Patterns for the given input file
    patterns = PATTERNS[input_filename.split('.')[-1]]
    # The return list of pairs of ref 'file:line_num' to message
    ref_msg_pairs = []
    # Description lines for the current message
    current_description = []
    # Meaning lines for the current message
    current_meaning = []
    # The current message being parsed. This is a local var as the msg
    # can span multiple lines.
    current_message = ''
    # The line number to assign to the current message, usually the first line
    # of the statement containing the message. -1 means "not in a message".
    current_message_line_num = -1
    # Current line number in the input file
    line_num = 0
    # open() instead of the file() builtin: file() is an alias for open()
    # on Python 2 but was removed in Python 3, and open() is the
    # documented form on both.
    for line in open(input_filename):
        line_num += 1
        match = re.match(patterns['description'], line)
        if match:
            current_description.append(match.group(1))
            continue
        match = re.match(patterns['meaning'], line)
        if match:
            current_meaning.append(match.group(1))
            continue
        if re.match(patterns['start'], line):
            # Remember that we've started a message for multi-line messages
            current_message_line_num = line_num
        if current_message_line_num != -1:
            current_message += parse_message(patterns['string'], line)
            if re.search(patterns['end'], line):
                # End of the current message
                ref = input_filename + ':' + str(current_message_line_num)
                ref_msg_pairs.append(
                    (ref, Message(current_message,
                                  ' '.join(current_description),
                                  ' '.join(current_meaning))))
                current_message_line_num = -1
                current_message = ''
                current_description = []
                current_meaning = []
    return ref_msg_pairs
def parse_message(pattern, line):
    """Extract a quoted message fragment from ``line`` using ``pattern``.

    Group 1 of ``pattern`` must capture the surrounding quote character and
    group 2 the string contents. Escapes of that quote character are
    unescaped, then double quotes are re-escaped because messages are
    emitted inside double quotes in the .po file. Returns '' when the
    pattern does not match.
    """
    match = re.search(pattern, line)
    if not match:
        return ''
    quote, body = match.group(1), match.group(2)
    # Unescape whichever quote the source used, then escape double quotes
    # for the .po output.
    unescaped = body.replace('\\' + quote, quote)
    return unescaped.replace('"', '\\"')
def merge(msg_to_ref, ref_msg_pairs):
""" Merge ref_msg_pairs into msg_to_ref """
for (ref, msg) in ref_msg_pairs:
msg_to_ref.setdefault(msg, | |
<gh_stars>0
# -*- coding: utf-8 -*-
"""abaixe.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1RsUIORt1qNzuUX7OAfeYScGkNCI7Q7zt
Abaixe - 'put down'
v.01: (Sin Thank)
_x_ right now all the reads are first processed by
_x_ check input and outputs
# the loading and post-loading stats look good - but the RPKM seems to be inflating the numbers severely.
# recoded 'Make_density.py' v3.0 (Suggest Lecture) to use TPM instead of RPKM
_x_ read depth read in from bed, bedGraph ?
python scripts/abaixe.py -depth -d depth/DGY1657_depth.tab -v -o depth/DGY1657_abs_depth.p
###python scripts/abaixe.py -depth -d depth/DGY1657_depth.tab -r temp_depth.p -v -o temp_depth_r.p
v.02 (Meaning Clearance)
_x_ add genome wide differential expression test
python scripts/abaixe.py -exp -i density/ -r depth/_abs_depth.p -o var/
_x_ make support for TLS, UTR, regions in plots
_x_ make differential expression represent replicates
_x_ correct depth of each sample versus the region
_x_ make_fake
python scripts/abaixe.py -manual -d depth/DGY1657_depth.tab -o depth/DGY1728_depth.tab
python scripts/abaixe.py -depth -d depth/DGY1728_depth.tab -v -o depth/DGY1728_abs_depth.p
v.03 (Tolerate Mold)
v.04 (Guard Valid)
_x_ Move away from gene based to location based lookup
_x_ import reads to a dictionary using chromo and location as keys
_x_ use modified make_density?
_x_ use both true reads and normalized reads
_x_ save as pickles
v.05 (List Mature)
_x_ Make depth_dict # of reads and uid_depth_dict # of uids
_x_ Make 'reads over region' and 'depth over region' analysis for the process_sam method
_x_ We need to check that the read normalization is as good or better than the fractional normalization
_x_ read in gff
_x_ produce counts
_x_ check the reason why some genes are getting dumped in the parse density log files
_x_ these are genes absent from the density_dict object
v.06 (Hall Disaster)
v.07 (Engineer Axis)
_x_ redo calculate gene expression to use uid sources
# currently uses the occupancy output generated by bedtools and gff
_x_ integrate DNA depth in reads_over_regions
_x_ verify that reads in tsv are correct.
_x_ improve heatmap plots to enable non-CNV plotting as well
_x_ with read_depth calculator - why not a heuristic cap, anything over 10 is 10, over 0.2 is 1?
_x_ maybe best implemented at the plotting level - it does make the other
v0.8 (Location Salon)
_x_ Enrichment of regulation via command line trends
_x_ Enrichment of regulation via overlap simulation
v1.0 (Communist Consensus) - Public Beta
_x_ add in strand switching ("reverse_strand") for sam file read in
_x_ Improve depth calculation by masking out "problematic" areas with wacky coverage.
_x_ low-complexity
_x_ gag-pol
_x_ updated to use the experimental object for metadata
_x_ "reverse_strand"
_x_ integrate gene_name standardizer
_x_ currently using "FeatureType_Gene_SGD_Systamtic_Standard.tsv"
incorporated into the gff_parser
___ Replace lm_over_regions with positional analysis
_x_ minimum region definition requiring 90% hit as a seed -
_x_ stitch together overlapping regions
_x_ DNA depth correction
___ Separate case handling for RNA, RPF.
___ RNA extends or reduces transcript
___ RPF extends ORF (N-terminal Isoforms) or pause sites.
___ Change experiment object to have 'DNA input file' (bedtools genomecov)
and rename 'dna_path' to 'Relative DNA file' (output of calculate_depth)
___ a "range(+1)" issue with dna depth:
if relative_depth_analysis:
if nt not in relative_depth_dict[chromo]:
print('relative_depth_dict[chromo][nt]', chromo, nt)
___ Future features:
___ One to One downsampling:
Instead of downsampling to the lowest common denominator you downsample to the compared sample
___ Depth aware sampling:
Instead of downsampling to the lowest common denominator of the compared sample you downsample to the
expectation of the compared sample given the difference in DNA depth.
___ Returning to analysis
#RealAnalysis
___ Do we find changes in ribosome density in TLS, are these associated with changes in mORF tEff?
_!_ minor change to expRPF calculation
___ update control file / mapping file method
___ improve heatmap plots to support all chromosomes
_!_ check why 'YKR103W' DGY1728 DNA read depth is ~1
_!_ ECM1 with internal TIS, check Nedialkova for rich media
"""
"""
Import
"""
import numpy as np
import pickle
import pandas as pd
from scipy import stats
import argparse
import re
import random
'''
# Step one: Build experiment object from map file, parse input sam files
python /scratch/ps163/Carolina_03_18_2021/abaixe/abaixe.py -sam -map /scratch/ps163/Carolina_03_18_2021/metadata/map_file.tsv \
-filter /scratch/ps163/Carolina_03_18_2021/metadata/filter_regions.tab \
-o /scratch/ps163/Carolina_03_18_2021/pickles/ \
-experiment /scratch/ps163/Carolina_03_18_2021/pickles/cnv_experiment.p
# Step two: Build DNA depth object:
python /scratch/ps163/Carolina_03_18_2021/abaixe/abaixe.py -depth \
-experiment /scratch/ps163/Carolina_03_18_2021/pickles/cnv_experiment.p \
-filter /scratch/ps163/Carolina_03_18_2021/metadata/filter_regions.tab
# Step three: normalize read depth
python /scratch/ps163/Carolina_03_18_2021/abaixe/abaixe.py \
-norm \
-experiment /scratch/ps163/Carolina_03_18_2021/pickles/cnv_experiment.p \
-o pickles/
# Step four:
python /scratch/ps163/Carolina_03_18_2021/abaixe/abaixe.py -locus_depth \
--gene_systematic_name_file /scratch/ps163/Carolina_03_18_2021/metadata/FeatureType_Gene_SGD_Systamtic_Standard.tsv \
-gff /scratch/ps163/Carolina_03_18_2021/ensembl_50/Saccharomyces_cerevisiae.R64-1-1.50.gff3 \
-feature ensembl \
-experiment pickles/cnv_experiment.p -o pickles
#Step five
python /scratch/ps163/Carolina_03_18_2021/abaixe/abaixe.py -locus_expression \
--gene_systematic_name_file /scratch/ps163/Carolina_03_18_2021/metadata/metadata/FeatureType_Gene_SGD_Systamtic_Standard.tsv \
-gff /scratch/ps163/Carolina_03_18_2021/ensembl_50/Saccharomyces_cerevisiae.R64-1-1.50.gff3 \
-feature ensembl \
-experiment pickles/cnv_experiment.p -o pickles/ror
#Step six
python abaixe.py -adjust_expression \
-experiment pickles/cnv_experiment.p -o pickles/ror
step Seven
python abaixe.py -expression -experiment pickles/cnv_experiment.p \
-experiment pickles/cnv_experiment.p \
-o ror_exp_differences
step Eight
python /scratch/ps163/Carolina_03_18_2021/scripts/abaixe.py --positional_regions \
-experiment /scratch/ps163/Carolina_03_18_2021/pickles/cnv_experiment.p \
-gff /scratch/ps163/Carolina_03_18_2021/ensembl_50/Saccharomyces_cerevisiae.R64-1-1.50.gff3 \
-feature ensembl \
-o cnv_exp
python abaixe.py -expression_trends \
arg
'''
#file = open('DGY1657_abs_depth.p','rb')
#
#depth = pickle.load(file)
#
#file.close
#
#depth['XI'].keys()
# Command-line interface: one boolean flag per pipeline stage plus the
# file-path options each stage reads.
parser = argparse.ArgumentParser()
#python abaixe.py -exp -i ./temp_expression_dict.p -o temp_differences
parser.add_argument('-expression',"--calculate_expression_profile", action='store_true')
parser.add_argument('-i',"--input_abundance_file")
parser.add_argument('-o',"--output_file")
# DNA depth handling (manual entry or computed from a depth table)
parser.add_argument('-manual',"--manual_depth", action='store_true')
parser.add_argument('-depth',"--calculate_depth", action='store_true')
parser.add_argument('-d',"--input_depth_file")
parser.add_argument('-r',"--relative_depth_file")
parser.add_argument('-round',"--round_DNA_depth")
# SAM parsing stage: map file describes samples, filter masks regions
parser.add_argument('-sam', '--process_sam', action='store_true')
parser.add_argument('-map', '--map_file')
parser.add_argument('-filter', '--filter_regions')
parser.add_argument('-experiment', '--experiment_object')
#normalize parsed sam
# python abaixe.py -norm -experiment pickles/cnv_experiment.p -o pickles/
parser.add_argument('-norm', '--normalize_reads', action='store_true')
#reads_over_regions
parser.add_argument('-ror', '--reads_over_regions', action='store_true')
parser.add_argument('-gsnf', '--gene_systematic_name_file')
parser.add_argument('-locus_depth', '--locus_depth', action='store_true')
parser.add_argument('-locus_expression', '--locus_expression', action='store_true')
parser.add_argument('-adjust_expression','--adjust_expression', action='store_true')
# annotation inputs shared by the locus stages
parser.add_argument('-gff', '--gff_file_name')
parser.add_argument('-bed', '--bed_file_name')
parser.add_argument('-feature', '--feature_name')
parser.add_argument('-dor', '--depths_over_regions', action='store_true')
parser.add_argument('-pr', '--positional_regions', action='store_true')
#parser.add_argument('-v',"--verbose", action='store_true')
# python abaixe.py -norm -experiment pickles/cnv_experiment.p -o pickles/
parser.add_argument('-plot', '--plot', action='store_true')
parser.add_argument('-trends', '--expression_trends', action='store_true')
args = parser.parse_args()
'''
set globals
'''
# Per-gene read density, filled in by the density-parsing stages.
density_dict = {}
# Feature coordinates, filled in by the gff/bed parsing stages.
coord_dict = {}
# Relative DNA depth per chromosome, loaded from a pickle when requested.
relative_depth_dict = {}
# Flag/holder for DNA depth data; False until loaded.
dna_depth = False
# Map every chromosome naming scheme seen in inputs (RefSeq accessions,
# 'chrX'/'ChrX' styles, bare numerals, 'Mito'/'chrmt') onto one
# roman-numeral standard used internally.
chromo_standardizer = {"NC_001133.9":"I", "NC_001134.8":"II", "NC_001135.5":"III",
                       "NC_001136.10":"IV", "NC_001137.3":"V", "NC_001138.5":"VI",
                       "NC_001139.9":"VII", "NC_001140.6":"VIII", "NC_001141.2":"IX",
                       "NC_001142.9":"X", "NC_001143.9":"XI", "NC_001144.5":"XII",
                       "NC_001145.3":"XIII", "NC_001146.8":"XIV", "NC_001147.6":"XV",
                       "NC_001148.4":"XVI", "NC_001224.1":"MT",
                       "chrI":"I", "chrII":"II", "chrIII":"III",
                       "chrIV":"IV", "chrV":"V", "chrVI":"VI",
                       "chrVII":"VII", "chrVIII":"VIII", "chrIX":"IX",
                       "chrX":"X", "chrXI":"XI", "chrXII":"XII",
                       "chrXIII":"XIII", "chrXIV":"XIV", "chrXV":"XV",
                       "chrXVI":"XVI", "chrMT":"MT",
                       "ChrI":"I", "ChrII":"II", "ChrIII":"III",
                       "ChrIV":"IV", "ChrV":"V", "ChrVI":"VI",
                       "ChrVII":"VII", "ChrVIII":"VIII", "ChrIX":"IX",
                       "ChrX":"X", "ChrXI":"XI", "ChrXII":"XII",
                       "ChrXIII":"XIII", "ChrXIV":"XIV", "ChrXV":"XV",
                       "ChrXVI":"XVI", "ChrMT":"MT",
                       "chrmt":"MT",
                       "I":"I", "II":"II", "III":"III",
                       "IV":"IV", "V":"V", "VI":"VI",
                       "VII":"VII", "VIII":"VIII", "IX":"IX",
                       "X":"X", "XI":"XI", "XII":"XII",
                       "XIII":"XIII", "XIV":"XIV", "XV":"XV",
                       "XVI":"XVI", "Mito":"MT",}
# The nuclear chromosomes processed by default (MT excluded).
chromo_set = ['I','II','III','IV','V', 'VI','VII', 'VIII', 'IX', 'X', 'XI', 'XII', 'XIII', 'XIV', 'XV', 'XVI']
#chromo_set = ['VIII']
# Optional rounding precision for DNA depth values; 0 when not requested.
if args.round_DNA_depth:
    depth_round = int(args.round_DNA_depth)
else:
    depth_round = 0
def check_for_name(xname):
    """Return True if *xname* occurs as a key in any gene's density entry.

    Bug fix: the original had `else: return(False)` inside the loop, so it
    returned False after inspecting only the first gene in density_dict.
    Now the whole dict is scanned before declaring the name absent.
    """
    global density_dict
    for gene in density_dict:
        if xname in density_dict[gene]:
            return True
    return False
def gene_systemitizer(gene_lookup_name, remove_if=True):
    """Build a lookup mapping every known gene name to its systematic name.

    Parses a tab-separated file with columns
    SGD_Primary_DBID, Systematic, Standard, Status; lines starting with '#'
    are skipped.  Each of the SGD, systematic, and standard names maps to
    the systematic name.  Collisions are reported (printed) and the first
    mapping wins.

    Parameters
    ----------
    gene_lookup_name : str
        Path to the lookup TSV file.
    remove_if : bool
        When True (default) skip entries whose Status is in the filter set
        (currently only 'Dubious').

    Returns
    -------
    dict
        Mapping of name -> systematic name.
    """
    filter_set = set(['Dubious'])
    #SGD_Primary_DBID	Systematic	Standard	Status
    gene_standard = {}
    # Context manager fixes the original's leaked file handle; guard
    # clauses replace the deep nesting.
    with open(gene_lookup_name) as gene_lookup_file:
        for line in gene_lookup_file:
            if line[0] == '#':
                continue
            line = line.strip()
            sgd, systematic, standard, status = line.split('\t')
            status = status.strip()
            if remove_if and status in filter_set:
                continue
            # A quoted-empty standard name means the gene has no standard
            # name; fall back to the SGD id so the entry is still findable.
            if standard == "''":
                standard = sgd
            if sgd not in gene_standard:
                gene_standard[sgd] = systematic
            else:
                print('sgd name collision', line)
            if systematic not in gene_standard:
                gene_standard[systematic] = systematic
            else:
                print('systematic name collision', line)
            if standard not in gene_standard:
                gene_standard[standard] = systematic
            else:
                print('standard name collision', line)
    return gene_standard
def get_genome_depth(infile_name, outfile_name, regions_to_filter, relative_depth_analysis, relative_depth_file):
#infile = open("DGY1657_depth.tab")
#experiment_object_name = open(args.experiment_object)
infile = open(infile_name)
genome_list = []
chromo_deets_dict = {}
ct_dict = {}
cn_dict = {}
for line in infile:
if line[0]!='#':
chromo = chromo_standardizer[line.split('\t')[0]]
nt = int(line.split('\t')[1])
process = True
if chromo in regions_to_filter:
if (nt in regions_to_filter[chromo]['+']) or (nt in regions_to_filter[chromo]['-']):
process = False
# #filter mitochondria
# if chromo == 'MT':
# process = False
#
# #filter rDNA
# if chromo == 'XII':
# if (nt > 451410) and (nt < 489500):
# process = False
if process:
count = int(line.split('\t')[2])
if chromo not in ct_dict:
print('processing ', chromo)
ct_dict[chromo] = {}
ct_dict[chromo][nt] = count
if chromo not in chromo_deets_dict:
chromo_deets_dict[chromo] = {'total':[], 'nt_list':[], 'min':1, 'max':0}
chromo_deets_dict[chromo]['total'].append(count)
chromo_deets_dict[chromo]['nt_list'].append(nt)
genome_list.append(count)
infile.close()
for chromo in chromo_deets_dict:
chromo_deets_dict[chromo]['min'] = min(chromo_deets_dict[chromo]['nt_list'])
chromo_deets_dict[chromo]['max'] = max(chromo_deets_dict[chromo]['nt_list'])
for chromo in ct_dict:
chromo_deets_dict[chromo]['median'] = np.median(chromo_deets_dict[chromo]['total'])
chromo_deets_dict[chromo]['mean'] = np.mean(chromo_deets_dict[chromo]['total'])
chromo_deets_dict[chromo]['std'] = np.std(chromo_deets_dict[chromo]['total'])
genome_median = np.median(genome_list)
if relative_depth_analysis:
file = open(relative_depth_file,'rb')
relative_depth_dict = pickle.load(file)
file.close()
for chromo in | |
import copy
import numpy as np
from .util import is_ccw
from .. import util
from .. import grouping
from .. import constants
try:
import networkx as nx
except BaseException as E:
# create a dummy module which will raise the ImportError
# or other exception only when someone tries to use networkx
from ..exceptions import ExceptionModule
nx = ExceptionModule(E)
def vertex_graph(entities):
    """
    Build a networkx.Graph whose nodes are the vertex indices
    referenced by the passed entities.

    Parameters
    --------------
    entities : list
      Objects with 'closed' and 'nodes' attributes

    Returns
    -------------
    graph : networkx.Graph
      Graph where node indexes represent vertices
    closed : (n,) int
      Indexes of entities which are 'closed'
    """
    graph = nx.Graph()
    # closed entities form their own loop and are kept out of the graph
    closed = [i for i, e in enumerate(entities) if e.closed]
    for i, e in enumerate(entities):
        if not e.closed:
            # tag each edge with the entity it came from
            graph.add_edges_from(e.nodes, entity_index=i)
    return graph, np.array(closed)
def vertex_to_entity_path(vertex_path,
                          graph,
                          entities,
                          vertices=None):
    """
    Convert a path of vertex indices to a path of entity indices.

    Parameters
    ----------
    vertex_path : (n,) int
      Ordered list of vertex indices representing a path
    graph : nx.Graph
      Vertex connectivity
    entities : (m,) list
      Entity objects
    vertices : (p, dimension) float
      Vertex points in space

    Returns
    ----------
    entity_path : (q,) int
      Entity indices which make up vertex_path
    """
    def edge_direction(a, b):
        """
        Given two edges, figure out if the first needs to be
        reversed to keep the progression forward.

        [1,0] [1,2] -1 1
        [1,0] [2,1] -1 -1
        [0,1] [1,2] 1 1
        [0,1] [2,1] 1 -1

        Parameters
        ------------
        a : (2,) int
        b : (2,) int

        Returns
        ------------
        a_direction : int
        b_direction : int
        """
        if a[0] == b[0]:
            return -1, 1
        elif a[0] == b[1]:
            return -1, -1
        elif a[1] == b[0]:
            return 1, 1
        elif a[1] == b[1]:
            return 1, -1
        else:
            # NOTE: `ea`, `eb` and `entity_path` are read from the
            # enclosing function's scope here, not from this closure's
            # arguments; this branch only logs and signals failure.
            constants.log.debug(
                'edges not connected!\n'
                'vertex path %s\n'
                'entity path: %s\n'
                'entity[a]: %s\n'
                'entity[b]: %s',
                vertex_path,
                entity_path,
                entities[ea].points,
                entities[eb].points)
            return None, None

    if vertices is None or vertices.shape[1] != 2:
        # without 2D vertices we cannot establish winding; assume forward
        ccw_direction = 1
    else:
        # close the loop and test winding:
        # ccw_direction is +1 for counterclockwise, -1 for clockwise
        ccw_check = is_ccw(vertices[np.append(vertex_path,
                                              vertex_path[0])])
        ccw_direction = (ccw_check * 2) - 1

    # make sure vertex path is correct type
    vertex_path = np.asanyarray(vertex_path, dtype=np.int64)
    # we will be saving entity indexes
    entity_path = []
    # loop through pairs of vertices
    for i in np.arange(len(vertex_path) + 1):
        # get two wrapped vertex positions
        vertex_path_pos = np.mod(np.arange(2) + i, len(vertex_path))
        vertex_index = vertex_path[vertex_path_pos]
        entity_index = graph.get_edge_data(*vertex_index)['entity_index']
        entity_path.append(entity_index)
    # remove duplicate entities and order CCW
    entity_path = grouping.unique_ordered(entity_path)[::ccw_direction]
    # check to make sure there is more than one entity
    if len(entity_path) == 1:
        # apply CCW reverse in place if necessary
        if ccw_direction < 0:
            index = entity_path[0]
            entities[index].reverse()
        return entity_path
    # traverse the entity path and reverse entities in place to
    # align with this path ordering
    round_trip = np.append(entity_path, entity_path[0])
    round_trip = zip(round_trip[:-1], round_trip[1:])
    for ea, eb in round_trip:
        da, db = edge_direction(entities[ea].end_points,
                                entities[eb].end_points)
        if da is not None:
            entities[ea].reverse(direction=da)
            entities[eb].reverse(direction=db)
    entity_path = np.array(entity_path)
    return entity_path
def closed_paths(entities, vertices):
    """
    Find closed paths as lists of entity indices.

    Vertex-level cycles are found with graph cycle algorithms and then
    converted to entity paths.  Note that entity.points may be reordered
    in place so each path can be traversed without reversing entities.

    Parameters
    -------------
    entities : (n,) entity objects
      Entity objects
    vertices : (m, dimension) float
      Vertex points in space

    Returns
    -------------
    entity_paths : sequence of (n,) int
      Ordered traversals of entities
    """
    # graph of open-entity connectivity plus indexes of closed entities
    graph, closed = vertex_graph(entities)
    # each closed entity is a complete single-entity path already
    entity_paths = np.reshape(closed, (-1, 1)).tolist()
    # every cycle in the graph is a candidate closed loop
    for cycle in nx.cycles.cycle_basis(graph):
        # fewer than two vertices is a degenerate, zero-length path
        if len(cycle) < 2:
            continue
        entity_paths.append(
            vertex_to_entity_path(cycle,
                                  graph,
                                  entities,
                                  vertices))
    return entity_paths
def discretize_path(entities, vertices, path, scale=1.0):
    """
    Turn a list of entity indices into a path of connected points.

    Parameters
    -----------
    entities : (j,) entity objects
      Objects like 'Line', 'Arc', etc.
    vertices: (n, dimension) float
      Vertex points in space.
    path : (m,) int
      Indexes of entities
    scale : float
      Overall scale of drawing used for
      numeric tolerances in certain cases

    Returns
    -----------
    discrete : (p, dimension) float
      Connected points in space that lie on the
      path and can be connected with line segments.
    """
    vertices = np.asanyarray(vertices)
    count = len(path)
    if count == 0:
        raise ValueError('Cannot discretize empty path!')
    if count == 1:
        # a single entity discretizes directly
        discrete = np.asanyarray(entities[path[0]].discrete(
            vertices, scale=scale))
    else:
        # drop the final point of every entity except the last one so
        # shared endpoints are not duplicated in the stacked result
        last = count - 1
        segments = []
        for i, entity_id in enumerate(path):
            curve = entities[entity_id].discrete(vertices, scale=scale)
            segments.append(curve if i >= last else curve[:-1])
        # stack all curves into one (n, dimension) array
        discrete = np.vstack(segments)
    # force 2D curves to counterclockwise winding; reversing makes the
    # array non-contiguous so copy it back to C order
    if vertices.shape[1] == 2 and not is_ccw(discrete):
        discrete = np.ascontiguousarray(discrete[::-1])
    return discrete
class PathSample:
    """Arc-length parameterization of a polyline: sample points at
    arbitrary distances along the path or truncate it at a distance."""

    def __init__(self, points):
        # keep the input as a numpy array
        self._points = np.array(points)
        # per-segment direction vectors and their lengths
        self._vectors = np.diff(self._points, axis=0)
        self._norms = util.row_norm(self._vectors)
        # normalize only segments longer than the zero tolerance to
        # avoid dividing by zero on coincident points
        self._unit_vec = self._vectors.copy()
        nonzero = self._norms > constants.tol_path.zero
        self._unit_vec[nonzero] /= self._norms[nonzero].reshape((-1, 1))
        # total path length
        self.length = self._norms.sum()
        # cumulative arc length per segment end; sorted by construction
        self._cum_norm = np.cumsum(self._norms)

    def sample(self, distances):
        """Return the points on the path at the given arc-length distances."""
        # which segment each distance lands on: the insertion index that
        # keeps _cum_norm sorted, clipped into the valid segment range
        seg = np.searchsorted(self._cum_norm, distances)
        seg = np.clip(seg, 0, len(self._unit_vec) - 1)
        # arc length at the start vertex of each chosen segment
        start = np.append(0, self._cum_norm)[seg]
        # distance to travel past that vertex
        along = distances - start
        # parametric line equation: origin + direction * distance
        return self._points[seg] + (self._unit_vec[seg] *
                                    along.reshape((-1, 1)))

    def truncate(self, distance):
        """
        Return a truncated version of the path.
        Only one vertex (at the endpoint) will be added.
        """
        idx = np.searchsorted(self._cum_norm, distance)
        overshoot = distance - self._cum_norm[idx - 1]
        if overshoot < constants.tol_path.merge:
            # an existing vertex is close enough: cut there
            clipped = self._points[:idx + 1]
        else:
            # walk `overshoot` along the segment to build the new endpoint
            direction = util.unitize(np.diff(
                self._points[np.arange(2) + idx],
                axis=0).reshape(-1))
            endpoint = self._points[idx] + direction * overshoot
            clipped = np.vstack((self._points[:idx + 1], endpoint))
        # sanity check: the clipped length must match the request
        assert (util.row_norm(np.diff(
            clipped, axis=0)).sum() -
            distance) < constants.tol_path.merge
        return clipped
def resample_path(points,
count=None,
step=None,
step_round=True):
"""
Given a path along (n,d) points, resample them such that the
distance traversed along the path is constant in between each
of the resampled points. Note that this can produce clipping at
corners, as the original vertices are NOT guaranteed to be in the
new, resampled path.
ONLY ONE of count or step can be specified
Result can be uniformly distributed (np.linspace) by specifying count
Result can have a specific distance (np.arange) by specifying step
Parameters
----------
points: (n, d) float
Points in space
count : int,
Number of points to sample evenly (aka np.linspace)
step : float
Distance each step should take along the path (aka np.arange)
Returns
----------
resampled : (j,d) float
Points on the path
"""
points = np.array(points, dtype=np.float64)
# generate samples along the perimeter from kwarg count or step
if (count is not None) and (step is not None):
raise ValueError('Only step OR count can be specified')
if (count is None) and (step is None):
raise ValueError('Either step or count must be | |
\n(Horizontal × Vertical)")
self.l4a.grid(column=0, row=0, columnspan=3, ipadx=30, pady=5)
self.sizevar = StringVar()
self.r4a = Radiobutton(self.f4, text='1x1', var=self.sizevar, value='1x1', command=self.setsize)
self.r4a.grid(column=0, row=1, padx=2, pady=1)
self.r4a.select()
self.r4b = Radiobutton(self.f4, text='2x1', var=self.sizevar, value='2x1', command=self.setsize)
self.r4b.grid(column=1, row=1, padx=2, pady=1)
self.r4c = Radiobutton(self.f4, text='1x2', var=self.sizevar, value='1x2', command=self.setsize)
self.r4c.grid(column=2, row=1, padx=2, pady=1)
self.r4d = Radiobutton(self.f4, text='2x2', var=self.sizevar, value='2x2', command=self.setsize)
self.r4d.grid(column=0, row=2, padx=2, pady=1)
self.r4e = Radiobutton(self.f4, text='3x2', var=self.sizevar, value='3x2', command=self.setsize)
self.r4e.grid(column=1, row=2, padx=2, pady=1)
self.r4f = Radiobutton(self.f4, text='2x3', var=self.sizevar, value='2x3', command=self.setsize)
self.r4f.grid(column=2, row=2, padx=2, pady=1)
self.r4g = Radiobutton(self.f4, text='3x3', var=self.sizevar, value='3x3', command=self.setsize)
self.r4g.grid(column=0, row=3, padx=2, pady=1)
self.r4h = Radiobutton(self.f4, text='4x3', var=self.sizevar, value='4x3', command=self.setsize)
self.r4h.grid(column=1, row=3, padx=2, pady=1)
self.r4i = Radiobutton(self.f4, text='3x4', var=self.sizevar, value='3x4', command=self.setsize)
self.r4i.grid(column=2, row=3, padx=2, pady=1)
self.f5 = Frame(self.fright)
self.f5.pack(expand=YES, fill=BOTH, ipadx=10, ipady=10)
self.finbtn = Button(self.f5, text="Process Image", width=15, height=2, bg='#ddffbb', activebackground='#55cc11', font=('Calibri', 10, 'bold'),
command=self.processimg)
self.finbtn.pack(anchor='center', padx=20, pady=50)
self.f6 = Frame(self.fright)
self.l6a = Label(self.f6)
self.l6a.grid(column=0, row=0, padx=5, pady=5)
self.l6b = Label(self.f6)
self.l6b.grid(column=1, row=0, padx=5, pady=5)
self.l6c = Label(self.f6)
self.l6c.grid(column=2, row=0, padx=5, pady=5)
self.f7 = Frame(self.fright)
self.l7 = Label(self.f7, text="View the images in full size : ", font=('Calibri', 10, 'italic'))
self.l7.grid(column=0, row=0, columnspan=3, padx=5, pady=5)
self.b7a = Button(self.f7, text='Original\nImage', width=10, height=2, bg='#ddffff', activebackground='#77ffff', command=lambda:self._showimage(0))
self.b7a.grid(column=0, row=1, padx=5, pady=5)
self.b7b = Button(self.f7, text='Resized\nImage', width=10, height=2, bg='#ddffff', activebackground='#77ffff', command=lambda : self._showimage(1))
self.b7b.grid(column=1, row=1, padx=5, pady=5)
self.b7c = Button(self.f7, text='Converted\nImage', width=12, height=2, bg='#ddddff', activebackground='#6622bb', command=lambda : self._showimage(2))
self.b7c.grid(column=2, row=1, padx=5, pady=5)
self.l8 = Label(self.fright, text="5. Save Location", font=('Bahnschrift', 12, 'bold'), anchor='w')
self.f8 = Frame(self.fright)
self.l8a = Label(self.f8, text="Select where to write the functions : ")
self.l8a.grid(column=1, row=0, ipadx=10)
self.l8b = Label(self.f8, text='[No Folder Selected]')
self.l8b.grid(column=1, row=1)
self.b8 = Button(self.f8, text="Select", height=1, width=8, bg='#ddddff', activebackground='#6622bb', command=self.setoutdir)
self.b8.grid(column=0, row=0, padx=30, pady=5, rowspan=2)
self.l9 = Label(self.fright, text="6. Fill setting", font=('Bahnschrift', 12, 'bold'), anchor='w')
self.f9 = Frame(self.fright)
self.l9a = Label(self.f9, text="Select how to build the structure : ")
self.l9a.grid(column=0, row=0, ipadx=10, rowspan=2)
self.ftypevar = StringVar()
self.r9a = Radiobutton(self.f9, text="Keep (Fill only air blocks)", bg='#ddddff', activebackground='#6622bb',
var=self.ftypevar, value=self.FKEEP, command=self.setfilltype)
self.r9a.grid(column=1, row=0, padx=5, pady=5, ipadx=20)
self.r9a.select()
self.r9b = Radiobutton(self.f9, text="Destroy (Replace all blocks)", bg='#ddddff', activebackground='#6622bb',
var=self.ftypevar, value=self.FDEST, command = self.setfilltype)
self.r9b.grid(column=1, row=1, padx=5, pady=5, ipadx=20)
self.l10 = Label(self.fright, text="7. More Options", font=('Bahnschrift', 12, 'bold'), anchor='w')
self.f10 = Frame(self.fright)
self.saveimagevar = BooleanVar()
self.genbpackvar = BooleanVar()
self.linkposvar = BooleanVar()
self.fnfoldervar = BooleanVar()
self.c10a = Checkbutton(self.f10, text="Save processed image locally", justify=LEFT, var=self.saveimagevar)
self.c10a.grid(column=0, row=0, padx=10, pady=5, sticky='w')
self.c10b = Checkbutton(self.f10, text="Auto-generate a behavior pack", justify=LEFT, var=self.genbpackvar)
self.c10b.grid(column=0, row=1, padx=10, pady=5, sticky='w')
self.c10c = Checkbutton(self.f10, text="Link function coordinates", justify=LEFT, var=self.linkposvar)
self.c10c.grid(column=0, row=2, padx=10, pady=5, sticky='w')
self.c10c.invoke()
self.c10d = Checkbutton(self.f10, text="Create sub-folder for functions", justify=LEFT, var=self.fnfoldervar)
self.c10d.grid(column=0, row=3, padx=10, pady=5, sticky='w')
self.c10d.invoke()
self.f11 = Frame(self.fright)
self.writebtn = Button(self.f11, text="Write Functions", width=15, height=1, bg='#ddffbb', activebackground='#55cc11', font=('Calibri', 10, 'bold'),
command=self.create)
self.writebtn.pack(anchor='center', side=LEFT, padx=20, ipady=10)
self.restartbtn = Button(self.f11, text="Create New Art", width=15, height=1, bg='#dddddd', activebackground='#aaaaaa', font=('Calibri', 10, 'bold'),
command=self._restart)
def _hidewidgets2(self):
    """Re-show the 'Process Image' frame and hide every section that
    follows it (previews, save location, fill settings, options)."""
    self.f5.pack(expand=YES, fill=BOTH, ipadx=10, ipady=10)
    # everything downstream of the processing step gets hidden
    for widget in (self.f6, self.f7,
                   self.l8, self.f8,
                   self.l9, self.f9,
                   self.l10, self.f10,
                   self.f11):
        widget.pack_forget()
def _disableall(self):
    """Disable every input widget and remove the write button so the
    configuration can no longer change once writing has started."""
    inputs = (self.b1,
              self.r2a, self.r2b, self.r2c,
              self.e3,
              self.r4a, self.r4b, self.r4c, self.r4d, self.r4e,
              self.r4f, self.r4g, self.r4h, self.r4i,
              self.b8,
              self.r9a, self.r9b,
              self.c10a, self.c10b)
    for widget in inputs:
        widget.config(state=DISABLED)
    self.writebtn.pack_forget()
def _toggledialogs(self):
    # Flip whether informational message boxes are shown after each step.
    self.dialogs = not self.dialogs
def openimage(self):
    """Ask the user for an image file and load it for processing.

    On success stores the path, extension and the opened PIL image, and
    updates the file-name label.  On failure offers a retry.  Fixes:
    cancelling the dialog no longer raises a spurious 'Invalid File'
    error, and the bare `except:` is narrowed to `except Exception` so
    KeyboardInterrupt/SystemExit are not swallowed.
    """
    self._hidewidgets2()
    try :
        selection = fd.askopenfilename(title='Select image',
            filetypes=[('All Images',('*.png','*.jpg','*.jpeg','*.dng','*.bmp','*.gif','*.tiff','*.tif')),
                       ("PNG files",'*.png'),("JPEG files",('*.jpg','*.jpeg')),("DNG files",'*.dng'),
                       ("BMP files",'*.bmp'),("GIF files",'*.gif'),("TIFF files",('*.tiff','*.tif')),
                       ("all files",'*.*')])
        if not selection:
            # dialog cancelled: nothing to load, nothing to report
            return
        fileaddr = os.path.normpath(selection)
        if not os.path.isfile(fileaddr):
            mbx.showerror('Invalid File', "The given image address does not exist.")
            return
        basename = os.path.basename(fileaddr)
        fext = os.path.splitext(basename)[1]
        self.ORIGFILE = fileaddr
        self.ORIGEXT = fext
        self.PHOTO = Img.open(fileaddr)
        if self.dialogs :
            mbx.showinfo('Input Image', "Selected {} as the image.".format(fileaddr))
        self.l1b.config(text = basename, font=('Calibri', 10, 'bold'))
    except Exception :
        retry = mbx.askyesno('Error','Failed to load file. Try again ?')
        if retry :
            self.openimage()
        else :
            return
def setpalette(self):
    """Set the chosen palette and additional information depending on the choice"""
    self._hidewidgets2()
    p = self.ptypevar.get()
    # disable the palette radios while the follow-up modal dialogs are
    # open so the choice cannot change mid-dialog
    self.r2a.config(state=DISABLED)
    self.r2b.config(state=DISABLED)
    self.r2c.config(state=DISABLED)
    if p == self.BP :
        # basic palette: ask which single material to build out of
        bloc = opd.askradio(self.materials, 'Select Material', "Choose material to build image out of in minecraft : ", parent=self.root)
        if bloc is None :
            bloc = 0
        self.PALETTE = (self.BP, self.mnames[bloc])
    elif p == self.EP :
        # extended palette: ask for a max structure height; askinteger
        # returns None on cancel, making int(None) raise TypeError,
        # which falls through to the 128 default below
        try :
            maxheight = abs(int(sd.askinteger('Maximum Height', "Enter the maximum height of the 3D block structure :\n\n\
Enter 0 to place no limit on maximum height (Note that the max height will not exceed 128 in any case)\n\
Giving a value from 1-127 may force some pixels on the final map to deviate from their calculated shades slightly", parent=self.root)))
            # 0 means "no limit" and out-of-range values also clamp to 128
            if maxheight < 1 or maxheight > 128:
                maxheight = 128
        except TypeError :
            maxheight = 128
        self.PALETTE = (self.EP, maxheight)
    else :
        # full palette needs no extra information
        self.PALETTE = (self.FP, None)
    if self.dialogs :
        mbx.showinfo('Palette', "Set the colour palette to {}".format(self.PALETTE[0]))
    #self.b2.grid(column=0, row=1, padx=30, pady=5, rowspan=2)
    self.r2a.config(state=NORMAL)
    self.r2b.config(state=NORMAL)
    self.r2c.config(state=NORMAL)
def setsize(self):
    """Read the selected 'HxV' map size, store it, and split the covered
    area into one 64x128 rectangle (one function) per chunk."""
    self._hidewidgets2()
    s = self.sizevar.get()
    # size strings are single-digit 'HxV' values from the radio buttons
    horiz, vert = int(s[0]), int(s[2])
    self.NUM = 2 * horiz * vert
    if self.NUM >= 8 and self.dialogs:
        mbx.showwarning('Warning', "The selected image size is very large and may cause lag in-game or missing chunks \
due to far corners not loading or loading very slowly. Consider disabling the 'Link function coordinates' option before finishing.")
    self.SIZE = (horiz, vert)
    # Break the total area into vertical 64x128 rectangles, one function per chunk
    self.CHUNKS = [(64 * x, 128 * z)
                   for z in range(self.SIZE[1])
                   for x in range(2 * self.SIZE[0])]
    if self.dialogs:
        mbx.showinfo('Map size', "Set the picture size to {}.\nThis will cover an area of {}x{} blocks and generate {} functions.".format(
            self.SIZE, 128*self.SIZE[0], 128*self.SIZE[1], self.NUM))
def setfilltype(self):
    """Store the fill mode chosen via the radio buttons (keep vs destroy)."""
    mode = self.ftypevar.get()
    self.FILLMODE = mode
    if self.dialogs:
        mbx.showinfo('Fill mode selected', "The fill mode is set to '{}'".format(mode))
def _checkvalidname(self, event):
"""Verifies that the entered name can be used as a function name"""
name = self.e3.get()
allowed = [c for c in "qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM1234567890_"]
if len(name) == 0:
mbx.showerror('No Name', "Please enter a name for the functions")
self.e3.focus_set()
return False
if name is not None and all([char in allowed for char in name]) :
self.NAME = name
return True
else :
mbx.showerror('Invalid Name', "The given name is invalid !\n It must contain only alphanumeric characters and underscore (A-Z, a-z, 0-9, _)")
self.e3.delete(0, END)
self.e3.focus_set()
return False
def processimg(self):
"""Resize the given image and convert using the specified palette.
Display all the options to view it in the window"""
if self.PHOTO is None :
mbx.showwarning('No Picture', "Please select an input image !")
return
if self.PALETTE is None :
mbx.showwarning('No Palette', "Please select a colour palette !")
return
if self.SIZE not in self.sizes or self.CHUNKS == []:
mbx.showwarning('Size Undefined', "Please select an image size !")
return
# Resize first, then quantize
try :
self.RESIZED = self.PHOTO.resize((128*self.SIZE[0],128*self.SIZE[1]), Img.NEAREST)
if self.SIZE[0]*self.SIZE[1] <= 6 :
self.RESIZEDLARGE = self.RESIZED.resize((1024*self.SIZE[0],1024*self.SIZE[1]), Img.NEAREST)
else :
self.RESIZEDLARGE = self.RESIZED.resize((512*self.SIZE[0],512*self.SIZE[1]), Img.NEAREST)
if self.PALETTE[0] == self.BP :
palet = self.BASICPALETTE
elif self.PALETTE[0] == self.FP :
palet = self.FULLPALETTE
elif self.PALETTE[0] == self.EP :
palet = self.EXTENDEDPALETTE
self.blankpaletteimg = Img.new('P', (1,1))
self.blankpaletteimg.putpalette(palet)
try :
self.PROCESSED = self.RESIZED.quantize(palette = self.blankpaletteimg)
except ValueError :
prgb = self.RESIZED.convert('RGB')
self.PROCESSED = prgb.quantize(palette = self.blankpaletteimg)
if self.SIZE[0]*self.SIZE[1] <= 6 :
self.PROCESSEDLARGE = self.PROCESSED.resize((1024*self.SIZE[0],1024*self.SIZE[1]), Img.NEAREST)
else :
self.PROCESSEDLARGE = self.PROCESSED.resize((512*self.SIZE[0],512*self.SIZE[1]), Img.NEAREST)
try :
w, h = self.PHOTO.size
if w/h >= 1 :
self.dispimg1 = ImageTk.PhotoImage(image = self.PHOTO.resize((128, int(128*h/w)), Img.NEAREST))
else :
self.dispimg1 = ImageTk.PhotoImage(image = self.PHOTO.resize((int(128*w/h), 128), Img.NEAREST))
w, h = self.PROCESSED.size
if w/h >= 1 :
self.dispimg2 = ImageTk.PhotoImage(image = self.PROCESSED.resize((128, int(128*h/w)), Img.NEAREST))
else :
self.dispimg2 = ImageTk.PhotoImage(image = self.PROCESSED.resize((int(128*w/h), 128), Img.NEAREST))
except :
pass
except :
mbx.showerror('Error', "An unknown processing error occurred.")
return
self.f5.pack_forget()
self.f6.pack()
self.f6.after(200, self._animateimages, 1)
def _animateimages(self, stage=1):
"""Animates a loading arrow before displaying the images in the tk window"""
if stage == 1 :
self.l6a.config(image = self.dispimg1)
self.l6a.image = self.dispimg1
self.l6b.config(image = self.arrow1)
self.l6b.image = self.arrow1
self.f6.after(200, self._animateimages, 2)
elif stage == 2 :
self.l6b.config(image = self.arrow2)
self.l6b.image = self.arrow2
self.f6.after(200, self._animateimages, 3)
elif stage == 3 :
self.l6b.config(image = self.arrow3)
self.l6b.image = self.arrow3
self.f6.after(200, self._animateimages, 4)
elif | |
<filename>spex60/core.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import re
import numpy as np
import astropy.units as u
from astropy.io import ascii, fits
from .config import config
__all__ = ['SpeX', 'Prism60']
class Calibration:
    """Load the uSpeX calibration products shipped with Spextool.

    Reads the bad-pixel mask, linearity coefficients, normalized bias
    frame, prism wavelength calibration, and line list from the Spextool
    instrument data directory configured in ``config``.
    """

    def __init__(self):
        calpath = os.path.join(
            config['spextool_path'], 'instruments', 'uspex', 'data')

        def _data(name):
            # Read one FITS data array from the calibration directory.
            return fits.getdata(os.path.join(calpath, name))

        self.bpm = _data('uSpeX_bdpxmk.fits')
        self.lincoeff = _data('uSpeX_lincorr.fits')

        bias_file = os.path.join(calpath, 'uSpeX_bias.fits')
        self.bias = fits.getdata(bias_file) / SpeX.read_header(bias_file)['DIVISOR']

        self.linecal = _data('Prism_LineCal.fits')
        # Prism_LineCal header is bad and will throw a warning
        self.linecal_header = SpeX.read_header(
            os.path.join(calpath, 'Prism_LineCal.fits'))

        self.lines = ascii.read(os.path.join(calpath, 'lines.dat'),
                                names=('wave', 'type'))
class SpeX:
"""Basic SpeX file IO."""
def __init__(self, *args, **kwargs):
self.cal = Calibration()
self.mask = ~self.cal.bpm.astype(bool)
self.mask[:, :config['x range'][0]] = 1
self.mask[:, config['x range'][1]:] = 1
self.mask[:config['bottom']] = 1
self.mask[config['top']:] = 1
self.flat = None
self.flat_var = None
self.flat_h = None
    def read(self, files, pair=False, ampcor=True, lincor=True, flatcor=True,
             abba_test=True):
        """Read uSpeX files.

        Parameters
        ----------
        files : string or list
            A file name or list thereof.
        pair : bool, optional
            Assume the observations are taken in AB(BA) mode and return
            A-B for each pair.
        ampcor : bool, optional
            Set to `True` to apply the amplifcation noise correction.
        lincor : bool, optional
            Set to `True` to apply the linearity correction.
        flatcor : bool, optional
            Set to `True` to apply flat field correction.
        abba_test : bool, optional
            Set to `True` to test for AB(BA) ordering when `pair` is
            `True`.  If `abba_test` is `False`, then the file order is
            not checked.

        Returns
        -------
        stack : MaskedArray
            The resultant image(s). [counts / s]
        var : MaskedArray
            The variance. [total DN]
        headers : list or astropy FITS header
            If `pair` is `True`, the headers will be a list of lists,
            where each element is a list containing the A and B headers.

        """
        from numpy.ma import MaskedArray
        # List input: recurse per file, then optionally difference A-B pairs.
        if isinstance(files, (list, tuple)):
            print('Loading {} files.'.format(len(files)))
            stack = MaskedArray(np.empty((len(files), 2048, 2048)))
            var = MaskedArray(np.empty((len(files), 2048, 2048)))
            headers = []
            for i in range(len(files)):
                kwargs = dict(pair=False, ampcor=ampcor, lincor=lincor,
                              flatcor=flatcor)
                stack[i], var[i], h = self.read(files[i], **kwargs)
                headers.append(h)
            if pair:
                print('\nAB(BA) pairing and subtracting.')
                # Indices of A-beam and B-beam exposures, from the headers.
                a = np.flatnonzero(
                    np.array([h['BEAM'] == 'A' for h in headers]))
                b = np.flatnonzero(
                    np.array([h['BEAM'] == 'B' for h in headers]))
                if abba_test:
                    # require equal numbers of a and b
                    if len(a) != len(b):
                        raise ValueError('Number of A beams not equal to'
                                         ' number of B beams')
                    # each A-B pair should be number neighbors
                    for i, j in zip(a, b):
                        if abs(i - j) != 1:
                            raise ValueError('Found invalid A-B pair: '
                                             + headers[i]['IRAFNAME'] + ' '
                                             + headers[j]['IRAFNAME'])
                stack_AB = []
                var_AB = []
                headers_AB = []
                for i, j in zip(a, b):
                    # Difference the images; variances add in quadrature-free DN.
                    stack_AB.append(stack[i] - stack[j])
                    var_AB.append(var[i] + var[j])
                    headers_AB.append([headers[i], headers[j]])
                stack_AB = np.ma.MaskedArray(stack_AB)
                var_AB = np.ma.MaskedArray(var_AB)
                return stack_AB, var_AB, headers_AB
            return stack, var, headers
        # Single-file input from here on.
        print('Reading {}'.format(files))
        data = fits.open(files, lazy_load_hdus=False)
        data[0].verify('silentfix')
        # check if already processed
        if 'SPEX60' in data[0].header:
            # NOTE(review): data['mask'] is an HDU; calling .astype on the HDU
            # rather than .data looks suspicious -- confirm this branch runs.
            mask = data['mask'].astype(bool)
            im = np.ma.MaskedArray(data['sci'].data, mask=mask)
            var = data['var'].data
            if 'b header' in data:
                h = [data['sci'].header, data['b header'].header]
            else:
                h = data['sci'].header
            data.close()
            return im, var, h
        h = data[0].header.copy()
        # Read-noise contribution to the variance.
        read_var = (2 * config['readnoise']**2
                    / h['NDR']
                    / h['CO_ADDS']
                    / h['ITIME']**2
                    / config['gain']**2)
        # TABLE_SE is read time, not sure what crtn is.
        crtn = (1 - h['TABLE_SE'] * (h['NDR'] - 1)
                / 3.0 / h['ITIME'] / h['NDR'])
        t_exp = h['ITIME'] * h['CO_ADDS']  # total exposure time (unused below)
        # Pedestal and signal frames, rescaled by the header DIVISOR.
        im_p = data[1].data / h['DIVISOR']
        im_s = data[2].data / h['DIVISOR']
        data.close()
        # Pixels below (bias - lincor max) are treated as saturated.
        mask_p = im_p < (self.cal.bias - config['lincor max'])
        mask_s = im_s < (self.cal.bias - config['lincor max'])
        mask = mask_p + mask_s
        h.add_history('Masked saturated pixels.')
        im = MaskedArray(im_p - im_s, mask)
        if ampcor:
            im = self._ampcor(im)
            h.add_history('Corrected for amplifier noise.')
        if lincor:
            cor = self._lincor(im)
            # Leave masked and 4-pixel border regions uncorrected.
            cor[mask] = 1.0
            cor[:4] = 1.0
            cor[:, :4] = 1.0
            cor[2044:] = 1.0
            cor[:, 2044:] = 1.0
            im /= cor
            h.add_history('Applied linearity correction.')
        if flatcor:
            if self.flat is None:
                raise ValueError(
                    "Flat correction requested but flat not loaded.")
            im /= self.flat
            h.add_history('Flat corrected.')
        # total DN
        var = (np.abs(im * h['DIVISOR'])
               * crtn
               / h['CO_ADDS']**2
               / h['ITIME']**2
               / config['gain']
               + read_var)  # / h['DIVISOR']**2 / h['ITIME']**2
        # counts / s
        im = im / h['ITIME']
        # Merge in the static detector mask built in __init__.
        im.mask += self.mask
        return im, var, h
def find_numbered(self, files, numbered=None, between=None, **kwargs):
"""Read from list based on observation number.
Requires a file name format that ends with "N.a.fits" or
"N.b.fits" where N is an integer.
Parameters
----------
files : list
List of file names to consider.
numbered : list
List of observation numbers to read.
between : list
Read files with observation number starting with
``min(between)`` and ending with ``max(between)``
(inclusive). May be a list of lists for multiple sets.
**kwargs
Keyword arguments to pass to ``SpeX.read``.
"""
def number(files):
pat = re.compile('([0-9]+)\.[ab]\.fits$')
for f in files:
m = re.findall(pat, f)
if m:
yield int(m[0]), f
def find_between(files, between):
read_list = []
between = min(between), max(between)
for n, f in number(files):
if n >= between[0] and n <= between[1]:
read_list.append(f)
return read_list
read_list = []
if numbered is not None:
for n, f in number(files):
if n in numbered:
read_list.append(f)
elif between is not None:
if isinstance(between[0], (list, tuple)):
for b in between:
read_list += find_between(files, b)
else:
read_list += find_between(files, between)
else:
raise ValueError(
'One of ``numbered`` or ``between`` must be provided.')
return read_list
def read_numbered(self, files, numbered=None, between=None, **kwargs):
"""Read from list based on observation number.
Requires a file name format that ends with "N.a.fits" or
"N.b.fits" where N is an integer.
Parameters
----------
files : list
List of file names to consider.
numbered : list
List of observation numbers to read.
between : list
Read files with observation number starting with
``min(between)`` and ending with ``max(between)``
(inclusive). May be a list of lists for multiple sets.
**kwargs
Keyword arguments to pass to ``SpeX.read``.
"""
read_list = self.find_numbered(
files, numbered=numbered, between=between)
return self.read(read_list, **kwargs)
@classmethod
def read_header(cls, filename, ext=0):
"""Read a header from a SpeX FITS file.
SpeX headers tend to be missing quotes around strings. The
header will be silently fixed.
"""
inf = fits.open(filename)
inf[0].verify('silentfix')
h = inf[0].header.copy()
inf.close()
return h
    def _combine(self, method, stack, variances, headers, scale=False,
                 **kwargs):
        """Internal combine function for spectra or images.

        Parameters
        ----------
        method : string
            median or mean
        stack : MaskedArray
        variances : MaskedArray
        headers : list
            From ``SpeX.read()``.
        scale : bool, optional
            Currently unused -- TODO confirm whether scaling was intended.
        **kwargs
            ``sigma_clip`` keyword arguments.

        Returns
        -------
        data : MaskedArray
        var : MaskedArray
            Combined data and variance.
        header : list or astropy.fits.Header
            Annotated header(s), based on the first item in the stack.

        Notes
        -----
        ITIME and HISTORY are updated in the header.  The first header in
        ``headers`` is modified in place.

        """
        from astropy.stats import sigma_clip
        # Lists are clipped as a whole; stacked arrays clip along axis 0.
        if isinstance(stack, (list, tuple)):
            clip = sigma_clip(stack, **kwargs)
        else:
            clip = sigma_clip(stack, axis=0, **kwargs)
        if method == 'median':
            data = np.ma.median(clip, 0)
            var = np.ma.median(variances, 0)
        elif method == 'mean':
            data = np.ma.mean(clip, 0)
            var = np.ma.mean(variances, 0)
        else:
            raise ValueError('Unknown combine value: ' + method)
        header = headers[0]
        if isinstance(header, list):
            # Paired (A, B) headers: annotate both members.
            for i in range(2):
                header[i]['ITIME'] = sum([h[i]['ITIME'] for h in headers])
                files = [h[i]['IRAFNAME'] for h in headers]
                header[i].add_history(
                    method.capitalize() +
                    ' combined files: ' +
                    ','.join(files))
        else:
            header['ITIME'] = sum([h['ITIME'] for h in headers])
            files = [h['IRAFNAME'] for h in headers]
            header.add_history(
                method.capitalize() +
                ' combined files: ' +
                ','.join(files))
        return data, var, header
def median_combine(self, stack, variances, headers, scale=False,
**kwargs):
"""Sigma-clipped median combine a set of SpeX images or spectra.
Parameters
----------
stack : MaskedArray
variances : MaskedArray
headers : list
From ``SpeX.read()``.
**kwargs
``sigma_clip`` keyword arguments.
Returns
-------
data : MaskedArray
var : MaskedArray
Combined data and variance.
header : list or astropy.fits.Header
Annotated header(s), based on the first item in the stack.
Notes
-----
ITIME and HISTORY are updated in the header.
"""
return self._combine('median', stack, variances, headers,
scale=scale, **kwargs)
def mean_combine(self, stack, variances, headers, scale=False,
**kwargs):
"""Sigma-clipped mean combine a set of SpeX images or spectra.
Parameters
----------
stack : MaskedArray
variances : MaskedArray
headers : list
From ``SpeX.read()``.
**kwargs
``sigma_clip`` keyword arguments.
| |
<gh_stars>0
from . import Math
import bpy
class BezierPoint:
    """One Bezier control point: left handle, anchor (co), right handle."""

    @staticmethod
    def FromBlenderBezierPoint(blenderBezierPoint):
        """Build a BezierPoint from a bpy bezier spline point."""
        bp = blenderBezierPoint
        return BezierPoint(bp.handle_left, bp.co, bp.handle_right)

    def __init__(self, handle_left, co, handle_right):
        self.handle_left = handle_left
        self.co = co
        self.handle_right = handle_right

    def Copy(self):
        """Return a copy whose three vectors are duplicated."""
        return BezierPoint(self.handle_left.copy(), self.co.copy(), self.handle_right.copy())

    def Reversed(self):
        """Return a new point with the handles exchanged (vectors shared)."""
        return BezierPoint(self.handle_right, self.co, self.handle_left)

    def Reverse(self):
        """Exchange the two handles in place."""
        self.handle_left, self.handle_right = self.handle_right, self.handle_left
class BezierSegment:
    """One cubic Bezier span between two BezierPoints, with cached
    power-basis coefficients for evaluation."""

    @staticmethod
    def FromBlenderBezierPoints(blenderBezierPoint1, blenderBezierPoint2):
        """Wrap two bpy bezier points into a BezierSegment."""
        return BezierSegment(
            BezierPoint.FromBlenderBezierPoint(blenderBezierPoint1),
            BezierPoint.FromBlenderBezierPoint(blenderBezierPoint2))

    def __init__(self, bezierPoint1, bezierPoint2):
        # bpy.types.BezierSplinePoint
        # Copies guard against aliasing when neighboring segments share points.
        self.bezierPoint1 = bezierPoint1.Copy()
        self.bezierPoint2 = bezierPoint2.Copy()

        # Control polygon P0..P3.
        self.ctrlPnt0 = self.bezierPoint1.co
        self.ctrlPnt1 = self.bezierPoint1.handle_right
        self.ctrlPnt2 = self.bezierPoint2.handle_left
        self.ctrlPnt3 = self.bezierPoint2.co

        # Power-basis form: B(t) = c0 + c1*t + c2*t^2 + c3*t^3.
        p0, p1, p2, p3 = self.ctrlPnt0, self.ctrlPnt1, self.ctrlPnt2, self.ctrlPnt3
        self.coeff0 = p0
        self.coeff1 = p0 * (-3.0) + p1 * (+3.0)
        self.coeff2 = p0 * (+3.0) + p1 * (-6.0) + p2 * (+3.0)
        self.coeff3 = p0 * (-1.0) + p1 * (+3.0) + p2 * (-3.0) + p3

    def Copy(self):
        """Return an independent copy of this segment."""
        return BezierSegment(self.bezierPoint1.Copy(), self.bezierPoint2.Copy())

    def Reversed(self):
        """Return a new segment running in the opposite direction."""
        return BezierSegment(self.bezierPoint2.Reversed(), self.bezierPoint1.Reversed())

    def Reverse(self):
        """Flip this segment's direction in place."""
        # make a copy, otherwise neighboring segment may be affected
        self.bezierPoint1, self.bezierPoint2 = \
            self.bezierPoint2.Copy(), self.bezierPoint1.Copy()
        self.bezierPoint1.Reverse()
        self.bezierPoint2.Reverse()

    def CalcPoint(self, parameter = 0.5):
        """Evaluate the curve position at t = parameter."""
        t = parameter
        t2 = t * t
        t3 = t * t2
        return self.coeff0 + self.coeff1 * t + self.coeff2 * t2 + self.coeff3 * t3

    def CalcDerivative(self, parameter = 0.5):
        """Evaluate the curve tangent (dB/dt) at t = parameter."""
        t = parameter
        return self.coeff1 + self.coeff2 * t * 2.0 + self.coeff3 * (t * t) * 3.0

    def CalcLength(self, nrSamples = 2):
        """Approximate arc length by summing nrSamples chord lengths."""
        steps = float(nrSamples)
        total = 0.0
        for i in range(nrSamples):
            a = self.CalcPoint(parameter = float(i) / steps)
            b = self.CalcPoint(parameter = float(i + 1) / steps)
            total += (a - b).magnitude
        return total

    #http://en.wikipedia.org/wiki/De_Casteljau's_algorithm
    def CalcSplitPoint(self, parameter = 0.5):
        """Subdivide at t = parameter.

        Returns [left end point, new middle point, right end point], with
        handles shortened per de Casteljau's construction.
        """
        t = parameter
        s = 1.0 - t
        a0, a1, a2, a3 = self.ctrlPnt0, self.ctrlPnt1, self.ctrlPnt2, self.ctrlPnt3
        b0 = a0 * s + a1 * t
        b1 = a1 * s + a2 * t
        b2 = a2 * s + a3 * t
        c0 = b0 * s + b1 * t
        c1 = b1 * s + b2 * t
        d0 = c0 * s + c1 * t
        return [BezierPoint(self.bezierPoint1.handle_left, a0, b0),
                BezierPoint(c0, d0, c1),
                BezierPoint(b2, a3, self.bezierPoint2.handle_right)]
class BezierSpline:
@staticmethod
def FromSegments(listSegments):
rvSpline = BezierSpline(None)
rvSpline.segments = listSegments
return rvSpline
def __init__(self, blenderBezierSpline):
if not blenderBezierSpline is None:
if blenderBezierSpline.type != 'BEZIER':
print("## ERROR:", "blenderBezierSpline.type != 'BEZIER'")
raise Exception("blenderBezierSpline.type != 'BEZIER'")
if len(blenderBezierSpline.bezier_points) < 1:
if not blenderBezierSpline.use_cyclic_u:
print("## ERROR:", "len(blenderBezierSpline.bezier_points) < 1")
raise Exception("len(blenderBezierSpline.bezier_points) < 1")
self.bezierSpline = blenderBezierSpline
self.resolution = 12
self.isCyclic = False
if not self.bezierSpline is None:
self.resolution = self.bezierSpline.resolution_u
self.isCyclic = self.bezierSpline.use_cyclic_u
self.segments = self.SetupSegments()
def __getattr__(self, attrName):
if attrName == "nrSegments":
return len(self.segments)
if attrName == "bezierPoints":
rvList = []
for seg in self.segments: rvList.append(seg.bezierPoint1)
if not self.isCyclic: rvList.append(self.segments[-1].bezierPoint2)
return rvList
if attrName == "resolutionPerSegment":
try: rvResPS = int(self.resolution / self.nrSegments)
except: rvResPS = 2
if rvResPS < 2: rvResPS = 2
return rvResPS
if attrName == "length":
return self.CalcLength()
return None
def SetupSegments(self):
rvSegments = []
if self.bezierSpline is None: return rvSegments
nrBezierPoints = len(self.bezierSpline.bezier_points)
for iBezierPoint in range(nrBezierPoints - 1):
bezierPoint1 = self.bezierSpline.bezier_points[iBezierPoint]
bezierPoint2 = self.bezierSpline.bezier_points[iBezierPoint + 1]
rvSegments.append(BezierSegment.FromBlenderBezierPoints(bezierPoint1, bezierPoint2))
if self.isCyclic:
bezierPoint1 = self.bezierSpline.bezier_points[-1]
bezierPoint2 = self.bezierSpline.bezier_points[0]
rvSegments.append(BezierSegment.FromBlenderBezierPoints(bezierPoint1, bezierPoint2))
return rvSegments
    def UpdateSegments(self, newSegments):
        """Replace self.segments and push the new points into the bpy spline.

        NOTE(review): only handles growth -- when newSegments has the same
        number of segments or fewer, nothing is updated and a warning is
        printed.  Confirm callers never need to shrink a spline.
        """
        prevNrSegments = len(self.segments)
        diffNrSegments = len(newSegments) - prevNrSegments
        if diffNrSegments > 0:
            # Collect the start point of each segment; an open spline also
            # needs the final segment's end point.
            newBezierPoints = []
            for segment in newSegments: newBezierPoints.append(segment.bezierPoint1)
            if not self.isCyclic: newBezierPoints.append(newSegments[-1].bezierPoint2)
            # Grow the underlying Blender point collection, then copy values in.
            self.bezierSpline.bezier_points.add(diffNrSegments)
            for i, bezPoint in enumerate(newBezierPoints):
                blBezPoint = self.bezierSpline.bezier_points[i]
                blBezPoint.tilt = 0
                blBezPoint.radius = 1.0
                blBezPoint.handle_left_type = 'FREE'
                blBezPoint.handle_left = bezPoint.handle_left
                blBezPoint.co = bezPoint.co
                blBezPoint.handle_right_type = 'FREE'
                blBezPoint.handle_right = bezPoint.handle_right
            self.segments = newSegments
        else:
            print("### WARNING: UpdateSegments(): not diffNrSegments > 0")
def Reversed(self):
revSegments = []
for iSeg in reversed(range(self.nrSegments)): revSegments.append(self.segments[iSeg].Reversed())
rvSpline = BezierSpline.FromSegments(revSegments)
rvSpline.resolution = self.resolution
rvSpline.isCyclic = self.isCyclic
return rvSpline
def Reverse(self):
revSegments = []
for iSeg in reversed(range(self.nrSegments)):
self.segments[iSeg].Reverse()
revSegments.append(self.segments[iSeg])
self.segments = revSegments
def CalcDivideResolution(self, segment, parameter):
if not segment in self.segments:
print("### WARNING: InsertPoint(): not segment in self.segments")
return None
iSeg = self.segments.index(segment)
dPar = 1.0 / self.nrSegments
splinePar = dPar * (parameter + float(iSeg))
res1 = int(splinePar * self.resolution)
if res1 < 2:
print("### WARNING: CalcDivideResolution(): res1 < 2 -- res1: %d" % res1, "-- setting it to 2")
res1 = 2
res2 = int((1.0 - splinePar) * self.resolution)
if res2 < 2:
print("### WARNING: CalcDivideResolution(): res2 < 2 -- res2: %d" % res2, "-- setting it to 2")
res2 = 2
return [res1, res2]
# return [self.resolution, self.resolution]
def CalcPoint(self, parameter):
nrSegs = self.nrSegments
segmentIndex = int(nrSegs * parameter)
if segmentIndex < 0: segmentIndex = 0
if segmentIndex > (nrSegs - 1): segmentIndex = nrSegs - 1
segmentParameter = nrSegs * parameter - segmentIndex
if segmentParameter < 0.0: segmentParameter = 0.0
if segmentParameter > 1.0: segmentParameter = 1.0
return self.segments[segmentIndex].CalcPoint(parameter = segmentParameter)
def CalcDerivative(self, parameter):
nrSegs = self.nrSegments
segmentIndex = int(nrSegs * parameter)
if segmentIndex < 0: segmentIndex = 0
if segmentIndex > (nrSegs - 1): segmentIndex = nrSegs - 1
segmentParameter = nrSegs * parameter - segmentIndex
if segmentParameter < 0.0: segmentParameter = 0.0
if segmentParameter > 1.0: segmentParameter = 1.0
return self.segments[segmentIndex].CalcDerivative(parameter = segmentParameter)
    def InsertPoint(self, segment, parameter):
        """Subdivide ``segment`` at ``parameter`` in place, inserting the new
        on-curve point into this spline's segment list (de Casteljau split)."""
        if not segment in self.segments:
            print("### WARNING: InsertPoint(): not segment in self.segments")
            return
        iSeg = self.segments.index(segment)
        nrSegments = len(self.segments)
        splitPoints = segment.CalcSplitPoint(parameter = parameter)
        bezPoint1 = splitPoints[0]      # original start, right handle shortened
        bezPointNew = splitPoints[1]    # newly created on-curve point
        bezPoint2 = splitPoints[2]      # original end, left handle shortened
        # Shrink the existing segment to the left half of the split.
        segment.bezierPoint1.handle_right = bezPoint1.handle_right
        segment.bezierPoint2 = bezPointNew
        # Fix up the left handle of the following segment (wraps around if cyclic).
        if iSeg < (nrSegments - 1):
            nextSeg = self.segments[iSeg + 1]
            nextSeg.bezierPoint1.handle_left = bezPoint2.handle_left
        else:
            if self.isCyclic:
                nextSeg = self.segments[0]
                nextSeg.bezierPoint1.handle_left = bezPoint2.handle_left
        # The right half becomes a brand-new segment after the original.
        newSeg = BezierSegment(bezPointNew, bezPoint2)
        self.segments.insert(iSeg + 1, newSeg)
def Split(self, segment, parameter):
if not segment in self.segments:
print("### WARNING: InsertPoint(): not segment in self.segments")
return None
iSeg = self.segments.index(segment)
nrSegments = len(self.segments)
splitPoints = segment.CalcSplitPoint(parameter = parameter)
bezPoint1 = splitPoints[0]
bezPointNew = splitPoints[1]
bezPoint2 = splitPoints[2]
newSpline1Segments = []
for iSeg1 in range(iSeg): newSpline1Segments.append(self.segments[iSeg1])
if len(newSpline1Segments) > 0: newSpline1Segments[-1].bezierPoint2.handle_right = bezPoint1.handle_right
newSpline1Segments.append(BezierSegment(bezPoint1, bezPointNew))
newSpline2Segments = []
newSpline2Segments.append(BezierSegment(bezPointNew, bezPoint2))
for iSeg2 in range(iSeg + 1, nrSegments): newSpline2Segments.append(self.segments[iSeg2])
if len(newSpline2Segments) > 1: newSpline2Segments[1].bezierPoint1.handle_left = newSpline2Segments[0].bezierPoint2.handle_left
newSpline1 = BezierSpline.FromSegments(newSpline1Segments)
newSpline2 = BezierSpline.FromSegments(newSpline2Segments)
return [newSpline1, newSpline2]
def Join(self, spline2, mode = 'At midpoint'):
if mode == 'At midpoint':
self.JoinAtMidpoint(spline2)
return
if mode == 'Insert segment':
self.JoinInsertSegment(spline2)
return
print("### ERROR: Join(): unknown mode:", mode)
    def JoinAtMidpoint(self, spline2):
        """Join by merging the touching end points into one shared midpoint.

        NOTE(review): mutates spline2's first segment as well as self; the
        two splines share the new BezierPoint afterwards.
        """
        bezPoint1 = self.segments[-1].bezierPoint2
        bezPoint2 = spline2.segments[0].bezierPoint1
        # Shared point: averaged anchor, keeping the two outer handles.
        mpHandleLeft = bezPoint1.handle_left.copy()
        mpCo = (bezPoint1.co + bezPoint2.co) * 0.5
        mpHandleRight = bezPoint2.handle_right.copy()
        mpBezPoint = BezierPoint(mpHandleLeft, mpCo, mpHandleRight)
        self.segments[-1].bezierPoint2 = mpBezPoint
        spline2.segments[0].bezierPoint1 = mpBezPoint
        for seg2 in spline2.segments: self.segments.append(seg2)
        self.resolution += spline2.resolution
        self.isCyclic = False  # is this ok?
def JoinInsertSegment(self, spline2):
self.segments.append(BezierSegment(self.segments[-1].bezierPoint2, spline2.segments[0].bezierPoint1))
for seg2 in spline2.segments: self.segments.append(seg2)
self.resolution += spline2.resolution # extra segment will usually be short -- impact on resolution negligable
self.isCyclic = False # is this ok?
def RefreshInScene(self):
bezierPoints = self.bezierPoints
currNrBezierPoints = len(self.bezierSpline.bezier_points)
diffNrBezierPoints = len(bezierPoints) - currNrBezierPoints
if diffNrBezierPoints > 0: self.bezierSpline.bezier_points.add(diffNrBezierPoints)
for i, bezPoint in enumerate(bezierPoints):
blBezPoint = self.bezierSpline.bezier_points[i]
blBezPoint.tilt = 0
blBezPoint.radius = 1.0
blBezPoint.handle_left_type = 'FREE'
blBezPoint.handle_left = bezPoint.handle_left
blBezPoint.co = bezPoint.co
| |
<filename>lexpredict_openedgar/openedgar/tasks.py<gh_stars>0
"""
MIT License
Copyright (c) 2018 ContraxSuite, LLC
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Libraries
import datetime
import hashlib
import logging
import tempfile
import os
import pathlib
from typing import Iterable, Union
import pandas
from lxml.html import parse
import urllib
import re
import numpy
# Packages
import dateutil.parser
import django.db.utils
from celery import shared_task
# Project
from config.settings.base import S3_DOCUMENT_PATH
from openedgar.clients.s3 import S3Client
from openedgar.clients.local import LocalClient
import openedgar.clients.edgar
import openedgar.parsers.edgar
from openedgar.models import Filing, CompanyInfo, Company, FilingDocument, SearchQuery, SearchQueryTerm, \
SearchQueryResult, FilingIndex, TableBookmark
# LexNLP imports
import lexnlp.nlp.en.tokens
# Logging setup: mirror INFO-level records to stderr via a StreamHandler.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logger.addHandler(console)
# Storage backend selection.
# NOTE(review): CLIENT_TYPE is hard-coded to "LOCAL_CLIENT" but compared
# against "S3" below, so the S3 branch is currently dead code here.
CLIENT_TYPE = "LOCAL_CLIENT"
# Raises KeyError at import time if DOWNLOAD_PATH is unset -- TODO confirm intended.
LOCAL_DOCUMENT_PATH = os.environ["DOWNLOAD_PATH"]
DOCUMENT_PATH = ""
if CLIENT_TYPE == "S3":
    client = S3Client()
    DOCUMENT_PATH = S3_DOCUMENT_PATH
else:
    client = LocalClient()
    DOCUMENT_PATH = LOCAL_DOCUMENT_PATH
def process_company_filings(client_type: str, cik: str, store_raw: bool = False, store_text: bool = False):
    """
    Download and process every 10-K filing for one company from EDGAR.

    :param client_type: storage backend, "S3" for S3Client, anything else for LocalClient
    :param cik: SEC Central Index Key of the company
    :param store_raw: passed through to process_filing
    :param store_text: passed through to process_filing
    :return: None
    """
    # Log entry
    logger.info("Processing company cik {0}...".format(cik))
    # Get path to filings folder for cik
    cik_path = openedgar.clients.edgar.get_cik_path(cik)
    links = links_10k(cik)
    if client_type == "S3":
        client = S3Client()
    else:
        client = LocalClient()
    # Iterate through links
    bad_record_count = 0
    for row in links:
        # Cleanup path
        # NOTE(review): if row starts with neither "data/" nor "edgar/",
        # filing_path keeps its previous value (or is unbound on the first
        # iteration) -- confirm links_10k always yields "edgar/..." paths.
        if row.lower().startswith("data/"):
            filing_path = "edgar/{0}".format(row)
        elif row.lower().startswith("edgar/"):
            filing_path = row
        # Check if filing record exists
        try:
            filing = Filing.objects.get(s3_path=filing_path)
            logger.info("Filing record already exists: {0}".format(filing))
        except Filing.MultipleObjectsReturned as e:
            # Create new filing record
            logger.error("Multiple Filing records found for s3_path={0}, skipping...".format(filing_path))
            logger.info("Raw exception: {0}".format(e))
            continue
        except Filing.DoesNotExist as f:
            # Create new filing record
            logger.info("No Filing record found for {0}, creating...".format(filing_path))
            logger.info("Raw exception: {0}".format(f))
            # Check if exists; download and upload to S3 if missing
            if not client.path_exists(filing_path):
                # Download
                try:
                    filing_buffer, _ = openedgar.clients.edgar.get_buffer("/Archives/{0}".format(filing_path))
                except RuntimeError as g:
                    logger.error("Unable to access resource {0} from EDGAR: {1}".format(filing_path, g))
                    bad_record_count += 1
                    #create_filing_error(row, filing_path)
                    continue
                # Upload
                client.put_buffer(filing_path, filing_buffer)
                logger.info("Downloaded from EDGAR and uploaded to {}...".format(client_type))
            else:
                # Download
                logger.info("File already stored on {}, retrieving and processing...".format(client_type))
                filing_buffer = client.get_buffer(filing_path)
            # Parse
            filing_result = process_filing(client, filing_path, filing_buffer, store_raw=store_raw, store_text=store_text)
            if filing_result is None:
                logger.error("Unable to process filing.")
                bad_record_count += 1
                #create_filing_error(row, filing_path)
def bulk_create_bookmarks(filename, label):
    """Create or update one TableBookmark per row of a CSV file.

    The CSV must contain ``id`` (Filing primary key) and ``table_index``
    columns.  An existing bookmark with the same label is updated in
    place; ``end_index`` is always reset to None.

    :param filename: path to the CSV file
    :param label: bookmark label applied to every row
    """
    records = pandas.read_csv(filename).to_dict("records")
    for item in records:
        # Removed the no-op "label = label" self-assignment.
        start_index = item["table_index"]
        filing = Filing.objects.get(id=item["id"])
        existing = filing.tablebookmark_set.filter(label=label)
        if existing:
            # Update the first matching bookmark rather than duplicating it.
            tb = existing.first()
            tb.start_index = start_index
            tb.end_index = None
            tb.save()
        else:
            filing.tablebookmark_set.create(
                label=label, start_index=start_index, end_index=None)
def bulk_create_bookmarks2(filename):
    """Create or update TableBookmarks from a wide-format CSV.

    Each row carries a Filing ``id``; every other column is a bookmark
    label whose cell is either a bare start index ("12") or a
    start-end range ("12-34").  Empty cells are skipped.

    :param filename: path to the CSV file
    """
    data_file = pandas.read_csv(filename).fillna("")
    data_file = data_file.to_dict("records")
    for item in data_file:
        filing = Filing.objects.get(id=item["id"])
        for key, value in item.items():
            if not key == "id":
                if value:
                    # "start-end" range vs. bare start index.
                    if "-" in str(value):
                        split_value = str(value).split("-")
                        start_index = int(split_value[0])
                        end_index = int(split_value[1])
                    else:
                        start_index = int(value)
                        end_index = None
                    # Update an existing bookmark with the same label, else create.
                    existing = filing.tablebookmark_set.filter(label=key)
                    if existing:
                        tb=existing.first()
                        tb.start_index = start_index
                        tb.end_index = end_index
                        tb.save()
                        # print("existing: ", key, start_index, end_index)
                    else:
                        # print(key, start_index, end_index)
                        filing.tablebookmark_set.create(label = key, start_index=start_index, end_index=end_index)
def create_bookmarks(filing, file):
    """Create or update TableBookmarks for one filing from a CSV file.

    The CSV must contain ``label`` and ``start_index`` columns; NaN
    cells are converted to None.  ``end_index`` is always reset to None.

    :param filing: Filing model instance to attach bookmarks to
    :param file: path or buffer readable by pandas.read_csv
    """
    data_file = pandas.read_csv(file)
    # Replace pandas NaN with None so values fit nullable model fields.
    data_file = data_file.where(pandas.notnull(data_file), None)
    # Removed the no-op "filing = filing" self-assignment.
    for item in data_file.to_dict("records"):
        label = item["label"]
        start_index = item["start_index"]
        end_index = None
        existing = filing.tablebookmark_set.filter(label=label)
        if existing:
            # Removed a redundant discarded existing.first() call (wasted query).
            tb = existing.first()
            tb.start_index = start_index
            tb.end_index = end_index
            tb.save()
        else:
            filing.tablebookmark_set.create(label=label, start_index=start_index, end_index=end_index)
def write_all_comp_diluted_eps():
    """Append per-company statistics rows to /storage/openedgar_eps.csv.

    Resumable: companies whose cik already appears in the output file
    are skipped.  Each written row is wide format: cik, sic, then one
    column per year 1990-2020.
    """
    comps = Company.objects.filter(cik__in=downloaded_companies())
    fname = "/storage/openedgar_eps.csv"
    # Resume support: skip companies already present in the output file.
    done_comps = pandas.read_csv(fname)["cik"]
    comps = comps.exclude(cik__in=done_comps)
    for n in comps:
        print("###############################")
        try:
            data = n.full_company_data()
        except:
            # NOTE(review): bare except silently skips any failure (including
            # KeyboardInterrupt) -- confirm best-effort skipping is intended.
            continue
        company = None
        if data is not None and not data.empty:
            company = openedgar.parsers.a.CompanyCSV(data)
        if company:
            # Wide layout: cik, sic, then one column per year.
            cols = pandas.Index(range(1990, 2021))
            cols = cols.insert(0, "sic")
            cols = cols.insert(0, "cik")
            master_df = pandas.DataFrame([], columns = cols)
            new_df = (company.print_stats())
            if new_df is not None and not new_df.empty:
                c_name = new_df['company_name'].iat[0]
                sic = new_df['sic'].iat[0]
                cik = new_df['cik'].iat[0]
                # Pivot (date, value) rows into a single row keyed by date.
                just_values_dates = new_df.sort_values("date")[['date', 'value']]
                horizontal = just_values_dates.T
                horizontal.columns = horizontal.loc["date", :]
                new_df = horizontal.drop("date").rename({"value": c_name})
                new_df.insert(0, "sic", sic)
                new_df.insert(0, "cik", cik)
                master_df = pandas.concat([master_df, new_df])
            # Append to the CSV, writing the header only on first creation.
            if os.path.isfile(fname):
                print("path exists")
                master_df.to_csv(fname, mode="a", header=False)
            else:
                print("path does not exist")
                master_df.to_csv(fname, mode="w", header=True)
def links_10k(cik):
    """Return EDGAR accession-style .txt paths for every 10-K filed by *cik*.

    Scrapes the EDGAR company browse page and rewrites each filing-index
    link into the corresponding full-submission .txt path.
    """
    url = "https://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK={0}&type=10-K&dateb=&owner=include&count=100".format(cik)
    print("links url: {0}".format(url))
    doc = parse(urllib.request.urlopen(url)).getroot()
    print("parsed doc root tag: {0}".format(doc.tag))
    # Each "Documents" button links to a filing index page.
    hrefs = [anchor.get("href") for anchor in doc.xpath("//a[@id='documentsbutton']")]
    accession = []
    for href in hrefs:
        # -index.htm(l) page -> full-submission .txt; strip the /Archives/ prefix.
        cleaned = re.sub(r'-index.htm[l]*', '.txt', href)
        cleaned = re.sub(r'/Archives/', "", cleaned)
        accession.append(cleaned)
    return accession
def download_10ks(cik):
    """Create a Filing record for every 10-K accession path of *cik*."""
    print("downloading 10-k accession numbers")
    for accession in links_10k(cik):
        print("Creating: {}".format(accession))
        create_filing(cik, "10-K", accession)
def downloaded_companies():
    """Return the unique CIKs of companies with a processed 10-K filing."""
    processed = Filing.objects.filter(form_type="10-K").filter(is_processed=True).select_related("company")
    # Set comprehension deduplicates before converting back to a list.
    return list({f.company.cik for f in processed})
def download_all():
    """Download 10-Ks for every CIK in the master list that still needs them.

    Reads the CIK universe from a fixed CSV, skips companies that already
    have a fully processed 10-K, and starts a download for the rest
    (including companies with no Company record or no filings yet).

    Fixes over the original: the "staring download" log typo is corrected,
    the duplicated download branch is collapsed into one code path, and the
    bare ``except:`` is narrowed to ``Exception`` so KeyboardInterrupt and
    SystemExit are no longer swallowed.
    """
    ciks = pandas.read_csv("/storage/fidelity_all_stocks_cik_and_ticker.csv")
    already_done = downloaded_companies()
    new_ciks = ciks[~ciks['cik'].isin(already_done)]
    for c in new_ciks['cik']:
        try:
            latest = Company.objects.get(cik=c).filing_set.filter(
                form_type="10-K").order_by("-date_filed").first()
            # Nothing to do only when the newest 10-K is fully processed.
            needs_download = latest is None or not latest.is_processed
        except Exception:
            # Missing Company record (or similar): treat as not downloaded.
            needs_download = True
        if needs_download:
            print("starting download for cik: {}".format(c))
            download_10ks(c)
def create_filing(cik, form_type, filing_path):
    """Download (if needed), store, and process a single EDGAR filing.

    Fix: the original fell through after a failed EDGAR download, so the
    subsequent ``client.put_buffer`` raised NameError on the undefined
    ``filing_buffer``; the error branch now returns early.

    :param cik: company CIK
    :param form_type: e.g. "10-K"
    :param filing_path: EDGAR archive path of the filing's .txt file
    """
    row = {
        "CIK": cik,
        "Form Type": form_type,
        "File Name": filing_path,
        "Company Name": "ERROR",
        "Date Filed": "19000101"
    }
    # Check if exists; download and upload to S3 if missing
    if not client.path_exists(filing_path):
        # Download
        try:
            filing_buffer, _ = openedgar.clients.edgar.get_buffer("/Archives/{0}".format(filing_path))
        except RuntimeError as g:
            logger.error("Unable to access resource {0} from EDGAR: {1}".format(filing_path, g))
            create_filing_error(row, filing_path)
            # BUG FIX: without this return, filing_buffer is undefined below.
            return
        # Upload
        client.put_buffer(filing_path, filing_buffer)
        logger.info("Downloaded from EDGAR and uploaded to {}...".format(CLIENT_TYPE))
    else:
        # Download
        logger.info("File already stored on {}, retrieving and processing...".format(CLIENT_TYPE))
        filing_buffer = client.get_buffer(filing_path)
    filing_result = process_filing(client, filing_path, filing_buffer, store_raw=False, store_text=False)
    if filing_result is None:
        logger.error("Unable to process filing.")
        create_filing_error(row, filing_path)
def uploading_text_in_filing_documents(store_raw: bool = False, store_text: bool = True):
    """Backfill extracted text for documents of already-processed filings.

    Re-parses each processed filing's stored buffer and uploads the text of
    every FilingDocument whose sequence matches a parsed document; documents
    with no matching parse are flagged as errors.

    Fix: the original signature was ``store_raw: False, store_text: True``
    -- those are *annotations*, not defaults, so callers had to pass both
    arguments. They are now real boolean defaults. The per-document match
    variable is also renamed so it no longer shadows the filing-level
    ``filing_data``.

    :param store_raw: unused here; kept for signature compatibility
    :param store_text: when True, upload extracted text for each document
    """
    client = LocalClient()
    processed_filings = Filing.objects.filter(is_processed=True)
    for filing in processed_filings:
        buffer_data = client.get_buffer(filing.s3_path)
        logger.info("parsing id# {0} s3_path: {1}".format(filing.id, filing.s3_path))
        filing_data = openedgar.parsers.edgar.parse_filing(buffer_data, extract=True)
        filing_documents = filing.filingdocument_set.all()
        logger.info("number of FilingDocument objects calculated: {0}".format(len(filing_documents)))
        documents_data = filing_data["documents"]
        logger.info("number of documents coming from data stream: {0}".format(len(documents_data)))
        # Iterate through documents
        for document in filing_documents:
            logger.info("WE'RE IN!!!!!!!!!!!!!!!!!!!!!")
            # Find the parsed document matching this DB record's sequence.
            doc_data = None
            for d in documents_data:
                logger.info("documents_data sequence: {0} type: {1}".format(d["sequence"], type(d["sequence"])))
                logger.info("FilingDocument sequence: {0} type: {1}".format(document.sequence, type(document.sequence)))
                if int(d["sequence"]) == document.sequence:
                    logger.info("YAY")
                    doc_data = d
            if doc_data is not None:
                # Upload text to S3 if requested
                if store_text and doc_data["content_text"] is not None:
                    raw_path = pathlib.Path(DOCUMENT_PATH, "text", doc_data["sha1"]).as_posix()
                    if not client.path_exists(raw_path):
                        client.put_buffer(raw_path, doc_data["content_text"], write_bytes=False)
                        logger.info("Uploaded text contents for filing={0}, sequence={1}, sha1={2}"
                                    .format(filing, doc_data["sequence"], doc_data["sha1"]))
                    else:
                        logger.info("Text contents for filing={0}, sequence={1}, sha1={2} already exists on S3"
                                    .format(filing, doc_data["sequence"], doc_data["sha1"]))
            else:
                # No parsed counterpart: mark the DB record as an error.
                document.is_processed = False
                document.is_error = True
                document.save()
def create_filing_documents(client, documents, filing, store_raw: bool = False, store_text: bool = False):
"""
Create filing document records given a list of documents
and a filing record.
:param documents: list of documents from parse_filing
:param filing: Filing record
:param store_raw: whether to store raw contents
:param store_text: whether to store text contents
:return:
"""
# Iterate through documents
document_records = []
for document in documents:
# Create DB object
filing_doc = FilingDocument()
filing_doc.filing = filing
filing_doc.type = document["type"]
filing_doc.sequence = document["sequence"]
filing_doc.file_name = document["file_name"]
filing_doc.content_type = document["content_type"]
filing_doc.description = document["description"]
filing_doc.sha1 = document["sha1"]
filing_doc.start_pos = document["start_pos"]
filing_doc.end_pos = document["end_pos"]
filing_doc.is_processed = True
filing_doc.is_error = len(document["content"]) > 0
document_records.append(filing_doc)
# Upload raw if requested
if store_raw and len(document["content"]) > 0:
raw_path = pathlib.Path(DOCUMENT_PATH, "raw", document["sha1"]).as_posix()
if not client.path_exists(raw_path):
client.put_buffer(raw_path, document["content"])
logger.info("Uploaded raw file for filing={0}, sequence={1}, | |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test replace by fee code
#
from test_framework.test_framework import MincoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
MAX_REPLACEMENT_LIMIT = 100

def txToHex(tx):
    """Return the hex encoding of a serialized transaction."""
    raw = tx.serialize()
    return bytes_to_hex_str(raw)
def make_utxo(node, amount, confirmed=True, scriptPubKey=CScript([1])):
    """Create a txout with a given amount and scriptPubKey

    Mines coins as needed.

    confirmed - txouts created will be confirmed in the blockchain;
                unconfirmed otherwise.

    Returns the COutPoint of the created txout.
    """
    fee = 1*COIN
    # Mine until the wallet balance covers amount + fee.
    while node.getbalance() < satoshi_round((amount + fee)/COIN):
        node.generate(100)
    #print (node.getbalance(), amount, fee)
    new_addr = node.getnewaddress()
    #print new_addr
    txid = node.sendtoaddress(new_addr, satoshi_round((amount+fee)/COIN))
    tx1 = node.getrawtransaction(txid, 1)
    txid = int(txid, 16)
    i = None
    # Locate the output index that paid our fresh address.
    for i, txout in enumerate(tx1['vout']):
        #print i, txout['scriptPubKey']['addresses']
        if txout['scriptPubKey']['addresses'] == [new_addr]:
            #print i
            break
    assert i is not None
    # Respend that output into a tx with the exact requested amount/script.
    tx2 = CTransaction()
    tx2.vin = [CTxIn(COutPoint(txid, i))]
    tx2.vout = [CTxOut(amount, scriptPubKey)]
    tx2.rehash()
    signed_tx = node.signrawtransaction(txToHex(tx2))
    txid = node.sendrawtransaction(signed_tx['hex'], True)
    # If requested, ensure txouts are confirmed.
    if confirmed:
        mempool_size = len(node.getrawmempool())
        while mempool_size > 0:
            node.generate(1)
            new_size = len(node.getrawmempool())
            # Error out if we have something stuck in the mempool, as this
            # would likely be a bug.
            assert(new_size < mempool_size)
            mempool_size = new_size
    return COutPoint(int(txid, 16), 0)
class ReplaceByFeeTest(MincoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
self.setup_clean_chain = False
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000", "-debug",
"-whitelist=127.0.0.1",
"-limitancestorcount=50",
"-limitancestorsize=101",
"-limitdescendantcount=200",
"-limitdescendantsize=101"
]))
self.is_network_split = False
def run_test(self):
make_utxo(self.nodes[0], 1*COIN)
print("Running test simple doublespend...")
self.test_simple_doublespend()
print("Running test doublespend chain...")
self.test_doublespend_chain()
print("Running test doublespend tree...")
self.test_doublespend_tree()
print("Running test replacement feeperkb...")
self.test_replacement_feeperkb()
print("Running test spends of conflicting outputs...")
self.test_spends_of_conflicting_outputs()
print("Running test new unconfirmed inputs...")
self.test_new_unconfirmed_inputs()
print("Running test too many replacements...")
self.test_too_many_replacements()
print("Running test opt-in...")
self.test_opt_in()
print("Running test prioritised transactions...")
self.test_prioritised_transactions()
print("Passed\n")
    def test_simple_doublespend(self):
        """Simple doublespend: a same-fee replacement fails, a higher-fee one succeeds."""
        tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
        # First spend of the outpoint; this is the tx we will try to replace.
        tx1a = CTransaction()
        tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
        tx1a_hex = txToHex(tx1a)
        tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
        # Should fail because we haven't changed the fee
        tx1b = CTransaction()
        tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1b.vout = [CTxOut(1*COIN, CScript([b'b']))]
        tx1b_hex = txToHex(tx1b)
        try:
            tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26) # insufficient fee
        else:
            assert(False)
        # Extra 0.1 BTC fee
        tx1b = CTransaction()
        tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
        tx1b_hex = txToHex(tx1b)
        tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
        # The replacement must evict tx1a and leave tx1b in the mempool.
        mempool = self.nodes[0].getrawmempool()
        assert (tx1a_txid not in mempool)
        assert (tx1b_txid in mempool)
        assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid))
    def test_doublespend_chain(self):
        """Doublespend of a long chain"""
        initial_nValue = 50*COIN
        tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
        # Build a chain of unconfirmed txs, each shaving off a 1 BTC fee.
        prevout = tx0_outpoint
        remaining_value = initial_nValue
        chain_txids = []
        while remaining_value > 10*COIN:
            remaining_value -= 1*COIN
            tx = CTransaction()
            tx.vin = [CTxIn(prevout, nSequence=0)]
            tx.vout = [CTxOut(remaining_value, CScript([1]))]
            tx_hex = txToHex(tx)
            txid = self.nodes[0].sendrawtransaction(tx_hex, True)
            chain_txids.append(txid)
            prevout = COutPoint(int(txid, 16), 0)
        # Whether the double-spend is allowed is evaluated by including all
        # child fees - 40 BTC - so this attempt is rejected.
        dbl_tx = CTransaction()
        dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        dbl_tx.vout = [CTxOut(initial_nValue - 30*COIN, CScript([1]))]
        dbl_tx_hex = txToHex(dbl_tx)
        try:
            self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26) # insufficient fee
        else:
            assert(False) # transaction mistakenly accepted!
        # Accepted with sufficient fee
        dbl_tx = CTransaction()
        dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        dbl_tx.vout = [CTxOut(1*COIN, CScript([1]))]
        dbl_tx_hex = txToHex(dbl_tx)
        self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
        # The entire chain must have been evicted by the replacement.
        mempool = self.nodes[0].getrawmempool()
        for doublespent_txid in chain_txids:
            assert(doublespent_txid not in mempool)
    def test_doublespend_tree(self):
        """Doublespend of a big tree of transactions"""
        initial_nValue = 50*COIN
        tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
        def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _total_txs=None):
            # Recursively fan out `tree_width` outputs per tx until `max_txs`
            # transactions have been submitted; yields each submitted tx.
            if _total_txs is None:
                _total_txs = [0]  # mutable counter shared across the recursion
            if _total_txs[0] >= max_txs:
                return
            txout_value = (initial_value - fee) // tree_width
            if txout_value < fee:
                return
            vout = [CTxOut(txout_value, CScript([i+1]))
                    for i in range(tree_width)]
            tx = CTransaction()
            tx.vin = [CTxIn(prevout, nSequence=0)]
            tx.vout = vout
            tx_hex = txToHex(tx)
            assert(len(tx.serialize()) < 100000)
            txid = self.nodes[0].sendrawtransaction(tx_hex, True)
            yield tx
            _total_txs[0] += 1
            txid = int(txid, 16)
            for i, txout in enumerate(tx.vout):
                for x in branch(COutPoint(txid, i), txout_value,
                                max_txs,
                                tree_width=tree_width, fee=fee,
                                _total_txs=_total_txs):
                    yield x
        fee = int(0.0001*COIN)
        n = MAX_REPLACEMENT_LIMIT
        tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
        assert_equal(len(tree_txs), n)
        # Attempt double-spend, will fail because too little fee paid
        dbl_tx = CTransaction()
        dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        dbl_tx.vout = [CTxOut(initial_nValue - fee*n, CScript([1]))]
        dbl_tx_hex = txToHex(dbl_tx)
        try:
            self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26) # insufficient fee
        else:
            assert(False)
        # 1 BTC fee is enough
        dbl_tx = CTransaction()
        dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        dbl_tx.vout = [CTxOut(initial_nValue - fee*n - 1*COIN, CScript([1]))]
        dbl_tx_hex = txToHex(dbl_tx)
        self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
        # Every tx in the tree must have been evicted by the replacement.
        mempool = self.nodes[0].getrawmempool()
        for tx in tree_txs:
            tx.rehash()
            assert (tx.hash not in mempool)
        # Try again, but with more total transactions than the "max txs
        # double-spent at once" anti-DoS limit.
        for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2):
            fee = int(0.0001*COIN)
            tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
            tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
            assert_equal(len(tree_txs), n)
            dbl_tx = CTransaction()
            dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
            dbl_tx.vout = [CTxOut(initial_nValue - 2*fee*n, CScript([1]))]
            dbl_tx_hex = txToHex(dbl_tx)
            try:
                self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
            except JSONRPCException as exp:
                assert_equal(exp.error['code'], -26)
                assert_equal("too many potential replacements" in exp.error['message'], True)
            else:
                assert(False)
            # The rejected replacement must leave the tree txs untouched.
            for tx in tree_txs:
                tx.rehash()
                self.nodes[0].getrawtransaction(tx.hash)
    def test_replacement_feeperkb(self):
        """Replacement requires fee-per-KB to be higher"""
        tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
        tx1a = CTransaction()
        tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
        tx1a_hex = txToHex(tx1a)
        tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
        # Higher fee, but the fee per KB is much lower, so the replacement is
        # rejected.
        tx1b = CTransaction()
        tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        # The ~1 MB output script inflates the tx size, crushing its feerate.
        tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*999000]))]
        tx1b_hex = txToHex(tx1b)
        try:
            tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26) # insufficient fee
        else:
            assert(False)
    def test_spends_of_conflicting_outputs(self):
        """Replacements that spend conflicting tx outputs are rejected"""
        utxo1 = make_utxo(self.nodes[0], int(1.2*COIN))
        utxo2 = make_utxo(self.nodes[0], 3*COIN)
        tx1a = CTransaction()
        tx1a.vin = [CTxIn(utxo1, nSequence=0)]
        tx1a.vout = [CTxOut(int(1.1*COIN), CScript([b'a']))]
        tx1a_hex = txToHex(tx1a)
        tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
        tx1a_txid = int(tx1a_txid, 16)
        # Direct spend an output of the transaction we're replacing.
        tx2 = CTransaction()
        tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)]
        tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0))
        tx2.vout = tx1a.vout
        tx2_hex = txToHex(tx2)
        try:
            tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26)
        else:
            assert(False)
        # Spend tx1a's output to test the indirect case.
        tx1b = CTransaction()
        tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
        tx1b.vout = [CTxOut(1*COIN, CScript([b'a']))]
        tx1b_hex = txToHex(tx1b)
        tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
        tx1b_txid = int(tx1b_txid, 16)
        # Replacement spends the child's output: same conflict, one hop removed.
        tx2 = CTransaction()
        tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0),
                   CTxIn(COutPoint(tx1b_txid, 0))]
        tx2.vout = tx1a.vout
        tx2_hex = txToHex(tx2)
        try:
            tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26)
        else:
            assert(False)
    def test_new_unconfirmed_inputs(self):
        """Replacements that add new unconfirmed inputs are rejected"""
        confirmed_utxo = make_utxo(self.nodes[0], int(1.1*COIN))
        unconfirmed_utxo = make_utxo(self.nodes[0], int(0.1*COIN), False)
        tx1 = CTransaction()
        tx1.vin = [CTxIn(confirmed_utxo)]
        tx1.vout = [CTxOut(1*COIN, CScript([b'a']))]
        tx1_hex = txToHex(tx1)
        tx1_txid = self.nodes[0].sendrawtransaction(tx1_hex, True)
        # The replacement reuses the confirmed input but also adds an
        # unconfirmed one, which the replacement policy rejects.
        tx2 = CTransaction()
        tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)]
        tx2.vout = tx1.vout
        tx2_hex = txToHex(tx2)
        try:
            tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26)
        else:
            assert(False)
    def test_too_many_replacements(self):
        """Replacements that evict too many transactions are rejected"""
        # Try directly replacing more than MAX_REPLACEMENT_LIMIT
        # transactions
        # Start by creating a single transaction with many outputs
        initial_nValue = 10*COIN
        utxo = make_utxo(self.nodes[0], initial_nValue)
        fee = int(0.0001*COIN)
        split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1))
        outputs = []
        for i in range(MAX_REPLACEMENT_LIMIT+1):
            outputs.append(CTxOut(split_value, CScript([1])))
        splitting_tx = CTransaction()
        splitting_tx.vin = [CTxIn(utxo, nSequence=0)]
        splitting_tx.vout = outputs
        splitting_tx_hex = txToHex(splitting_tx)
        txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, True)
        txid = int(txid, 16)
        # Now spend each of those outputs individually
        for i in range(MAX_REPLACEMENT_LIMIT+1):
            tx_i = CTransaction()
            tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)]
            tx_i.vout = [CTxOut(split_value-fee, CScript([b'a']))]
            tx_i_hex = txToHex(tx_i)
            self.nodes[0].sendrawtransaction(tx_i_hex, True)
        # Now create doublespend of the whole lot; should fail.
        # Need a big enough fee to cover all spending transactions and have
        # a higher fee rate
        double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1)
        inputs = []
        for i in range(MAX_REPLACEMENT_LIMIT+1):
            inputs.append(CTxIn(COutPoint(txid, i), nSequence=0))
        double_tx = CTransaction()
        double_tx.vin = inputs
        double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
        double_tx_hex = txToHex(double_tx)
        # Evicting MAX_REPLACEMENT_LIMIT+1 conflicts exceeds the anti-DoS cap.
        try:
            self.nodes[0].sendrawtransaction(double_tx_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26)
            assert_equal("too many potential replacements" in exp.error['message'], True)
        else:
            assert(False)
        # If we remove an input, it should pass
        double_tx = CTransaction()
        double_tx.vin = inputs[0:-1]
        double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
        double_tx_hex = txToHex(double_tx)
        self.nodes[0].sendrawtransaction(double_tx_hex, True)
def test_opt_in(self):
""" Replacing should only work | |
# Circular planar piston
# Evaluate the acoustic field generated by a circular planar piston
# Collocation ("discrete dipole") approximation of the volume integral
# equation for 3D acoustic scattering
import os
import sys
from IPython import embed
# FIXME: figure out how to avoid this sys.path stuff
sys.path.append(os.path.join(os.path.dirname(__file__),'../../'))
import numpy as np
# from piston import plane_circular_piston
from vines.geometry.geometry import generatedomain
from vines.fields.piston import plane_circular_piston
# Source/medium parameters for the piston-field evaluation.
k1 = (4225.410428500058 + 0.02498j)  # complex wavenumber -- presumably rad/m, TODO confirm
pressure_surface = 4*10**6  # piston surface pressure amplitude -- presumably Pa
radius = 0.005  # piston radius -- presumably metres
# n_elements = 2**12
# radius = 0.02 # 7.5cm
# aperture_radius = 0.00 # 2cm
# focal_length = 0.03 # 13cm
# focus = [0., 0., 0.]
lam = 2 * np.pi / np.real(k1)  # wavelength from the real part of k1
print('lam = ', lam)
def attenuation(f0):
    """Power-law attenuation coefficient for frequency *f0* in Hz.

    Computes alpha = 0.217 * (f0 in MHz)**2; units follow the 0.217
    coefficient's convention (not stated here -- confirm with the model).
    """
    f0_mhz = f0 * 1e-6
    return 0.217 * f0_mhz ** 2
# ---- Script body: domain setup, incident-field evaluation, plotting ----
# Replace the imaginary part of k1 with the attenuation model at 1 MHz.
k1 = np.real(k1) + 1j * attenuation(1e6)
c0 = 1487.0  # wavespeed
# p0 = 1.35e4 # initial pressure amplitude
# p0 = 4.41e6
beta = 3.5e0  # nonlinearity parameter -- used only by the commented harmonics code
rho = 998     # density -- presumably kg/m^3 (water), TODO confirm
nPerLam = 5  # number of voxels per interior wavelength
res = lam / nPerLam
# Dimension of computation domain
# x_start = -0.98 * np.sqrt(focal_length**2 - radius**2) # just to the right of bowl
# x_end = 0.01
# wx = x_end - x_start
# wy = 2 * 0.83 * radius # slightly narrower than bowl
# # wy = 2 * radius # same width as bowl
# wz = wy
x_start = 0.0005
x_end = 0.06
wx = x_end - x_start
wy = 2 * radius
wz = wy
# embed()
import time
start = time.time()
r, L, M, N = generatedomain(res, wx, wy, wz)
# Shift the voxel grid so the domain starts at x_start along x.
r[:, :, :, 0] = r[:, :, :, 0] - r[0, 0, 0, 0] + x_start
end = time.time()
print('Mesh generation time:', end-start)
# embed()
points = r.reshape(L*M*N, 3, order='F')
start = time.time()
p = plane_circular_piston(radius, k1, points.T)
p *= pressure_surface * 2j * k1
P = p.reshape(L, M, N, order='F')
end = time.time()
print('Incident field evaluation time (s):', end-start)
# Array to be populated with different harmonics evaluated on central axis
# BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin int is the documented replacement.
ny_centre = int(np.floor(M/2))
nz_centre = int(np.floor(N/2))
harmonics = np.zeros((4, L), dtype=np.complex128)
# First harmonic (i.e., incident field)
harmonics[0, :] = P[:, ny_centre, nz_centre]
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
matplotlib.rcParams.update({'font.size': 22})
# plt.rc('font', family='serif')
fig = plt.figure(figsize=(15, 5))
ax = fig.gca()
plt.imshow(np.real(P[:, :, int(np.floor(N/2))].T),
           cmap=plt.cm.get_cmap('viridis'), interpolation='spline16')
plt.xlabel(r'$x$')
plt.ylabel(r'$y$')
plt.colorbar()
fig.savefig('VIE_bowl_transducer_1p5cm.png')
plt.close()
# embed()
# Generate volume potential operator for 2xk
dx = r[1, 0, 0, 0] - r[0, 0, 0, 0]
vol = (dx)**3 # voxel volume
a = (3/4 * vol / np.pi)**(1/3) # radius of sphere of same volume
# ################# SECOND HARMONIC ########################
# # ko = 2 * k1
# ko = 2 * np.real(k1) + 1j * attenuation(2e6)
# # Create Toeplitz operator
# R0 = r[0, 0, 0, :]
# self = (1/ko**2 - 1j*a/ko) * np.exp(1j*ko*a) - 1/ko**2
# nearby_quad = 'off'
# n_quad = 10
# xG, wG = np.polynomial.legendre.leggauss(n_quad)
# XG, YG, ZG = np.meshgrid(xG, xG, xG)
# XW, YW, ZW = np.meshgrid(wG*0.5, wG*0.5, wG*0.5)
# # from operators import potential_fast
# from numba import jit, njit, prange
# @njit(parallel=True)
# def potential_fast(ko):
# toep = np.zeros((L, M, N), dtype=np.complex128)
# for i in prange(0, L):
# for j in range(0, M):
# for k in range(0, N):
# R1 = r[i,j,k,:]
# rk_to_rj = R1-R0
# rjk = np.linalg.norm(rk_to_rj)
# if nearby_quad in 'on':
# if rjk < 5 * dx and rjk > 1e-15:
# x_grid = R1[0] + dx/2 * XG
# y_grid = R1[1] + dx/2 * YG
# z_grid = R1[2] + dx/2 * ZG
# temp = 0.0+0.0j
# for iQ in range(0, n_quad):
# for jQ in range(0, n_quad):
# for kQ in range(0, n_quad):
# RQ = np.array([x_grid[iQ, jQ, kQ],
# y_grid[iQ, jQ, kQ],z_grid[iQ, jQ, kQ]])
# rk_to_rj = RQ - R0
# rjk = np.linalg.norm(rk_to_rj)
# rjk_hat = rk_to_rj / rjk
# rjkrjk = np.outer(rjk_hat, rjk_hat)
# Ajk = np.exp(1j * ko * rjk) / (4 * np.pi * rjk) * dx**3
# # Draine & Flatau
# temp = temp + Ajk * XW[iQ, jQ, kQ] * YW[iQ, jQ, kQ] * ZW[iQ, jQ, kQ]
# # from IPython import embed; embed()
# toep[i, j, k] = temp
# else:
# if np.abs(rjk) > 1e-15:
# toep[i, j, k] = \
# np.exp(1j * ko * rjk) / (4 * np.pi * rjk) * dx**3
# else:
# toep[i, j, k] = self
# else:
# if np.abs(rjk) > 1e-15:
# toep[i, j, k] = \
# np.exp(1j * ko * rjk) / (4 * np.pi * rjk) * dx**3
# else:
# toep[i, j, k] = self
# return toep
# start = time.time()
# toep = potential_fast(ko)
# end = time.time()
# print('Operator assembly time:', end-start)
# # toep = ko**2 * toep
# toep = toep
# start = time.time()
# from operators import circulant_embed
# circ_op = circulant_embed(toep, L, M, N)
# end = time.time()
# print('Time for circulant embedding and FFT:', end-start)
# from matvecs_acoustic import mvp_vec
# xIn = np.zeros((L, M, N), dtype=np.complex128)
# xIn = P
# xInVec = xIn.reshape((L*M*N, 1), order='F')
# idx = np.ones((L, M, N), dtype=bool)
# mvp = lambda x: mvp_vec(x, circ_op, idx, Mr)
# # Voxel permittivities
# Mr = np.ones((L, M, N), dtype=np.complex128)
# start = time.time()
# xOut = mvp(2 * beta * np.real(k1)**2 / (rho * c0**2) * xInVec * xInVec)
# end = time.time()
# print('Time for MVP:', end-start)
# # from IPython import embed; embed()
# P2 = xOut.reshape(L, M, N, order='F')
# #-------------- Third harmonic ----------------#
# # Create volume potential to evaluate next harmonic
# # ko = 3 * k1
# ko = 3 * np.real(k1) + 1j * attenuation(3e6)
# start = time.time()
# toep = potential_fast(ko)
# end = time.time()
# print('Operator assembly time:', end-start)
# start = time.time()
# from operators import circulant_embed
# circ_op = circulant_embed(toep, L, M, N)
# end = time.time()
# print('Time for circulant embedding and FFT:', end-start)
# xIn = np.zeros((L, M, N), dtype=np.complex128)
# f_rhs = P * P2
# xInVec = f_rhs.reshape((L*M*N, 1), order='F')
# start = time.time()
# xOut = mvp(9 * beta * np.real(k1)**2 / (rho * c0**2) * xInVec)
# end = time.time()
# print('Time for MVP:', end-start)
# # from IPython import embed; embed()
# P3 = xOut.reshape(L, M, N, order='F')
# total = P + P2 + P3
# # Plot harmonics along central axis
# x_line = (r[:, ny_centre, nz_centre, 0]) * 100
# fig = plt.figure(figsize=(14, 8))
# ax = fig.gca()
# plt.plot(x_line, np.abs(P[:, ny_centre, nz_centre])/1e6,'k-', linewidth=2)
# plt.plot(x_line, np.abs(P2[:, ny_centre, nz_centre])/1e6,'r-', linewidth=2)
# plt.plot(x_line, np.abs(P3[:, ny_centre, nz_centre])/1e6,'b-', linewidth=2)
# plt.plot(x_line, np.abs(total[:, ny_centre, nz_centre])/1e6,'g-', linewidth=2)
# plt.grid(True)
# # plt.xlim([1, 7])
# # plt.ylim([0, 9])
# plt.xlabel(r'Axial distance (cm)')
# plt.ylabel(r'Pressure (MPa)')
# fig.savefig('images/VIE_piston_harms_axis.png')
# plt.close()
# exit(1)
# # Right-hand side for computation of next harmonic
# f_rhs = P2 * P2 + 2 * P * P3
# # Plot f_rhs
# rel_p = np.log10(np.abs(f_rhs) / np.max(np.abs(f_rhs)))
# import matplotlib
# matplotlib.use('Agg')
# import matplotlib.pyplot as plt
# matplotlib.rcParams.update({'font.size': 26})
# # matplotlib.rc('font',**{'family':'serif','serif':['Palatino']})
# # plt.rc('text', usetex=True)
# fig = plt.figure(figsize=(14, 8))
# ax = fig.gca()
# CS = plt.contourf(rel_p[:, :, np.int(np.round(N/2))].T,
# extent=[x_start, x_end, -wy/2, wy/2],
# levels=[-4, -3.5, -2.5, -2.0, -1.5, -1, -0.5, 0],
# cmap=plt.cm.viridis,
# extend='both')
# cbar = plt.colorbar(CS)
# CS.cmap.set_under('black')
# CS.changed()
# cbar.ax.set_ylabel('log$_{10}(|f_4|$/max$|f_4|)$')
# # labels
# plt.ylabel('$y$ (cm)')
# plt.xlabel('$z$ (cm)')
# fig.savefig('images/test_p4.png', dpi=300)
# plt.close()
# ################### P4 computation ######################
# # Create volume potential to evaluate next harmonic
# ko = 4 * k1
# start = time.time()
# toep = potential_fast(ko)
# end = time.time()
# print('Operator assembly time:', end-start)
# start = time.time()
# from operators import circulant_embed
# circ_op = circulant_embed(toep, L, M, N)
# end = time.time()
# print('Time for circulant embedding and FFT:', end-start)
# # For chopping of domain where f_rhs below a certain size
# rel_p = np.log10(np.abs(f_rhs)/np.max(np.abs(f_rhs)))
# # TOL = np.array([-0.5, -1, -1.5, -2, -2.5, -3, -3.5, -4])
# TOL = np.array([-0.5, -0.75, -1, -1.25, -1.5, -1.75, -2, -2.25, -2.5, -2.75,
# -3, -3.25, -3.5, -3.75, -4])
# line_harmonic = np.zeros((TOL.shape[0], L), dtype=np.complex128)
# xMinVals = np.zeros(TOL.shape[0])
# xMaxVals = np.zeros(TOL.shape[0])
# yMinVals = np.zeros(TOL.shape[0])
# yMaxVals = np.zeros(TOL.shape[0])
# for i_tol in range(TOL.shape[0]):
# where_bigger = np.argwhere(rel_p > TOL[i_tol])
# min_x_idx = np.min(where_bigger[:, 0])
# max_x_idx = np.max(where_bigger[:, 0])
# min_y_idx = np.min(where_bigger[:, 1])
# max_y_idx = np.max(where_bigger[:, 1])
# min_z_idx = np.min(where_bigger[:, 2])
# max_z_idx = np.max(where_bigger[:, 2])
# xMinVals[i_tol] = r[min_x_idx,0,0,0]
# xMaxVals[i_tol] = r[max_x_idx,0,0,0]
# yMinVals[i_tol] = r[0,min_y_idx,0,1]
# yMaxVals[i_tol] = r[0,max_y_idx,0,1]
# print('Size x = ', (max_x_idx-min_x_idx)*dx)
# print('Size y,z = ', (max_y_idx-min_y_idx)*dx)
# P_trim = np.zeros((L, M, N), dtype=np.complex128)
# P_trim[min_x_idx:max_x_idx, min_y_idx:max_y_idx, min_z_idx:max_z_idx] = \
# f_rhs[min_x_idx:max_x_idx, min_y_idx:max_y_idx, min_z_idx:max_z_idx]
# xIn = P_trim
# xInVec = xIn.reshape((L*M*N, 1), order='F')
# idx = np.ones((L, M, N), dtype=bool)
# mvp = lambda x: mvp_vec(x, circ_op, idx, Mr)
# # Voxel permittivities
# Mr = np.ones((L, M, N), dtype=np.complex128)
# start = time.time()
# xOut = mvp(8 * beta * np.real(k1)**2 / (rho * c0**2) * xInVec)
# end = time.time()
# print('Time for MVP:', end-start)
# # from IPython import embed; embed()
# field = | |
<gh_stars>0
#!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import pandas as pd # type: ignore[import]
from datetime import datetime, timedelta, timezone
from pyspark.sql import DataFrame, SparkSession, functions as funcs
from typing import Any, Dict, List, Optional, Tuple
from auto_tracking import auto_tracking
from ptesting import github_utils
def _setup_logger() -> Any:
    """Return this module's logger at INFO level with a no-op handler attached."""
    from logging import getLogger, NullHandler, INFO
    log = getLogger(__name__)
    log.setLevel(INFO)
    log.addHandler(NullHandler())
    return log
_logger = _setup_logger()
def _to_datetime(d: str, fmt: str) -> datetime:
    """Parse *d* with strptime format *fmt* and attach a UTC timezone."""
    parsed = datetime.strptime(d, fmt)
    return parsed.replace(tzinfo=timezone.utc)
def _create_func_to_enrich_authors(spark: SparkSession,
                                   contributor_stats: Optional[List[Tuple[str, str]]],
                                   input_col: str) -> Tuple[Any, List[str]]:
    """Build a tracked transform joining per-author commit counts onto a frame.

    :param contributor_stats: (author, num_commits) rows; falsy means no stats
    :param input_col: column in the input frame holding the author name
    :return: (transform function, list of input columns it consumes)
    """
    if not contributor_stats:
        # No stats available: just add a zero-valued num_commits column.
        return lambda df: df.withColumn('num_commits', funcs.expr('0')), []
    # Fix: the schema literal was a needless f-string with no placeholders.
    contributor_stat_df = spark.createDataFrame(contributor_stats, schema='author: string, num_commits: int')

    @auto_tracking
    def enrich_authors(df: DataFrame) -> DataFrame:
        # Left join keeps rows whose author has no recorded commits; fill 0.
        return df.join(contributor_stat_df, df[input_col] == contributor_stat_df.author, 'LEFT_OUTER') \
            .na.fill({'num_commits': 0})

    return enrich_authors, [input_col]
def _create_func_to_enrich_files(spark: SparkSession,
                                 commits: List[datetime],
                                 updated_file_stats: Dict[str, List[Tuple[str, str, str, str]]],
                                 input_commit_date: str,
                                 input_filenames: str) -> Tuple[Any, List[str]]:
    """Build a tracked transform adding file-update-frequency features.

    For each row (a commit date plus a list of file names) the resulting
    transform counts how often those files were updated within trailing
    windows measured both in days (3/14/56) and in commits (3/14/56), and
    exposes each count as its own column.

    :param commits: commit dates -- presumably ordered newest-first, TODO confirm
    :param updated_file_stats: file name -> list of (update_date, ...) tuples
    :param input_commit_date: column holding a '%Y/%m/%d %H:%M:%S' date string
    :param input_filenames: column holding an array of file names
    :return: (transform function, list of input columns it consumes)
    """
    # Broadcast lookup tables once instead of shipping them with every task.
    broadcasted_updated_file_stats = spark.sparkContext.broadcast(updated_file_stats)
    broadcasted_commits = spark.sparkContext.broadcast(commits)
    @auto_tracking
    def enrich_files(df: DataFrame) -> DataFrame:
        @funcs.pandas_udf("string")  # type: ignore
        def _enrich_files(dates: pd.Series, filenames: pd.Series) -> pd.Series:
            updated_file_stats = broadcasted_updated_file_stats.value
            commits = broadcasted_commits.value
            ret = []
            for commit_date, files in zip(dates, filenames):
                base_date = _to_datetime(commit_date, '%Y/%m/%d %H:%M:%S')
                # True when `date` falls within `interval` days before base_date.
                is_updated_in_days = lambda interval, date: \
                    base_date - timedelta(interval) <= date and base_date >= date
                def is_updated_in_commits(num_commits: int, date: Any) -> bool:
                    # Walk to the first commit at/before base_date, then test
                    # whether `date` lies within the next `num_commits` commits.
                    cur_pos = 0
                    while cur_pos < len(commits):
                        if commits[cur_pos] <= base_date:
                            target_pos = cur_pos + min([num_commits, len(commits) - cur_pos - 1])
                            return commits[target_pos] <= date and commits[cur_pos] >= date
                        cur_pos += 1
                    return False
                # Time-dependent features
                updated_num_3d = 0
                updated_num_14d = 0
                updated_num_56d = 0
                # Commit-dependent features
                updated_num_3c = 0
                updated_num_14c = 0
                updated_num_56c = 0
                for file in json.loads(files):
                    if file in updated_file_stats:
                        for update_date, _, _, _ in updated_file_stats[file]:
                            udate = github_utils.from_github_datetime(update_date)
                            if is_updated_in_days(3, udate):
                                updated_num_3d += 1
                            if is_updated_in_days(14, udate):
                                updated_num_14d += 1
                            if is_updated_in_days(56, udate):
                                updated_num_56d += 1
                            if is_updated_in_commits(3, udate):
                                updated_num_3c += 1
                            if is_updated_in_commits(14, udate):
                                updated_num_14c += 1
                            if is_updated_in_commits(56, udate):
                                updated_num_56c += 1
                # One compact JSON blob per row; unpacked into columns below.
                ret.append(json.dumps({
                    'n3d': updated_num_3d,
                    'n14d': updated_num_14d,
                    'n56d': updated_num_56d,
                    'n3c': updated_num_3c,
                    'n14c': updated_num_14c,
                    'n56c': updated_num_56c
                }))
            return pd.Series(ret)
        # Compute the JSON feature blob, parse it, and expose one column each.
        enrich_files_expr = _enrich_files(funcs.expr(input_commit_date), funcs.expr(f'to_json({input_filenames})'))
        enrich_files_schema = 'struct<n3d: int, n14d: int, n56d: int, n3c: int, n14c: int, n56c: int>'
        return df.withColumn('updated_file_stats', enrich_files_expr) \
            .withColumn('ufs', funcs.expr(f'from_json(updated_file_stats, "{enrich_files_schema}")')) \
            .withColumn('updated_num_3d', funcs.expr('ufs.n3d')) \
            .withColumn('updated_num_14d', funcs.expr('ufs.n14d')) \
            .withColumn('updated_num_56d', funcs.expr('ufs.n56d')) \
            .withColumn('updated_num_3c', funcs.expr('ufs.n3c')) \
            .withColumn('updated_num_14c', funcs.expr('ufs.n14c')) \
            .withColumn('updated_num_56c', funcs.expr('ufs.n56c'))
    return enrich_files, [input_commit_date, input_filenames]
def _create_func_to_enumerate_related_tests(spark: SparkSession,
                                            dep_graph: Dict[str, List[str]],
                                            corr_map: Dict[str, List[str]],
                                            test_files: Dict[str, str],
                                            included_tests: List[str],
                                            input_files: str,
                                            depth: int) -> Tuple[Any, List[str]]:
    """Build a transform that attaches, per commit 'sha', the set of tests
    related to its changed files ('related_tests') and its size ('target_card').

    Returns a (transform, input_columns) pair.
    """
    broadcasted_dep_graph = spark.sparkContext.broadcast(dep_graph)
    broadcasted_corr_map = spark.sparkContext.broadcast(corr_map)
    broadcasted_test_files = spark.sparkContext.broadcast(test_files)
    broadcasted_included_tests = spark.sparkContext.broadcast(included_tests)

    # This method lists up related tests by using two relations as follows:
    #  - File correlation in commits: if files were merged in a single commit, classes in the files are assumed
    #    to have correlated between each other.
    #  - Control flow graph: if a method in a class A calls a method in a class B, the class A depends on the class B.
    #    Since various factors (e.g., class hierarchy and runtime reflection) can affect which methods are called
    #    in a class, it is hard to analyze control flow precisely. Therefore, we analyze it in a coarse-grain way;
    #    if a class file A contains a JVM opcode 'invoke' for a class B, the class A is assumed
    #    to depend on the class B.
    @auto_tracking
    def enumerate_related_tests(df: DataFrame) -> DataFrame:
        @funcs.pandas_udf("string")  # type: ignore
        def _enumerate_tests(file_paths: pd.Series) -> pd.Series:
            # TODO: Removes package-dependent stuffs
            import spark_utils
            parse_path = spark_utils.create_func_to_transform_path_to_qualified_name()
            dep_graph = broadcasted_dep_graph.value
            corr_map = broadcasted_corr_map.value
            test_files = broadcasted_test_files.value
            included_tests = broadcasted_included_tests.value

            # Breadth-first walk of the dependency graph, up to `depth` hops
            # from `target`; returns every visited node ending in 'Suite'.
            def _enumerate_tests_from_dep_graph(target):  # type: ignore
                subgraph = {}  # NOTE(review): never used
                visited_nodes = set()
                keys = list([target])
                for i in range(0, depth):
                    if len(keys) == 0:
                        break
                    next_keys = set()
                    for key in keys:
                        if key in dep_graph and key not in visited_nodes:
                            nodes = dep_graph[key]
                            next_keys.update(nodes)
                    visited_nodes.update(keys)
                    keys = list(next_keys)
                # NOTE(review): `keys` is always a list here, so this is always
                # true -- probably meant `if keys:` (harmless either way).
                if keys is not None:
                    visited_nodes.update(keys)
                tests = list(filter(lambda n: n.endswith('Suite'), visited_nodes))
                return tests

            ret = []
            for file_path in file_paths:
                # Tests correlated with this file via shared commits.
                correlated_files = corr_map[file_path] if file_path in corr_map else []
                related_tests = set()
                related_tests.update(list(filter(lambda f: f in test_files, correlated_files)))
                # Tests that must always run.
                related_tests.update(included_tests)
                if file_path:
                    result = parse_path(file_path)
                    if result:
                        # Tests reachable through the dependency graph.
                        dependant_tests = _enumerate_tests_from_dep_graph(result)
                        related_tests.update(dependant_tests)
                ret.append(json.dumps({'tests': list(related_tests)}))
            return pd.Series(ret)

        # Explode files per sha, enumerate tests per file, then re-aggregate
        # into a deduplicated per-sha test set with its cardinality.
        related_test_df = df.selectExpr('sha', f'explode_outer({input_files}) filename') \
            .withColumn('tests', _enumerate_tests(funcs.expr('filename'))) \
            .selectExpr('sha', 'from_json(tests, "tests ARRAY<STRING>").tests tests') \
            .selectExpr('sha', 'explode_outer(tests) test') \
            .groupBy('sha') \
            .agg(funcs.expr(f'collect_set(test) tests')) \
            .selectExpr('sha', 'size(tests) target_card', f'tests related_tests') \
            .where(f'related_tests IS NOT NULL')
        return df.join(related_test_df, 'sha', 'LEFT_OUTER')

    return enumerate_related_tests, ['sha', 'files']
def _create_func_to_enumerate_all_tests(spark: SparkSession, test_files: Dict[str, str]) -> Tuple[Any, List[str]]:
    """Build a transform that attaches the full test list ('all_tests') and its
    size ('target_card') to every row. Returns a (transform, input_columns) pair.
    """
    all_test_df = spark.createDataFrame(list(test_files.items()), ['test', 'path'])

    @auto_tracking
    def enumerate_all_tests(df: DataFrame) -> DataFrame:
        # Cross join against a single-row frame holding the collected test set.
        collected = all_test_df.selectExpr('collect_set(test) all_tests')
        return df.join(collected).withColumn('target_card', funcs.expr('size(all_tests)'))

    return enumerate_all_tests, []
def _create_func_to_enrich_tests(spark: SparkSession,
                                 commits: List[datetime],
                                 failed_tests: Dict[str, List[str]],
                                 input_commit_date: str,
                                 input_test: str) -> Tuple[Any, List[str]]:
    """Build a transform that appends per-test historical failure counts.

    For each row's commit date and test name, counts failures within 7/14/28
    days and within the last 7/14/28 commits, plus the all-time total, exposed
    as columns failed_num_7d, ..., failed_num_28c, total_failed_num.

    Returns a (transform, input_columns) pair.
    """
    # Broadcast lookup structures so executors don't re-serialize them per task.
    broadcasted_failed_tests = spark.sparkContext.broadcast(failed_tests)
    broadcasted_commits = spark.sparkContext.broadcast(commits)

    @auto_tracking
    def enrich_tests(df: DataFrame) -> DataFrame:
        @funcs.pandas_udf("string")  # type: ignore
        def _enrich_tests(dates: pd.Series, tests: pd.Series) -> pd.Series:
            failed_tests = broadcasted_failed_tests.value
            commits = broadcasted_commits.value
            ret = []
            for commit_date, test in zip(dates, tests):
                base_date = _to_datetime(commit_date, '%Y/%m/%d %H:%M:%S')
                # True when `date` falls within `interval` days before base_date.
                failed_in_days = lambda interval, date: \
                    base_date - timedelta(interval) <= date and base_date >= date
                # True when `date` falls within the last `num_commits` commits
                # preceding base_date. NOTE(review): this walk assumes `commits`
                # is sorted newest-first -- TODO confirm at the call site.
                def failed_in_commits(num_commits: int, date: Any) -> bool:
                    cur_pos = 0
                    while cur_pos < len(commits):
                        if commits[cur_pos] <= base_date:
                            target_pos = cur_pos + min([num_commits, len(commits) - cur_pos - 1])
                            return commits[target_pos] <= date and commits[cur_pos] >= date
                        cur_pos += 1
                    return False
                # Time-dependent features
                failed_num_7d = 0
                failed_num_14d = 0
                failed_num_28d = 0
                # Commit-dependent features
                failed_num_7c = 0
                failed_num_14c = 0
                failed_num_28c = 0
                total_failed_num = 0
                if test in failed_tests:
                    for failed_date in failed_tests[test]:
                        failed_date = _to_datetime(failed_date, '%Y/%m/%d %H:%M:%S')  # type: ignore
                        if failed_in_days(7, failed_date):
                            failed_num_7d += 1
                        if failed_in_days(14, failed_date):
                            failed_num_14d += 1
                        if failed_in_days(28, failed_date):
                            failed_num_28d += 1
                        if failed_in_commits(7, failed_date):
                            failed_num_7c += 1
                        if failed_in_commits(14, failed_date):
                            failed_num_14c += 1
                        if failed_in_commits(28, failed_date):
                            failed_num_28c += 1
                        total_failed_num += 1
                # Pack the counts as JSON; unpacked into columns via from_json below.
                ret.append(json.dumps({
                    'n7d': failed_num_7d,
                    'n14d': failed_num_14d,
                    'n28d': failed_num_28d,
                    'n7c': failed_num_7c,
                    'n14c': failed_num_14c,
                    'n28c': failed_num_28c,
                    'total': total_failed_num
                }))
            return pd.Series(ret)

        enrich_tests_expr = _enrich_tests(funcs.expr(input_commit_date), funcs.expr(input_test))
        failed_test_stats_schema = 'struct<n7d: int, n14d: int, n28d: int, n7c: int, n14c: int, n28c: int, total: int>'
        # Parse the UDF's JSON payload once, then project each field into its own column.
        return df.withColumn('failed_test_stats', enrich_tests_expr) \
            .withColumn('fts', funcs.expr(f'from_json(failed_test_stats, "{failed_test_stats_schema}")')) \
            .withColumn('failed_num_7d', funcs.expr('fts.n7d')) \
            .withColumn('failed_num_14d', funcs.expr('fts.n14d')) \
            .withColumn('failed_num_28d', funcs.expr('fts.n28d')) \
            .withColumn('failed_num_7c', funcs.expr('fts.n7c')) \
            .withColumn('failed_num_14c', funcs.expr('fts.n14c')) \
            .withColumn('failed_num_28c', funcs.expr('fts.n28c')) \
            .withColumn('total_failed_num', funcs.expr('fts.total'))

    return enrich_tests, [input_commit_date, input_test]
def _create_func_to_compute_distances(spark: SparkSession,
dep_graph: Dict[str, List[str]], test_files: Dict[str, str],
input_files: str,
input_test: str) -> Tuple[Any, List[str]]:
broadcasted_dep_graph = spark.sparkContext.broadcast(dep_graph)
broadcasted_test_files = spark.sparkContext.broadcast(test_files)
@funcs.pandas_udf("int") # type: ignore
def _compute_path_diff(filenames: pd.Series, test: pd.Series) -> pd.Series:
# TODO: Removes package-depenent stuffs
import spark_utils
compute_path_diff = spark_utils.create_func_to_computer_path_difference()
test_files = broadcasted_test_files.value
ret = []
for names, t in zip(filenames, test):
if t in test_files:
distances = []
for n in json.loads(names):
distances.append(compute_path_diff(n, test_files[t]))
ret.append(min(distances) if distances else 128)
else:
ret.append(128)
return pd.Series(ret)
@funcs.pandas_udf("int") # type: ignore
def _compute_distance(filenames: pd.Series, test: pd.Series) -> pd.Series:
# TODO: Removes package-depenent stuffs
import spark_utils
parse_path = spark_utils.create_func_to_transform_path_to_qualified_name()
dep_graph = broadcasted_dep_graph.value
ret = []
for names, t in zip(filenames, test):
distances = [128]
for n in json.loads(names):
ident = parse_path(n)
if ident:
if ident == t:
distances.append(0)
break
visited_nodes = set()
keys = list([ident])
for i in range(0, 16):
if len(keys) == 0:
break
next_keys = set()
for key in keys:
if key in dep_graph and key not in visited_nodes:
nodes = dep_graph[key]
next_keys.update(nodes)
if t in | |
<reponame>Petersontylerd/quickplot<gh_stars>1-10
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.patches import Patch
import prettierplot.style as style
import prettierplot.util as util
import textwrap
def facet_cat(self, df, feature, label_rotate=0, x_units="s", y_units="f", bbox=(1.2, 0.9), alpha=0.8,
              legend_labels=None, color_map="viridis", ax=None):
    """
    Documentation:

    ---
    Description:
        Creates a count plot for a categorical variable and facets the variable by another
        categorical variable.

    ---
    Parameters:
        df : Pandas DataFrame
            Pandas DataFrame containing data for plotting. The first column holds the
            category labels; each remaining column holds the counts for one facet.
        feature : str
            Name of column that contains the category values to be used for faceting.
        label_rotate : float or int, default=0
            Number of degrees to rotate the x-tick labels.
        x_units : str, default='s'
            Determines unit of measurement for x-axis tick labels. 's' displays string. 'f' displays
            float. 'p' displays percentages, 'd' displays dollars. Repeat character (e.g 'ff' or 'ddd')
            for additional decimal places. Note: overridden below based on the dtype of
            the first column.
        y_units : str, default='f'
            Determines unit of measurement for y-axis tick labels. 's' displays string. 'f' displays
            float. 'p' displays percentages, 'd' displays dollars. Repeat character (e.g 'ff' or 'ddd')
            for additional decimal places.
        bbox : tuple of floats, default=(1.2, 0.9)
            Coordinates for determining legend position.
        alpha : float, default=0.8
            Controls transparency of objects. Accepts value between 0.0 and 1.0.
        legend_labels : list, default=None
            Custom legend labels.
        color_map : str specifying built-in matplotlib colormap, or list of colors, default="viridis"
            Color map applied to plots.
        ax : axes object, default=None
            Axis object for the visualization.
    """
    if ax is None:
        ax = self.ax

    ixs = np.arange(df.shape[0])
    bar_width = 0.35

    # Map each facet column (every column after the first) to its values.
    # BUG FIX: use a dedicated loop variable -- the original loop shadowed the
    # `feature` parameter.
    feature_dict = {}
    for col in df.columns[1:]:
        feature_dict[col] = df[col].values.tolist()

    # generate color list
    if isinstance(color_map, str):
        color_list = style.color_gen(name=color_map, num=len(feature_dict.keys()))
    elif isinstance(color_map, list):
        color_list = color_map

    # draw one bar group per facet column, offset horizontally by facet index
    for feature_ix, (k, v) in enumerate(feature_dict.items()):
        plt.bar(
            ixs + (bar_width * feature_ix),
            feature_dict[k],
            bar_width,
            alpha=alpha,
            color=color_list[feature_ix],
            label=str(k),
        )

    # wrap long x-tick labels
    plt.xticks(
        ixs[: df.shape[0]] + bar_width / 2,
        [
            "\n".join(textwrap.wrap(str(i).replace("_", " "), 12))
            for i in df.iloc[:, 0].values
        ],
    )
    plt.xticks(rotation=label_rotate)

    ## create custom legend
    # create labels
    if legend_labels is None:
        legend_labels = np.arange(len(color_list))
    else:
        legend_labels = np.array(legend_labels)

    # define colors
    label_color = {}
    for ix, i in enumerate(legend_labels):
        label_color[i] = color_list[ix]

    # create legend Patches
    patches = [Patch(color=v, label=k, alpha=alpha) for k, v in label_color.items()]

    # draw legend
    leg = plt.legend(
        handles=patches,
        fontsize=0.95 * self.chart_scale,
        loc="upper right",
        markerscale=0.3 * self.chart_scale,
        ncol=1,
        bbox_to_anchor=bbox,
    )

    # label font color
    for text in leg.get_texts():
        plt.setp(text, color="grey")

    ### general formatting
    # if data is float dtype, then format as a number.
    # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `float` compares identically against a float64 dtype.
    if df.iloc[:, 0].values.dtype == float:
        x_units = "f"
    # otherwise represent data as a string
    else:
        x_units = "s"

    # use label formatter utility function to customize chart labels
    util.util_label_formatter(ax=ax, x_units=x_units, y_units=y_units)

    # tick label font size
    ax.tick_params(axis="both", colors=style.style_grey, labelsize=1.2 * self.chart_scale)

    # dynamically set x-axis label size based on the number of categories
    # (each facet column has one value per row, so the row count equals the
    # number of x-axis labels -- equivalent to the original's use of the
    # shadowed loop variable)
    num_labels = df.shape[0]
    if 7 < num_labels <= 10:
        ax.tick_params(axis="x", colors=style.style_grey, labelsize=0.9 * self.chart_scale)
    elif 10 < num_labels <= 20:
        ax.tick_params(axis="x", colors=style.style_grey, labelsize=0.75 * self.chart_scale)
    elif num_labels > 20:
        ax.tick_params(axis="x", colors=style.style_grey, labelsize=0.6 * self.chart_scale)
def facet_two_cat_bar(self, df, x, y, split, x_units=None, y_units=None, bbox=None, alpha=0.8,
                      legend_labels=None, filter_nan=True, color_map="viridis", ax=None):
    """
    Documentation:

    ---
    Description:
        Creates a series of bar plots that count a variable along the y_axis and separate the counts
        into bins based on two category variables.

    ---
    Parameters:
        df : Pandas DataFrame
            Pandas DataFrame containing data for plotting.
        x : str
            Categorical variable to plot along x-axis.
        y : str
            Variable to be counted along y-axis.
        split : str
            Categorical variable for faceting the num_col variable.
        x_units : str, default=None
            Determines unit of measurement for x-axis tick labels. 's' displays string. 'f' displays
            float. 'p' displays percentages, 'd' displays dollars. Repeat character (e.g 'ff' or 'ddd')
            for additional decimal places.
        y_units : str, default=None
            Determines unit of measurement for y-axis tick labels. 's' displays string. 'f' displays
            float. 'p' displays percentages, 'd' displays dollars. Repeat character (e.g 'ff' or 'ddd')
            for additional decimal places.
        bbox : tuple of floats, default=None
            Coordinates for determining legend position.
        alpha : float, default=0.8
            Controls transparency of objects. Accepts value between 0.0 and 1.0.
        legend_labels : list, default=None
            Custom legend labels.
        filter_nan : bool, default=True
            Remove records that have a null value in the column specified by the 'x' parameter.
        color_map : str specifying built-in matplotlib colormap, default="viridis"
            Color map applied to plots.
        ax : axes object, default=None
            Axis object for the visualization.
    """
    if ax is None:
        ax = self.ax

    # remove nans from x columns
    if filter_nan:
        df = df.dropna(subset=[x])

    # create bar plot
    # BUG FIX: the palette previously hard-coded "viridis", silently ignoring
    # the `color_map` parameter.
    # NOTE(review): `df[split]` is evaluated unconditionally here, so split=None
    # raises before the hue_order guard -- unchanged from the original behavior.
    g = sns.barplot(
        x=x,
        y=y,
        hue=split,
        data=df,
        palette=sns.color_palette(
            style.color_gen(color_map, num=len(np.unique(df[split].values)))
        ),
        order=df[x].sort_values().drop_duplicates().values.tolist(),
        hue_order=df[split].sort_values().drop_duplicates().values.tolist()
        if split is not None
        else None,
        ax=ax,
        ci=None,
    )

    # format x-tick labels
    g.set_xticklabels(
        g.get_xticklabels(),
        rotation=0,
        fontsize=1.05 * self.chart_scale,
        color=style.style_grey,
    )

    # format y-tick labels
    # BUG FIX: y_units defaults to None, so the original `"p" in y_units`
    # raised TypeError on every default call; guard for None first.
    # NOTE(review): `get_yticklabels() * 100` replicates the label *list*
    # 100x rather than scaling values -- preserved as-is, TODO confirm intent.
    g.set_yticklabels(
        g.get_yticklabels() * 100 if y_units is not None and "p" in y_units else g.get_yticklabels(),
        rotation=0,
        fontsize=1.05 * self.chart_scale,
        color=style.style_grey,
    )

    # format x-axis label
    g.set_xlabel(
        g.get_xlabel(),
        rotation=0,
        fontsize=1.35 * self.chart_scale,
        color=style.style_grey,
    )

    # format y-axis label
    g.set_ylabel(
        g.get_ylabel(),
        rotation=90,
        fontsize=1.35 * self.chart_scale,
        color=style.style_grey,
    )

    # format title
    g.set_title(
        g.get_title(),
        rotation=0,
        fontsize=1.5 * self.chart_scale,
        color=style.style_grey,
    )

    ## create custom legend
    # create labels
    if split is not None:
        if legend_labels is None:
            legend_labels = (
                df[df[split].notnull()][split]
                .sort_values()
                .drop_duplicates()
                .values.tolist()
            )
        else:
            legend_labels = np.array(legend_labels)

        # generate colors
        color_list = style.color_gen(color_map, num=len(legend_labels))
        label_color = {}
        for ix, i in enumerate(legend_labels):
            label_color[i] = color_list[ix]

        # create legend Patches
        patches = [Patch(color=v, label=k, alpha=alpha) for k, v in label_color.items()]

        # draw legend
        leg = plt.legend(
            handles=patches,
            fontsize=1.25 * self.chart_scale,
            loc="upper right",
            markerscale=0.5 * self.chart_scale,
            ncol=1,
            bbox_to_anchor=bbox,
        )

        # label font color
        for text in leg.get_texts():
            plt.setp(text, color="grey")

    # use label formatter utility function to customize chart labels
    util.util_label_formatter(ax=ax, x_units=x_units, y_units=y_units)
def facet_cat_num_scatter(self, df, x, y, cat_row=None, cat_col=None, split=None, bbox=None, aspect=1, alpha=0.8,
height=4, legend_labels=None, x_units="f", y_units="f", color_map="viridis"):
"""
Documentation:
---
Description:
Creates scatter plots of two numeric variables and allows for faceting by up to two
categorical variables along the column and/or row axes of the figure.
---
Parameters:
df : Pandas DataFrame
Pandas DataFrame containing data for plotting.
x : str
Numeric variable to plot along x-axis.
y : str
Numeric variable to plot along y-axis.
cat_row : str
Categorical variable faceted along the row axis.
cat_col : str
Categorical variable faceted along the column axis.
split : str
Categorical variable for faceting the num_col variable.
bbox : tuple of floats, default=None
Coordinates for determining legend position.
aspect : float, default=1
Higher values create wider plot, lower values create narrow plot, while
keeping height constant.
alpha : float, default=0.8
Controls transparency of objects. Accepts value between 0.0 and 1.0.
height : float, default=4
Height in inches of each facet.
legend_labels : list, default=None
Custom legend labels.
x_units : str, default='f'
Determines unit of measurement for x-axis tick labels. 'f' displays float. 'p'
displays percentages, d' displays dollars. Repeat character (e.g 'ff' or 'ddd')
for additional decimal places.
y_units : str, default='f'
Determines unit of measurement for x-axis tick labels. 'f' displays float. 'p'
displays percentages, d' displays dollars. Repeat character (e.g 'ff' or 'ddd')
for additional decimal places.
color_map : str specifying built-in matplotlib colormap, default="viridis"
Color map applied to plots.
"""
# create FacetGrid object
g = sns.FacetGrid(
df,
col=cat_col,
row=cat_row,
hue=split,
palette=sns.color_palette(
style.color_gen(color_map, num=len(np.unique(df[split].values)))
),
hue_order=df[split].sort_values().drop_duplicates().values.tolist()
if split is not None
else None,
height=height,
aspect=aspect,
margin_titles=True,
)
# map scatter plot to FacetGrid object
g = g.map(plt.scatter, x, y, s=1.2 * self.chart_scale)
# format x any y ticklabels, x and y labels, and main title
for ax in g.axes.flat:
_ = ax.set_yticklabels(
ax.get_yticklabels() * 100 if "p" in y_units else ax.get_yticklabels(),
rotation=0,
fontsize=0.8 * self.chart_scale,
color=style.style_grey,
)
_ = ax.set_xticklabels(
ax.get_xticklabels(),
rotation=0,
fontsize=0.8 * self.chart_scale,
color=style.style_grey,
)
_ = ax.set_ylabel(
ax.get_ylabel(),
rotation=90,
fontsize=1.05 * self.chart_scale,
color=style.style_grey,
)
_ = ax.set_xlabel(
ax.get_xlabel(),
rotation=0,
fontsize=1.05 * self.chart_scale,
color=style.style_grey,
)
_ = ax.set_title(
ax.get_title(),
rotation=0,
fontsize=1.05 * self.chart_scale,
color=style.style_grey,
)
# custom tick label formatting
util.util_label_formatter(ax=ax, x_units=x_units, y_units=y_units)
if ax.texts:
# | |
if mask[i, img_l:img_r].sum() > 0:
out_t = i
break
for j in range(img_b, img_t, -1):
if mask[j - 1, img_l:img_r].sum() > 0:
out_b = j
break
# Find leftmost and rightmost nonzero column.
for k in range(img_l, img_r):
if mask[img_t:img_b, k].sum() > 0:
out_l = k
break
for l in range(img_r, img_l, -1):
if mask[img_t:img_b, l - 1].sum() > 0:
out_r = l
break
_trim_end_time = time.clock()
logging.info('T-Lasso: Trimming took {0:.4f} s'.format(_trim_end_time - _trim_start_time))
logging.info('T-Lasso: Output={0}'.format((out_t, out_l, out_b, out_r)))
# Rounding errors when converting m --> w --> m integers!
# - Output
if (out_b > out_t) and (out_r > out_l):
return out_t, out_l, out_b, out_r
def model_selection_from_points(self, points):
    """Turn lasso points into a model-coordinate selection dict.

    Returns None when no bounding box can be derived from the points.
    """
    # This should go away anyway.
    bbox = self.model_bbox_from_points(points)
    if bbox is None:
        return None
    top, left, bottom, right = bbox
    return {'top': top, 'left': left, 'bottom': bottom, 'right': right}
def selection_from_points(self, points):
    """Turn lasso points into an editor-coordinate selection dict, or None
    when no bounding box can be derived from the points."""
    bbox = self.model_bbox_from_points(points)
    if bbox is None:
        return None
    ed_t, ed_l, ed_b, ed_r = self.model_to_editor_bbox(*bbox)
    logging.info('T-Lasso: editor-coord output bbox {0}'
                 ''.format((ed_t, ed_l, ed_b, ed_r)))
    return {'top': ed_t,
            'bottom': ed_b,
            'left': ed_l,
            'right': ed_r}
###############################################################################
class MaskEraserTool(LassoBoundingBoxSelectTool):
    """Removes the given area from all selected symbols' masks."""

    def __init__(self, do_split, **kwargs):
        super(MaskEraserTool, self).__init__(**kwargs)
        # When True, a symbol whose mask falls apart into several connected
        # components after erasing is replaced by one CropObject per component.
        self.do_split = do_split

    def on_current_cropobject_model_selection(self, instance, pos):
        """Here, instead of adding a new CropObject as the other lasso
        tools do, modify selected cropobjects' masks.

        NOTE(review): several log messages below misspell the tag as
        'MaskErarser'; they are runtime strings and left untouched here.
        """
        t, l, b, r = pos['top'], pos['left'], pos['bottom'], pos['right']
        bbox = bbox_to_integer_bounds(t, l, b, r)
        logging.info('MaskEraser: got bounding box: {0}'.format(bbox))
        for cropobject_view in self.app_ref.cropobject_list_renderer.view.selected_views:
            # Work on a copy so the view's model counterpart is not mutated in place.
            c = copy.deepcopy(cropobject_view._model_counterpart)
            # Guards:
            if c.mask is None:
                logging.info('MaskErarser: cropobject {0} has no mask.'
                             ''.format(c.objid))
                continue
            if not c.overlaps(bbox):
                logging.info('MaskErarser: cropobject {0} (bbox {1})'
                             'does not overlap.'
                             ''.format(c.objid, c.bounding_box))
                continue
            logging.info('MaskErarser: processing cropobject {0}.'
                         ''.format(c.objid))
            # Intersection expressed in the cropobject's frame (i_*) and in the
            # erase-mask's frame (m_*).
            i_t, i_l, i_b, i_r = bbox_intersection(c.bounding_box, bbox)
            m_t, m_l, m_b, m_r = bbox_intersection(bbox, c.bounding_box)
            logging.info('MaskEraser: got cropobject intersection {0}'
                         ''.format((i_t, i_l, i_b, i_r)))
            logging.info('MaskEraser: got mask intersection {0}'
                         ''.format((m_t, m_l, m_b, m_r)))
            logging.info('MaskEraser: cropobject nnz previous = {0}'
                         ''.format(c.mask.sum()))
            # We need to invert the current mask, as we want to mask *out*
            # whatever is *in* the mask now.
            inverse_mask = c.mask.max() - self.current_cropobject_mask[m_t:m_b, m_l:m_r]
            c.mask[i_t:i_b, i_l:i_r] *= inverse_mask
            logging.info('MaskEraser: cropobject nnz after = {0}'
                         ''.format(c.mask.sum()))
            # Shrink the bounding box to the surviving mask area.
            c.crop_to_mask()
            # We do the removal through the view, so that deselection
            # and other stuff is handled.
            cropobject_view.remove_from_model()
            if self.do_split:
                _next_objid = self._model.get_next_cropobject_id()
                output_cropobjects = split_cropobject_on_connected_components(c, _next_objid)
            else:
                output_cropobjects = [c]
            for c in output_cropobjects:
                # Now add the CropObject back to redraw. Note that this way,
                # the object's objid stays the same, which is essential for
                # maintaining intact inlinks and outlinks!
                logging.info('MaskEraser: New object data dict: {0}'
                             ''.format(c.data))
                self._model.add_cropobject(c)
                try:
                    new_view = self.app_ref.cropobject_list_renderer.view.get_cropobject_view(c.objid)
                    new_view.ensure_selected()
                except KeyError:
                    logging.info('MaskEraser: View for modified CropObject {0} has'
                                 ' not been rendered yet, cannot select it.'
                                 ''.format(c.objid))
        logging.info('MaskEraser: Forcing redraw.')
        self.app_ref.cropobject_list_renderer.redraw += 1
        self.app_ref.graph_renderer.redraw += 1
        self.editor_widgets['line_tracer'].clear()
class MaskAdditionTool(LassoBoundingBoxSelectTool):
    """Adds the lasso-ed area to all selected symbols' masks."""

    def on_current_cropobject_model_selection(self, instance, pos):
        """Here, instead of adding a new CropObject like the other
        Lasso tools, we instead modify the mask of selected CropObjects
        by adding the lasso-ed area."""
        # Build a temporary CropObject covering the lasso-ed region.
        c_lasso = self.app_ref.generate_cropobject_from_model_selection(
            selection=pos,
            mask=self.current_cropobject_mask)
        c_lasso.crop_to_mask()
        for cropobject_view in self.app_ref.cropobject_list_renderer.view.selected_views:
            # Work on a copy; merge the lasso region into the symbol's mask.
            c = copy.deepcopy(cropobject_view._model_counterpart)
            c.join(c_lasso)
            # Redraw: remove the old view/model entry, then re-add the joined
            # object -- objid stays the same, so inlinks/outlinks survive.
            cropobject_view.remove_from_model()
            # BUG FIX: log tags below said 'MaskEraser', a copy-paste leftover
            # from MaskEraserTool; corrected to 'MaskAddition'.
            logging.info('MaskAddition: New object data dict: {0}'
                         ''.format(c.data))
            self._model.add_cropobject(c)
            # Try reselecting the selected objects:
            try:
                new_view = self.app_ref.cropobject_list_renderer.view.get_cropobject_view(c.objid)
                new_view.ensure_selected()
            except KeyError:
                logging.info('MaskAddition: View for modified CropObject {0} has'
                             ' not been rendered yet, cannot select it.'
                             ''.format(c.objid))
        logging.info('MaskAddition: Forcing redraw.')
        self.app_ref.cropobject_list_renderer.redraw += 1
        self.app_ref.graph_renderer.redraw += 1
        self.editor_widgets['line_tracer'].clear()
###############################################################################
class GestureSelectTool(LassoBoundingBoxSelectTool):
    """The GestureSelectTool tries to find the best approximation
    to a user gesture, as though the user is writing the score
    instead of annotating it.

    Run bounds
    ----------

    * Top: topmost coordinate of all accepted runs.
    * Bottom: bottom-most coordinate of all accepted runs.
    * Left: leftmost coordinates of all runs over the lower limit.
    * Right: rightmost coordinate of all runs over the lower limit.

    NOTE: Currently only supports horizontal strokes
    NOTE: Not resistant to the gesture leaving and re-entering
          a stroke region.
    """
    current_cropobject_selection = ObjectProperty(None)
    current_cropobject_mask = ObjectProperty(None)

    def create_editor_widgets(self):
        # The gesture is captured by a LineTracer; any change to its points
        # triggers recomputation of the selection.
        editor_widgets = collections.OrderedDict()
        editor_widgets['line_tracer'] = LineTracer()
        editor_widgets['line_tracer'].bind(points=self.current_selection_from_points)
        return editor_widgets

    def current_selection_from_points(self, instance, pos):
        """Derive the output selection bbox from the traced gesture points."""
        # Map points to model
        # - get model coordinates of points. `pos` is a flat [x0, y0, x1, y1, ...]
        #   list; pairing consecutive items and keeping every other pair
        #   reconstructs the (x, y) tuples.
        e_points = numpy.array([list(p) for i, p in enumerate(zip(pos[:-1], pos[1:]))
                                if i % 2 == 0])
        # We don't just need the points, we need their order as well...
        m_points = numpy.array([self.app_ref.map_point_from_editor_to_model(*p)
                                for p in e_points]).astype('uint16')
        # Drop consecutive duplicate points.
        # BUG FIX: the original condition was `(...).sum() == 0.0`, which kept
        # only the *duplicate* points. Keep a point when it differs from its
        # predecessor; element-wise comparison also avoids uint16 wrap-around
        # cancelling out in a sum of differences.
        m_points_uniq = numpy.array([m_points[0]] +
                                    [m_points[i] for i in range(1, len(m_points))
                                     if (m_points[i] != m_points[i - 1]).any()])
        logging.info('Gesture: total M-Points: {0}, unique: {1}'
                     ''.format(len(m_points), len(m_points_uniq)))

        # Get image
        image = self.app_ref.annot_model.image

        # Now the intelligent part starts.
        # - If more vertical than horizontal, record horizontal runs.
        e_sel = self.selection_from_points(pos)
        m_bbox = self.app_ref.generate_model_bbox_from_selection(e_sel)
        m_int_bbox = bbox_to_integer_bounds(*m_bbox)
        height = m_int_bbox[2] - m_int_bbox[0]
        width = m_int_bbox[3] - m_int_bbox[1]
        is_vertical = False
        if height >= 2 * width:
            is_vertical = True
        if is_vertical:
            raise NotImplementedError('Sorry, currently only supporting horizontal'
                                      ' strokes.')

        # TODO: make points also unique column-wise
        # - Get all vertical runs the stroke goes through
        # - Find stroke mask (approximate with straight lines) and
        #   collect all stroke points
        # - For each point:
        stroke_mask = numpy.zeros(image.shape, dtype=image.dtype)
        all_points = [[], []]
        for i, (a, b) in enumerate(zip(m_points_uniq[:-1], m_points_uniq[1:])):
            l = line(a[0], a[1], b[0], b[1])
            all_points[0].extend(list(l[0]))
            all_points[1].extend(list(l[1]))
            stroke_mask[l] = 1

        runs = []
        # Each point's run is represented as a [top, bottom] pair,
        # empty runs are represented as (x, x).
        for x, y in zip(*all_points):
            t = x
            # BUG FIX: test the bound *before* indexing; the original order let
            # `image[-1, y]` silently wrap around to the bottom row.
            while (t >= 0) and (image[t, y] != 0):
                t -= 1
            b = x
            # BUG FIX: the original tested `b >= 0`, which is always true for a
            # growing index and allowed running off the bottom image edge.
            while (b < image.shape[0]) and (image[b, y] != 0):
                b += 1
            runs.append([t, b])

        # - Compute stroke width histograms from connected components.
        run_widths = numpy.array([b - t for t, b in runs])
        nnz_run_widths = numpy.array([w for w in run_widths if w > 0])
        # Average is too high because of crossing strokes, we should use median.
        rw_med = numpy.median(nnz_run_widths)
        logging.info('Gesture: Collected stroke vertical runs, {0} in total,'
                     ' avg. width {1:.2f}'.format(len(runs),
                                                  rw_med))
        # - Compute run width bounds
        rw_lower = 2
        rw_upper = int(rw_med * 1.1 + 1)
        # - Sort out which runs are within, under, and over the width range
        runs_mask = [rw_lower <= (b - t) <= rw_upper for t, b in runs]
        runs_under = [(b - t) < rw_lower for t, b in runs]
        runs_over = [(b - t) > rw_upper for t, b in runs]
        runs_accepted = [r for i, r in enumerate(runs) if runs_mask[i]]
        ra_npy = numpy.array(runs_accepted)
        logging.info('Gesture: run bounds [{0}, {1}]'.format(rw_lower, rw_upper))
        logging.info('Gesture: Accepted: {0}, under: {1}, over: {2}'
                     ''.format(len(runs_accepted), sum(runs_under), sum(runs_over)))
        # - Get run bounds
        # NOTE(review): if no runs are accepted, the min()/max() calls below
        # raise on empty input -- unchanged from the original behavior.
        out_t = ra_npy[:, 0].min()
        out_b = ra_npy[:, 1].max()
        out_l = min([all_points[1][i] for i, r in enumerate(runs_under) if not r])
        out_r = max([all_points[1][i] for i, r in enumerate(runs_under) if not r])
        logging.info('Gesture: model bounds = {0}'.format((out_t, out_l, out_b, out_r)))
        if (out_b > out_t) and (out_r > out_l):
            ed_t, ed_l, ed_b, ed_r = self.model_to_editor_bbox(out_t,
                                                               out_l,
                                                               out_b,
                                                               out_r)
            logging.info('Gesture: editor-coord output bbox {0}'
                         ''.format((ed_t, ed_l, ed_b, ed_r)))
            self.current_cropobject_selection = {'top': ed_t,
                                                 'bottom': ed_b,
                                                 'left': ed_l,
                                                 'right': ed_r}
###############################################################################
class BaseListItemViewsOperationTool(MUSCIMarkerTool):
"""This is a base class for tools manipulating ListItemViews.
Override select_applicable_objects to define how the ListItemViews
should be selected.
Override ``@property list_view`` to point to the desired ListView.
Override ``@property available_views`` if the default
``self.list_view.container.children[:]`` is not correct.
Override the ``apply_operation`` method to get tools that actually do
something to the CropObjectViews that correspond to CropObjects
overlapping the lasso-ed area."""
use_mask_to_determine_selection = BooleanProperty(False)
line_color = ListProperty([0.6, 0.6, 0.6])
forgetful = BooleanProperty(True)
'''If True, will always forget prior selection. If False, will
be "additive".'''
active_selection = BooleanProperty(True)
'''If True, will show the current state of the selection.'''
def __init__(self, app, editor_widget, command_widget, active_selection=True,
**kwargs):
# Settings like this have to be provided *before* create_editor_widgets
# is called by super.__init__
self.active_selection | |
correction of the block's short timestamps.
:cvar elementName: The name of the element handled by this parser
:cvar product: The class of object generated by the parser
"""
product = SimpleChannelDataBlock
elementName = product.__name__
isHeader = False
# The default block time scalar.
timeScalar = 1000000.0 / 2**15
def __init__(self, doc, **kwargs):
    """Initialize the parser with empty per-channel timestamp state."""
    super(SimpleChannelDataBlockParser, self).__init__(doc, **kwargs)
    # Timestamp conversion/correction is done per channel: each dict below
    # is keyed by channel ID.
    self.timestampOffset = {}  # accumulated rollover offset per channel
    self.lastStamp = {}        # last raw (modulo) timestamp seen per channel
    self.timeScalars = {}      # per-channel scaling factor (defaults to timeScalar)
    self.timeModulus = {}      # rollover period (defaults to block.maxTimestamp)
def fixOverflow(self, block, timestamp):
    """ Return an adjusted, scaled time from a low-resolution timestamp.

        Tracks per-channel rollover of the (modulo) timestamp counter and
        accumulates the offset needed to make timestamps monotonic.

        :param block: The data block the timestamp came from; supplies the
            channel ID (via `getHeader()`) and the default modulus.
        :param timestamp: The raw timestamp to correct.
        :return: The corrected timestamp, scaled by the channel's scalar.
    """
    # TODO: Identify blocks with non-modulo timestamps and just return the
    # unmodified timestamp. Will be slightly more efficient.
    channelId = block.getHeader()[1]
    wrap = self.timeModulus.setdefault(channelId, block.maxTimestamp)
    base = self.timestampOffset.setdefault(channelId, 0)

    if timestamp > wrap:
        # Timestamp is (probably) not modulo; will occur in split files.
        base = timestamp - (timestamp % wrap)
        timestamp %= wrap
        self.timestampOffset[channelId] = base
    elif timestamp < self.lastStamp.get(channelId, 0):
        # Raw value went backwards: the counter (probably) rolled over.
        base += wrap
        self.timestampOffset[channelId] = base

    self.lastStamp[channelId] = timestamp
    scalar = self.timeScalars.setdefault(channelId, self.timeScalar)
    return (timestamp + base) * scalar
def parse(self, element, sessionId=None, timeOffset=0):
    """ Create a (Simple)ChannelDataBlock from the given EBML element.

        :param element: A sample-carrying EBML element.
        :keyword sessionId: The session currently being read; defaults to
            whatever the Dataset says is current.
        :keyword timeOffset: Constant offset added to the block's corrected
            start/end times (after rollover fixing and scaling).
        :return: The number of subsamples read from the element's payload,
            or 0 if the block could not be used.
    """
    try:
        block = self.product(element)
        timestamp, channel = block.getHeader()
    except struct.error as e:
        # Malformed header/payload; report where in the file it happened.
        raise ParsingError("Element would not parse: %s (ID %d) @%d (%s)" %
                           (element.name, element.id, element.offset, e))
    except AttributeError:
        # Can happen if the block had no timestamp (broken imported data?)
        # TODO: Actually handle, instead of ignoring?
        logger.warning("XXX: bad attribute in element %s" % element)
        return 0

    # Correct timer rollover and apply the channel's time scaling.
    block.startTime = timeOffset + int(self.fixOverflow(block, timestamp))
    if block.endTime is not None:
        block.endTime = timeOffset + int(self.fixOverflow(block, block.endTime))

    if channel not in self.doc.channels:
        # Unknown channel; could be debugging info, so that might be okay.
        # FUTURE: Better handling of unknown channel types. Low priority.
        return 0

    try:
        ch = self.doc.channels[channel]
        ch.getSession(sessionId).append(block)
        # Total subsamples = samples in the payload times subchannel count.
        return block.getNumSamples(ch.parser) * len(ch.children)
    except ZeroDivisionError:
        # NOTE(review): presumably raised inside getNumSamples for an
        # empty/zero-size parser — confirm; treated as an empty block.
        return 0
#===============================================================================
# ChannelDataBlock: Element wrapper and handler
#===============================================================================
class ChannelDataBlock(BaseDataBlock):
    """ Wrapper for ChannelDataBlock elements, which features additional data
        excluded from the simple version. ChannelDataBlock elements are
        'master' elements with several child elements, such as full
        timestamps and sample minimum/mean/maximum.
    """
    # Default rollover modulus: timestamps wrap as 24-bit counters.
    maxTimestamp = 2**24

    def __init__(self, element):
        """ Constructor.

            :param element: The ChannelDataBlock EBML 'master' element to
                wrap. Children are scanned once here; the payload and the
                min/mean/max data are kept as elements and decoded lazily.
        """
        super(ChannelDataBlock, self).__init__(element)
        self._payloadIdx = None
        self._payloadEl = None      # raw payload element, decoded lazily
        self._minMeanMaxEl = None   # raw min/mean/max element, decoded lazily
        self._minMeanMax = None     # explicitly-set min/mean/max (see setter)
        self.element = element
        for el in element:
            # These are roughly in order of probability, optional and/or
            # unimplemented elements are at the end.
            if el.name == "ChannelIDRef":
                self.channel = el.value
            elif el.name == "ChannelDataPayload":
                self._payloadEl = el
                self.payloadSize = el.size
            elif el.name == "StartTimeCodeAbsMod":
                self.startTime = el.value
                self._timestamp = el.value
            elif el.name == "EndTimeCodeAbsMod":
                self.endTime = el.value
            elif el.name == "ChannelDataMinMeanMax":
                # self.minMeanMax = el.value
                self._minMeanMaxEl = el
            elif el.name == "Void":
                continue
            elif el.name == 'Attribute':
                parseAttribute(self, el)
                el.gc()
            elif el.name == "StartTimeCodeAbs":
                # TODO: store indicator that the start timestamp is non-modulo?
                self.startTime = el.value
                self._timestamp = el.value
            elif el.name == "EndTimeCodeAbs":
                # TODO: store indicator that the end timestamp is non-modulo?
                self.endTime = el.value
            elif el.name == "ChannelFlags":
                # FUTURE: Handle channel flag bits
                continue
            # Add other child element handlers here.

        # Release already-read children; elements kept above stay referenced.
        element.gc(recurse=False)

        # Single-sample blocks have a total time of 0. Old files did not write
        # the end timestamp; if it's missing, duplicate the starting time.
        # NOTE(review): assumes `startTime`/`endTime` default to None in
        # BaseDataBlock when the elements are absent — confirm in base class.
        if self.endTime is None:
            self.endTime = self.startTime

        self._payload = None
        self._parser = None
        self._streamDtype = None
        self._commonDtype = None

    @property
    def payload(self):
        # Lazily decode the payload into a numpy array, then release the
        # underlying EBML element.
        if self._payload is None:
            self._payload = np.array(self._payloadEl.value)
            self._payloadEl.gc()
        return self._payload

    # Define standard mapping from struct to numpy typestring
    # (conversions taken from struct & numpy docs:)
    # https://docs.python.org/3/library/struct.html#format-characters
    # https://numpy.org/doc/stable/reference/arrays.dtypes.html#specifying-and-constructing-data-types
    TO_NP_TYPESTR = {
        # 'x': '',
        'c': 'b',
        'b': 'b',
        'B': 'B',
        '?': '?',
        'h': 'i2',
        'H': 'u2',
        'i': 'i4',
        'I': 'u4',
        'l': 'i4',
        'L': 'u4',
        'q': 'i8',
        'Q': 'u8',
        # 'n': '',
        # 'N': '',
        # 'e': 'f2', unsupported in Python3.5
        'f': 'f4',
        'd': 'f8',
        # 's': '',
        # 'p': '',
        # 'P': '',
    }

    @property
    def minMeanMax(self):
        # Prefer an explicitly-set value; otherwise decode from the element.
        if self._minMeanMaxEl is None:
            return self._minMeanMax
        return self._minMeanMaxEl.value

    @minMeanMax.setter
    def minMeanMax(self, v):
        # Explicitly set (done when block contains a single sample)
        self._minMeanMax = v

    def getHeader(self):
        """ Extract the block's header info (timestamp and channel ID).
        """
        return self._timestamp, self.channel
class ChannelDataBlockParser(SimpleChannelDataBlockParser):
    """ Factory for ChannelDataBlock elements. Instantiated once per
        session/channel, handles modulus correction for the blocks'
        timestamps. Unlike the SimpleChannelDataBlockParser, this returns
        blocks which store (cache?) data as numpy arrays.

        :cvar product: The class of object generated by the parser
        :cvar elementName: The name of the element handled by this parser
    """
    product = ChannelDataBlock
    elementName = product.__name__

    # Scalar applied to raw ticks; 1e6 / 2**15 reads as microseconds per tick
    # of a 32768 Hz clock — NOTE(review): confirm against hardware docs.
    timeScalar = 1e6 / 2**15
################################################################################
#===============================================================================
#--- RecordingProperties element and sub-element parsers
#===============================================================================
################################################################################
class RecorderPropertyParser(ElementHandler):
    """ Common handler for elements whose values are simply stored in the
        Dataset's `recorderInfo` dictionary, but that do not appear inside
        the `RecorderInfo` element itself.
    """
    isHeader = True
    isSubElement = True

    def parse(self, element, **kwargs):
        # Nothing to record against when no Dataset is being built.
        if self.doc is None:
            return
        if element.name == 'Attribute':
            parseAttribute(self.doc, element)
        else:
            self.doc.recorderInfo[element.name] = element.value
#===============================================================================
# RecordingProperties: Calibration
#===============================================================================
class PolynomialParser(ElementHandler):
    """ Handler for calibration polynomials, both Univariate and Bivariate.
        The two element-specific parsers subclass this; all of the actual
        work happens here.
    """
    elementName = ("UnivariatePolynomial", "BivariatePolynomial")
    isSubElement = True
    isHeader = True

    # Parameter names: mapping of element names to the keyword arguments used
    # to instantiate a polynomial object. Also used to remove unknown elements
    # (see `renameKeys`).
    parameterNames = {"CalID": "calId",
                      "CalReferenceValue": "reference",
                      "BivariateCalReferenceValue": "reference2",
                      "BivariateChannelIDRef": "channelId",
                      "BivariateSubChannelIDRef": "subchannelId",
                      "PolynomialCoef": "coeffs",
                      "Attribute": "attributes"}

    def parse(self, element, **kwargs):
        """ Build a Univariate or Bivariate transform from the element's
            children, register it with the Dataset (if any), and return it.
        """
        # Element name (plus ID and file position) for error messages
        elName = self.getElementName(element)

        params = renameKeys(element.dump(), self.parameterNames)
        params['dataset'] = self.doc
        coeffs = params.pop("coeffs", None)

        # Validate the pieces every polynomial needs.
        if coeffs is None:
            raise ParsingError("%s had no coefficients" % elName)
        if "calId" not in params:
            raise ParsingError("%s had no calibration ID" % elName)

        if element.name == "UnivariatePolynomial":
            poly = transforms.Univariate(coeffs, **params)
        elif element.name == "BivariatePolynomial":
            # Bivariate polynomial. Do extra error checking.
            if "channelId" not in params or "subchannelId" not in params:
                raise ParsingError("%s had no channel reference!" % elName)
            if len(coeffs) != 4:
                raise ParsingError("%s supplied %d coefficients; 4 required" %
                                   (elName, len(coeffs)))
            poly = transforms.Bivariate(coeffs, **params)
        else:
            # Unknown polynomial type.
            raise ParsingError("%s: unknown polynomial type" % elName)

        # self.doc might (validly) be None if a configuration tool is
        # reading the device info file, rather than reading a recording file.
        if self.doc is not None:
            self.doc.addTransform(poly)
        return poly
class CalibrationElementParser(RecorderPropertyParser):
    """ Simple handler for optional calibration metadata (calibration date,
        expiry, and serial number). Values are stored verbatim in the
        Dataset's `recorderInfo` (see `RecorderPropertyParser.parse`).
    """
    elementName = ("CalibrationDate",
                   "CalibrationExpiry",
                   "CalibrationSerialNumber")
class CalibrationListParser(ElementHandler):
    """ Root-level parser for calibration data. Handles parsing of the
        individual calibration elements (its children). Unlike (most) other
        parsers, this one can be instantiated without a reference to a
        `dataset.Dataset`. It also keeps a copy of all the calibration items
        in a `items` attribute (a list).
    """
    isHeader = True
    elementName = "CalibrationList"
    # Delegate child elements to the polynomial and metadata handlers.
    children = (PolynomialParser, CalibrationElementParser)
#===============================================================================
# RecordingProperties: Sensor-related parsers
#===============================================================================
class SensorListParser(ElementHandler):
    """ Parse a `SensorList` element, creating a Sensor on the Dataset for
        each child `Sensor` element found.
    """
    elementName = "SensorList"
    isSubElement = True

    # Parameter names: mapping of element names to the keyword arguments used
    # to instantiate the various children of SensorListParser. Also used to
    # remove unknown elements (see `renameKeys`).
    parameterNames = {
        "Sensor": "sensors",
        "SensorID": "sensorId",
        "SensorName": "name",
        "TraceabilityData": "traceData",
        "SensorSerialNumber": "serialNum",
        "Attribute": "attributes",
        # "SensorBwLimitIDRef": "bandwidthLimitId" # FUTURE
    }

    def parse(self, element, **kwargs):
        """ Parse a SensorList
        """
        dumped = element.dump()

        # Attributes are collected on the Dataset rather than per-sensor.
        if 'attributes' in dumped:
            sensorAtts = self.doc.recorderInfo.setdefault('sensorAttributes', {})
            sensorAtts.update(decode_attributes(dumped['attributes']))

        dumped = renameKeys(dumped, self.parameterNames)
        for sensor in dumped.get("sensors", ()):
            self.doc.addSensor(**sensor)
#===============================================================================
# RecordingProperties: Channel and Subchannel parsers
#===============================================================================
class ChannelParser(ElementHandler):
""" Handle individual Channel elements. Separate from ChannelList so | |
"Time", "instances": 22, "metric_value": 0.0909, "depth": 7}
if obj[1]>0:
return 'False'
elif obj[1]<=0:
# {"feature": "Education", "instances": 4, "metric_value": 0.3333, "depth": 8}
if obj[3]>0:
# {"feature": "Restaurant20to50", "instances": 3, "metric_value": 0.0, "depth": 9}
if obj[7]<=1.0:
return 'False'
elif obj[7]>1.0:
return 'True'
else: return 'True'
elif obj[3]<=0:
return 'True'
else: return 'True'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[0]>1:
# {"feature": "Distance", "instances": 527, "metric_value": 0.4617, "depth": 4}
if obj[9]>1:
# {"feature": "Bar", "instances": 356, "metric_value": 0.4414, "depth": 5}
if obj[5]<=2.0:
# {"feature": "Time", "instances": 323, "metric_value": 0.4297, "depth": 6}
if obj[1]>0:
# {"feature": "Restaurant20to50", "instances": 252, "metric_value": 0.4506, "depth": 7}
if obj[7]>-1.0:
# {"feature": "Occupation", "instances": 241, "metric_value": 0.4626, "depth": 8}
if obj[4]<=7.132780082987552:
# {"feature": "Education", "instances": 160, "metric_value": 0.4669, "depth": 9}
if obj[3]<=1:
# {"feature": "Direction_same", "instances": 89, "metric_value": 0.4469, "depth": 10}
if obj[8]<=0:
return 'True'
else: return 'True'
elif obj[3]>1:
# {"feature": "Direction_same", "instances": 71, "metric_value": 0.492, "depth": 10}
if obj[8]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[4]>7.132780082987552:
# {"feature": "Education", "instances": 81, "metric_value": 0.4416, "depth": 9}
if obj[3]<=3:
# {"feature": "Direction_same", "instances": 77, "metric_value": 0.4385, "depth": 10}
if obj[8]<=0:
return 'True'
else: return 'True'
elif obj[3]>3:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.5, "depth": 10}
if obj[8]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
elif obj[7]<=-1.0:
# {"feature": "Occupation", "instances": 11, "metric_value": 0.1515, "depth": 8}
if obj[4]>6:
# {"feature": "Education", "instances": 6, "metric_value": 0.25, "depth": 9}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.375, "depth": 10}
if obj[8]<=0:
return 'True'
else: return 'True'
elif obj[3]>0:
return 'True'
else: return 'True'
elif obj[4]<=6:
return 'True'
else: return 'True'
else: return 'True'
elif obj[1]<=0:
# {"feature": "Restaurant20to50", "instances": 71, "metric_value": 0.2784, "depth": 7}
if obj[7]>-1.0:
# {"feature": "Occupation", "instances": 68, "metric_value": 0.28, "depth": 8}
if obj[4]<=9:
# {"feature": "Education", "instances": 47, "metric_value": 0.215, "depth": 9}
if obj[3]<=2:
# {"feature": "Direction_same", "instances": 38, "metric_value": 0.2659, "depth": 10}
if obj[8]<=0:
return 'True'
else: return 'True'
elif obj[3]>2:
return 'True'
else: return 'True'
elif obj[4]>9:
# {"feature": "Education", "instances": 21, "metric_value": 0.3866, "depth": 9}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 17, "metric_value": 0.3599, "depth": 10}
if obj[8]<=0:
return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.5, "depth": 10}
if obj[8]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[7]<=-1.0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[5]>2.0:
# {"feature": "Occupation", "instances": 33, "metric_value": 0.4242, "depth": 6}
if obj[4]<=18:
# {"feature": "Time", "instances": 28, "metric_value": 0.4792, "depth": 7}
if obj[1]>0:
# {"feature": "Education", "instances": 24, "metric_value": 0.4921, "depth": 8}
if obj[3]<=2:
# {"feature": "Restaurant20to50", "instances": 21, "metric_value": 0.4952, "depth": 9}
if obj[7]>0.0:
# {"feature": "Direction_same", "instances": 16, "metric_value": 0.5, "depth": 10}
if obj[8]<=0:
return 'False'
else: return 'False'
elif obj[7]<=0.0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.48, "depth": 10}
if obj[8]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[3]>2:
# {"feature": "Restaurant20to50", "instances": 3, "metric_value": 0.4444, "depth": 9}
if obj[7]<=1.0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 10}
if obj[8]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[1]<=0:
# {"feature": "Restaurant20to50", "instances": 4, "metric_value": 0.25, "depth": 8}
if obj[7]<=1.0:
return 'True'
elif obj[7]>1.0:
# {"feature": "Education", "instances": 2, "metric_value": 0.5, "depth": 9}
if obj[3]<=2:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 10}
if obj[8]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[4]>18:
return 'False'
else: return 'False'
else: return 'False'
elif obj[9]<=1:
# {"feature": "Occupation", "instances": 171, "metric_value": 0.4644, "depth": 5}
if obj[4]>1.3264107549745603:
# {"feature": "Education", "instances": 137, "metric_value": 0.4459, "depth": 6}
if obj[3]<=3:
# {"feature": "Time", "instances": 126, "metric_value": 0.432, "depth": 7}
if obj[1]>0:
# {"feature": "Bar", "instances": 94, "metric_value": 0.3985, "depth": 8}
if obj[5]<=3.0:
# {"feature": "Restaurant20to50", "instances": 93, "metric_value": 0.3939, "depth": 9}
if obj[7]<=2.0:
# {"feature": "Direction_same", "instances": 88, "metric_value": 0.4163, "depth": 10}
if obj[8]<=0:
return 'False'
else: return 'False'
elif obj[7]>2.0:
return 'False'
else: return 'False'
elif obj[5]>3.0:
return 'True'
else: return 'True'
elif obj[1]<=0:
# {"feature": "Bar", "instances": 32, "metric_value": 0.4688, "depth": 8}
if obj[5]>-1.0:
# {"feature": "Restaurant20to50", "instances": 30, "metric_value": 0.4974, "depth": 9}
if obj[7]<=1.0:
# {"feature": "Direction_same", "instances": 21, "metric_value": 0.4989, "depth": 10}
if obj[8]<=0:
return 'True'
else: return 'True'
elif obj[7]>1.0:
# {"feature": "Direction_same", "instances": 9, "metric_value": 0.4938, "depth": 10}
if obj[8]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[5]<=-1.0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[3]>3:
# {"feature": "Bar", "instances": 11, "metric_value": 0.1455, "depth": 7}
if obj[5]<=0.0:
return 'True'
elif obj[5]>0.0:
# {"feature": "Restaurant20to50", "instances": 5, "metric_value": 0.2667, "depth": 8}
if obj[7]<=0.0:
# {"feature": "Time", "instances": 3, "metric_value": 0.3333, "depth": 9}
if obj[1]>0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 10}
if obj[8]<=0:
return 'True'
else: return 'True'
elif obj[1]<=0:
return 'False'
else: return 'False'
elif obj[7]>0.0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
elif obj[4]<=1.3264107549745603:
# {"feature": "Education", "instances": 34, "metric_value": 0.444, "depth": 6}
if obj[3]>1:
# {"feature": "Restaurant20to50", "instances": 18, "metric_value": 0.3704, "depth": 7}
if obj[7]<=1.0:
# {"feature": "Bar", "instances": 15, "metric_value": 0.4103, "depth": 8}
if obj[5]<=2.0:
# {"feature": "Time", "instances": 13, "metric_value": 0.4256, "depth": 9}
if obj[1]>0:
# {"feature": "Direction_same", "instances": 10, "metric_value": 0.42, "depth": 10}
if obj[8]<=0:
return 'True'
else: return 'True'
elif obj[1]<=0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 10}
if obj[8]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[5]>2.0:
return 'True'
else: return 'True'
elif obj[7]>1.0:
return 'True'
else: return 'True'
elif obj[3]<=1:
# {"feature": "Restaurant20to50", "instances": 16, "metric_value": 0.4271, "depth": 7}
if obj[7]<=1.0:
# {"feature": "Time", "instances": 12, "metric_value": 0.3429, "depth": 8}
if obj[1]<=2:
# {"feature": "Bar", "instances": 7, "metric_value": 0.2286, "depth": 9}
if obj[5]<=0.0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.32, "depth": 10}
if obj[8]<=0:
return 'False'
else: return 'False'
elif obj[5]>0.0:
return 'False'
else: return 'False'
elif obj[1]>2:
# {"feature": "Bar", "instances": 5, "metric_value": 0.48, "depth": 9}
if obj[5]<=0.0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.48, "depth": 10}
if obj[8]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[7]>1.0:
# {"feature": "Time", "instances": 4, "metric_value": 0.0, "depth": 8}
if obj[1]<=3:
return 'True'
elif obj[1]>3:
return 'False'
else: return 'False'
else: return 'True'
else: return 'False'
else: return 'True'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[2]<=1:
# {"feature": "Bar", "instances": 2281, "metric_value": 0.4567, "depth": 2}
if obj[5]<=1.0:
# {"feature": "Occupation", "instances": 1601, "metric_value": 0.4436, "depth": 3}
if obj[4]>1.887387522319548:
# {"feature": "Education", "instances": 1333, "metric_value": 0.4585, "depth": 4}
if obj[3]>0:
# {"feature": "Time", "instances": 882, "metric_value": 0.438, "depth": 5}
if obj[1]>0:
# {"feature": "Coffeehouse", "instances": 613, "metric_value": 0.4124, "depth": 6}
if obj[6]<=3.0:
# {"feature": "Direction_same", "instances": 574, "metric_value": 0.4244, "depth": 7}
if obj[8]<=0:
# {"feature": "Restaurant20to50", "instances": 515, "metric_value": 0.4338, "depth": 8}
if obj[7]<=1.0:
# {"feature": "Passanger", "instances": 361, "metric_value": 0.4108, "depth": 9}
if obj[0]<=2:
# {"feature": "Distance", "instances": 303, "metric_value": 0.3955, "depth": 10}
if obj[9]<=2:
return 'False'
elif obj[9]>2:
return 'False'
else: return 'False'
elif obj[0]>2:
# {"feature": "Distance", "instances": 58, "metric_value": 0.4786, "depth": 10}
if obj[9]>1:
return 'False'
elif obj[9]<=1:
return 'False'
else: return 'False'
else: return 'False'
elif obj[7]>1.0:
# {"feature": "Distance", "instances": 154, "metric_value": 0.465, "depth": 9}
if obj[9]>1:
# {"feature": "Passanger", "instances": 108, "metric_value": 0.447, "depth": 10}
if obj[0]<=2:
return 'False'
elif obj[0]>2:
return 'False'
else: return 'False'
elif obj[9]<=1:
# {"feature": "Passanger", "instances": 46, "metric_value": 0.49, "depth": 10}
if obj[0]<=1:
return 'True'
elif obj[0]>1:
return 'False'
else: return 'False'
else: return 'True'
else: return 'False'
elif obj[8]>0:
# {"feature": "Restaurant20to50", "instances": 59, "metric_value": 0.2805, "depth": 8}
if obj[7]<=3.0:
# {"feature": "Passanger", "instances": 58, "metric_value": 0.2819, "depth": 9}
if obj[0]<=1:
# {"feature": "Distance", "instances": 51, "metric_value": 0.263, "depth": 10}
if obj[9]>1:
return 'False'
elif obj[9]<=1:
return 'False'
else: return 'False'
elif obj[0]>1:
# {"feature": "Distance", "instances": 7, "metric_value": 0.4082, "depth": 10}
if obj[9]<=2:
return 'False'
else: return 'False'
else: return 'False'
elif obj[7]>3.0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[6]>3.0:
# {"feature": "Passanger", "instances": 39, "metric_value": 0.148, "depth": 7}
if obj[0]<=2:
# {"feature": "Distance", "instances": 35, "metric_value": 0.1048, "depth": 8}
if obj[9]<=2:
# {"feature": "Direction_same", "instances": 24, "metric_value": 0.15, "depth": 9}
if obj[8]<=0:
# {"feature": "Restaurant20to50", "instances": 20, "metric_value": 0.1765, "depth": 10}
if obj[7]<=2.0:
return 'False'
elif obj[7]>2.0:
return 'False'
else: return 'False'
elif obj[8]>0:
return 'False'
else: return 'False'
elif obj[9]>2:
return 'False'
else: return 'False'
elif obj[0]>2:
# {"feature": "Restaurant20to50", "instances": 4, "metric_value": 0.3333, "depth": 8}
if obj[7]<=1.0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 9}
if obj[8]<=0:
# {"feature": "Distance", "instances": 3, "metric_value": 0.4444, "depth": 10}
if obj[9]<=2:
return 'True'
else: return 'True'
else: return 'True'
elif obj[7]>1.0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[1]<=0:
# {"feature": "Restaurant20to50", "instances": 269, "metric_value": 0.4746, "depth": 6}
if obj[7]<=1.0:
# {"feature": "Coffeehouse", "instances": 193, "metric_value": 0.4579, "depth": 7}
if obj[6]>-1.0:
# {"feature": "Distance", "instances": 188, "metric_value": 0.4641, "depth": 8}
if obj[9]<=2:
# {"feature": "Direction_same", "instances": 166, "metric_value": 0.4761, "depth": 9}
if obj[8]<=0:
# {"feature": "Passanger", "instances": 86, "metric_value": 0.4493, "depth": 10}
if obj[0]>0:
return 'False'
elif obj[0]<=0:
return 'False'
else: return 'False'
elif obj[8]>0:
# {"feature": "Passanger", "instances": 80, "metric_value": 0.4916, "depth": 10}
if obj[0]<=1:
return 'False'
elif obj[0]>1:
return 'True'
else: return 'True'
else: return 'False'
elif obj[9]>2:
# {"feature": "Passanger", "instances": 22, "metric_value": 0.2944, "depth": 9}
if obj[0]<=1:
# {"feature": "Direction_same", "instances": 21, "metric_value": 0.3084, "depth": 10}
if obj[8]<=0:
return 'False'
else: return 'False'
elif obj[0]>1:
return 'True'
else: return 'True'
else: return 'False'
elif obj[6]<=-1.0:
return 'False'
else: return 'False'
elif obj[7]>1.0:
# {"feature": "Passanger", "instances": 76, "metric_value": 0.4869, "depth": 7}
if obj[0]<=1:
# {"feature": "Distance", "instances": 58, "metric_value": 0.4701, "depth": 8}
if obj[9]<=2:
# {"feature": "Coffeehouse", "instances": 49, "metric_value": 0.4592, "depth": 9}
if obj[6]<=1.0:
# {"feature": "Direction_same", "instances": 28, "metric_value": 0.4955, "depth": 10}
if obj[8]>0:
return 'True'
elif obj[8]<=0:
return 'True'
else: return 'True'
elif obj[6]>1.0:
# {"feature": "Direction_same", "instances": 21, "metric_value": 0.4021, "depth": 10}
if obj[8]>0:
return 'True'
elif obj[8]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[9]>2:
# {"feature": "Coffeehouse", "instances": 9, "metric_value": 0.2667, "depth": 9}
if obj[6]<=1.0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.48, "depth": 10}
if obj[8]<=0:
return 'True'
else: return 'True'
elif obj[6]>1.0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[0]>1:
# {"feature": "Distance", "instances": 18, "metric_value": 0.4148, "depth": 8}
if obj[9]>1:
# {"feature": "Coffeehouse", "instances": 15, "metric_value": 0.3905, "depth": 9}
if obj[6]>1.0:
# {"feature": "Direction_same", "instances": 8, "metric_value": 0.3333, "depth": 10}
if obj[8]<=0:
return 'False'
elif | |
# riboflask.py
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.transforms import blended_transform_factory
import mpld3
import logging
from mpld3 import plugins,utils
import collections
from sqlitedict import SqliteDict
import pandas as pd
from fetch_shelve_reads2 import get_reads,get_seq_var,get_readlength_breakdown
import sqlite3
import os
import config
from new_plugins import InteractiveLegendPlugin,PointHTMLTooltip,TopToolbar,DownloadProfile,DownloadPNG
import time
# CSS for popup tables that appear when hovering over aug codons
# (injected into the mpld3 PointHTMLTooltip plugin).
point_tooltip_css = """
table
{
border-collapse: collapse;
}
th
{
color: #000000;
background-color: #d2d4d8;
}
td
{
background-color: #ffffff;
}
table, th, td
{
font-family:Arial, Helvetica, sans-serif;
border: 0px solid black;
text-align: left;
}
"""

# Plot colours for the three reading frames (frames 1, 2, 3 in the legend).
color_dict = {'frames': ['#FF4A45', '#64FC44', '#5687F9']}
def get_user_defined_seqs(seq, seqhili):
    """Find occurrences of user-specified subsequences in a transcript.

    Subsequences may use IUPAC ambiguity codes (R, Y, N, ...); T is treated
    as U on both sides of the comparison.

    :param seq: The transcript sequence (DNA or RNA).
    :param seqhili: List of subsequences to highlight.
    :return: Tuple ``(near_cog_starts, signalhtml)``, both dicts keyed by
        reading frame (0-2). ``near_cog_starts`` holds 1-based match
        positions; ``signalhtml`` holds an HTML tooltip table per match.
    """
    # IUPAC nucleotide ambiguity codes mapped to the bases they stand for.
    # (The original had a duplicate "D" key; only one is needed.)
    iupac_dict = {"A": ["A"], "U": ["U"], "G": ["G"], "C": ["C"],
                  "R": ["A", "G"], "Y": ["C", "U"], "S": ["G", "C"],
                  "W": ["A", "U"], "K": ["G", "U"], "M": ["A", "C"],
                  "B": ["C", "G", "U"], "D": ["A", "G", "U"],
                  "H": ["A", "C", "U"], "V": ["A", "C", "G"],
                  "N": ["A", "U", "G", "C"]}
    signalhtml = {0: [], 1: [], 2: []}
    near_cog_starts = {0: [], 1: [], 2: []}
    seq = seq.replace("T", "U")
    for i in range(0, len(seq)):
        for subseq in seqhili:
            subseq = subseq.upper().replace("T", "U").replace(" ", "")
            partial_seq = list(seq[i:i + len(subseq)])
            # Skip windows that run off the end of the sequence.
            if len(partial_seq) != len(subseq):
                continue
            # Substitute each base that satisfies the corresponding IUPAC
            # code with the code itself, so a plain string comparison below
            # detects a match.
            for x in range(0, len(subseq)):
                char = subseq[x]
                if partial_seq[x] in iupac_dict[char]:
                    partial_seq[x] = char
            partial_seq = "".join(partial_seq)
            if partial_seq == subseq:
                near_cog_starts[i % 3].append(i + 1)
                datadict = {'sequence': [subseq]}
                df = pd.DataFrame(datadict, columns=(["sequence"]))
                label = df.iloc[[0], :].T
                label.columns = ["Position: {}".format(i)]
                signalhtml[i % 3].append(str(label.to_html()))
    return near_cog_starts, signalhtml
def merge_dicts(dict1, dict2):
    """Merge nested count dicts of the form ``{nuc: {pos: count}}``.

    ``dict1`` is modified in place and returned: positions present in both
    inputs have their counts summed; everything else is copied from
    ``dict2``. (The leftover debug ``print`` of both full dicts was removed;
    it dumped potentially very large structures on every call.)

    :param dict1: Destination dict; mutated and returned.
    :param dict2: Source dict; not modified (though inner dicts new to
        ``dict1`` are shared by reference, as before).
    :return: ``dict1``.
    """
    for nuc in dict2:
        if nuc not in dict1:
            dict1[nuc] = dict2[nuc]
        else:
            for pos in dict2[nuc]:
                dict1[nuc][pos] = dict1[nuc].get(pos, 0) + dict2[nuc][pos]
    return dict1
def generate_plot(tran, ambig, min_read, max_read,lite,ribocoverage,organism,readscore, noisered, primetype, minfiles,nucseq, user_hili_starts, user_hili_stops,uga_diff,file_paths_dict, short_code, color_readlen_dist, background_col,uga_col, uag_col, uaa_col,advanced,seqhili,seq_rules,title_size,
subheading_size,axis_label_size,marker_size, transcriptome, trips_uploads_location,cds_marker_size,cds_marker_colour,legend_size,ribo_linewidth, secondary_readscore,pcr,mismatches, hili_start, hili_stop):
if lite == "n" and ribocoverage == True:
return_str = "Error: Cannot display Ribo-Seq Coverage when 'Line Graph' is turned off"
return return_str
labels = ["Frame 1 profiles","Frame 2 profiles","Frame 3 profiles","RNA", "Exon Junctions"]
start_visible=[True, True, True, True, True]
if mismatches == True:
labels.append("Mismatches A")
labels.append("Mismatches T")
labels.append("Mismatches G")
labels.append("Mismatches C")
start_visible.append(False)
start_visible.append(False)
start_visible.append(False)
start_visible.append(False)
start_visible.append(True)
labels.append("CDS markers")
#This is a list of booleans that decide if the interactive legends boxes are filled in or not.Needs to be same length as labels
stop_codons = ["TAG","TAA","TGA"]
frame_orfs = {1:[],2:[],3:[]}
connection = sqlite3.connect('{}/trips.sqlite'.format(config.SCRIPT_LOC))
connection.text_factory = str
cursor = connection.cursor()
cursor.execute("SELECT owner FROM organisms WHERE organism_name = '{}' and transcriptome_list = '{}';".format(organism, transcriptome))
owner = (cursor.fetchone())[0]
if owner == 1:
if os.path.isfile("{0}/{1}/{2}/{2}.{3}.sqlite".format(config.SCRIPT_LOC, config.ANNOTATION_DIR,organism,transcriptome)):
transhelve = sqlite3.connect("{0}/{1}/{2}/{2}.{3}.sqlite".format(config.SCRIPT_LOC, config.ANNOTATION_DIR,organism,transcriptome))
else:
return_str = "Cannot find annotation file {}.{}.sqlite".format(organism,transcriptome)
return return_str
else:
transhelve = sqlite3.connect("{0}transcriptomes/{1}/{2}/{3}/{2}_{3}.sqlite".format(trips_uploads_location,owner,organism,transcriptome))
connection.close()
cursor = transhelve.cursor()
cursor.execute("SELECT * from transcripts WHERE transcript = '{}'".format(tran))
result = cursor.fetchone()
traninfo = {"transcript":result[0] , "gene":result[1], "length":result[2] , "cds_start":result[3] , "cds_stop":result[4] , "seq":result[5] ,
"strand":result[6], "stop_list":result[7].split(","),"start_list":result[8].split(","), "exon_junctions":result[9].split(","),
"tran_type":result[10], "principal":result[11]}
try:
traninfo["stop_list"] = [int(x) for x in traninfo["stop_list"]]
except:
traninfo["stop_list"] = []
try:
traninfo["start_list"] = [int(x) for x in traninfo["start_list"]]
except:
traninfo["start_list"] = []
if str(traninfo["exon_junctions"][0]) != "":
traninfo["exon_junctions"] = [int(x) for x in traninfo["exon_junctions"]]
else:
traninfo["exon_junctions"] = []
all_cds_regions = []
# Check if the 'coding_regions' table exists
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='coding_regions';")
result = cursor.fetchone()
if result != None:
cursor.execute("SELECT * from coding_regions WHERE transcript = '{}'".format(tran))
result = cursor.fetchall()
for row in result:
all_cds_regions.append((row[1],row[2]))
transhelve.close()
gene = traninfo["gene"]
tranlen = traninfo["length"]
cds_start = traninfo["cds_start"]
cds_stop = traninfo["cds_stop"]
if cds_start == "NULL" or cds_start == None:
cds_start = 0
if cds_stop == "NULL" or cds_stop == None:
cds_stop = 0
all_starts = traninfo["start_list"]
all_stops = {"TAG":[],"TAA":[],"TGA":[]}
exon_junctions = traninfo["exon_junctions"]
seq = traninfo["seq"].upper()
for i in range(0,len(seq)):
if seq[i:i+3] in stop_codons:
all_stops[seq[i:i+3]].append(i+1)
# Error occurs if one of the frames is empty for any given start/stop, so we initialise with -5 as this won't be seen by user and will prevent the error
start_stop_dict = {1:{"starts":[-5], "stops":{"TGA":[-5],"TAG":[-5],"TAA":[-5]}},
2:{"starts":[-5], "stops":{"TGA":[-5],"TAG":[-5],"TAA":[-5]}},
3:{"starts":[-5], "stops":{"TGA":[-5],"TAG":[-5],"TAA":[-5]}}}
for start in all_starts:
rem = ((start-1)%3)+1
start_stop_dict[rem]["starts"].append(start)
for stop in all_stops:
for stop_pos in all_stops[stop]:
rem = ((stop_pos-1)%3)+1
start_stop_dict[rem]["stops"][stop].append(stop_pos)
#find all open reading frames
for frame in [1,2,3]:
for start in start_stop_dict[frame]["starts"]:
best_stop_pos = 10000000
for stop in start_stop_dict[frame]["stops"]:
for stop_pos in start_stop_dict[frame]["stops"][stop]:
if stop_pos > start and stop_pos < best_stop_pos:
best_stop_pos = stop_pos
if best_stop_pos != 10000000:
frame_orfs[frame].append((start, best_stop_pos))
#self.update_state(state='PROGRESS',meta={'current': 100, 'total': 100,'status': "Fetching RNA-Seq Reads"})
all_rna_reads, rna_seqvar_dict = get_reads(ambig, min_read, max_read, tran, file_paths_dict,tranlen,True, organism, False,noisered, primetype,"rnaseq",readscore,pcr,get_mismatches=mismatches)
#self.update_state(state='PROGRESS',meta={'current': 100, 'total': 100,'status': "Fetching Ribo-Seq Reads"})
all_subcodon_reads,ribo_seqvar_dict = get_reads(ambig, min_read, max_read, tran, file_paths_dict,tranlen,ribocoverage, organism, True,noisered, primetype,"riboseq",readscore,secondary_readscore,pcr,get_mismatches=mismatches)
print ("ribo_seqvar_dict", ribo_seqvar_dict)
seq_var_dict = merge_dicts(ribo_seqvar_dict, rna_seqvar_dict)
try:
rnamax = max(all_rna_reads.values())
except:
rnamax = 0
try:
subcodonmax = max(all_subcodon_reads.values())
except:
subcodonmax = 0
y_max = max(1,rnamax, subcodonmax)*1.1
fig = plt.figure(figsize=(13,8))
ax_main = plt.subplot2grid((30,1), (0,0),rowspan=22)
ax_main.spines['bottom'].set_visible(False)
for s in ['bottom', 'left','top','right']:
ax_main.spines[s].set_linewidth(15)
ax_main.spines[s].set_color("red")
alt_seq_type_vars = []
# Plot any alternative sequence types if there are any
for seq_type in file_paths_dict:
if seq_type != "riboseq" and seq_type != "rnaseq":
if file_paths_dict[seq_type] == {}:
continue
if seq_rules[seq_type]["frame_breakdown"] == 1:
frame_breakdown = True
else:
frame_breakdown = False
alt_sequence_reads,empty_seqvar_dict = get_reads(ambig, min_read, max_read, tran, file_paths_dict,tranlen,True, organism, frame_breakdown,noisered, primetype,seq_type,readscore)
if frame_breakdown == False:
alt_seq_plot = ax_main.plot(alt_sequence_reads.keys(), alt_sequence_reads.values(), alpha=1, label = seq_type, zorder=2, color='#5c5c5c', linewidth=2)
labels.append(seq_type)
start_visible.append(True)
alt_seq_type_vars.append(alt_seq_plot)
else:
alt_frame_counts = {0: collections.OrderedDict(), 1: collections.OrderedDict(), 2: collections.OrderedDict()}
for key in alt_sequence_reads:
start = key
rem = start % 3
if rem == 1: # frame 1
frame = 2
elif rem == 2: # frame 2
frame = 0
elif rem == 0: # frame 3
frame = 1
alt_frame_counts[frame][key] = alt_sequence_reads[key]
frame0_altseqplot = ax_main.plot(alt_frame_counts[0].keys(), alt_frame_counts[0].values(), alpha=0.75, label = seq_type+"frame0", zorder=2, color= "#FF4A45", linewidth=2)
frame1_altseqplot = ax_main.plot(alt_frame_counts[1].keys(), alt_frame_counts[1].values(), alpha=0.75, label = seq_type+"frame1", zorder=2, color= "#64FC44", linewidth=2)
frame2_altseqplot = ax_main.plot(alt_frame_counts[2].keys(), alt_frame_counts[2].values(), alpha=0.75, label = seq_type+"frame2*", zorder=2, color= "#5687F9", linewidth=2)
labels.append(seq_type+"frame 1")
labels.append(seq_type+"frame 2")
labels.append(seq_type+"frame 3")
start_visible.append(True)
start_visible.append(True)
start_visible.append(True)
alt_seq_type_vars.append(frame0_altseqplot)
alt_seq_type_vars.append(frame1_altseqplot)
alt_seq_type_vars.append(frame2_altseqplot)
if max(alt_sequence_reads.values()) > y_max:
y_max = max(alt_sequence_reads.values())
label = 'Reads'
ax_main.set_ylabel(label, fontsize=axis_label_size, labelpad=30)
label = 'Position (nucleotides)'
ax_main.set_xlabel(label, fontsize=axis_label_size,labelpad=-10)
ax_main.set_ylim(0, y_max)
if lite == "n":
rna_bars = ax_main.bar(all_rna_reads.keys(), all_rna_reads.values(), alpha=1, label = labels, zorder=1,color='lightgray', linewidth=0, width=1)
else:
rna_bars = ax_main.plot(all_rna_reads.keys(), all_rna_reads.values(), alpha=1, label = labels, zorder=1,color='#a7adb7', linewidth=4)
cds_markers = ax_main.plot((cds_start,cds_start), (0, y_max*0.97), color=cds_marker_colour,linestyle = 'solid', linewidth=cds_marker_size)
ax_main.text(cds_start,y_max*0.97,"CDS start",fontsize=18,color="black",ha="center")
#ax_main.annotate('axes fraction',xy=(3, 1), xycoords='data',xytext=(0.8, 0.95), textcoords='axes fraction',arrowprops=dict(facecolor='black', shrink=0.05),horizontalalignment='right', verticalalignment='top')
#trans = blended_transform_factory(ax_main.transData, ax_main.transAxes)
#ax_main.annotate('CDS RELATIVE START',(100,100),transform=trans)
#tform = blended_transform_factory(ax_main.transData, ax_main.transAxes)
#r=10
#ax_main.text(cds_start, 0.9, "CDS START OR WHATEVER", fontsize='xx-large', color='r', transform=tform)
cds_markers += ax_main.plot((cds_stop+1,cds_stop+1), (0, y_max*0.97), color=cds_marker_colour,linestyle = 'solid', linewidth=cds_marker_size)
ax_main.text(cds_stop,y_max*0.97,"CDS stop",fontsize=18,color="black",ha="center")
ax_cds = plt.subplot2grid((31,1), (26,0),rowspan=1,sharex=ax_main)
ax_cds.set_facecolor("white")
ax_cds.set_ylabel('Merged CDS', labelpad=4, verticalalignment='center',horizontalalignment="right",rotation="horizontal",color="black",fontsize=(axis_label_size/1.5))
ax_f1 = plt.subplot2grid((31,1), (27,0),rowspan=1,sharex=ax_main)
ax_f1.set_facecolor(color_dict['frames'][0])
ax_f2 = plt.subplot2grid((31,1), (28,0),rowspan=1,sharex=ax_main)
ax_f2.set_facecolor(color_dict['frames'][1])
ax_f3 = plt.subplot2grid((31,1), (29,0),rowspan=1,sharex=ax_main)
ax_f3.set_facecolor(color_dict['frames'][2])
ax_nucseq = plt.subplot2grid((31,1), (30,0),rowspan=1,sharex=ax_main)
ax_nucseq.set_xlabel('Transcript: {} Length: {} nt'.format(tran, tranlen), fontsize=subheading_size)
for tup in all_cds_regions:
ax_cds.fill_between([tup[0],tup[1]], [1, 1],zorder=0, alpha=1, color="#001285")
#plot a dummy exon junction at postion -1, needed in cases there are no exon junctions, this wont be seen
allexons = ax_main.plot((-1,-1), (0, 1), alpha=0.01,color='black',linestyle = '-.', linewidth=2)
print ("Exon junctions", exon_junctions)
for exon in exon_junctions:
allexons += ax_main.plot((exon,exon), (0, y_max), alpha=0.95,color='black',linestyle = ':', linewidth=3)
#dictionary for each frame in which the keys are the posistions and the values are the counts
frame_counts = {0: collections.OrderedDict(), 1: collections.OrderedDict(), 2: collections.OrderedDict()}
for key in all_subcodon_reads:
rem = key % 3
if rem == 1: # frame 1
frame = 0
elif rem == 2: # frame 2
frame = 1
elif rem == 0: # frame 3
frame = 2
frame_counts[frame][key] = all_subcodon_reads[key]
if lite == "n":
frame_counts[frame][key+1] = 0
frame_counts[frame][key+2] = 0
if lite == "n":
frame0subpro = ax_main.bar(frame_counts[0].keys(), frame_counts[0].values(), alpha=0.75, label = labels, zorder=2, color= "#FF4A45", edgecolor="#FF4A45", width=1, linewidth=4)
frame1subpro = ax_main.bar(frame_counts[1].keys(), frame_counts[1].values(), alpha=0.75, label = labels, zorder=2, color= "#64FC44", edgecolor="#64FC44", width=1, linewidth=4)
frame2subpro = ax_main.bar(frame_counts[2].keys(), frame_counts[2].values(), alpha=0.75, label = labels, zorder=2, color= "#5687F9", edgecolor="#5687F9", width=1, linewidth=4)
else:
frame0subpro = ax_main.plot(frame_counts[0].keys(), frame_counts[0].values(), alpha=0.75, label = labels, zorder=2, color= "#FF4A45", linewidth=ribo_linewidth)
frame1subpro = ax_main.plot(frame_counts[1].keys(), frame_counts[1].values(), alpha=0.75, label = labels, zorder=2, color= "#64FC44", linewidth=ribo_linewidth)
frame2subpro = ax_main.plot(frame_counts[2].keys(), frame_counts[2].values(), alpha=0.75, label = labels, zorder=2, color= "#5687F9", linewidth=ribo_linewidth)
if mismatches == True:
a_mismatches = ax_main.plot(seq_var_dict["A"].keys(), seq_var_dict["A"].values(),alpha=0.01, label = labels, zorder=2, color= "purple", linewidth=2)
t_mismatches = ax_main.plot(seq_var_dict["T"].keys(), seq_var_dict["T"].values(),alpha=0.01, label = labels, zorder=2, color= "yellow", linewidth=2)
g_mismatches = ax_main.plot(seq_var_dict["G"].keys(), seq_var_dict["G"].values(),alpha=0.01, label = labels, zorder=2, color= "orange", linewidth=2)
c_mismatches = ax_main.plot(seq_var_dict["C"].keys(), seq_var_dict["C"].values(),alpha=0.01, label = labels, zorder=2, color= "pink", linewidth=2)
xy = 0
if nucseq == True:
ax_nucseq.set_facecolor(background_col)
mrnaseq = seq.replace("T","U")
color_list = ["#FF4A45","#64FC44","#5687F9"]
char_frame = 0
for char in mrnaseq:
ax_nucseq.text((xy+1)-0.1,0.2,mrnaseq[xy],fontsize=20,color=color_list[char_frame%3])
xy += 1
char_frame += 1
# If the user passed a list of sequences to highlight, find and plot them here.
if seqhili != ['']:
near_cog_starts,signalhtml = get_user_defined_seqs(seq, seqhili)
for slip in near_cog_starts[0]:
try:
hili_sequences += ax_f1.plot((slip, slip),(0,0.5), alpha=1, label = labels, zorder=4,color='black', linewidth=5)
except Exception as e:
hili_sequences = ax_f1.plot((slip, slip),(0,0.5), alpha=1, label = labels, zorder=4, color='black', linewidth=5)
for slip in near_cog_starts[1]:
try:
hili_sequences += ax_f2.plot((slip, slip),(0,0.5), alpha=1, label = labels, zorder=4,color='black', linewidth=5)
except:
hili_sequences = ax_f2.plot((slip, slip),(0,0.5), alpha=1, label = labels, zorder=4,color='black',linewidth=5)
for slip in near_cog_starts[2]:
try:
hili_sequences += ax_f3.plot((slip, slip),(0,0.5), alpha=1, label = labels, zorder=4,color='black', linewidth=5)
except:
hili_sequences = ax_f3.plot((slip, slip),(0,0.5), alpha=1, label = labels, zorder=4,color='black', linewidth=5)
#Plot sequence identifiers which will create a popup telling user what the subsequence is (useful if they have passed multiple subsequences)
frame1_subsequences = ax_f1.plot(near_cog_starts[0], [0.25]*len(near_cog_starts[0]), 'o', color='b',mec='k', ms=12, mew=1, alpha=0, zorder=4)
frame2_subsequences = ax_f2.plot(near_cog_starts[1], [0.25]*len(near_cog_starts[1]), 'o', color='b',mec='k', ms=12, mew=1, alpha=0, zorder=4)
frame3_subsequences = ax_f3.plot(near_cog_starts[2], [0.25]*len(near_cog_starts[2]), 'o', color='b',mec='k', ms=12, mew=1, alpha=0, zorder=4)
#Attach the labels to the subsequences plotted above
signaltooltip1 = PointHTMLTooltip(frame1_subsequences[0], signalhtml[0], voffset=10, hoffset=10, css=point_tooltip_css)
signaltooltip2 = PointHTMLTooltip(frame2_subsequences[0], signalhtml[1], voffset=10, hoffset=10, css=point_tooltip_css)
signaltooltip3 = PointHTMLTooltip(frame3_subsequences[0], signalhtml[2], voffset=10, hoffset=10, css=point_tooltip_css)
for axisname in (ax_f1, ax_f2, ax_f3,ax_nucseq,ax_cds):
axisname.tick_params(top=False, bottom=False, labelleft=False, labelright=False, labelbottom=False)
for label in ax_main.xaxis.get_majorticklabels():
label.set_fontsize(36)
for axis, frame in ((ax_f1, 1), (ax_f2, 2), (ax_f3, 3)):
axis.set_xlim(1, tranlen)
starts = [(item, 1) for item in start_stop_dict[frame]['starts']]
uag_stops = [(item, 1) for item in start_stop_dict[frame]['stops']['TAG']]
uaa_stops = [(item, 1) for item in start_stop_dict[frame]['stops']['TAA']]
uga_stops = [(item, 1) for item in start_stop_dict[frame]['stops']['TGA']]
#Plot start positions
axis.broken_barh(starts, (0.30, 1),color="white", zorder=2,linewidth=7)
#Plot stop positions
axis.broken_barh(uag_stops, (0, 1), color=uag_col, zorder=2, linewidth=4)
axis.broken_barh(uaa_stops, (0, 1), color=uaa_col, zorder=2, linewidth=4)
axis.broken_barh(uga_stops, (0, 1), color=uga_col, zorder=2, linewidth=4)
axis.set_ylim(0, 1)
axis.set_ylabel('Frame {}'.format(frame), labelpad=4, verticalalignment='center',horizontalalignment="right",rotation="horizontal",color="black",fontsize=(axis_label_size/1.5))
title_str = '{} ({})'.format(gene,short_code)
plt.title(title_str, fontsize=50,y=38)
line_collections = [frame0subpro, frame1subpro, frame2subpro, rna_bars, allexons]
if mismatches == True:
line_collections.append(a_mismatches)
line_collections.append(t_mismatches)
line_collections.append(g_mismatches)
line_collections.append(c_mismatches)
line_collections.append(cds_markers)
if not (hili_start == 0 and hili_stop == 0):
hili_start = int(hili_start)
hili_stop = int(hili_stop)
hili | |
<gh_stars>1-10
from bilanci.tree_dict_models import deep_sum
from bilanci.utils import couch, nearly_equal
from bilanci.utils.comuni import FLMapper
from django.test import TestCase
from django.core.management import BaseCommand
from django.conf import settings
from collections import OrderedDict
from optparse import make_option
import logging
__author__ = 'guglielmo'
class Command(BaseCommand, TestCase):
option_list = BaseCommand.option_list + (
make_option('--years',
dest='years',
default='',
help='Years to fetch. From 2002 to 2012. Use one of this formats: 2012 or 2003-2006 or 2002,2004,2006'),
make_option('--cities',
dest='cities',
default='',
help='Cities codes or slugs. Use comma to separate values: Roma,Napoli,Torino or "All"'),
make_option('--couchdb-server',
dest='couchdb_server',
default=settings.COUCHDB_DEFAULT_SERVER,
help='CouchDB server to connect to (defaults to staging).'),
)
help = 'Verify the bilanci_simple values and sums.'
logger = logging.getLogger('management')
comuni_dicts = {}
def handle(self, *args, **options):
verbosity = options['verbosity']
if verbosity == '0':
self.logger.setLevel(logging.ERROR)
elif verbosity == '1':
self.logger.setLevel(logging.WARNING)
elif verbosity == '2':
self.logger.setLevel(logging.INFO)
elif verbosity == '3':
self.logger.setLevel(logging.DEBUG)
cities_codes = options['cities']
if not cities_codes:
raise Exception("Missing city parameter")
mapper = FLMapper()
cities = mapper.get_cities(cities_codes)
if cities_codes.lower() != 'all':
self.logger.info("Processing cities: {0}".format(cities))
years = options['years']
if not years:
raise Exception("Missing years parameter")
if "-" in years:
(start_year, end_year) = years.split("-")
years = range(int(start_year), int(end_year)+1)
else:
years = [int(y.strip()) for y in years.split(",") if 2001 < int(y.strip()) < 2014]
if not years:
raise Exception("No suitable year found in {0}".format(years))
self.logger.info("Processing years: {0}".format(years))
couchdb_server_name = options['couchdb_server']
if couchdb_server_name not in settings.COUCHDB_SERVERS:
raise Exception("Unknown couchdb server name.")
###
# Couchdb connections
###
couchdb_server_alias = options['couchdb_server']
if couchdb_server_alias not in settings.COUCHDB_SERVERS:
raise Exception("Unknown couchdb server alias.")
# hook to simple DB
simple_db_name = 'bilanci_simple'
simple_db = couch.connect(
simple_db_name,
couchdb_server_settings=settings.COUCHDB_SERVERS[couchdb_server_alias]
)
self.logger.info("Hooked to simple DB: {0}".format(simple_db_name))
# hook to normalized DB (for comparisons)
norm_db_name = 'bilanci_voci'
norm_db = couch.connect(
norm_db_name,
couchdb_server_settings=settings.COUCHDB_SERVERS[couchdb_server_alias]
)
self.logger.info("Hooked to normalized DB: {0}".format(norm_db_name))
entrate_sections = OrderedDict([
('Accertamenti', 0),
('Riscossioni in conto competenza', 1),
('Riscossioni in conto residui', 2),
])
spese_sections = OrderedDict([
('Impegni', 0),
('Pagamenti in conto competenza', 1),
('Pagamenti in conto residui', 2),
])
# totali_* will hold a list of all voices to be compared
# norm refers to the normalized tree
# simp refers to the simple tree
totali_preventivo_entrate = [
{'norm': ('preventivo', '02',
'quadro-2-entrate-entrate-tributarie',
'data', 'totale titolo i', 0),
'simp': ('preventivo', 'ENTRATE', 'Imposte e tasse', 'TOTALE')},
{'norm': ('preventivo', '02',
'quadro-2-entrate-entrate-derivanti-da-contributi-e-trasferimenti-correnti-dello-stato-della-regione-e-di-altri-enti-pubblici-anche-in-rapporto-funzioni-delegate-dalla-regione',
'data', 'totale titolo ii', 0),
'simp': ('preventivo', 'ENTRATE', 'Contributi pubblici', 'TOTALE')},
{'norm': ('preventivo', '02',
'quadro-2-entrate-entrate-extratributarie',
'data', 'totale titolo iii', 0),
'simp': ('preventivo', 'ENTRATE', 'Entrate extratributarie', 'TOTALE')},
{'norm': ('preventivo', '02',
'quadro-2-entrate-entrate-derivanti-da-alienazione-da-trasferimenti-di-capitali-e-da-riscossioni-di-crediti',
'data', 'totale titolo iv', 0),
'simp': ('preventivo', 'ENTRATE', 'Vendite e trasferimenti di capitali', 'TOTALE')},
{'norm': ('preventivo', '02',
'quadro-2-entrate-entrate-derivanti-da-accensioni-di-prestiti',
'data', 'totale titolo v', 0),
'simp': ('preventivo', 'ENTRATE', 'Prestiti')},
{'norm': ('preventivo', '02',
'quadro-2-entrate-entrate-derivanti-da-servizi-per-conto-di-terzi',
'data', 'totale titolo vi', 0),
'simp': ('preventivo', 'ENTRATE', 'Entrate per conto terzi')},
]
totali_consuntivo_entrate = []
for section_name, section_idx in entrate_sections.items():
totali_consuntivo_entrate.extend([
{'norm': ('consuntivo', '02',
'quadro-2-entrate-titolo-i-entrate-tributarie',
'data', 'totale entrate tributarie', section_idx),
'simp': ('consuntivo', 'ENTRATE', section_name, 'Imposte e tasse', 'TOTALE')},
{'norm': ('consuntivo', '02',
'quadro-2-entrate-titolo-ii-entrate-derivanti-da-contributi-e-trasferimenti-correnti',
'data', 'totale entrate derivanti da contributi e trasferimenti correnti', section_idx),
'simp': ('consuntivo', 'ENTRATE', section_name, 'Contributi pubblici', 'TOTALE')},
{'norm': ('consuntivo', '02',
'quadro-2-entrate-titolo-iii-entrate-extratributarie',
'data', 'totale entrate extratributarie', section_idx),
'simp': ('consuntivo', 'ENTRATE', section_name, 'Entrate extratributarie', 'TOTALE')},
{'norm': ('consuntivo', '02',
'quadro-2-entrate-titolo-iv-entrate-derivanti-da-alienazione-da-trasfer-di-capitali-e-da-riscossioni-di-crediti',
'data', 'totale entrate derivanti da alienazione, trasferimenti di capitali e da riscossioni di crediti', section_idx),
'simp': ('consuntivo', 'ENTRATE', section_name, 'Vendite e trasferimenti di capitali', 'TOTALE')},
{'norm': ('consuntivo', '02',
'quadro-2-entrate-titolo-v-entrate-derivanti-da-accensione-di-prestiti',
'data', 'totale entrate derivanti da accensione di prestiti', section_idx),
'simp': ('consuntivo', 'ENTRATE', section_name, 'Prestiti')},
{'norm': ('consuntivo', '02',
'quadro-2-entrate-titolo-vi-entrate-da-servizi-per-conto-di-terzi',
'data', 'totale entrate da servizi per conto di terzi', section_idx),
'simp': ('consuntivo', 'ENTRATE', section_name, 'Entrate per conto terzi')},
])
totali_consuntivo_spese = []
# quadro 3
# section_name and section_idx contains the Impegni/Competenze/Residui name and indexes
for section_name, section_idx in spese_sections.items():
totali_consuntivo_spese.extend([
{'norm': ('consuntivo', '03',
'quadro-3-riepilogo-generale-delle-spese',
'data', 'totale generale delle spese', section_idx),
'simp': ('consuntivo', 'SPESE', section_name, 'TOTALE')},
{'norm': ('consuntivo', '03',
'quadro-3-riepilogo-generale-delle-spese',
'data', 'titolo i - spese correnti', section_idx),
'simp': ('consuntivo', 'SPESE', section_name, 'Spese correnti', 'TOTALE')},
{'norm': ('consuntivo', '03',
'quadro-3-riepilogo-generale-delle-spese',
'data', 'titolo ii - spese in c/capitale', section_idx),
'simp': ('consuntivo', 'SPESE', section_name, 'Spese per investimenti', 'TOTALE')},
{'norm': ('consuntivo', '03',
'quadro-3-riepilogo-generale-delle-spese',
'data', 'titolo iii - spese per rimborso di prestiti', section_idx),
'simp': ('consuntivo', 'SPESE', section_name, 'Prestiti')},
{'norm': ('consuntivo', '03',
'quadro-3-riepilogo-generale-delle-spese',
'data', 'titolo iv - spese per servirzi per conto di terzi', section_idx),
'simp': ('consuntivo', 'SPESE', section_name, 'Spese per conto terzi')},
])
# quadro 4
totali_consuntivo_spese.extend([
{'norm': ('consuntivo', '04',
'quadro-4-a-impegni',
'data', 'totale', -1),
'simp': ('consuntivo', 'SPESE', 'Impegni', 'Spese correnti', 'TOTALE')},
{'norm': ('consuntivo', '04',
'quadro-4-b-pagamenti-in-conto-competenza',
'data', 'totali', -1),
'simp': ('consuntivo', 'SPESE', 'Pagamenti in conto competenza', 'Spese correnti', 'TOTALE')},
{'norm': ('consuntivo', '04',
'quadro-4-c-pagamenti-in-conto-residui',
'data', 'totali', -1),
'simp': ('consuntivo', 'SPESE', 'Pagamenti in conto residui', 'Spese correnti', 'TOTALE')},
])
# quadro 5
totali_consuntivo_spese.extend([
{'norm': ('consuntivo', '05',
'quadro-5-a-impegni',
'data', 'totale', -1),
'simp': ('consuntivo', 'SPESE', 'Impegni', 'Spese per investimenti', 'TOTALE')},
{'norm': ('consuntivo', '05',
'quadro-5-b-pagamenti-in-conto-competenza',
'data', 'totale', -1),
'simp': ('consuntivo', 'SPESE', 'Pagamenti in conto competenza', 'Spese per investimenti', 'TOTALE')},
{'norm': ('consuntivo', '05',
'quadro-5-c-pagamenti-in-conto-residui',
'data', 'totale', -1),
'simp': ('consuntivo', 'SPESE', 'Pagamenti in conto residui', 'Spese per investimenti', 'TOTALE')},
])
somme_consuntivo_nodes = []
for section_name in entrate_sections.keys():
somme_consuntivo_nodes.extend([
('consuntivo', 'ENTRATE', section_name, 'Imposte e tasse'),
('consuntivo', 'ENTRATE', section_name, 'Imposte e tasse', 'Imposte'),
('consuntivo', 'ENTRATE', section_name, 'Imposte e tasse', 'Tasse'),
('consuntivo', 'ENTRATE', section_name, 'Contributi pubblici'),
('consuntivo', 'ENTRATE', section_name, 'Entrate extratributarie'),
('consuntivo', 'ENTRATE', section_name, 'Entrate extratributarie', 'Servizi pubblici'),
('consuntivo', 'ENTRATE', section_name, 'Entrate extratributarie', 'Proventi di beni dell\'ente'),
('consuntivo', 'ENTRATE', section_name, 'Vendite e trasferimenti di capitali'),
('consuntivo', 'ENTRATE', section_name, 'Vendite e trasferimenti di capitali', 'Trasferimenti di capitali da privati'),
])
somme_preventivo_nodes = [
('preventivo', 'ENTRATE', 'Imposte e tasse'),
('preventivo', 'ENTRATE', 'Imposte e tasse', 'Imposte'),
('preventivo', 'ENTRATE', 'Imposte e tasse', 'Tasse'),
('preventivo', 'ENTRATE', 'Contributi pubblici'),
('preventivo', 'ENTRATE', 'Entrate extratributarie'),
('preventivo', 'ENTRATE', 'Vendite e trasferimenti di capitali'),
]
for city in cities:
for year in years:
self.logger.info("Processing city of {0}, year {1}".format(
city, year
))
code = "{}_{}".format(year, city)
norm_doc_id = "{}_{}".format(year, city)
simple_doc_id = city
# both documents need to exist in the dbs
self.assertTrue(self.test_couch_doc_exists(norm_db, norm_doc_id),
"Could not find {}".format(norm_doc_id))
self.assertTrue(self.test_couch_doc_exists(simple_db, simple_doc_id))
norm_doc = norm_db[norm_doc_id]
simple_doc = simple_db[simple_doc_id]
# preventivo tests
if len(simple_doc[str(year)]['preventivo'].keys()) > 0:
self.logger.debug("::::: Testing first level totals for preventivo entrate")
self.test_totali(totali_preventivo_entrate, simple_doc, norm_doc, year)
self.logger.debug("::::: Testing totale - funzioni - interventi for preventivo/spese")
for tipo_spese in (u'Spese correnti', u'Spese per investimenti'):
node = simple_doc[str(year)]['preventivo']['SPESE'][tipo_spese]
label = u"/Preventivo/{0}".format(tipo_spese)
self.test_totale_funzioni_interventi(label, node, year)
self.logger.debug("::::: Testing inner sums for preventivo entrate")
self.test_somme(somme_preventivo_nodes, simple_doc, year)
# consuntivo tests
if len(simple_doc[str(year)]['consuntivo'].keys()) > 0:
self.logger.debug("::::: Testing first level totals for consuntivo entrate")
self.test_totali(totali_consuntivo_entrate, simple_doc, norm_doc, year)
self.logger.debug("::::: Testing first level totals for consuntivo spese")
self.test_totali(totali_consuntivo_spese, simple_doc, norm_doc, year)
self.logger.debug("::::: Testing totale - funzioni - interventi for consuntivo/spese")
for section_name in spese_sections.keys():
for tipo_spese in ('Spese correnti', 'Spese per investimenti'):
node = simple_doc[str(year)]['consuntivo']['SPESE'][section_name][tipo_spese]
label = u"/Consuntivo/{0}/{1}".format(section_name, tipo_spese)
self.test_totale_funzioni_interventi(label, node, year)
self.logger.debug("::::: Testing inner sums for consuntivo entrate")
self.test_somme(somme_consuntivo_nodes, simple_doc, year)
###
# TESTS
###
def test_couch_doc_exists(self, couch_db, doc_id):
"""
couch db connection is correct and document exists
"""
return doc_id in couch_db
###
# totals for first level sections in normalized and
# simplified trees are compared
###
def test_totali(self, totali, simple_doc, norm_doc, year):
"""
totals for 1st level sections of the preventivo/entrate in the normalized tree (quadro 2)
are compared with the corresponding values in the simplified tree
"""
for tot in totali:
# extract year section from the simple doc (simple docs contain all years)
tot_simp = simple_doc[str(year)]
tot_norm = norm_doc
# drill through the tree to fetch the leaf value in tot['simp']
for t in tot['simp']:
tot_simp = tot_simp[t]
# drill through the tree to fetch the leaf value in tot['simp']
# catch exception om totale/totali, trying both before failing
# in the normalized tree
for t in tot['norm']:
if t == 'totale':
try:
tot_norm = tot_norm['totale']
except KeyError:
try:
tot_norm = tot_norm['totali']
except KeyError:
# log a warning and break away from the inner for loop
# do not execute the else section
self.logger.warning(
"totale/i key not found in bilanci_voce. node: {0}".format(
tot['norm']
)
)
break
else:
tot_norm = tot_norm[t]
else:
# transform the string representation in the normalized doc,
# into an | |
<filename>opfython/models/unsupervised.py<gh_stars>10-100
"""Unsupervised Optimum-Path Forest.
"""
import time
import numpy as np
import opfython.utils.constants as c
import opfython.utils.exception as e
import opfython.utils.logging as log
from opfython.core import OPF, Heap
from opfython.subgraphs import KNNSubgraph
logger = log.get_logger(__name__)
class UnsupervisedOPF(OPF):
"""An UnsupervisedOPF which implements the unsupervised version of OPF classifier.
References:
<NAME>, <NAME>, <NAME>.
Data clustering as an optimum-path forest problem with applications in image analysis.
International Journal of Imaging Systems and Technology (2009).
"""
def __init__(self, min_k=1, max_k=1, distance='log_squared_euclidean', pre_computed_distance=None):
"""Initialization method.
Args:
min_k (int): Minimum `k` value for cutting the subgraph.
max_k (int): Maximum `k` value for cutting the subgraph.
distance (str): An indicator of the distance metric to be used.
pre_computed_distance (str): A pre-computed distance file for feeding into OPF.
"""
logger.info('Overriding class: OPF -> UnsupervisedOPF.')
super(UnsupervisedOPF, self).__init__(distance, pre_computed_distance)
# Defining the minimum `k` value for cutting the subgraph
self.min_k = min_k
# Defining the maximum `k` value for cutting the subgraph
self.max_k = max_k
logger.info('Class overrided.')
@property
def min_k(self):
"""int: Minimum `k` value for cutting the subgraph.
"""
return self._min_k
@min_k.setter
def min_k(self, min_k):
if not isinstance(min_k, int):
raise e.TypeError('`min_k` should be an integer')
if min_k < 1:
raise e.ValueError('`min_k` should be >= 1')
self._min_k = min_k
@property
def max_k(self):
"""int: Maximum `k` value for cutting the subgraph.
"""
return self._max_k
@max_k.setter
def max_k(self, max_k):
if not isinstance(max_k, int):
raise e.TypeError('`max_k` should be an integer')
if max_k < 1:
raise e.ValueError('`max_k` should be >= 1')
if max_k < self.min_k:
raise e.ValueError('`max_k` should be >= `min_k`')
self._max_k = max_k
    def _clustering(self, n_neighbours):
        """Clusters the subgraph using a `k` value (number of neighbours).

        Two passes: (1) make plateau adjacencies symmetric, so nodes of equal
        density see each other; (2) run a max-heap competition that labels
        each density plateau root with a fresh cluster id and propagates
        labels along maximal min-density paths. Writes `cluster_label`,
        `pred`, `root` and `cost` on every node and sets
        `self.subgraph.n_clusters`.

        Args:
            n_neighbours (int): Number of neighbours to be used.

        """
        # Pass 1: symmetrise adjacency between equal-density (plateau) nodes.
        for i in range(self.subgraph.n_nodes):
            for k in range(n_neighbours):
                # Gathers node `i` adjacent node
                j = int(self.subgraph.nodes[i].adjacency[k])

                # If both nodes' density are equal
                if self.subgraph.nodes[i].density == self.subgraph.nodes[j].density:
                    # Turns on the insertion flag
                    insert = True

                    # For every possible `l` value
                    for l in range(n_neighbours):
                        # Gathers node `j` adjacent node
                        adj = int(self.subgraph.nodes[j].adjacency[l])

                        # If `i` is already adjacent to `j`, do not insert twice
                        if i == adj:
                            # Turns off the insertion flag
                            insert = False

                    # If it is supposed to be inserted
                    if insert:
                        # Inserts node `i` in the adjacency list of `j`
                        self.subgraph.nodes[j].adjacency.insert(0, i)

                        # Increments the amount of adjacent nodes
                        self.subgraph.nodes[j].n_plateaus += 1

        # Pass 2: max-heap competition over node costs.
        # Creating a maximum heap
        h = Heap(size=self.subgraph.n_nodes, policy='max')

        for i in range(self.subgraph.n_nodes):
            # Updates the node's cost on the heap
            h.cost[i] = self.subgraph.nodes[i].cost

            # Defines node's `i` predecessor as NIL
            self.subgraph.nodes[i].pred = c.NIL

            # And its root as its same identifier
            self.subgraph.nodes[i].root = i

            # Inserts the node in the heap
            h.insert(i)

        # Defining an `l` counter (next cluster label to assign)
        l = 0

        while not h.is_empty():
            # Removes the current highest-cost node
            p = h.remove()

            # Appends its index to the ordered list
            self.subgraph.idx_nodes.append(p)

            # A node popped without a predecessor is a new cluster root
            if self.subgraph.nodes[p].pred == c.NIL:
                # Updates its cost on the heap
                h.cost[p] = self.subgraph.nodes[p].density

                # Defines its cluster label as `l`
                self.subgraph.nodes[p].cluster_label = l

                # Increments the cluster identifier
                l += 1

            # Apply current node's cost as the heap's cost
            self.subgraph.nodes[p].cost = h.cost[p]

            # Adjacency includes the extra plateau links added in pass 1
            n_adjacents = self.subgraph.nodes[p].n_plateaus + n_neighbours

            # For every possible adjacent node
            for k in range(n_adjacents):
                # Gathers the adjacent identifier
                q = int(self.subgraph.nodes[p].adjacency[k])

                # Only nodes still in competition (not yet finalised)
                if h.color[q] != c.BLACK:
                    # Path cost is the minimum density along the path
                    current_cost = np.minimum(h.cost[p], self.subgraph.nodes[q].density)

                    # If temporary cost is bigger than heap's cost
                    if current_cost > h.cost[q]:
                        # Apply `q` predecessor as `p`
                        self.subgraph.nodes[q].pred = p

                        # Gathers the same root's identifier
                        self.subgraph.nodes[q].root = self.subgraph.nodes[p].root

                        # And its cluster label
                        self.subgraph.nodes[q].cluster_label = self.subgraph.nodes[p].cluster_label

                        # Updates the heap `q` node and the current cost
                        h.update(q, current_cost)

        # The final number of clusters will be equal to `l`
        self.subgraph.n_clusters = l
def _normalized_cut(self, n_neighbours):
"""Performs a normalized cut over the subgraph using a `k` value (number of neighbours).
Args:
n_neighbours (int): Number of neighbours to be used.
Returns:
The value of the normalized cut.
"""
# Defining an array to represent the internal cluster distances
internal_cluster = np.zeros(self.subgraph.n_clusters)
# Defining an array to represent the external cluster distances
external_cluster = np.zeros(self.subgraph.n_clusters)
# Defining the cut value
cut = 0.0
for i in range(self.subgraph.n_nodes):
# Calculates its number of adjacent nodes
n_adjacents = self.subgraph.nodes[i].n_plateaus + n_neighbours
for k in range(n_adjacents):
# Gathers its adjacent node identifier
j = int(self.subgraph.nodes[i].adjacency[k])
if self.pre_computed_distance:
distance = self.pre_distances[self.subgraph.nodes[i].idx][self.subgraph.nodes[j].idx]
else:
distance = self.distance_fn(self.subgraph.nodes[i].features, self.subgraph.nodes[j].features)
if distance > 0.0:
# If nodes belongs to the same clusters
if self.subgraph.nodes[i].cluster_label == self.subgraph.nodes[j].cluster_label:
# Increments the internal cluster distance
internal_cluster[self.subgraph.nodes[i].cluster_label] += 1 / distance
# If nodes belongs to distinct clusters
else:
# Increments the external cluster distance
external_cluster[self.subgraph.nodes[i].cluster_label] += 1 / distance
for l in range(self.subgraph.n_clusters):
# If the sum of internal and external clusters is bigger than 0
if internal_cluster[l] + external_cluster[l] > 0.0:
# Increments the value of the cut
cut += external_cluster[l] / \
(internal_cluster[l] + external_cluster[l])
return cut
def _best_minimum_cut(self, min_k, max_k):
    """Performs a minimum cut on the subgraph using the best `k` value.

    Every `k` in `[min_k, max_k]` is tried: the subgraph is clustered
    with that `k` and its normalized cut is evaluated; the `k` that
    produces the smallest cut is kept and used to rebuild the arcs and
    the p.d.f.

    Args:
        min_k (int): Minimum value of k.
        max_k (int): Maximum value of k.
    """
    logger.debug('Calculating the best minimum cut within [%d, %d] ...', min_k, max_k)

    # Creates arcs with the widest neighbourhood so the maximum
    # possible densities are known for every smaller `k`
    max_distances = self.subgraph.create_arcs(
        max_k, self.distance_fn, self.pre_computed_distance, self.pre_distances)

    # Initialize the minimum cut as maximum possible value
    min_cut = c.FLOAT_MAX

    for k in range(min_k, max_k + 1):
        # A zero cut cannot be improved upon, so remaining `k` values
        # are skipped once it has been reached
        if min_cut != 0.0:
            # Gathers the subgraph's density for the current `k`
            self.subgraph.density = max_distances[k - 1]

            # Gathers current `k` as the subgraph's best `k` value
            self.subgraph.best_k = k

            # Calculates the p.d.f.
            self.subgraph.calculate_pdf(
                k, self.distance_fn, self.pre_computed_distance, self.pre_distances)

            # Clustering with current `k` value
            self._clustering(k)

            # Performs the normalized cut with current `k` value
            cut = self._normalized_cut(k)

            if cut < min_cut:
                min_cut = cut
                best_k = k

    self.subgraph.destroy_arcs()

    # Applying best `k` value to the subgraph
    self.subgraph.best_k = best_k

    # Creating new arcs with the best `k` value
    self.subgraph.create_arcs(
        best_k, self.distance_fn, self.pre_computed_distance, self.pre_distances)

    # Calculating the new p.d.f. with the best `k` value
    self.subgraph.calculate_pdf(
        best_k, self.distance_fn, self.pre_computed_distance, self.pre_distances)

    # Bug fix: `min_cut` is a float, so it is logged with `%f`
    # (the previous `%d` truncated it)
    logger.debug('Best: %d | Minimum cut: %f.', best_k, min_cut)
def fit(self, X_train, Y_train=None, I_train=None):
    """Fits data in the classifier.

    Builds a k-NN subgraph over the training data, finds the best `k`
    via the minimum normalized cut, and clusters the subgraph with it.

    Args:
        X_train (np.array): Array of training features.
        Y_train (np.array): Array of training labels.
        I_train (np.array): Array of training indexes.
    """
    logger.info('Clustering with classifier ...')

    start = time.time()

    # Creating a subgraph over the training data
    self.subgraph = KNNSubgraph(X_train, Y_train, I_train)

    # Performing the best minimum cut on the subgraph
    self._best_minimum_cut(self.min_k, self.max_k)

    # Clustering the data with best `k` value
    self._clustering(self.subgraph.best_k)

    # The subgraph has been properly trained
    self.subgraph.trained = True

    train_time = time.time() - start

    # Bug fix: the previous message was truncated
    # ('Classifier has been clustered with.')
    logger.info('Classifier has been clustered.')
    logger.info('Number of clusters: %d.', self.subgraph.n_clusters)
    logger.info('Clustering time: %s seconds.', train_time)
def predict(self, X_val, I_val=None):
"""Predicts new data using the pre-trained classifier.
Args:
X_val (np.array): Array of validation features.
I_val (np.array): Array of validation indexes.
Returns:
A list of predictions for each record of the data.
"""
if not self.subgraph:
raise e.BuildError('KNNSubgraph has not been properly created')
if not self.subgraph.trained:
raise e.BuildError('Classifier has not been properly clustered')
logger.info('Predicting data ...')
start = time.time()
# Creating a prediction subgraph
pred_subgraph = KNNSubgraph(X_val, I=I_val)
# Gathering the best `k` value
best_k = self.subgraph.best_k
# Creating an array of distances
distances = np.zeros(best_k + 1)
# Creating an array of nearest neighbours indexes
neighbours_idx = np.zeros(best_k + 1)
for i in range(pred_subgraph.n_nodes):
# Defines the current cost
cost = -c.FLOAT_MAX
# Filling array of distances with maximum value
distances.fill(c.FLOAT_MAX)
for j in range(self.subgraph.n_nodes):
if j != i:
if self.pre_computed_distance:
distances[best_k] = self.pre_distances[pred_subgraph.nodes[i].idx][self.subgraph.nodes[j].idx]
else:
distances[best_k] = self.distance_fn(pred_subgraph.nodes[i].features, self.subgraph.nodes[j].features)
# Apply node `j` as a neighbour
neighbours_idx[best_k] = j
# Gathers current `k`
cur_k = best_k
# While current `k` is bigger than 0 and the `k` distance is smaller than `k-1` distance
while cur_k > 0 and distances[cur_k] | |
import os
import sys
import logging
from collections import defaultdict
from datetime import datetime

import numpy as np
import torch as th
from torch import nn

from latent_dialog import evaluators
from latent_dialog.corpora import EOS, PAD
from latent_dialog.data_loaders import BeliefDbDataLoaders, DealDataLoaders
from latent_dialog.enc2dec.base_modules import summary
from latent_dialog.enc2dec.decoders import TEACH_FORCE, GEN, DecoderRNN
from latent_dialog.record import record, record_task, UniquenessSentMetric, UniquenessWordMetric
from latent_dialog.utils import get_detokenize
logger = logging.getLogger()
class LossManager(object):
    """Accumulates named training losses and the total backward loss.

    `add_loss` records each scalar loss tensor under its key; `pprint`
    renders a (optionally windowed) average per key, reporting PPL for
    `nll` losses; `avg_loss` averages everything recorded through
    `add_backward_loss`.
    """

    def __init__(self):
        # Per-key history of scalar loss values
        self.losses = defaultdict(list)
        # History of total backward-loss values
        self.backward_losses = []

    def add_loss(self, loss):
        """Records every non-None, non-bool value of the `loss` dict.

        Args:
            loss (dict): Maps loss names to scalar tensors (or to
                None/bool flags, which are skipped).
        """
        for key, val in loss.items():
            # Booleans are control flags, not losses. `isinstance` is
            # the idiomatic check and is equivalent here to the former
            # `type(val) is not bool`, since bool cannot be subclassed.
            if val is not None and not isinstance(val, bool):
                self.losses[key].append(val.item())

    def pprint(self, name, window=None, prefix=None):
        """Formats the average of every recorded loss as one line.

        Args:
            name (str): Label printed alongside the losses.
            window (int): If given, only the last `window` values of
                each loss are averaged.
            prefix (str): Optional prefix for the rendered line.

        Returns:
            str: A human-readable summary such as 'Train nll PPL 1.000'.
        """
        str_losses = []
        for key, loss in self.losses.items():
            if loss is None:
                continue
            aver_loss = np.average(loss) if window is None else np.average(loss[-window:])
            if 'nll' in key:
                # NLL losses are reported as perplexity
                str_losses.append('{} PPL {:.3f}'.format(key, np.exp(aver_loss)))
            else:
                str_losses.append('{} {:.3f}'.format(key, aver_loss))

        if prefix:
            return '{}: {} {}'.format(prefix, name, ' '.join(str_losses))
        return '{} {}'.format(name, ' '.join(str_losses))

    def clear(self):
        """Resets all recorded losses."""
        self.losses = defaultdict(list)
        self.backward_losses = []

    def add_backward_loss(self, loss):
        """Records the scalar value of a total backward-loss tensor."""
        self.backward_losses.append(loss.item())

    def avg_loss(self):
        """Returns the mean of all recorded backward losses."""
        return np.mean(self.backward_losses)
class Reinforce(object):
    """RL training driver for the negotiation (Deal) task.

    Rolls out dialogues between `sys_model` and `usr_model` on contexts
    from `ctx_gen`, mixes in periodic supervised updates, records
    evaluation metrics to TSV logs, and keeps the checkpoint with the
    best average reward.
    """

    def __init__(self, dialog, ctx_gen, corpus, sv_config, sys_model, usr_model, rl_config, dialog_eval, ctx_gen_eval):
        self.dialog = dialog              # training dialogue runner (performs the RL updates)
        self.ctx_gen = ctx_gen            # context (goal) generator for training roll-outs
        self.corpus = corpus
        self.sv_config = sv_config        # supervised-learning configuration
        self.sys_model = sys_model
        self.usr_model = usr_model
        self.rl_config = rl_config
        self.dialog_eval = dialog_eval    # evaluation dialogue runner (no learning)
        self.ctx_gen_eval = ctx_gen_eval
        # training data for supervised learning
        train_dial, val_dial, test_dial = self.corpus.get_corpus()
        self.train_data = DealDataLoaders('Train', train_dial, self.sv_config)
        self.val_data = DealDataLoaders('Val', val_dial, self.sv_config)
        self.test_data = DealDataLoaders('Test', test_dial, self.sv_config)
        # training func for supervised learning
        self.train_func = train_single_batch
        # recording func
        self.record_func = record
        # TSV logs are only opened when periodic recording is enabled
        # NOTE(review): these handles are never closed explicitly
        if self.rl_config.record_freq > 0:
            self.ppl_exp_file = open(os.path.join(self.rl_config.record_path, 'ppl.tsv'), 'w')
            self.rl_exp_file = open(os.path.join(self.rl_config.record_path, 'rl.tsv'), 'w')
            self.learning_exp_file = open(os.path.join(self.rl_config.record_path, 'learning.tsv'), 'w')
        # evaluation
        # NOTE(review): `validate` and `generate` are module-level
        # functions defined elsewhere in this file
        self.validate_func = validate
        self.evaluator = evaluators.BleuEvaluator('Deal')
        self.generate_func = generate

    def run(self):
        """Runs the RL loop over contexts produced by `ctx_gen`.

        Every `record_freq` episodes the policy is evaluated on
        training goals and on held-out data, and the system model is
        saved whenever its average reward improves. Afterwards the best
        checkpoint is reloaded and its generations are dumped for the
        validation and test sets.
        """
        n = 0
        best_valid_loss = np.inf  # NOTE(review): assigned but never used in this loop
        best_rl_reward = 0
        # BEFORE RUN, RECORD INITIAL PERFORMANCE
        self.record_func(n, self.sys_model, self.test_data, self.sv_config, self.usr_model, self.ppl_exp_file,
                         self.dialog_eval, self.ctx_gen_eval, self.rl_exp_file)
        for ctxs in self.ctx_gen.iter(self.rl_config.nepoch):
            n += 1
            if n % 20 == 0:
                print('='*15, '{}/{}'.format(n, self.ctx_gen.total_size(self.rl_config.nepoch)))
            # supervised learning
            if self.rl_config.sv_train_freq > 0 and n % self.rl_config.sv_train_freq == 0:
                # print('-'*15, 'Supervised Learning', '-'*15)
                self.train_func(self.sys_model, self.train_data, self.sv_config)
                # print('-'*40)
            # roll out and learn
            # NOTE(review): the modulo below assumes record_freq > 0
            _, agree, rl_reward, rl_stats = self.dialog.run(ctxs, verbose=n % self.rl_config.record_freq == 0)
            # record model performance in terms of several evaluation metrics
            if self.rl_config.record_freq > 0 and n % self.rl_config.record_freq == 0:
                # TEST ON TRAINING DATA (overwrites the roll-out's rl_stats)
                rl_stats = validate_rl(self.dialog_eval, self.ctx_gen, num_episode=400)
                self.learning_exp_file.write('{}\t{}\t{}\t{}\n'.format(n, rl_stats['sys_rew'],
                                                                       rl_stats['avg_agree'],
                                                                       rl_stats['sys_unique']))
                self.learning_exp_file.flush()
                aver_reward = rl_stats['sys_rew']
                # TEST ON HELD-HOLD DATA
                print('-'*15, 'Recording start', '-'*15)
                self.record_func(n, self.sys_model, self.test_data, self.sv_config, self.usr_model, self.ppl_exp_file,
                                 self.dialog_eval, self.ctx_gen_eval, self.rl_exp_file)
                # SAVE MODEL BASED on REWARD
                if aver_reward > best_rl_reward:
                    print('[INFO] Update on reward in Epsd {} ({} > {})'.format(n, aver_reward, best_rl_reward))
                    th.save(self.sys_model.state_dict(), self.rl_config.reward_best_model_path)
                    best_rl_reward = aver_reward
                else:
                    print('[INFO] No update on reward in Epsd {} ({} < {})'.format(n, aver_reward, best_rl_reward))
                print('-'*15, 'Recording end', '-'*15)
            # print('='*15, 'Episode {} end'.format(n), '='*15)
            if self.rl_config.nepisode > 0 and n > self.rl_config.nepisode:
                print('-'*15, 'Stop from config', '-'*15)
                break
        # Reload the best-reward checkpoint and dump final generations
        print("$$$ Load {}-model".format(self.rl_config.reward_best_model_path))
        self.sv_config.batch_size = 32
        self.sys_model.load_state_dict(th.load(self.rl_config.reward_best_model_path))
        validate(self.sys_model, self.val_data, self.sv_config)
        validate(self.sys_model, self.test_data, self.sv_config)
        with open(os.path.join(self.rl_config.record_path, 'valid_file.txt'), 'w') as f:
            self.generate_func(self.sys_model, self.val_data, self.sv_config, self.evaluator, num_batch=None,
                               dest_f=f)
        with open(os.path.join(self.rl_config.record_path, 'test_file.txt'), 'w') as f:
            self.generate_func(self.sys_model, self.test_data, self.sv_config, self.evaluator, num_batch=None,
                               dest_f=f)
class OfflineTaskReinforce(object):
    """Offline RL driver for the MultiWoz task-oriented setting.

    Replays corpus dialogues through `agent`, updates it with a
    success-based reward, interleaves supervised updates, and keeps the
    checkpoint with the best validation success+match score.
    """

    def __init__(self, agent, corpus, sv_config, sys_model, rl_config, generate_func):
        self.agent = agent
        self.corpus = corpus
        self.sv_config = sv_config
        self.sys_model = sys_model
        self.rl_config = rl_config
        # training func for supervised learning
        self.train_func = task_train_single_batch
        self.record_func = record_task
        self.validate_func = validate
        # prepare data loader
        # NOTE(review): BeliefDbDataLoaders is not imported in this
        # file's import block (only DealDataLoaders is) — verify the
        # import, otherwise this raises NameError at construction
        train_dial, val_dial, test_dial = self.corpus.get_corpus()
        self.train_data = BeliefDbDataLoaders('Train', train_dial, self.sv_config)
        self.sl_train_data = BeliefDbDataLoaders('Train', train_dial, self.sv_config)
        self.val_data = BeliefDbDataLoaders('Val', val_dial, self.sv_config)
        self.test_data = BeliefDbDataLoaders('Test', test_dial, self.sv_config)
        # create log files (only when periodic recording is enabled)
        if self.rl_config.record_freq > 0:
            self.learning_exp_file = open(os.path.join(self.rl_config.record_path, 'offline-learning.tsv'), 'w')
            self.ppl_val_file = open(os.path.join(self.rl_config.record_path, 'val-ppl.tsv'), 'w')
            self.rl_val_file = open(os.path.join(self.rl_config.record_path, 'val-rl.tsv'), 'w')
            self.ppl_test_file = open(os.path.join(self.rl_config.record_path, 'test-ppl.tsv'), 'w')
            self.rl_test_file = open(os.path.join(self.rl_config.record_path, 'test-rl.tsv'), 'w')
        # evaluation
        self.evaluator = evaluators.MultiWozEvaluator('SYS_WOZ')
        self.generate_func = generate_func

    def run(self):
        """Runs the offline RL loop for `nepoch` epochs.

        Each batch (one dialogue) is replayed `episode_repeat` times;
        every `record_freq` episodes PPL/success/match are logged for
        the validation and test sets and the model is saved when the
        validation success+match improves. KeyboardInterrupt stops
        training gracefully; the best checkpoint is then reloaded and
        its generations dumped.
        """
        n = 0
        best_valid_loss = np.inf  # NOTE(review): assigned but never used here
        best_rewards = -1 * np.inf
        # BEFORE RUN, RECORD INITIAL PERFORMANCE
        test_loss = self.validate_func(self.sys_model, self.test_data, self.sv_config, use_py=True)
        t_success, t_match, t_bleu, t_f1 = self.generate_func(self.sys_model, self.test_data, self.sv_config,
                                                              self.evaluator, None, verbose=False)
        self.ppl_test_file.write('{}\t{}\t{}\t{}\n'.format(n, np.exp(test_loss), t_bleu, t_f1))
        self.ppl_test_file.flush()
        self.rl_test_file.write('{}\t{}\t{}\t{}\n'.format(n, (t_success + t_match), t_success, t_match))
        self.rl_test_file.flush()
        self.sys_model.train()
        try:
            for epoch_id in range(self.rl_config.nepoch):
                self.train_data.epoch_init(self.sv_config, shuffle=True, verbose=epoch_id == 0, fix_batch=True)
                while True:
                    # A new batch is fetched only every `episode_repeat`
                    # episodes, so each dialogue is replayed repeatedly
                    if n % self.rl_config.episode_repeat == 0:
                        batch = self.train_data.next_batch()
                    if batch is None:
                        break
                    n += 1
                    if n % 50 == 0:
                        print("Reinforcement Learning {}/{} eposide".format(n, self.train_data.num_batch*self.rl_config.nepoch))
                        self.learning_exp_file.write(
                            '{}\t{}\n'.format(n, np.mean(self.agent.all_rewards[-50:])))
                        self.learning_exp_file.flush()
                    # reinforcement learning
                    # make sure it's the same dialogue
                    assert len(set(batch['keys'])) == 1
                    task_report, success, match = self.agent.run(batch, self.evaluator, max_words=self.rl_config.max_words, temp=self.rl_config.temperature)
                    # Reward is task success only (match intentionally excluded)
                    reward = float(success)  # + float(match)
                    stats = {'Match': match, 'Success': success}
                    self.agent.update(reward, stats)
                    # supervised learning
                    if self.rl_config.sv_train_freq > 0 and n % self.rl_config.sv_train_freq == 0:
                        self.train_func(self.sys_model, self.sl_train_data, self.sv_config)
                    # record model performance in terms of several evaluation metrics
                    if self.rl_config.record_freq > 0 and n % self.rl_config.record_freq == 0:
                        self.agent.print_dialog(self.agent.dlg_history, reward, stats)
                        print('-'*15, 'Recording start', '-'*15)
                        # save train reward
                        self.learning_exp_file.write('{}\t{}\n'.format(n, np.mean(self.agent.all_rewards[-self.rl_config.record_freq:])))
                        self.learning_exp_file.flush()
                        # PPL & reward on validation
                        valid_loss = self.validate_func(self.sys_model, self.val_data, self.sv_config, use_py=True)
                        v_success, v_match, v_bleu, v_f1 = self.generate_func(self.sys_model, self.val_data, self.sv_config, self.evaluator, None, verbose=False)
                        self.ppl_val_file.write('{}\t{}\t{}\t{}\n'.format(n, np.exp(valid_loss), v_bleu, v_f1))
                        self.ppl_val_file.flush()
                        self.rl_val_file.write('{}\t{}\t{}\t{}\n'.format(n, (v_success + v_match), v_success, v_match))
                        self.rl_val_file.flush()
                        # PPL & reward on test
                        test_loss = self.validate_func(self.sys_model, self.test_data, self.sv_config, use_py=True)
                        t_success, t_match, t_bleu, t_f1 = self.generate_func(self.sys_model, self.test_data, self.sv_config, self.evaluator, None, verbose=False)
                        self.ppl_test_file.write('{}\t{}\t{}\t{}\n'.format(n, np.exp(test_loss), t_bleu, t_f1))
                        self.ppl_test_file.flush()
                        self.rl_test_file.write('{}\t{}\t{}\t{}\n'.format(n, (t_success + t_match), t_success, t_match))
                        self.rl_test_file.flush()
                        # save model is needed (selection is on validation score)
                        if v_success+v_match > best_rewards:
                            print("Model saved with success {} match {}".format(v_success, v_match))
                            th.save(self.sys_model.state_dict(), self.rl_config.reward_best_model_path)
                            best_rewards = v_success+v_match
                        self.sys_model.train()
                        print('-'*15, 'Recording end', '-'*15)
        except KeyboardInterrupt:
            print("RL training stopped from keyboard")
        # Reload the best checkpoint and dump final generations
        print("$$$ Load {}-model".format(self.rl_config.reward_best_model_path))
        self.sv_config.batch_size = 32
        self.sys_model.load_state_dict(th.load(self.rl_config.reward_best_model_path))
        validate(self.sys_model, self.val_data, self.sv_config, use_py=True)
        validate(self.sys_model, self.test_data, self.sv_config, use_py=True)
        with open(os.path.join(self.rl_config.record_path, 'valid_file.txt'), 'w') as f:
            self.generate_func(self.sys_model, self.val_data, self.sv_config, self.evaluator, num_batch=None, dest_f=f)
        with open(os.path.join(self.rl_config.record_path, 'test_file.txt'), 'w') as f:
            self.generate_func(self.sys_model, self.test_data, self.sv_config, self.evaluator, num_batch=None, dest_f=f)
def validate_rl(dialog_eval, ctx_gen, num_episode=200):
    """Rolls out `num_episode` evaluation dialogues on sampled goals.

    Returns:
        dict: Average system reward ('sys_rew'), average agreement rate
        ('avg_agree'), and sentence/word uniqueness of the system's
        utterances ('sys_sent_unique' / 'sys_unique').
    """
    print("Validate on training goals for {} episode".format(num_episode))

    rewards = []
    agreements = []
    sent_metric = UniquenessSentMetric()
    word_metric = UniquenessWordMetric()

    for _ in range(num_episode):
        context = ctx_gen.sample()
        conv, agree, episode_rewards = dialog_eval.run(context)

        # Reward only counts when the agents reached an agreement
        rewards.append(episode_rewards[0] if agree else 0)
        agreements.append(float(agree if agree is not None else 0.0))

        # Uniqueness is tracked on the system side only
        for turn in conv:
            if turn[0] == 'System':
                sent_metric.record(turn[1])
                word_metric.record(turn[1])

    return {
        'sys_rew': np.average(rewards),
        'avg_agree': np.average(agreements),
        'sys_sent_unique': sent_metric.value(),
        'sys_unique': word_metric.value(),
    }
def train_single_batch(model, train_data, config):
    """Runs 16 supervised (teacher-forcing) gradient steps on `model`."""

    def _fetch_batch():
        # Restart the epoch when the loader is exhausted
        batch = train_data.next_batch()
        if batch is None:
            train_data.epoch_init(config, shuffle=True, verbose=False)
            batch = train_data.next_batch()
        return batch

    optimizer = model.get_optimizer(config, verbose=False)
    batch_cnt = 0
    model.train()

    # decoding CE
    train_data.epoch_init(config, shuffle=True, verbose=False)
    for _ in range(16):
        batch = _fetch_batch()
        optimizer.zero_grad()
        loss = model(batch, mode=TEACH_FORCE)
        model.backward(loss, batch_cnt)
        nn.utils.clip_grad_norm_(model.parameters(), config.grad_clip)
        optimizer.step()
def task_train_single_batch(model, train_data, config):
    """Runs 16 supervised (teacher-forcing) gradient steps on `model`."""
    optimizer = model.get_optimizer(config, verbose=False)
    step = 0
    model.train()

    # decoding CE
    train_data.epoch_init(config, shuffle=True, verbose=False)
    for _ in range(16):
        minibatch = train_data.next_batch()
        # Restart the epoch when the loader is exhausted
        if minibatch is None:
            train_data.epoch_init(config, shuffle=True, verbose=False)
            minibatch = train_data.next_batch()

        optimizer.zero_grad()
        loss = model(minibatch, mode=TEACH_FORCE)
        model.backward(loss, step)
        nn.utils.clip_grad_norm_(model.parameters(), config.grad_clip)
        optimizer.step()
def train(model, train_data, val_data, test_data, config, evaluator, gen=None):
patience = 10
valid_loss_threshold = np.inf
best_valid_loss = np.inf
batch_cnt = 0
optimizer = model.get_optimizer(config)
done_epoch = 0
best_epoch = 0
train_loss = LossManager()
model.train()
logger.info(summary(model, show_weights=False))
saved_models = []
last_n_model = config.last_n_model if hasattr(config, 'last_n_model') else 5
logger.info('***** Training Begins at {} *****'.format(datetime.now().strftime("%Y-%m-%d %H-%M-%S")))
logger.info('***** Epoch 0/{} *****'.format(config.max_epoch))
while True:
train_data.epoch_init(config, shuffle=True, verbose=done_epoch==0, fix_batch=config.fix_train_batch)
while True:
batch = train_data.next_batch()
if batch is None:
break
optimizer.zero_grad()
loss = model(batch, mode=TEACH_FORCE)
model.backward(loss, batch_cnt)
nn.utils.clip_grad_norm_(model.parameters(), config.grad_clip)
optimizer.step()
batch_cnt += 1
train_loss.add_loss(loss)
if batch_cnt % config.print_step == 0:
# print('Print step at {}'.format(datetime.now().strftime("%Y-%m-%d %H-%M-%S")))
logger.info(train_loss.pprint('Train',
window=config.print_step,
prefix='{}/{}-({:.3f})'.format(batch_cnt%config.ckpt_step, config.ckpt_step, model.kl_w)))
sys.stdout.flush()
if batch_cnt % config.ckpt_step == 0:
logger.info('Checkpoint step at {}'.format(datetime.now().strftime("%Y-%m-%d %H-%M-%S")))
logger.info('==== Evaluating Model ====')
logger.info(train_loss.pprint('Train'))
done_epoch += 1
logger.info('done epoch {} -> {}'.format(done_epoch-1, done_epoch))
# generation
if gen is not None:
gen(model, val_data, config, evaluator, num_batch=config.preview_batch_num)
# validation
valid_loss = validate(model, val_data, config, batch_cnt)
_ = validate(model, test_data, config, batch_cnt)
# update early stopping stats
if valid_loss < best_valid_loss:
if valid_loss <= valid_loss_threshold * config.improve_threshold:
patience = max(patience, done_epoch*config.patient_increase)
valid_loss_threshold = valid_loss
logger.info('Update patience to {}'.format(patience))
if config.save_model:
cur_time | |
# app/main/starter_views.py (8by8-org/usvotes)
from __future__ import print_function
from app.main import main
from flask import g, url_for, render_template, request, redirect, session as http_session, abort, current_app, flash, jsonify, make_response
from app.main.forms import *
from app.services import SessionManager
from app.services.steps import Step_0
from app.main.helpers import guess_locale
import json
from app.services import FormFillerService
from app.services.usps_api import USPS_API
from app.services.email_service import EmailService
from flask_cors import cross_origin
from datetime import datetime, timedelta
from apscheduler.schedulers.background import BackgroundScheduler
#from google.cloud import scheduler_v1
import os
import tracemalloc
tracemalloc.start(10)
# backend api endpoint for checking voter registration status
@main.route('/registered', strict_slashes=False, methods=["POST"])
@cross_origin(origin='*')
def registered():
    """Backend API endpoint for checking voter registration status.

    Expects state, city, street, name_first, name_last, dob and zip in
    the JSON (or form) body; validates them, verifies the address via
    the USPS API, then looks the registration up and returns a JSON
    verdict ({'registered': bool, ...}).
    """
    # accept JSON data, default to Form data if no JSON in request
    requestData = request.json if request.json else request.form

    # do error checking
    missingParams = []
    otherErrors = []
    if 'state' not in requestData:
        missingParams.append('state')
    elif len(requestData.get('state')) != 2:
        otherErrors.append('state must be 2 letter abbreviation')
    for param in ('city', 'street', 'name_first', 'name_last'):
        if param not in requestData:
            missingParams.append(param)
    if 'dob' not in requestData:
        missingParams.append('dob')
    else:
        dob = requestData.get('dob').split('/')
        if len(dob) != 3 or len(dob[0]) not in range(1, 3) or len(dob[1]) not in range(1, 3) or len(dob[2]) != 4:
            otherErrors.append('dob must be in the form mm/dd/yyyy')
    if 'zip' not in requestData:
        missingParams.append('zip')
    elif len(requestData.get('zip')) != 5:
        otherErrors.append('zip must be 5 digits')

    if missingParams:
        # idiom fix: str.join replaces the former manual index loop
        resp = jsonify(error='Missing parameters: ' + ', '.join(missingParams))
        return make_response(resp, 400)

    # check if address is valid (via USPS address verification)
    form = FormVR3(
        addr=requestData.get('street'),
        city=requestData.get('city'),
        state=requestData.get('state'),
        zip=requestData.get('zip'),
    )
    usps_api = USPS_API(form.data)
    validated_addresses = usps_api.validate_addresses()
    if not validated_addresses:
        otherErrors.append('(street, city, state, zip) do not form a valid address')

    if otherErrors:
        resp = jsonify(error=', '.join(otherErrors))
        return make_response(resp, 400)

    # look up the registration record
    step = Step_0(requestData)
    regFound = step.lookup_registration(
        state=requestData.get('state'),
        city=requestData.get('city'),
        street=requestData.get('street'),
        name_first=requestData.get('name_first'),
        name_last=requestData.get('name_last'),
        dob=requestData.get('dob'),
        zipcode=requestData.get('zip')
    )
    # A record without a status, or with an 'active' status, counts as
    # registered (logic simplified from the former double condition)
    if regFound and ('status' not in regFound or regFound['status'] == 'active'):
        return jsonify({'registered': True})
    elif regFound and 'status' in regFound:
        return {'registered': False, 'status': regFound['status']}
    else:
        return {'registered': False, 'status': 'not found'}
# backend api endpoint for filling out the Federal Form to register to vote
@main.route('/registertovote', strict_slashes=False, methods=['POST'])
@cross_origin(origin='*')
def reg():
    """Backend API endpoint for filling out the Federal Form to register to vote.

    Validates the request fields, fills the federal voter-registration
    form from a JSON payload template and emails the rendered form to
    the supplied address. An invalid mailing address produces a warning
    in the response rather than an error.
    """
    # accept JSON data, default to Form data if no JSON in request
    requestData = request.json if request.json else request.form

    # do error checking
    missingParams = []
    otherErrors = []
    for param in ('name_first', 'name_last'):
        if param not in requestData:
            missingParams.append(param)
    if 'state' not in requestData:
        missingParams.append('state')
    elif len(requestData.get('state')) != 2:
        otherErrors.append('state must be 2 letter abbreviation')
    for param in ('city', 'street'):
        if param not in requestData:
            missingParams.append(param)
    if 'dob' not in requestData:
        missingParams.append('dob')
    else:
        dobArr = requestData.get('dob').split('/')
        if len(dobArr) != 3 or len(dobArr[0]) not in range(1, 3) or len(dobArr[1]) not in range(1, 3) or len(dobArr[2]) != 4:
            otherErrors.append('dob must be in the form mm/dd/yyyy')
    if 'zip' not in requestData:
        missingParams.append('zip')
    elif len(requestData.get('zip')) != 5:
        otherErrors.append('zip must be 5 digits')
    if 'email' not in requestData:
        missingParams.append('email')
    else:
        emailArr = requestData.get('email').split('@')
        if len(emailArr) != 2 or len(list(filter(None, emailArr[1].split('.')))) != 2:
            otherErrors.append('invalid email')
    if 'citizen' not in requestData:
        missingParams.append('citizen')
    elif requestData.get('citizen') != 'yes':
        otherErrors.append('citizen parameter must be yes')
    if 'eighteenPlus' not in requestData:
        missingParams.append('eighteenPlus')
    elif requestData.get('eighteenPlus') != 'yes':
        otherErrors.append('eighteenPlus parameter must be yes')
    if 'party' not in requestData:
        missingParams.append('party')
    if 'idNumber' not in requestData:
        missingParams.append('idNumber')
    elif not requestData.get('idNumber').isdigit():
        otherErrors.append('invalid ID number')

    if missingParams:
        # idiom fix: str.join replaces the former manual index loop
        resp = jsonify(error='Missing parameters: ' + ', '.join(missingParams))
        return make_response(resp, 400)

    # check if the address is valid (via USPS address verification)
    # instead of an error, send a warning if address is invalid right after email is sent
    form = FormVR3(
        addr=requestData.get('street'),
        city=requestData.get('city'),
        state=requestData.get('state'),
        zip=requestData.get('zip'),
    )
    usps_api = USPS_API(form.data)
    validated_addresses = usps_api.validate_addresses()

    if otherErrors:
        resp = jsonify(error=', '.join(otherErrors))
        return make_response(resp, 400)

    # fill the federal form payload from the bundled template file
    payload_file = 'app/services/tests/test-vr-en-payload.json'
    with open(payload_file) as payload_f:
        payload = json.load(payload_f)
    payload['01_firstName'] = requestData.get('name_first')
    payload['01_lastName'] = requestData.get('name_last')
    payload['02_homeAddress'] = requestData.get('street')
    payload['02_aptLot'] = ""
    payload['02_cityTown'] = requestData.get('city')
    payload['02_state'] = requestData.get('state')
    payload['02_zipCode'] = requestData.get('zip')
    payload['04_dob'] = requestData.get('dob')
    payload['07_party'] = requestData.get('party')
    payload['06_idNumber'] = requestData.get('idNumber')
    payload['00_citizen_yes'] = True
    payload['00_eighteenPlus_yes'] = True

    # fill out the voter registration form
    ffs = FormFillerService(payload=payload, form_name='/vr/en')
    img = ffs.as_image()

    # use Gmail API to send email to the user with their voter reg form
    emailServ = EmailService()
    subject = 'Here’s your voter registration form'
    messageWithAttachment = emailServ.create_message_with_attachment(requestData.get('email'), subject, img)
    emailServ.send_message(messageWithAttachment)

    if not validated_addresses:
        return {'status': 'email sent', 'warning': '(street, city, state, zip) do not form a valid address'}
    return {'status': 'email sent'}
@main.route('/email', strict_slashes=False, methods=['POST'])
@cross_origin(origin='*')
def email():
    """Backend API endpoint for sending a templated email via the Gmail API.

    Validates the address and template type (plus the extra parameters
    each template requires) before delegating to `EmailService`.
    """
    # accept JSON data, default to Form data if no JSON in request
    requestData = request.json if request.json else request.form

    # do error checking
    missingParams = []
    if 'email' not in requestData:
        missingParams.append('email')
    else:
        emailArr = requestData.get('email').split('@')
        if len(emailArr) != 2 or len(list(filter(None, emailArr[1].split('.')))) != 2:
            resp = jsonify(error='invalid email: ' + requestData.get('email'))
            return make_response(resp, 400)
    if 'type' not in requestData:
        missingParams.append('type')
    elif requestData.get('type') == 'badgeEarned':
        if 'avatar' not in requestData or 'daysLeft' not in requestData or 'badgesLeft' not in requestData:
            resp = jsonify(error='for badgeEarned emails, parameters avatar, daysLeft, and badgesLeft are required')
            return make_response(resp, 400)
    elif (requestData.get('type') == 'registered' or requestData.get('type') == 'electionReminder') and ('avatar' not in requestData or 'firstName' not in requestData):
        resp = jsonify(error='for ' + requestData.get('type') + ' emails, parameters avatar and firstName are required')
        return make_response(resp, 400)
    elif requestData.get('type') == 'challengeWon' and 'avatar' not in requestData:
        resp = jsonify(error='for ' + requestData.get('type') + ' emails, parameter avatar is required')
        return make_response(resp, 400)

    if missingParams:
        # idiom fix: str.join replaces the former manual index loop
        resp = jsonify(error='Missing parameters: ' + ', '.join(missingParams))
        return make_response(resp, 400)

    # Initialize email service that uses Gmail API
    emailServ = EmailService()
    emailTo = requestData.get('email')
    # local renamed from `type`, which shadowed the builtin
    emailType = requestData.get('type')
    daysLeft = requestData.get('daysLeft')
    badgesLeft = requestData.get('badgesLeft')
    firstName = requestData.get('firstName')
    avatar = requestData.get('avatar')
    isChallenger = requestData.get('isChallenger')

    # Attempt to create the email template that was asked for
    try:
        message = emailServ.create_template_message(emailTo, emailType, daysLeft, badgesLeft, firstName, avatar, isChallenger)
        emailServ.send_message(message)
        if emailType == 'challengerWelcome':
            # The scheduler-based follow-up is currently disabled; the
            # commented lines scheduled a 'challengeIncomplete' email
            # for the day the challenge ends.
            #sched = BackgroundScheduler()
            #sched.start()
            currDay = datetime.today()
            #challengeEnd = currDay + timedelta(days=8)
            #job = sched.add_job(delay_send, 'date', run_date=challengeEnd, args=[emailTo])
        return {'status': 'email sent'}
    except ValueError:  # value error if email type provided by user is not valid
        resp = jsonify(error='invalid template type, valid types include: challengerWelcome, badgeEarned, challengeWon, challengeIncomplete, playerWelcome, registered, electionReminder')
        return make_response(resp, 400)
    except Exception:
        resp = jsonify(error='invalid email: ' + emailTo)
        return make_response(resp, 400)
def delay_send(emailTo):
    """Sends the 'challengeIncomplete' template email to `emailTo`.

    Intended to run as a delayed/scheduled job once a challenge ends.
    """
    # Initialize email service that uses Gmail API
    service = EmailService()
    service.send_message(service.create_template_message(emailTo, 'challengeIncomplete'))
    return 'delayed email sent'
'''
def create_scheduled_job():
client = scheduler_v1.CloudSchedulerClient.from_service_account_info({
"type": "service_account",
"project_id": os.getenv('PROJECT_ID'),
"private_key_id": os.getenv('PRIVATE_KEY_ID'),
"private_key": os.getenv('PRIVATE_KEY'),
"client_email": os.getenv('CLIENT_EMAIL'),
"client_id": os.getenv('CLIENT_ID_GCS'),
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://oauth2.googleapis.com/token",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/emailscheduler%40by8-318322.iam.gserviceaccount.com"
}
)
parent= client.location_path(os.getenv('PROJECT_ID'),'us-west1')
job={"name":"projects/your-project/locations/app-engine-location/jobs/traing_for_model",
"description":"this is for testing training model",
"http_target": {"uri":"https://us-central1-gerald-automl-test.cloudfunctions.net/automl-trainmodel-1-test-for-cron-job"},
"schedule":"0 10 * * *",
"time_zone":"America/Los_Angeles",
}
job = {
"name": "",
"http_target": {
"http_method": "POST",
"uri": uri,
"headers": {"Content-Type": "application/json"},
"body": {'email': '<EMAIL>',
'type': 'challengeIncomplete',
'avatar': '2',
'daysLeft': '3',
'badgesLeft': '4',
'firstName': 'Wesley'},
},
"schedule": "* * * * | |
'success')
return redirect(url_for('admin_dashboard'))
# admin create user account validator form
class AdduserForm(Form):
    """Validation form for the admin 'add user' page.

    All name fields are required; the password must be 6-100 characters
    long and match its confirmation field.
    """
    first_name = StringField('First Name', [validators.InputRequired()])
    last_name = StringField('Last Name', [validators.InputRequired()])
    # Bug fix: the label was the broken placeholder '<NAME>'
    username = StringField('Username', [validators.InputRequired()])
    password = PasswordField('Password',
                             [validators.DataRequired(), validators.Length(min=6, max=100),
                              validators.EqualTo('confirm', message='Passwords Do Not Match')])
    confirm = PasswordField('Confirm Password', [validators.DataRequired()])
# admin create user account page
@app.route('/admin/add_user', methods=['post', 'get'])
@is_admin_logged_in
def add_user():
    """Render the admin "add user" page and create the account on POST.

    GET: shows the form plus the dashboard sidebar data (unread messages,
    pending-order counts).
    POST: validates the form, rejects duplicate usernames / existing upload
    folders, stores the optional avatar under static/uploads/users/<username>,
    and inserts the new row into ``users``.
    """
    cur = mysql.connection.cursor()
    # view messages
    cur.execute("SELECT * FROM contact_us WHERE status = %s ORDER BY id DESC LIMIT 6;", ["not_seen"])
    messages = cur.fetchall()
    # show messages number
    cur.execute("SELECT COUNT(id) FROM contact_us WHERE status = %s ", ['not_seen'])
    count_message = cur.fetchone()
    count_messages = count_message['COUNT(id)']
    # show new orders number
    cur.execute("SELECT COUNT(status) FROM buy_orders WHERE status = %s", ['Pending'])
    count_order = cur.fetchone()
    count_orders_where_pending = count_order['COUNT(status)']
    # show new orders
    cur.execute("SELECT COUNT(status), user_name FROM buy_orders WHERE status = %s GROUP BY user_name ASC LIMIT 12", ['Pending'])
    count_orders_by_user = cur.fetchall()
    cur.close()
    form = AdduserForm(request.form)
    if request.method == 'POST' and form.validate():
        username = form.username.data
        # Windows-style path separators — this app assumes it runs on Windows.
        folder = os.path.exists(app.root_path + r"\static\uploads\users\{}".format(username))
        if folder == True:
            flash('Folder Name Already Exists', 'warning')
            return redirect(url_for('add_user'))
        cur = mysql.connection.cursor()
        cur.execute("SELECT username FROM users WHERE username = %s", [username])
        res = cur.fetchone()
        cur.close()
        # NOTE(review): substring test against str(res) — res is a row dict
        # (exact match from the query above) or None, so this behaves like a
        # truthiness check here; `res is not None` would state the intent.
        if username in str(res):
            msg = "User Name Already Exists"
            return render_template('admin_add_user.html', form=form, msg=msg, admin_name=session['admin_username'], admin_image=session['admin_image'])
        else:
            permission = request.form['permissions']
            first_name = form.first_name.data.lower()
            last_name = form.last_name.data.lower()
            email = request.form['email'].lower()
            gender = request.form['gender']
            country = request.form['country']
            username = form.username.data
            # NOTE(review): '<PASSWORD>' is a redacted call — presumably the
            # password-hashing helper used elsewhere in this app; restore it
            # before shipping.
            password = <PASSWORD>(str(form.password.data))
            file = request.files['file']
            # if file.filename == '':
            # flash('You Have to Select a File!', 'warning')
            # Recreate the user's upload folder from scratch (rmtree fails if
            # it did not exist yet, in which case we just create it).
            try:
                rmtree(app.root_path + r"\static\uploads\users\{}".format(username))
                os.makedirs(app.root_path + r"\static\uploads\users\{}".format(username))
            except:
                os.makedirs(app.root_path + r"\static\uploads\users\{}".format(username))
            if file and allowed_file(file.filename):
                filename = secure_filename(file.filename)
                dir = app.root_path + r"\static\uploads\users\{}".format(username)
                file.save(os.path.join(dir, filename))
                cur = mysql.connection.cursor()
                cur.execute("INSERT INTO users(permission, first_name, last_name,\
                email, gender, country, username, password, files)\
                VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)", \
                (permission, first_name, last_name, email, gender,\
                country, username, password, filename))
                mysql.connection.commit()
                cur.close()
                flash('You Have Created an Account successfully!', 'success')
                return redirect(url_for('admin_dashboard'))
            elif file.filename == '' or 'file' not in request.files:
                # No upload: fall back to the bundled default avatar.
                copy(app.root_path + r'\static\admin.png', app.root_path + r'\static\uploads\users\{}\admin.png'.format(username))
                cur = mysql.connection.cursor()
                # NOTE(review): '<PASSWORD>' below is stored in the `files`
                # column — looks like an over-eager redaction of the default
                # avatar filename; confirm the original value.
                cur.execute("INSERT INTO users(permission, first_name, last_name,\
                email, gender, country, username, password, files)\
                VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)", \
                (permission, first_name, last_name, email, gender, \
                country, username, password, '<PASSWORD>'))
                mysql.connection.commit()
                cur.close()
                flash('You Have Created an Account successfully!', 'success')
                return redirect(url_for('admin_dashboard'))
    return render_template('admin_add_user.html', form=form, admin_name=session['admin_username'], admin_image=session['admin_image'], permission=session['permission'], messages=messages, count_messages=count_messages, count_orders_where_pending=count_orders_where_pending, count_orders_by_user=count_orders_by_user)
# admin delete user
@app.route('/admin/delete_user/<id>', methods=['post', 'get'])
@is_admin_logged_in
def delete_user(id):
    """Delete one user account plus every row and file tied to it.

    :param id: primary key of the row in ``users`` (from the URL).
    """
    cur = mysql.connection.cursor()
    cur.execute("SELECT username FROM users WHERE id = %s", [id])
    name = cur.fetchone()
    n = name['username']
    try:
        # Remove the user's upload folder; it may never have been created.
        rmtree(app.root_path + r"\static\uploads\users\{}".format(n))
    except OSError:
        # Fixed: was a bare `except:` that silently swallowed *any* error
        # (including KeyboardInterrupt); only filesystem failures are expected.
        pass
    # Purge every record that references this user before the account itself.
    cur.execute("DELETE FROM orders WHERE user_name = %s", [n])
    cur.execute("DELETE FROM buy_orders WHERE user_name = %s", [n])
    cur.execute("DELETE FROM reviews WHERE user_name = %s", [n])
    cur.execute("DELETE FROM slider_reviews WHERE user_name = %s", [n])
    cur.execute("DELETE FROM users WHERE id = %s", [id])
    mysql.connection.commit()
    cur.close()
    flash('You Have Deleted User Account successfully!', 'success')
    return redirect(url_for('admin_dashboard'))
# admin add new category validator form
class CategoryForm(Form):
    """WTForms validator for category names: required, 1-100 characters."""
    category = StringField('Category', [validators.InputRequired(), validators.length(min=1, max=100)])
# admin add new category page
@app.route('/admin/add_category', methods=['post', 'get'])
@is_admin_logged_in
def add_category():
    """Render the admin "add category" page and insert the category on POST.

    Rejects duplicates (case-sensitive via a BINARY compare) and names that
    consist only of whitespace.
    """
    cur = mysql.connection.cursor()
    # view messages
    cur.execute("SELECT * FROM contact_us WHERE status = %s ORDER BY id DESC LIMIT 6;", ["not_seen"])
    messages = cur.fetchall()
    # show messages number
    cur.execute("SELECT COUNT(id) FROM contact_us WHERE status = %s ", ['not_seen'])
    count_message = cur.fetchone()
    count_messages = count_message['COUNT(id)']
    # show new orders number
    cur.execute("SELECT COUNT(status) FROM buy_orders WHERE status = %s", ['Pending'])
    count_order = cur.fetchone()
    count_orders_where_pending = count_order['COUNT(status)']
    # show new orders
    cur.execute("SELECT COUNT(status), user_name FROM buy_orders WHERE status = %s GROUP BY user_name ASC LIMIT 12", ['Pending'])
    count_orders_by_user = cur.fetchall()
    cur.close()
    form = CategoryForm(request.form)
    if request.method == 'POST' and form.validate():
        category = form.category.data.lower()
        cur = mysql.connection.cursor()
        result = cur.execute("SELECT * FROM categories WHERE category = BINARY %s", [category])
        if result > 0:
            cur.close()
            flash('This Category Already Exists', 'warning')
            return redirect(url_for('admin_dashboard'))
        # Fixed: the old check (`category == ' '`) only caught the literal
        # single-space name; any other all-whitespace input slipped past
        # InputRequired and was inserted as a blank-looking category.
        if not category.strip():
            cur.close()
            flash('You Should Type A Word!', 'warning')
            return redirect(url_for('add_category'))
        if result == 0:
            cur = mysql.connection.cursor()
            cur.execute("INSERT INTO categories (category) VALUES(%s);", ([category]))
            mysql.connection.commit()
            cur.close()
            flash('You Have Added New Category successfully!', 'success')
            return redirect(url_for('admin_dashboard'))
    return render_template('admin_add_category.html', form=form, admin_name=session['admin_username'], admin_image=session['admin_image'], permission=session['permission'], messages=messages, count_messages=count_messages, count_orders_where_pending=count_orders_where_pending, count_orders_by_user=count_orders_by_user)
# admin edit category page
@app.route('/admin/edit_category/<current_category>', methods=['post', 'get'])
@is_admin_logged_in
def edit_category(current_category):
    """Render the "edit category" page; on POST rename the category.

    The rename is propagated to ``products`` and ``slider_products`` so
    existing rows keep pointing at a valid category.

    :param current_category: category name being edited (from the URL).
    """
    cur = mysql.connection.cursor()
    cur.execute("SELECT category FROM categories Where category=%s;", [current_category])
    cat = cur.fetchone()
    # view messages
    cur.execute("SELECT * FROM contact_us WHERE status = %s ORDER BY id DESC LIMIT 6;", ["not_seen"])
    messages = cur.fetchall()
    # show messages number
    cur.execute("SELECT COUNT(id) FROM contact_us WHERE status = %s ", ['not_seen'])
    count_message = cur.fetchone()
    count_messages = count_message['COUNT(id)']
    # show new orders number
    cur.execute("SELECT COUNT(status) FROM buy_orders WHERE status = %s", ['Pending'])
    count_order = cur.fetchone()
    count_orders_where_pending = count_order['COUNT(status)']
    # show new orders
    cur.execute("SELECT COUNT(status), user_name FROM buy_orders WHERE status = %s GROUP BY user_name ASC LIMIT 12", ['Pending'])
    count_orders_by_user = cur.fetchall()
    cur.close()
    form = CategoryForm(request.form)
    # Pre-fill the field with the current name for GET rendering.
    # NOTE(review): on POST this also overwrites the submitted value before
    # form.validate(); the handler reads request.form['category'] directly,
    # so behavior is unaffected — confirm before tightening.
    form.category.data = cat['category']
    if request.method == 'POST' and form.validate():
        category = request.form['category'].lower()
        # Fixed: the old check only caught a literal single space; also
        # dropped a close() on the cursor that was already closed above.
        if not category.strip():
            # NOTE(review): redirect target 'add_category' looks copy-pasted
            # from the add handler — confirm it should not return here.
            flash('You Should Type A Word!', 'warning')
            return redirect(url_for('add_category'))
        cur = mysql.connection.cursor()
        # Fixed: parameters were wrapped in single-element lists
        # (([category], [current_category])), which only worked because the
        # MySQLdb driver happens to render a list as a parenthesized scalar.
        # Plain scalars match every other parameterized query in this app.
        cur.execute("UPDATE categories SET category=%s WHERE category=%s;", (category, current_category))
        cur.execute("UPDATE products SET category=%s WHERE category=%s", \
        (category, current_category))
        cur.execute("UPDATE slider_products SET category=%s WHERE category=%s", \
        (category, current_category))
        mysql.connection.commit()
        cur.close()
        flash('You Have Edited Category successfully!', 'success')
        return redirect(url_for('admin_dashboard'))
    return render_template('admin_edit_category.html', form=form, admin_name=session['admin_username'], admin_image=session['admin_image'], permission=session['permission'], messages=messages, count_messages=count_messages, count_orders_where_pending=count_orders_where_pending, count_orders_by_user=count_orders_by_user)
# admin delete category
@app.route('/admin/delete_category/<category>', methods=['post', 'get'])
@is_admin_logged_in
def delete_category(category):
    """Delete a category together with its products, sliders and reviews.

    Removes each product's upload folder from disk, purges the related rows
    from the product/slider/review tables, then deletes the category row.
    """
    cur = mysql.connection.cursor()
    cur.execute("SELECT product_name FROM products WHERE category=%s", [category])
    products = cur.fetchall()
    for product in products:
        try:
            # Fixed: a product whose folder was already gone used to raise
            # and abort the whole delete halfway through; now it is skipped.
            rmtree(app.root_path + r"\static\uploads\products\{}".format(product['product_name']))
        except OSError:
            pass
        cur.execute("DELETE FROM reviews WHERE product_name=%s", [product['product_name']])
        mysql.connection.commit()
    cur.execute("SELECT product_name FROM slider_products WHERE category=%s", [category])
    sliders = cur.fetchall()
    for slider_product in sliders:
        try:
            rmtree(app.root_path + r"\static\uploads\slider_products\{}".format(slider_product['product_name']))
        except OSError:
            pass
        cur.execute("DELETE FROM slider_reviews WHERE product_name=%s", [slider_product['product_name']])
        mysql.connection.commit()
    cur.execute("DELETE FROM slider_products WHERE category=%s", [category])
    cur.execute("DELETE FROM products WHERE category=%s", [category])
    cur.execute("DELETE FROM categories Where category=%s;", [category])
    mysql.connection.commit()
    cur.close()
    flash("You Have Deleted Category With it's products Successfully!", 'success')
    return redirect(url_for('admin_dashboard'))
# admin delete all categories
@app.route('/admin/delete_all_categories', methods=['post', 'get'])
@is_admin_logged_in
def delete_all_categories():
    """Wipe every category, product, order and review, plus upload folders."""
    cur = mysql.connection.cursor()
    cur.execute("TRUNCATE categories")
    cur.execute("TRUNCATE products")
    cur.execute("TRUNCATE slider_products")
    cur.execute("TRUNCATE orders")
    cur.execute("TRUNCATE buy_orders")
    cur.execute("TRUNCATE reviews")
    cur.execute("TRUNCATE slider_reviews")
    mysql.connection.commit()
    cur.close()
    try:
        rmtree(app.root_path + r"\static\uploads\products")
        rmtree(app.root_path + r"\static\uploads\slider_products")
    except OSError:
        # Fixed: was a bare `except:`; missing folders are the only expected
        # failure — the DB wipe above already succeeded either way.
        pass
    flash('You Has been Deleted All Categories and Products Successfully!', 'success')
    return redirect(url_for('admin_dashboard'))
# admin delete all users
@app.route('/admin/delete_all_users', methods=['post', 'get'])
@is_admin_logged_in
def delete_all_users():
    """Delete every non-admin account together with its upload folder."""
    cur = mysql.connection.cursor()
    cur.execute("SELECT username FROM users WHERE permission = 'user'")
    # fetchall() is simply empty when there are no plain users, so the old
    # `if result > 0 / elif result == 0: pass` guard was dead code.
    for row in cur.fetchall():
        try:
            # Fixed: one user row with a missing folder used to abort the
            # whole operation before any account was deleted.
            rmtree(app.root_path + r"\static\uploads\users\{}".format(row['username']))
        except OSError:
            pass
    cur.execute("DELETE FROM users WHERE permission = 'user' ")
    mysql.connection.commit()
    cur.close()
    flash('You Have Deleted All Users Account with their files successfully!', 'success')
    return redirect(url_for('admin_dashboard'))
# admin delete all accounts
@app.route('/admin/delete_all_accounts', methods=['post', 'get'])
@is_admin_logged_in
def delete_all_accounts():
    """Factory-reset: wipe every table and upload folder, then log out."""
    try:
        rmtree(app.root_path + r"\static\uploads\users")
        rmtree(app.root_path + r"\static\uploads\products")
        rmtree(app.root_path + r"\static\uploads\slider_products")
    except OSError:
        # Fixed: was a bare `except:`; only filesystem failures (folders
        # already gone) are expected and safe to ignore here.
        pass
    cur = mysql.connection.cursor()
    cur.execute("TRUNCATE users")
    cur.execute("TRUNCATE categories")
    cur.execute("TRUNCATE products")
    cur.execute("TRUNCATE slider_products")
    cur.execute("TRUNCATE orders")
    cur.execute("TRUNCATE buy_orders")
    cur.execute("TRUNCATE reviews")
    cur.execute("TRUNCATE slider_reviews")
    mysql.connection.commit()
    cur.close()
    # The admin account is gone too, so drop the session and go to login.
    session.clear()
    flash('You Have Deleted All Accounts with their files successfully!', 'success')
    return redirect(url_for('admin_login'))
# admin accept orders
@app.route('/admin/accept_orders/<id>', methods=['post', 'get'])
@is_admin_logged_in
def accept_orders(id):
    """Mark one purchase order as accepted.

    :param id: primary key of the row in ``buy_orders`` (from the URL).
    """
    cur = mysql.connection.cursor()
    # Fixed: the status was passed as the single-element list ['Accepted'],
    # relying on driver-specific list escaping; pass a plain scalar like the
    # other parameterized queries in this app.
    cur.execute("UPDATE buy_orders SET status = %s WHERE id = %s", ('Accepted', id))
    mysql.connection.commit()
    cur.close()
    flash('You have accepted the order Successfully!', 'success')
    return redirect(url_for('admin_dashboard'))
# admin accept all orders
@app.route('/admin/accept_all_orders', methods=['post', 'get'])
@is_admin_logged_in
def accept_all_orders():
    """Mark every purchase order in ``buy_orders`` as accepted."""
    cursor = mysql.connection.cursor()
    cursor.execute("UPDATE buy_orders SET status = %s", ['Accepted'])
    mysql.connection.commit()
    cursor.close()
    flash('You have accepted all orders Successfully!', 'success')
    return redirect(url_for('admin_dashboard'))
# admin reject orders
@app.route('/admin/reject_orders/<id>', methods=['post', 'get'])
@is_admin_logged_in
def reject_orders(id):
    """Mark one purchase order as rejected.

    :param id: primary key of the row in ``buy_orders`` (from the URL).
    """
    cur = mysql.connection.cursor()
    # Fixed: the status was passed as the single-element list ['Rejected'],
    # relying on driver-specific list escaping; pass a plain scalar like the
    # other parameterized queries in this app.
    cur.execute("UPDATE buy_orders SET status = %s WHERE id = %s", ('Rejected', id))
    mysql.connection.commit()
    cur.close()
    flash('You have rejected the order Successfully!', 'success')
    return redirect(url_for('admin_dashboard'))
# admin reject all orders
@app.route('/admin/reject_all_orders', methods=['post', 'get'])
@is_admin_logged_in
def reject_all_orders():
    """Mark every purchase order in ``buy_orders`` as rejected."""
    cursor = mysql.connection.cursor()
    cursor.execute("UPDATE buy_orders SET status = %s", ['Rejected'])
    mysql.connection.commit()
    cursor.close()
    flash('You have rejected all orders Successfully!', 'success')
    return redirect(url_for('admin_dashboard'))
# admin search bar
@app.route('/search', methods=['GET', 'POST'])
@is_admin_logged_in
def search():
if request.method == "POST":
cur = mysql.connection.cursor()
result = cur.execute("SELECT * FROM `buy_sell`.`products` \
WHERE( CONVERT(`product_name` USING utf8)\
LIKE %s)", [["%" + request.form['search'] + "%"]])
categories = cur.fetchall()
cur.close()
if result > 0:
return | |
"""
Implementation of ODE Risk minimization
<NAME>, ETH Zurich
based on code from
<NAME>, Machine Learning Research Group, University of Oxford
February 2019
"""
# Libraries
from odin.utils.trainable_models import TrainableModel
from odin.utils.gaussian_processes import GaussianProcess
from odin.utils.tensorflow_optimizer import ExtendedScipyOptimizerInterface
import numpy as np
import tensorflow as tf
from typing import Union, Tuple
import time
class ODERiskMinimization(object):
"""
Class that implements ODIN risk minimization
"""
def __init__(self, trainable: TrainableModel,
system_data: np.array, t_data: np.array,
gp_kernel: str = 'RBF',
optimizer: str = 'L-BFGS-B',
initial_gamma: float = 1e-6,
train_gamma: bool = True,
gamma_bounds: Union[np.array, list, Tuple] = (1e-6, 10.0),
state_bounds: np.array = None,
basinhopping: bool = True,
basinhopping_options: dict = None,
single_gp: bool = False,
state_normalization: bool = True,
time_normalization: bool = False,
tensorboard_summary_dir: str = None,
runtime_prof_dir: str = None):
"""
Constructor.
:param trainable: Trainable model class, as explained and implemented in
utils.trainable_models;
:param system_data: numpy array containing the noisy observations of
the state values of the system, size is [n_states, n_points];
:param t_data: numpy array containing the time stamps corresponding to
the observations passed as system_data;
:param gp_kernel: string indicating which kernel to use in the GP.
Valid options are 'RBF', 'Matern52', 'Matern32', 'RationalQuadratic',
'Sigmoid';
:param optimizer: string indicating which scipy optimizer to use. The
valid ones are the same that can be passed to scipy.optimize.minimize.
Notice that some of them will ignore bounds;
:param initial_gamma: initial value for the gamma parameter.
:param train_gamma: boolean, indicates whether to train of not the
variable gamma;
:param gamma_bounds: bounds for gamma (a lower bound of at least 1e-6
is always applied to overcome numerical instabilities);
:param state_bounds: bounds for the state optimization;
:param basinhopping: boolean, indicates whether to turn on the scipy
basinhopping;
:param basinhopping_options: dictionary containing options for the
basinhooping algorithm (syntax is the same as scipy's one);
:param single_gp: boolean, indicates whether to use a single set of GP
hyperparameters for each state;
:param state_normalization: boolean, indicates whether to normalize the
states values before the optimization (notice the parameter values
theta won't change);
:param time_normalization: boolean, indicates whether to normalize the
time stamps before the optimization (notice the parameter values
theta won't change);
:param QFF_features: int, the order of the quadrature scheme
:param tensorboard_summary_dir, runtime_prof_dir: str, logging directories
"""
# Save arguments
self.trainable = trainable
self.system_data = np.copy(system_data)
self.t_data = np.copy(t_data).reshape(-1, 1)
self.dim, self.n_p = system_data.shape
self.gp_kernel = gp_kernel
self.optimizer = optimizer
self.initial_gamma = initial_gamma
self.train_gamma = train_gamma
self.gamma_bounds = np.log(np.array(gamma_bounds))
self.basinhopping = basinhopping
self.basinhopping_options = {'n_iter': 10,
'temperature': 1.0,
'stepsize': 0.05}
self.state_normalization = state_normalization
if basinhopping_options:
self.basinhopping_options.update(basinhopping_options)
self.single_gp = single_gp
# Build bounds for the states and gamma
self._compute_state_bounds(state_bounds)
self._compute_gamma_bounds(gamma_bounds)
# Initialize utils
self._compute_standardization_data(state_normalization,
time_normalization)
# Build the necessary TensorFlow tensors
self._build_tf_data()
# Initialize the Gaussian Process for the derivative model
self.gaussian_process = GaussianProcess(self.dim, self.n_p,
self.gp_kernel, self.single_gp)
#initialize logging variables
if tensorboard_summary_dir:
self.writer = tf.summary.FileWriter(tensorboard_summary_dir)
theta_sum=tf.summary.histogram('Theta_summary',self.trainable.theta)
else:
self.writer = None
self.runtime_prof_dir= runtime_prof_dir
# Initialization of TF operations
self.init = None
return
def _compute_gamma_bounds(self, bounds: Union[np.array, list, Tuple])\
-> None:
"""
Builds the numpy array that defines the bounds for gamma.
:param bounds: of the form (lower_bound, upper_bound).
"""
self.gamma_bounds = np.array([1.0, 1.0])
if bounds is None:
self.gamma_bounds[0] = np.log(1e-6)
self.gamma_bounds[1] = np.inf
else:
self.gamma_bounds[0] = np.log(np.array(bounds[0]))
self.gamma_bounds[1] = np.log(np.array(bounds[1]))
return
def _compute_state_bounds(self, bounds: np.array) -> None:
"""
Builds the numpy array that defines the bounds for the states.
:param bounds: numpy array, sized [n_dim, 2], in which for each
dimensions we can find respectively lower and upper bounds.
"""
if bounds is None:
self.state_bounds = np.inf * np.ones([self.dim, 2])
self.state_bounds[:, 0] = - self.state_bounds[:, 0]
else:
self.state_bounds = np.array(bounds)
return
def _compute_standardization_data(self, state_normalization: bool,
time_normalization: bool) -> None:
"""
Compute the means and the standard deviations for data standardization,
used in the GP regression.
"""
# Compute mean and std dev of the state and time values
if state_normalization:
self.system_data_means = np.mean(self.system_data,
axis=1).reshape(self.dim, 1)
self.system_data_std_dev = np.std(self.system_data,
axis=1).reshape(self.dim, 1)
else:
self.system_data_means = np.zeros([self.dim, 1])
self.system_data_std_dev = np.ones([self.dim, 1])
if time_normalization:
self.t_data_mean = np.mean(self.t_data)
self.t_data_std_dev = np.std(self.t_data)
else:
self.t_data_mean = 0.0
self.t_data_std_dev = 1.0
if self.gp_kernel == 'Sigmoid':
self.t_data_mean = 0.0
# Normalize states and time
self.normalized_states = (self.system_data - self.system_data_means) / \
self.system_data_std_dev
self.normalized_t_data = (self.t_data - self.t_data_mean) / \
self.t_data_std_dev
return
    def _build_tf_data(self) -> None:
        """
        Initialize all the TensorFlow constants needed by the pipeline.
        """
        # Normalized observations and time stamps (see
        # _compute_standardization_data)
        self.system = tf.constant(self.normalized_states, dtype=tf.float64)
        self.t = tf.constant(self.normalized_t_data, dtype=tf.float64)
        # Standardization statistics, kept in the graph so states can be
        # mapped back to their original scale (see
        # _build_derivatives_risk_term)
        self.system_means = tf.constant(self.system_data_means,
                                        dtype=tf.float64,
                                        shape=[self.dim, 1])
        self.system_std_dev = tf.constant(self.system_data_std_dev,
                                          dtype=tf.float64,
                                          shape=[self.dim, 1])
        self.t_mean = tf.constant(self.t_data_mean, dtype=tf.float64)
        self.t_std_dev = tf.constant(self.t_data_std_dev, dtype=tf.float64)
        # Problem sizes as int32 constants
        self.n_points = tf.constant(self.n_p, dtype=tf.int32)
        self.dimensionality = tf.constant(self.dim, dtype=tf.int32)
        return
def _build_states_bounds(self) -> None:
"""
Builds the tensors for the normalized states that will containing the
bounds for the constrained optimization.
"""
# Tile the bounds to get the right dimensions
state_lower_bounds = self.state_bounds[:, 0].reshape(self.dim, 1)
state_lower_bounds = np.tile(state_lower_bounds, [1, self.n_p])
state_lower_bounds = (state_lower_bounds - self.system_data_means)\
/ self.system_data_std_dev
state_lower_bounds = state_lower_bounds.reshape([self.dim,
self.n_p])
state_upper_bounds = self.state_bounds[:, 1].reshape(self.dim, 1)
state_upper_bounds = np.tile(state_upper_bounds, [1, self.n_p])
state_upper_bounds = (state_upper_bounds - self.system_data_means)\
/ self.system_data_std_dev
state_upper_bounds = state_upper_bounds.reshape([self.dim,
self.n_p])
self.state_lower_bounds = state_lower_bounds
self.state_upper_bounds = state_upper_bounds
return
    def _build_variables(self) -> None:
        """
        Builds the TensorFlow variables with the state values and the gamma
        that will later be optimized.

        All variables live in the 'risk_main' scope so that _build_optimizer
        can collect them by name; gamma is parameterized in log space so the
        optimized value stays positive.
        """
        with tf.variable_scope('risk_main'):
            # States are initialized at the (normalized) observations
            self.x = tf.Variable(self.system,
                                 dtype=tf.float64, trainable=True,
                                 name='states')
            if self.single_gp:
                # One shared scalar log_gamma, broadcast to every dimension
                self.log_gamma = tf.Variable(np.log(self.initial_gamma),
                                             dtype=tf.float64,
                                             trainable=self.train_gamma,
                                             name='log_gamma')
                self.gamma = tf.exp(self.log_gamma)\
                    * tf.ones([self.dimensionality, 1, 1], dtype=tf.float64)
            else:
                # One log_gamma per state dimension
                self.log_gamma =\
                    tf.Variable(np.log(self.initial_gamma)
                                * tf.ones([self.dimensionality, 1, 1],
                                          dtype=tf.float64),
                                trainable=self.train_gamma,
                                dtype=tf.float64,
                                name='log_gamma')
                self.gamma = tf.exp(self.log_gamma)
        return
    def _build_regularization_risk_term(self) -> tf.Tensor:
        """
        Build the first term of the risk, connected to regularization:
        0.5 * x^T C_phi^{-1} x, accumulated over the state dimensions.
        :return: the TensorFlow Tensor that contains the term.
        """
        # Solve C_phi a = x rather than forming the inverse explicitly
        a_vector = tf.linalg.solve(
            self.gaussian_process.c_phi_matrices_noiseless, tf.expand_dims(self.x, -1),name='reg_risk_inv_kernel')
        risk_term = 0.5 * tf.reduce_sum(self.x * tf.squeeze(a_vector))
        # risk_term is already a scalar here; the outer reduce_sum is a
        # no-op kept for symmetry with the other risk terms
        return tf.reduce_sum(risk_term)
    def _build_states_risk_term(self) -> tf.Tensor:
        """
        Build the second term of the risk, connected with the value of the
        states: per-dimension squared data-fit error, weighted by the GP
        likelihood variances.
        :return: the TensorFlow Tensor that contains the term.
        """
        states_difference = self.system - self.x
        # Sum of squared residuals along the time axis (one per dimension)
        risk_term = tf.reduce_sum(states_difference * states_difference, 1)
        risk_term = risk_term * 0.5 / tf.squeeze(
            self.gaussian_process.likelihood_variances)
        return tf.reduce_sum(risk_term)
    def _build_derivatives_risk_term(self) -> tf.Tensor:
        """
        Build the third term of the risk, connected with the value of the
        derivatives: mismatch between the ODE-model derivatives and the GP
        posterior derivative mean, weighted by the posterior derivative
        covariance plus the gamma jitter.
        :return: the TensorFlow Tensor that contains the term.
        """
        # Compute model and data-based derivatives
        # The ODE gradients are evaluated on the original (unnormalized)
        # states, then rescaled back into normalized state/time units
        unnormalized_states = self.x * self.system_std_dev + self.system_means
        model_derivatives = tf.expand_dims(self.trainable.compute_gradients(
            unnormalized_states) / self.system_std_dev * self.t_std_dev, -1)
        data_derivatives =\
            self.gaussian_process.compute_posterior_derivative_mean(self.x)
        derivatives_difference = model_derivatives - data_derivatives
        # Compute log_variance on the derivatives
        # Cached on self so _build_gamma_risk_term can reuse the same matrix
        self.posterior_derivative_variance = self.gaussian_process.compute_posterior_derivative_variance()
        post_variance =\
            self.posterior_derivative_variance +\
            self.gamma * tf.expand_dims(tf.eye(self.n_points,
                                               dtype=tf.float64), 0)
        # Compute risk term via a linear solve instead of an explicit inverse
        a_vector = tf.linalg.solve(post_variance, derivatives_difference,name='deriv_risk_inv_A')
        risk_term = 0.5 * tf.reduce_sum(a_vector * derivatives_difference)
        return risk_term
    def _build_gamma_risk_term(self) -> tf.Tensor:
        """
        Build the term associated with gamma: 0.5 * logdet of the posterior
        derivative covariance plus the gamma jitter, summed over dimensions.

        Must be built after _build_derivatives_risk_term, which sets
        self.posterior_derivative_variance (see _build_risk for the order).
        :return: the TensorFlow Tensor that contains the terms
        """
        # Compute log_variance on the derivatives
        post_variance =\
            self.posterior_derivative_variance +\
            self.gamma * tf.expand_dims(tf.eye(self.n_points,
                                               dtype=tf.float64), 0)
        risk_term = 0.5 * tf.linalg.logdet(post_variance)
        return tf.reduce_sum(risk_term)
    def _build_risk(self) -> None:
        """
        Build the risk tensor by summing up the single terms.

        The gamma (logdet) term only enters the objective when gamma is
        trained.
        """
        self.risk_term1 = self._build_regularization_risk_term()
        self.risk_term2 = self._build_states_risk_term()
        # Term 3 must be built before term 4: it caches
        # self.posterior_derivative_variance, which term 4 reuses
        self.risk_term3 = self._build_derivatives_risk_term()
        self.risk_term4 = self._build_gamma_risk_term()
        self.risk = self.risk_term1 + self.risk_term2 + self.risk_term3
        if self.train_gamma:
            self.risk += self.risk_term4
        if self.writer:
            # TensorBoard scalar for the total loss
            loss_sum=tf.summary.scalar(name='loss_sum', tensor=self.risk)
        return
    def _build_optimizer(self) -> None:
        """
        Build the TensorFlow optimizer, wrapper to the scipy optimization
        algorithms.
        """
        # Extract the TF variables that get optimized in the risk minimization
        t_vars = tf.trainable_variables()
        risk_vars = [var for var in t_vars if 'risk_main' in var.name]
        # Dictionary containing the bounds on the TensorFlow Variables
        # NOTE(review): risk_vars is indexed positionally, so this relies on
        # the creation order inside the 'risk_main' scope being
        # (theta, states, log_gamma) — confirm if variables are ever added
        # or reordered.
        var_to_bounds = {risk_vars[0]: (self.trainable.parameter_lower_bounds,
                                        self.trainable.parameter_upper_bounds),
                         risk_vars[1]: (self.state_lower_bounds,
                                        self.state_upper_bounds)}
        if self.train_gamma:
            # Bounds for log_gamma are already in log space
            var_to_bounds[risk_vars[2]] = (self.gamma_bounds[0],
                                           self.gamma_bounds[1])
        self.risk_optimizer = ExtendedScipyOptimizerInterface(
            loss=self.risk, method=self.optimizer, var_list=risk_vars,
            var_to_bounds=var_to_bounds,file_writer=self.writer,dir_prof_name=self.runtime_prof_dir)
        return
    def build_model(self) -> None:
        """
        Builds Some common part of the computational graph for the optimization.

        Order matters: covariance matrices and bounds must exist before the
        variables, risk and optimizer are constructed.
        """
        # Gaussian Process Interpolation
        self.gaussian_process.build_supporting_covariance_matrices(
            self.t, self.t)
        self._build_states_bounds()
        self._build_variables()
        self._build_risk()
        if self.writer:
            # Collect every summary op declared so far into a single op
            self.merged_sum = tf.summary.merge_all()
        self._build_optimizer()
        return
    def _initialize_variables(self) -> None:
        """
        Initialize all the variables and placeholders in the graph.

        This only creates the init op; it still has to be run in a session.
        """
        self.init = tf.global_variables_initializer()
        return
def _initialize_states_with_mean_gp(self, session: tf.Session, compute_dict:dict) -> None:
"""
Before optimizing the risk, we initialize the x | |
description = "a module that simulates Parameter Estimation uploads to GraceDB"
author = "<EMAIL>"
#-------------------------------------------------
import os
import random
import schedule
#-------------------------------------------------
'''
generate a different object for each follow-up. These may inherit from a single parent object, but they each should be able to produce data that would be uploaded to GraceDB
'''
class Bayestar():
    """
    Simulates the BAYESTAR rapid-localization follow-up: start/finish log
    messages, a placeholder FITS sky-map upload, and the downstream
    PlotSkymaps/Skyviewer jobs. Every stage has a configurable expected
    delay (timeout), gaussian jitter and success probability.
    """
    def __init__(self, graceDBevent, startTimeout=10.0, startJitter=2.0, startProb=1.0, skymapTimeout=45.0, skymapJitter=5.0, skymapProb=1.0, finishTimeout=40.0, finishJitter=2.0, finishProb=1.0, plotSkymapTimeout=5.0, plotSkymapJitter=1.0, plotSkymapProb=1.0, skyviewerTimeout=5.0, skyviewerJitter=1.0, skyviewerProb=1.0, gdb_url='https://gracedb.ligo.org/api/'):
        self.graceDBevent = graceDBevent
        self.gdb_url = gdb_url
        # start -> finish -> skymap stages, each with (timeout, jitter, prob)
        self.startTimeout = startTimeout
        self.startJitter = startJitter
        self.startProb = startProb
        self.finishTimeout = finishTimeout
        self.finishJitter = finishJitter
        self.finishProb = finishProb
        self.skymapTimeout = skymapTimeout
        self.skymapJitter = skymapJitter
        self.skymapProb = skymapProb
        # downstream follow-up jobs triggered by the sky-map upload
        self.plotSkymapTimeout = plotSkymapTimeout
        self.plotSkymapJitter = plotSkymapJitter
        self.plotSkymapProb = plotSkymapProb
        self.skyviewerTimeout = skyviewerTimeout
        self.skyviewerJitter = skyviewerJitter
        self.skyviewerProb = skyviewerProb
    def writeFITS(self, directory='.'):
        """Create an empty placeholder FITS file for this event and return its path."""
        target_dir = "%s/%s/"%(directory, self.graceDBevent.get_randStr())
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        fitsname = "%s/bayestar.fits.gz"%target_dir
        open(fitsname, 'w').close() ### may want to do more than this...
        return fitsname
    def genSchedule(self, directory='.', lvem=True):
        '''
        generate a schedule for Bayestar

        Each stage fires only if its random draw succeeds and the previous
        stage fired; delays are clamped so later stages never come first.
        '''
        sched = schedule.Schedule()
        if random.random() < self.startProb:
            start_dt = max(0, random.normalvariate(self.startTimeout, self.startJitter))
            for message in ['INFO:BAYESTAR:by your command...', 'INFO:BAYESTAR:starting sky localization']:
                sched.insert( schedule.WriteLog( start_dt, self.graceDBevent, message, gdb_url=self.gdb_url ) )
            if random.random() < self.finishProb:
                finish_dt = max(start_dt, random.normalvariate(self.finishTimeout, self.finishJitter))
                sched.insert( schedule.WriteLog( finish_dt, self.graceDBevent, 'INFO:BAYESTAR:sky localization complete', gdb_url=self.gdb_url ) )
                if random.random() < self.skymapProb:
                    skymap_dt = max(finish_dt, random.normalvariate(self.skymapTimeout, self.skymapJitter))
                    fitsname = self.writeFITS(directory=directory)
                    tagname = ['sky_loc']
                    if lvem:
                        tagname.append( 'lvem' )
                    sched.insert( schedule.WriteLog( skymap_dt, self.graceDBevent, 'INFO:BAYESTAR:uploaded sky map', filename=fitsname, tagname=tagname, gdb_url=self.gdb_url ) )
                    ### add in plotting and skyviewer follow-ups, shifted after the upload
                    followup = PlotSkymaps(self.graceDBevent, timeout=self.plotSkymapTimeout, jitter=self.plotSkymapJitter, probOfSuccess=self.plotSkymapProb, gdb_url=self.gdb_url).genSchedule(fitsname, tagname=tagname) \
                        + Skyviewer(self.graceDBevent, timeout=self.skyviewerTimeout, jitter=self.skyviewerJitter, probOfSuccess=self.skyviewerProb, gdb_url=self.gdb_url).genSchedule(fitsname, tagname=tagname)
                    followup.bump( skymap_dt )
                    sched += followup
        return sched
class LALInference():
    """
    Simulates the LALInference online parameter-estimation follow-up:
    start/finish log messages, a placeholder posterior_samples.dat, a
    placeholder FITS sky-map upload, and the downstream PlotSkymaps/
    Skyviewer jobs. Every stage has a configurable expected delay
    (timeout), gaussian jitter and success probability.
    """
    def __init__(self, graceDBevent, startTimeout=10.0, startJitter=2.0, startProb=1.0, skymapTimeout=45.0, skymapJitter=5.0, skymapProb=1.0, finishTimeout=40.0, finishJitter=2.0, finishProb=1.0, plotSkymapTimeout=5.0, plotSkymapJitter=1.0, plotSkymapProb=1.0, skyviewerTimeout=5.0, skyviewerJitter=1.0, skyviewerProb=1.0, gdb_url='https://gracedb.ligo.org/api/'):
        self.graceDBevent = graceDBevent
        self.gdb_url = gdb_url
        # start -> finish -> skymap stages, each with (timeout, jitter, prob)
        self.startTimeout = startTimeout
        self.startJitter = startJitter
        self.startProb = startProb
        self.skymapTimeout = skymapTimeout
        self.skymapJitter = skymapJitter
        self.skymapProb = skymapProb
        self.finishTimeout = finishTimeout
        self.finishJitter = finishJitter
        self.finishProb = finishProb
        # downstream follow-up jobs triggered by the sky-map upload
        self.plotSkymapTimeout = plotSkymapTimeout
        self.plotSkymapJitter = plotSkymapJitter
        self.plotSkymapProb = plotSkymapProb
        self.skyviewerTimeout = skyviewerTimeout
        self.skyviewerJitter = skyviewerJitter
        self.skyviewerProb = skyviewerProb
    def writeFITS(self, directory='.'):
        # Create an empty placeholder sky-map file and return its path
        dirname = "%s/%s/"%(directory, self.graceDBevent.get_randStr())
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        fitsname = "%s/lalinference_skymap.fits.gz"%dirname
        open(fitsname, 'w').close() ### may want to do more than this...
        return fitsname
    def writeDat(self, directory='.'):
        # Create an empty placeholder posterior-samples file, return its path
        dirname = "%s/%s/"%(directory, self.graceDBevent.get_randStr())
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        datname = "%s/posterior_samples.dat"%dirname
        open(datname, 'w').close() ### may want to do more than this...
        return datname
    def genSchedule(self, directory='.', lvem=True):
        '''
        generate a schedule for Bayestar
        '''
        sched = schedule.Schedule()
        if random.random() < self.startProb:
            start_dt = max(0, random.normalvariate(self.startTimeout, self.startJitter))
            message = 'LALInference online estimation started'
            sched.insert( schedule.WriteLog( start_dt, self.graceDBevent, message, gdb_url=self.gdb_url ) )
            if random.random() < self.finishProb:
                finish_dt = max(start_dt, random.normalvariate(self.finishTimeout, self.finishJitter))
                message = 'LALInference online estimation finished'
                # NOTE(review): this .dat file is created on disk but never
                # attached to the log entry below (compare the skymap upload,
                # which passes filename=...) — confirm whether it should be
                # uploaded.
                filename = self.writeDat(directory=directory)
                sched.insert( schedule.WriteLog( finish_dt, self.graceDBevent, message, gdb_url=self.gdb_url ) )
                if random.random() < self.skymapProb:
                    skymap_dt = max(finish_dt, random.normalvariate(self.skymapTimeout, self.skymapJitter))
                    message = 'LALInference'
                    fitsname = self.writeFITS(directory=directory)
                    tagname = ['sky_loc']
                    if lvem:
                        tagname.append( 'lvem' )
                    sched.insert( schedule.WriteLog( skymap_dt, self.graceDBevent, message, filename=fitsname, tagname=tagname, gdb_url=self.gdb_url ) )
                    ### add in plotting and skyviewer
                    agenda = PlotSkymaps(self.graceDBevent, timeout=self.plotSkymapTimeout, jitter=self.plotSkymapJitter, probOfSuccess=self.plotSkymapProb, gdb_url=self.gdb_url).genSchedule(fitsname, tagname=tagname) \
                        + Skyviewer(self.graceDBevent, timeout=self.skyviewerTimeout, jitter=self.skyviewerJitter, probOfSuccess=self.skyviewerProb, gdb_url=self.gdb_url).genSchedule(fitsname, tagname=tagname)
                    agenda.bump( skymap_dt )
                    sched += agenda
        return sched
class LIB():
    """Simulated LIB (LALInference Burst) follow-up.

    Posts "started"/"finished" log messages and a sky map, then chains the
    skymap plotting and skyviewer follow-ups.  Each stage fires with its
    own probability; delays are drawn from normal distributions
    (timeout = mean, jitter = stddev, in seconds).
    """

    def __init__(self, graceDBevent, startTimeout=10.0, startJitter=2.0, startProb=1.0, skymapTimeout=45.0, skymapJitter=5.0, skymapProb=1.0, finishTimeout=40.0, finishJitter=2.0, finishProb=1.0, plotSkymapTimeout=5.0, plotSkymapJitter=1.0, plotSkymapProb=1.0, skyviewerTimeout=5.0, skyviewerJitter=1.0, skyviewerProb=1.0, gdb_url='https://gracedb.ligo.org/api/'):
        self.graceDBevent = graceDBevent
        self.gdb_url = gdb_url
        self.startTimeout = startTimeout
        self.startJitter = startJitter
        self.startProb = startProb
        self.skymapTimeout = skymapTimeout
        self.skymapJitter = skymapJitter
        self.skymapProb = skymapProb
        self.finishTimeout = finishTimeout
        self.finishJitter = finishJitter
        self.finishProb = finishProb
        self.plotSkymapTimeout = plotSkymapTimeout
        self.plotSkymapJitter = plotSkymapJitter
        self.plotSkymapProb = plotSkymapProb
        self.skyviewerTimeout = skyviewerTimeout
        self.skyviewerJitter = skyviewerJitter
        self.skyviewerProb = skyviewerProb

    def writeFITS(self, directory='.'):
        """Touch an empty placeholder FITS file and return its path."""
        dirname = "%s/%s/"%(directory, self.graceDBevent.get_randStr())
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        fitsname = "%s/LIB_skymap.fits.gz"%dirname
        open(fitsname, 'w').close() ### may want to do more than this...
        return fitsname

    def writeDat(self, directory='.'):
        """Touch an empty placeholder posterior-samples file and return its path.

        NOTE(review): this helper is never called by genSchedule -- confirm
        whether LIB should also upload posterior samples with the finish log.
        """
        dirname = "%s/%s/"%(directory, self.graceDBevent.get_randStr())
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        datname = "%s/posterior_samples.dat"%dirname
        open(datname, 'w').close() ### may want to do more than this...
        return datname

    def genSchedule(self, directory='.', lvem=True):
        '''
        generate a schedule for LIB
        '''
        sched = schedule.Schedule()
        if random.random() < self.startProb:
            start_dt = max(0, random.normalvariate(self.startTimeout, self.startJitter))
            message = "LIB Parameter estimation started."
            sched.insert( schedule.WriteLog( start_dt, self.graceDBevent, message, gdb_url=self.gdb_url ) )

            if random.random() < self.finishProb:
                finish_dt = max(start_dt, random.normalvariate(self.finishTimeout, self.finishJitter))
                message = 'LIB Parameter estimation finished'
                sched.insert( schedule.WriteLog( finish_dt, self.graceDBevent, message, gdb_url=self.gdb_url ) )

                if random.random() < self.skymapProb:
                    skymap_dt = max(finish_dt, random.normalvariate(self.skymapTimeout, self.skymapJitter))
                    message = 'LIB'
                    fitsname = self.writeFITS(directory=directory)
                    tagname = ['sky_loc']
                    if lvem:
                        tagname.append( 'lvem' )
                    sched.insert( schedule.WriteLog( skymap_dt, self.graceDBevent, message, filename=fitsname, tagname=tagname, gdb_url=self.gdb_url ) )

                    ### add in plotting and skyviewer follow-ups, delayed
                    ### until after the skymap itself has been posted
                    agenda = PlotSkymaps(self.graceDBevent, timeout=self.plotSkymapTimeout, jitter=self.plotSkymapJitter, probOfSuccess=self.plotSkymapProb, gdb_url=self.gdb_url).genSchedule(fitsname, tagname=tagname) \
                        + Skyviewer(self.graceDBevent, timeout=self.skyviewerTimeout, jitter=self.skyviewerJitter, probOfSuccess=self.skyviewerProb, gdb_url=self.gdb_url).genSchedule(fitsname, tagname=tagname)
                    agenda.bump( skymap_dt )
                    sched += agenda

        return sched
class BayesWave():
    """Simulated BayesWave follow-up.

    Posts a launch message, a trio of parameter-estimation result logs, and
    a sky map, then chains the skymap plotting and skyviewer follow-ups.
    Each stage fires with its own probability; delays are drawn from normal
    distributions (timeout = mean, jitter = stddev, in seconds).
    """

    def __init__(self, graceDBevent, startTimeout=10.0, startJitter=2.0, startProb=1.0, skymapTimeout=45.0, skymapJitter=5.0, skymapProb=1.0, finishTimeout=40.0, finishJitter=2.0, finishProb=1.0, plotSkymapTimeout=5.0, plotSkymapJitter=1.0, plotSkymapProb=1.0, skyviewerTimeout=5.0, skyviewerJitter=1.0, skyviewerProb=1.0, gdb_url='https://gracedb.ligo.org/api/'):
        self.graceDBevent = graceDBevent
        self.gdb_url = gdb_url
        self.startTimeout = startTimeout
        self.startJitter = startJitter
        self.startProb = startProb
        self.skymapTimeout = skymapTimeout
        self.skymapJitter = skymapJitter
        self.skymapProb = skymapProb
        self.finishTimeout = finishTimeout
        self.finishJitter = finishJitter
        self.finishProb = finishProb
        self.plotSkymapTimeout = plotSkymapTimeout
        self.plotSkymapJitter = plotSkymapJitter
        self.plotSkymapProb = plotSkymapProb
        self.skyviewerTimeout = skyviewerTimeout
        self.skyviewerJitter = skyviewerJitter
        self.skyviewerProb = skyviewerProb

    def writeFITS(self, directory='.'):
        """Touch an empty placeholder FITS file and return its path."""
        dirname = "%s/%s/"%(directory, self.graceDBevent.get_randStr())
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        fitsname = "%s/BW_skymap.fits"%dirname
        open(fitsname, 'w').close() ### may want to do more than this...
        return fitsname

    def genSchedule(self, directory='.', lvem=True):
        '''
        generate a schedule for BayesWave
        '''
        sched = schedule.Schedule()
        if random.random() < self.startProb:
            start_dt = max(0, random.normalvariate(self.startTimeout, self.startJitter))
            message = 'BayesWaveBurst launched'
            sched.insert( schedule.WriteLog( start_dt, self.graceDBevent, message, gdb_url=self.gdb_url ) )

            if random.random() < self.finishProb:
                finish_dt = max(start_dt, random.normalvariate(self.finishTimeout, self.finishJitter))
                ### BayesWave reports its PE results as three separate logs
                for message in ['BWB Follow-up results', 'BWB parameter estimation', 'BWB Bayes Factors']:
                    sched.insert( schedule.WriteLog( finish_dt, self.graceDBevent, message, tagname=['pe'], gdb_url=self.gdb_url ) )

                if random.random() < self.skymapProb:
                    skymap_dt = max(finish_dt, random.normalvariate(self.skymapTimeout, self.skymapJitter))
                    message = 'BWB'
                    fitsname = self.writeFITS(directory=directory)
                    tagname = ['sky_loc']
                    if lvem:
                        tagname.append( 'lvem' )
                    sched.insert( schedule.WriteLog( skymap_dt, self.graceDBevent, message, filename=fitsname, tagname=tagname, gdb_url=self.gdb_url ) )

                    ### add in plotting and skyviewer follow-ups, delayed
                    ### until after the skymap itself has been posted
                    agenda = PlotSkymaps(self.graceDBevent, timeout=self.plotSkymapTimeout, jitter=self.plotSkymapJitter, probOfSuccess=self.plotSkymapProb, gdb_url=self.gdb_url).genSchedule(fitsname, tagname=tagname) \
                        + Skyviewer(self.graceDBevent, timeout=self.skyviewerTimeout, jitter=self.skyviewerJitter, probOfSuccess=self.skyviewerProb, gdb_url=self.gdb_url).genSchedule(fitsname, tagname=tagname)
                    agenda.bump( skymap_dt )
                    sched += agenda

        return sched
class CoherentWaveBurst():
    """Simulated cWB follow-up.

    Posts a parameter-estimation log and a sky map, then chains the skymap
    plotting and skyviewer follow-ups.  Unlike the other pipelines there is
    no "started" message.  Delays are drawn from normal distributions
    (timeout = mean, jitter = stddev, in seconds).
    """

    def __init__(self, graceDBevent, startTimeout=10.0, skymapTimeout=45.0, skymapJitter=5.0, skymapProb=1.0, finishTimeout=40.0, finishJitter=2.0, finishProb=1.0, plotSkymapTimeout=5.0, plotSkymapJitter=1.0, plotSkymapProb=1.0, skyviewerTimeout=5.0, skyviewerJitter=1.0, skyviewerProb=1.0, gdb_url='https://gracedb.ligo.org/api/'):
        self.graceDBevent = graceDBevent
        self.gdb_url = gdb_url
        ### BUGFIX: startTimeout was accepted but silently dropped; store it
        ### for parity with the other pipeline classes.  NOTE(review): it is
        ### still unused by genSchedule -- confirm whether cWB should post a
        ### "started" message.
        self.startTimeout = startTimeout
        self.skymapTimeout = skymapTimeout
        self.skymapJitter = skymapJitter
        self.skymapProb = skymapProb
        self.finishTimeout = finishTimeout
        self.finishJitter = finishJitter
        self.finishProb = finishProb
        self.plotSkymapTimeout = plotSkymapTimeout
        self.plotSkymapJitter = plotSkymapJitter
        self.plotSkymapProb = plotSkymapProb
        self.skyviewerTimeout = skyviewerTimeout
        self.skyviewerJitter = skyviewerJitter
        self.skyviewerProb = skyviewerProb

    def writeFITS(self, directory='.'):
        """Touch an empty placeholder FITS file and return its path."""
        dirname = "%s/%s/"%(directory, self.graceDBevent.get_randStr())
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        fitsname = "%s/skyprobcc.fits.gz"%dirname
        open(fitsname, 'w').close() ### may want to do more than this...
        return fitsname

    def genSchedule(self, directory='.', lvem=True):
        '''
        generate a schedule for cWB
        '''
        sched = schedule.Schedule()
        if random.random() < self.finishProb:
            finish_dt = max(0, random.normalvariate(self.finishTimeout, self.finishJitter))
            message = 'cWB parameter estimation'
            sched.insert( schedule.WriteLog( finish_dt, self.graceDBevent, message, tagname=['pe'], gdb_url=self.gdb_url ) )

            if random.random() < self.skymapProb:
                skymap_dt = max(finish_dt, random.normalvariate(self.skymapTimeout, self.skymapJitter))
                message = 'cWB skymap fit'
                fitsname = self.writeFITS(directory=directory)
                tagname = ['sky_loc']
                if lvem:
                    tagname.append( 'lvem' )
                sched.insert( schedule.WriteLog( skymap_dt, self.graceDBevent, message, filename=fitsname, tagname=tagname, gdb_url=self.gdb_url ) )

                ### add in plotting and skyviewer follow-ups, delayed until
                ### after the skymap itself has been posted
                agenda = PlotSkymaps(self.graceDBevent, timeout=self.plotSkymapTimeout, jitter=self.plotSkymapJitter, probOfSuccess=self.plotSkymapProb, gdb_url=self.gdb_url).genSchedule(fitsname, tagname=tagname) \
                    + Skyviewer(self.graceDBevent, timeout=self.skyviewerTimeout, jitter=self.skyviewerJitter, probOfSuccess=self.skyviewerProb, gdb_url=self.gdb_url).genSchedule(fitsname, tagname=tagname)
                agenda.bump( skymap_dt )
                sched += agenda

        return sched
#-----------
class PlotSkymaps():
    """Simulated skymap plotter: posts a Mollweide-projection PNG for a
    FITS file after a normally-distributed delay, with a configurable
    probability of happening at all."""

    def __init__(self, graceDBevent, timeout=30.0, jitter=5.0, probOfSuccess=1.0, gdb_url='https://gracedb.ligo.org/api/'):
        self.graceDBevent = graceDBevent
        self.gdb_url = gdb_url
        self.timeout = timeout   # mean delay before the plot is posted (seconds)
        self.jitter = jitter     # stddev of that delay
        self.prob = probOfSuccess

    def genMessage(self, fits):
        """Log message accompanying the plot of *fits*."""
        return "Mollweide projection of %s"%fits

    def genPNG(self, fits):
        """Touch (create empty) the PNG that would be produced for *fits*
        and return its path."""
        pngName = os.path.join( os.path.dirname(fits), os.path.basename(fits).split('.')[0]+".png" )
        open(pngName, "w").close() ### touch it so it exists
        return pngName

    def genSchedule(self, fits, tagname=None):
        """Return a Schedule that, with probability self.prob, uploads the
        plot of *fits* tagged with *tagname* (default ['sky_loc']).

        BUGFIX: the default tagname was a mutable list shared across calls;
        use None and substitute a fresh list instead.
        """
        if tagname is None:
            tagname = ['sky_loc']
        sched = schedule.Schedule()
        if random.random() < self.prob:
            sched.insert( schedule.WriteLog( max(0, random.normalvariate(self.timeout, self.jitter)), self.graceDBevent, self.genMessage(fits), filename=self.genPNG(fits), tagname=tagname, gdb_url=self.gdb_url ) )
        return sched
class Skyviewer():
def __init__(self, graceDBevent, timeout=30.0, jitter=5.0, probOfSuccess=1.0, gdb_url='https://gracedb.ligo.org/api/'):
self.graceDBevent = graceDBevent
self.gdb_url = gdb_url
self.timeout = timeout
self.jitter = jitter
self.prob = probOfSuccess
def genMessage(self):
return | |
'S', # \N{LISU LETTER SA}
0x16f3a: 'S', # \N{MIAO LETTER SA}
0x10296: 'S', # \N{LYCIAN LETTER S}
0x10420: 'S', # \N{DESERET CAPITAL LETTER ZHEE}
0x1f75c: 'sss', # \N{ALCHEMICAL SYMBOL FOR STRATUM SUPER STRATUM}
0xfb06: 'st', # \N{LATIN SMALL LIGATURE ST}
0x1d42d: 't', # \N{MATHEMATICAL BOLD SMALL T}
0x1d461: 't', # \N{MATHEMATICAL ITALIC SMALL T}
0x1d495: 't', # \N{MATHEMATICAL BOLD ITALIC SMALL T}
0x1d4c9: 't', # \N{MATHEMATICAL SCRIPT SMALL T}
0x1d4fd: 't', # \N{MATHEMATICAL BOLD SCRIPT SMALL T}
0x1d531: 't', # \N{MATHEMATICAL FRAKTUR SMALL T}
0x1d565: 't', # \N{MATHEMATICAL DOUBLE-STRUCK SMALL T}
0x1d599: 't', # \N{MATHEMATICAL BOLD FRAKTUR SMALL T}
0x1d5cd: 't', # \N{MATHEMATICAL SANS-SERIF SMALL T}
0x1d601: 't', # \N{MATHEMATICAL SANS-SERIF BOLD SMALL T}
0x1d635: 't', # \N{MATHEMATICAL SANS-SERIF ITALIC SMALL T}
0x1d669: 't', # \N{MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL T}
0x1d69d: 't', # \N{MATHEMATICAL MONOSPACE SMALL T}
0x22a4: 'T', # \N{DOWN TACK}
0x27d9: 'T', # \N{LARGE DOWN TACK}
0x1f768: 'T', # \N{ALCHEMICAL SYMBOL FOR CRUCIBLE-4}
0xff34: 'T', # \N{FULLWIDTH LATIN CAPITAL LETTER T}
0x1d413: 'T', # \N{MATHEMATICAL BOLD CAPITAL T}
0x1d447: 'T', # \N{MATHEMATICAL ITALIC CAPITAL T}
0x1d47b: 'T', # \N{MATHEMATICAL BOLD ITALIC CAPITAL T}
0x1d4af: 'T', # \N{MATHEMATICAL SCRIPT CAPITAL T}
0x1d4e3: 'T', # \N{MATHEMATICAL BOLD SCRIPT CAPITAL T}
0x1d517: 'T', # \N{MATHEMATICAL FRAKTUR CAPITAL T}
0x1d54b: 'T', # \N{MATHEMATICAL DOUBLE-STRUCK CAPITAL T}
0x1d57f: 'T', # \N{MATHEMATICAL BOLD FRAKTUR CAPITAL T}
0x1d5b3: 'T', # \N{MATHEMATICAL SANS-SERIF CAPITAL T}
0x1d5e7: 'T', # \N{MATHEMATICAL SANS-SERIF BOLD CAPITAL T}
0x1d61b: 'T', # \N{MATHEMATICAL SANS-SERIF ITALIC CAPITAL T}
0x1d64f: 'T', # \N{MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL T}
0x1d683: 'T', # \N{MATHEMATICAL MONOSPACE CAPITAL T}
0x3a4: 'T', # \N{GREEK CAPITAL LETTER TAU}
0x1d6bb: 'T', # \N{MATHEMATICAL BOLD CAPITAL TAU}
0x1d6f5: 'T', # \N{MATHEMATICAL ITALIC CAPITAL TAU}
0x1d72f: 'T', # \N{MATHEMATICAL BOLD ITALIC CAPITAL TAU}
0x1d769: 'T', # \N{MATHEMATICAL SANS-SERIF BOLD CAPITAL TAU}
0x1d7a3: 'T', # \N{MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL TAU}
0x2ca6: 'T', # \N{COPTIC CAPITAL LETTER TAU}
0x422: 'T', # \N{CYRILLIC CAPITAL LETTER TE}
0x13a2: 'T', # \N{CHEROKEE LETTER I}
0xa4d4: 'T', # \N{LISU LETTER TA}
0x16f0a: 'T', # \N{MIAO LETTER TA}
0x118bc: 'T', # \N{WARANG CITI CAPITAL LETTER HAR}
0x10297: 'T', # \N{LYCIAN LETTER T}
0x102b1: 'T', # \N{CARIAN LETTER C-18}
0x10315: 'T', # \N{OLD ITALIC LETTER TE}
0xa728: 'T3', # \N{LATIN CAPITAL LETTER TZ}
0x2121: 'TEL', # \N{TELEPHONE SIGN}
0xa777: 'tf', # \N{LATIN SMALL LETTER TUM}
0x2a6: 'ts', # \N{LATIN SMALL LETTER TS DIGRAPH}
0x1d42e: 'u', # \N{MATHEMATICAL BOLD SMALL U}
0x1d462: 'u', # \N{MATHEMATICAL ITALIC SMALL U}
0x1d496: 'u', # \N{MATHEMATICAL BOLD ITALIC SMALL U}
0x1d4ca: 'u', # \N{MATHEMATICAL SCRIPT SMALL U}
0x1d4fe: 'u', # \N{MATHEMATICAL BOLD SCRIPT SMALL U}
0x1d532: 'u', # \N{MATHEMATICAL FRAKTUR SMALL U}
0x1d566: 'u', # \N{MATHEMATICAL DOUBLE-STRUCK SMALL U}
0x1d59a: 'u', # \N{MATHEMATICAL BOLD FRAKTUR SMALL U}
0x1d5ce: 'u', # \N{MATHEMATICAL SANS-SERIF SMALL U}
0x1d602: 'u', # \N{MATHEMATICAL SANS-SERIF BOLD SMALL U}
0x1d636: 'u', # \N{MATHEMATICAL SANS-SERIF ITALIC SMALL U}
0x1d66a: 'u', # \N{MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL U}
0x1d69e: 'u', # \N{MATHEMATICAL MONOSPACE SMALL U}
0xa79f: 'u', # \N{LATIN SMALL LETTER VOLAPUK UE}
0x1d1c: 'u', # \N{LATIN LETTER SMALL CAPITAL U}
0xab4e: 'u', # \N{LATIN SMALL LETTER U WITH SHORT RIGHT LEG}
0xab52: 'u', # \N{LATIN SMALL LETTER U WITH LEFT HOOK}
0x28b: 'u', # \N{LATIN SMALL LETTER V WITH HOOK}
0x3c5: 'u', # \N{GREEK SMALL LETTER UPSILON}
0x1d6d6: 'u', # \N{MATHEMATICAL BOLD SMALL UPSILON}
0x1d710: 'u', # \N{MATHEMATICAL ITALIC SMALL UPSILON}
0x1d74a: 'u', # \N{MATHEMATICAL BOLD ITALIC SMALL UPSILON}
0x1d784: 'u', # \N{MATHEMATICAL SANS-SERIF BOLD SMALL UPSILON}
0x1d7be: 'u', # \N{MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL UPSILON}
0x57d: 'u', # \N{ARMENIAN SMALL LETTER SEH}
0x104f6: 'u', # \N{OSAGE SMALL LETTER U}
0x118d8: 'u', # \N{WARANG CITI SMALL LETTER PU}
0x222a: 'U', # \N{UNION}
0x22c3: 'U', # \N{N-ARY UNION}
0x1d414: 'U', # \N{MATHEMATICAL BOLD CAPITAL U}
0x1d448: 'U', # \N{MATHEMATICAL ITALIC CAPITAL U}
0x1d47c: 'U', # \N{MATHEMATICAL BOLD ITALIC CAPITAL U}
0x1d4b0: 'U', # \N{MATHEMATICAL SCRIPT CAPITAL U}
0x1d4e4: 'U', # \N{MATHEMATICAL BOLD SCRIPT CAPITAL U}
0x1d518: 'U', # \N{MATHEMATICAL FRAKTUR CAPITAL U}
0x1d54c: 'U', # \N{MATHEMATICAL DOUBLE-STRUCK CAPITAL U}
0x1d580: 'U', # \N{MATHEMATICAL BOLD FRAKTUR CAPITAL U}
0x1d5b4: 'U', # \N{MATHEMATICAL SANS-SERIF CAPITAL U}
0x1d5e8: 'U', # \N{MATHEMATICAL SANS-SERIF BOLD CAPITAL U}
0x1d61c: 'U', # \N{MATHEMATICAL SANS-SERIF ITALIC CAPITAL U}
0x1d650: 'U', # \N{MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL U}
0x1d684: 'U', # \N{MATHEMATICAL MONOSPACE CAPITAL U}
0x54d: 'U', # \N{ARMENIAN CAPITAL LETTER SEH}
0x1200: 'U', # \N{ETHIOPIC SYLLABLE HA}
0x104ce: 'U', # \N{OSAGE CAPITAL LETTER U}
0x144c: 'U', # \N{CANADIAN SYLLABICS TE}
0xa4f4: 'U', # \N{LISU LETTER U}
0x16f42: 'U', # \N{MIAO LETTER WA}
0x118b8: 'U', # \N{WARANG CITI CAPITAL LETTER PU}
0x1467: "U'", # \N{CANADIAN SYLLABICS TTE}
0x1d6b: 'ue', # \N{LATIN SMALL LETTER UE}
0xab63: 'uo', # \N{LATIN SMALL LETTER UO}
0x2228: 'v', # \N{LOGICAL OR}
0x22c1: 'v', # \N{N-ARY LOGICAL OR}
0xff56: 'v', # \N{FULLWIDTH LATIN SMALL LETTER V}
0x2174: 'v', # \N{SMALL ROMAN NUMERAL FIVE}
0x1d42f: 'v', # \N{MATHEMATICAL BOLD SMALL V}
0x1d463: 'v', # \N{MATHEMATICAL ITALIC SMALL V}
0x1d497: 'v', # \N{MATHEMATICAL BOLD ITALIC SMALL V}
0x1d4cb: 'v', # \N{MATHEMATICAL SCRIPT SMALL V}
0x1d4ff: 'v', # \N{MATHEMATICAL BOLD SCRIPT SMALL V}
0x1d533: 'v', # \N{MATHEMATICAL FRAKTUR SMALL V}
0x1d567: 'v', # \N{MATHEMATICAL DOUBLE-STRUCK SMALL V}
0x1d59b: 'v', # \N{MATHEMATICAL BOLD FRAKTUR SMALL V}
0x1d5cf: 'v', # \N{MATHEMATICAL SANS-SERIF SMALL V}
0x1d603: 'v', # \N{MATHEMATICAL SANS-SERIF BOLD SMALL V}
0x1d637: 'v', # \N{MATHEMATICAL SANS-SERIF ITALIC SMALL V}
0x1d66b: 'v', # \N{MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL V}
0x1d69f: 'v', # \N{MATHEMATICAL MONOSPACE SMALL V}
0x1d20: 'v', # \N{LATIN LETTER SMALL CAPITAL V}
0x3bd: 'v', # \N{GREEK SMALL LETTER NU}
0x1d6ce: 'v', # \N{MATHEMATICAL BOLD SMALL NU}
0x1d708: 'v', # \N{MATHEMATICAL ITALIC SMALL NU}
0x1d742: 'v', # \N{MATHEMATICAL BOLD ITALIC SMALL NU}
0x1d77c: 'v', # \N{MATHEMATICAL SANS-SERIF BOLD SMALL NU}
0x1d7b6: 'v', # \N{MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL NU}
0x475: 'v', # \N{CYRILLIC SMALL LETTER IZHITSA}
0x5d8: 'v', # \N{HEBREW LETTER TET}
0x11706: 'v', # \N{AHOM LETTER PA}
0xaba9: 'v', # \N{CHEROKEE SMALL LETTER DO}
0x118c0: 'v', # \N{WARANG CITI SMALL LETTER NGAA}
0x1d20d: 'V', # \N{GREEK VOCAL NOTATION SYMBOL-14}
0x667: 'V', # \N{ARABIC-INDIC DIGIT SEVEN}
0x6f7: 'V', # \N{EXTENDED ARABIC-INDIC DIGIT SEVEN}
0x2164: 'V', # \N{ROMAN NUMERAL FIVE}
0x1d415: 'V', # \N{MATHEMATICAL BOLD CAPITAL V}
0x1d449: 'V', # \N{MATHEMATICAL ITALIC CAPITAL V}
0x1d47d: 'V', # \N{MATHEMATICAL BOLD ITALIC CAPITAL V}
0x1d4b1: 'V', # \N{MATHEMATICAL SCRIPT CAPITAL V}
0x1d4e5: 'V', # \N{MATHEMATICAL BOLD SCRIPT CAPITAL V}
0x1d519: 'V', # \N{MATHEMATICAL FRAKTUR CAPITAL V}
0x1d54d: 'V', # \N{MATHEMATICAL DOUBLE-STRUCK CAPITAL V}
0x1d581: 'V', # \N{MATHEMATICAL BOLD FRAKTUR CAPITAL V}
0x1d5b5: 'V', # \N{MATHEMATICAL SANS-SERIF CAPITAL V}
0x1d5e9: 'V', # \N{MATHEMATICAL SANS-SERIF BOLD CAPITAL V}
0x1d61d: 'V', # \N{MATHEMATICAL SANS-SERIF ITALIC CAPITAL V}
0x1d651: 'V', # \N{MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL V}
0x1d685: 'V', # \N{MATHEMATICAL MONOSPACE CAPITAL V}
0x474: 'V', # \N{CYRILLIC CAPITAL LETTER IZHITSA}
0x2d38: 'V', # \N{TIFINAGH LETTER YADH}
0x13d9: 'V', # \N{CHEROKEE LETTER DO}
0x142f: 'V', # \N{CANADIAN SYLLABICS PE}
0xa6df: 'V', # \N{BAMUM LETTER KO}
0xa4e6: 'V', # \N{LISU LETTER HA}
0x16f08: 'V', # \N{MIAO LETTER VA}
0x118a0: 'V', # \N{WARANG CITI CAPITAL LETTER NGAA}
0x1051d: 'V', # \N{ELBASAN LETTER TE}
0x1f76c: 'VB', # \N{ALCHEMICAL SYMBOL FOR BATH OF VAPOURS}
0x2175: 'vi', # \N{SMALL ROMAN NUMERAL SIX}
0x2176: 'vii', # \N{SMALL ROMAN NUMERAL SEVEN}
0x2177: 'viii', # \N{SMALL ROMAN NUMERAL EIGHT}
0x2165: 'Vl', # \N{ROMAN NUMERAL SIX}
0x2166: 'Vll', # \N{ROMAN NUMERAL SEVEN}
0x2167: 'Vlll', # \N{ROMAN NUMERAL EIGHT}
0x26f: 'w', # \N{LATIN SMALL LETTER TURNED M}
0x1d430: 'w', # \N{MATHEMATICAL BOLD SMALL W}
0x1d464: 'w', # \N{MATHEMATICAL ITALIC SMALL W}
0x1d498: 'w', # \N{MATHEMATICAL BOLD ITALIC SMALL W}
0x1d4cc: 'w', # \N{MATHEMATICAL SCRIPT SMALL W}
0x1d500: 'w', # \N{MATHEMATICAL BOLD SCRIPT SMALL W}
0x1d534: 'w', # \N{MATHEMATICAL FRAKTUR SMALL W}
0x1d568: 'w', # \N{MATHEMATICAL DOUBLE-STRUCK SMALL W}
0x1d59c: | |
# Source repository: eiling/SchoolIdolAPI
# -*- coding: utf-8 -*-
from django.contrib.auth.models import User, Group
from django.db import models
from django.contrib import admin
from dateutil.relativedelta import relativedelta
from django.utils.translation import ugettext_lazy as _, string_concat
from api.models_languages import *
from django.core import validators
from django.utils import timezone
from django.conf import settings
from django_prometheus.models import ExportModelOperationsMixin
from api.raw import raw_information
from web.utils import randomString, singlecardurl
import hashlib, urllib
import csv
import datetime
import os
# Card attributes ("colors") in the game.
ATTRIBUTE_CHOICES = (
    ('Smile', _('Smile')),
    ('Pure', _('Pure')),
    ('Cool', _('Cool')),
    ('All', _('All')),
)
# BUGFIX: dict(...).keys() does not preserve the declared order (and is a
# view object on Python 3); build the key list directly instead.
ATTRIBUTE_ARRAY = [key for key, _label in ATTRIBUTE_CHOICES]
# Card rarity codes.
RARITY_CHOICES = (
    ('N', _('Normal')),
    ('R', _('Rare')),
    ('SR', _('Super Rare')),
    ('SSR', _('Super Super Rare')),
    ('UR', _('Ultra Rare')),
)
RARITY_DICT = dict(RARITY_CHOICES)

# Mobile operating systems a game account can run on.
OS_CHOICES = (
    ('Android', 'Android'),
    ('iOs', 'iOs'),
)

# Where an owned card can be stored for an account.
STORED_CHOICES = (
    ('Deck', string_concat(_('Deck'), ' (', _('You have it'), ')')),
    ('Album', string_concat(_('Album'), ' (', _('You don\'t have it anymore'), ')')),
    ('Box', _('Present Box')),
    ('Favorite', _('Wish List')),
)
STORED_DICT = dict(STORED_CHOICES)
# Shorter labels used when rendering activities.
STORED_DICT_FOR_ACTIVITIES = {
    'Deck': _('Deck'),
    'Album': _('Album'),
    'Box': _('Present Box'),
    'Favorite': _('Wish List'),
}

# Verification tiers (0 = not verified).
VERIFIED_CHOICES = (
    (0, ''),
    (1, _('Silver Verified')),
    (2, _('Gold Verified')),
    (3, _('Bronze Verified')),
)
VERIFIED_DICT = dict(VERIFIED_CHOICES)
# Same tiers with plain (untranslated) strings.
VERIFIED_UNTRANSLATED_CHOICES = (
    (0, ''),
    (1, 'Silver Verified'),
    (2, 'Gold Verified'),
    (3, 'Bronze Verified'),
)
VERIFIED_UNTRANSLATED_DICT = dict(VERIFIED_UNTRANSLATED_CHOICES)

# How a player physically plays the game.
PLAYWITH_CHOICES = (
    ('Thumbs', _('Thumbs')),
    ('Fingers', _('All fingers')),
    ('Index', _('Index fingers')),
    ('Hand', _('One hand')),
    ('Other', _('Other')),
)
PLAYWITH_DICT = dict(PLAYWITH_CHOICES)
# Icon name associated with each play style.
PLAYWITH_ICONS = (
    ('Thumbs', 'thumbs'),
    ('Fingers', 'fingers'),
    ('Index', 'index'),
    ('Hand', 'fingers'),
    ('Other', 'sausage'),
)
PLAYWITH_ICONS_DICT = dict(PLAYWITH_ICONS)

# Activity message types, stored as ints.
ACTIVITY_TYPE_CUSTOM = 6
ACTIVITY_MESSAGE_CHOICES_INT = (
    (0, 'Added a card'),
    (1, 'Idolized a card'),
    (2, 'Rank Up'),
    (3, 'Ranked in event'),
    (4, 'Verified'),
    (5, 'Trivia'),
    (ACTIVITY_TYPE_CUSTOM, 'Custom'),
)
ACTIVITY_MESSAGE_DICT_INT = dict(ACTIVITY_MESSAGE_CHOICES_INT)
def messageStringToInt(message):
    """Map an activity message string back to its integer code (0 if unknown)."""
    return next((code for code, text in ACTIVITY_MESSAGE_DICT_INT.items() if text == message), 0)
# Format strings used to render each activity type; keys match the
# untranslated strings in ACTIVITY_MESSAGE_CHOICES_INT.
ACTIVITY_MESSAGE_CHOICES = (
    ('Added a card', _('Added {} in {}')),
    ('Idolized a card', _('Idolized {} in {}')),
    ('Rank Up', _('Rank Up {}')),
    ('Ranked in event', _('Ranked {} in event {}')),
    ('Verified', _('Just got verified: {}')),
    ('Trivia', _('{}/10 on School Idol Trivia! {}')),
    ('Custom', 'Custom'),
)
ACTIVITY_MESSAGE_DICT = dict(ACTIVITY_MESSAGE_CHOICES)

# Notification types and their display/navigation metadata.
NOTIFICATION_PM = 0
NOTIFICATION_LIKE = 1
NOTIFICATION_FOLLOW = 2
NOTIFICATION_MESSAGE_CHOICES = (
    (NOTIFICATION_PM, _('You have a new private message from {}.')),
    (NOTIFICATION_LIKE, _('{} liked your activity.')),
    (NOTIFICATION_FOLLOW, _('{} just followed you.')),
)
NOTIFICATION_MESSAGE_DICT = dict(NOTIFICATION_MESSAGE_CHOICES)
# URL template opened when the notification is clicked.
NOTIFICATION_URLS = {
    NOTIFICATION_PM: '/user/{}/messages/',
    NOTIFICATION_LIKE: '/activities/{}/',
    NOTIFICATION_FOLLOW: '/user/{}/',
}
# Icon name shown next to each notification.
NOTIFICATION_ICONS = {
    NOTIFICATION_PM: 'comments',
    NOTIFICATION_LIKE: 'heart',
    NOTIFICATION_FOLLOW: 'users',
}
# Short machine-readable tag for each notification type.
NOTIFICATION_STRINGS = {
    NOTIFICATION_PM: 'PM',
    NOTIFICATION_LIKE: 'LIKE',
    NOTIFICATION_FOLLOW: 'FOLLOW',
}

# Supporter / staff status badges.
STATUS_CHOICES = (
    ('THANKS', 'Thanks'),
    ('SUPPORTER', _('Idol Supporter')),
    ('LOVER', _('Idol Lover')),
    ('AMBASSADOR', _('Idol Ambassador')),
    ('PRODUCER', _('Idol Producer')),
    ('DEVOTEE', _('Ultimate Idol Devotee')),
    ('STAFF', _('Staff')),
    ('DATABASE', _('Database Maintainer')),
)
STATUS_DICT = dict(STATUS_CHOICES)

# Permissions that can be granted to staff members.
STAFF_PERMISSIONS_CHOICES = (
    ('VERIFICATION_1', string_concat(_('Takes care of verifications:'), ' ', _('Silver Verified'))),
    ('VERIFICATION_2', string_concat(_('Takes care of verifications:'), ' ', _('Gold Verified'))),
    ('VERIFICATION_3', string_concat(_('Takes care of verifications:'), ' ', _('Bronze Verified'))),
    ('ACTIVE_MODERATOR', string_concat(_('Active'), ' ', _('Moderator'))),
    ('DECISIVE_MODERATOR', string_concat(_('Decisive'), ' ', _('Moderator'))),
    ('COMMUNITY_MANAGER', _('Community Manager')),
    ('DATABASE_MAINTAINER', _('Database Maintainer')),
    ('DEVELOPER', _('Developer')),
)
STAFF_PERMISSIONS_DICT = dict(STAFF_PERMISSIONS_CHOICES)

# Lifecycle of an account verification request.
VERIFICATION_STATUS_CHOICES = (
    (0, _('Rejected')),
    (1, _('Pending')),
    (2, _('In Progress')),
    (3, _('Verified')),
)
VERIFICATION_STATUS_DICT = dict(VERIFICATION_STATUS_CHOICES)
# BUGFIX: a second, byte-identical definition of VERIFICATION_STATUS_CHOICES
# and VERIFICATION_STATUS_DICT appeared here; removed as a duplicate (the
# originals are defined directly above).
# Same verification statuses with plain (untranslated) strings.
VERIFICATION_UNTRANSLATED_STATUS_CHOICES = (
    (0, 'Rejected'),
    (1, 'Pending'),
    (2, 'In Progress'),
    (3, 'Verified'),
)
VERIFICATION_UNTRANSLATED_STATUS_DICT = dict(VERIFICATION_UNTRANSLATED_STATUS_CHOICES)

# Lifecycle of a moderation report.
MODERATION_REPORT_STATUS_CHOICES = (
    (0, 'Rejected'),
    (1, 'Pending'),
    (2, 'In Progress'),
    (3, 'Accepted'),
)
MODERATION_REPORT_STATUS_DICT = dict(MODERATION_REPORT_STATUS_CHOICES)

# External services a user can link on their profile.
LINK_CHOICES = (
    ('twitter', 'Twitter'),
    ('facebook', 'Facebook'),
    ('reddit', 'Reddit'),
    ('line', 'LINE Messenger'),
    ('tumblr', 'Tumblr'),
    ('otonokizaka', 'Otonokizaka.org Forum'),
    ('twitch', 'Twitch'),
    ('steam', 'Steam'),
    ('osu', 'Osu!'),
    ('mal', 'MyAnimeList'),
    ('instagram', 'Instagram'),
    ('myfigurecollection', 'MyFigureCollection'),
    ('hummingbird', 'Hummingbird'),
    ('youtube', 'YouTube'),
    ('deviantart', 'DeviantArt'),
    ('pixiv', 'Pixiv'),
    ('github', 'GitHub'),
    ('animeplanet', 'Anime-Planet'),
)
LINK_DICT = dict(LINK_CHOICES)
# URL template per link type; '{}' is replaced by the user's handle/value.
# The first three keys are profile fields rather than LINK_CHOICES entries.
LINK_URLS = {
    'Best Girl': '/idol/{}/',
    'Location': 'http://maps.google.com/?q={}',
    'Birthdate': '/map/',
    'twitter': 'http://twitter.com/{}',
    'facebook': 'https://www.facebook.com/{}',
    'reddit': 'http://www.reddit.com/user/{}',
    'line': 'http://line.me/#{}',
    'tumblr': 'http://{}.tumblr.com/',
    'otonokizaka': 'http://otonokizaka.org/index.php?user/{}/',
    'twitch': 'http://twitch.tv/{}',
    'steam': 'http://steamcommunity.com/id/{}',
    'osu': 'http://osu.ppy.sh/u/{}',
    'mal': 'http://myanimelist.net/profile/{}',
    'instagram': 'https://instagram.com/{}/',
    'myfigurecollection': 'http://myfigurecollection.net/profile/{}',
    'hummingbird': 'https://hummingbird.me/users/{}',
    'youtube': 'https://www.youtube.com/{}',
    'deviantart': 'http://{}.deviantart.com/gallery/',
    'pixiv': 'http://www.pixiv.net/member.php?id={}',
    'github': 'https://github.com/{}',
    'animeplanet': 'http://www.anime-planet.com/users/{}',
}
# Services that have a bundled icon image; others fall back elsewhere.
LINK_IMAGES = {
    'reddit': settings.STATIC_FILES_URL + 'static/reddit.png',
    'twitter': settings.STATIC_FILES_URL + 'static/twitter.png',
    'facebook': settings.STATIC_FILES_URL + 'static/facebook.png',
    'instagram': settings.STATIC_FILES_URL + 'static/instagram.png',
    'line': settings.STATIC_FILES_URL + 'static/line.png',
    'twitch': settings.STATIC_FILES_URL + 'static/twitch.png',
    'mal': settings.STATIC_FILES_URL + 'static/mal.png',
    'steam': settings.STATIC_FILES_URL + 'static/steam.png',
    'tumblr': settings.STATIC_FILES_URL + 'static/tumblr.png',
}
# How often the linked account is used.
LINK_RELEVANCE_CHOICES = (
    (0, _('Never')),
    (1, _('Sometimes')),
    (2, _('Often')),
    (3, _('Every single day')),
)
LINK_RELEVANCE_DICT = dict(LINK_RELEVANCE_CHOICES)

# Tabs available on an account page.
ACCOUNT_TAB_CHOICES = (
    ('deck', _('Deck')),
    ('album', _('Album')),
    ('teams', _('Teams')),
    ('events', _('Events')),
    ('wishlist', _('Wish List')),
    ('presentbox', _('Present Box')),
)
ACCOUNT_TAB_DICT = dict(ACCOUNT_TAB_CHOICES)
# Tabs available on the home activity feed.
HOME_TAB_CHOICES = (
    ('following', _('Following')),
    ('all', _('All')),
    ('hot', _('Hot')),
)
# BUGFIX: this line previously re-assigned ACCOUNT_TAB_DICT (copy/paste
# error); it should define the lookup dict for HOME_TAB_CHOICES.
HOME_TAB_DICT = dict(HOME_TAB_CHOICES)
# Icon name for each account tab.
ACCOUNT_TAB_ICONS = (
    ('deck', 'deck'),
    ('album', 'album'),
    ('teams', 'more'),
    ('events', 'event'),
    ('wishlist', 'star'),
    ('presentbox', 'present'),
)
ACCOUNT_TAB_ICONS_DICT = dict(ACCOUNT_TAB_ICONS)

# Sentence templates describing center skills; '{}' receives attribute name(s).
CENTER_SKILL_SENTENCES = {
    'Power': _('{} increases slightly (+3%)'),
    'Heart': _('{} increases (+6%)'),
    'Star': _('{} increases (+7%)'),
    'UR': _('{} increases drastically (+9%)'),
    # NOTE(review): the trailing '()' looks like an unfinished placeholder
    # for the bonus percentage -- confirm against the code formatting it.
    'differentUR': _('{} increases drastically based on {} ()'),
}
EXTRA_CENTER_SKILL_SENTENCE = _(u' and {type} members {attribute} points up by {points}%')
# Scope of the extra center-skill bonus.
CENTER_SKILL_TYPE_CHOICES = [
    ('main_unit', _(u'Main Unit')),
    ('sub_unit', _(u'Sub Unit')),
    ('year', _(u'Year')),
]
# Extra bonus percentage keyed by (rarity, scope).
CENTER_EXTRA_POINTS = {
    ('SSR', 'main_unit'): 1,
    ('SSR', 'sub_unit'): 2,
    ('SSR', 'year'): 2,
    ('UR', 'main_unit'): 3,
    ('UR', 'sub_unit'): 6,
    ('UR', 'year'): 6,
}
# Attribute boosted by each named UR center skill.
CENTER_SKILL_UR = {
    'Princess': 'Smile',
    'Angel': 'Pure',
    'Empress': 'Cool',
}
# Presumably listed so these skill names get picked up for translation -- verify.
CENTER_SKILL_TRANSLATE = [_('Princess'), _('Angel'), _('Empress'), _('Power'), _('Heart'), _('Star')]

# Reaction sentence for a trivia score from 0 to 10 (index == score).
TRIVIA_SCORE_SENTENCES = [
    _('Ouch!'),
    _('Oh no...'),
    _('Oh no...'),
    _('Oh no...'),
    _('Meh.'),
    _('Meh.'),
    _('Not bad!'),
    _('Not bad!'),
    _('Yay~'),
    _('Awesome!'),
    _('Woohoo!'),
]

# How an owned card was obtained.
OWNEDCARD_ORIGIN_10 = 0
OWNEDCARD_ORIGIN_SOLO = 1
OWNEDCARD_ORIGIN_VOUCHER = 2
OWNEDCARD_ORIGIN_EVENT = 3
OWNEDCARD_ORIGIN_SHOP = 4
OWNEDCARD_ORIGIN_TICKET = 5
OWNEDCARD_ORIGIN_LIVE = 6
OWNEDCARD_ORIGIN_LBONUS = 7
OWNEDCARD_ORIGIN_CHOICES = (
    (OWNEDCARD_ORIGIN_10, _('Honor Scouting (10+1, 50 love gems)')),
    (OWNEDCARD_ORIGIN_SOLO, _('Solo Yolo (5 love gems)')),
    (OWNEDCARD_ORIGIN_TICKET, _('Scouting Ticket')),
    (OWNEDCARD_ORIGIN_VOUCHER, _('Vouchers (blue tickets)')),
    (OWNEDCARD_ORIGIN_EVENT, _('Event Reward')),
    (OWNEDCARD_ORIGIN_SHOP, _('Sticker Shop')),
    (OWNEDCARD_ORIGIN_LIVE, _('At the end of a live')),
    (OWNEDCARD_ORIGIN_LBONUS, _('Login Bonus')),
)
def triviaScoreToSentence(score):
    """Reaction sentence for a trivia score (0-10)."""
    return TRIVIA_SCORE_SENTENCES[score]
def verifiedToString(val):
    """Translated label for a verified level (accepts int or numeric string)."""
    return VERIFIED_DICT[int(val)]
def verifiedUntranslatedToString(val):
    """Untranslated label for a verified level (accepts int or numeric string)."""
    return VERIFIED_UNTRANSLATED_DICT[int(val)]
def verificationStatusToString(val):
    """Translated label for a verification status code."""
    return VERIFICATION_STATUS_DICT[val]
def verificationUntranslatedStatusToString(val):
    """Untranslated label for a verification status code."""
    return VERIFICATION_UNTRANSLATED_STATUS_DICT[val]
def staffPermissionToString(val):
    """Translated label for a staff permission key."""
    return STAFF_PERMISSIONS_DICT[val]
def reportStatusToString(val):
    """Label for a moderation report status code."""
    return MODERATION_REPORT_STATUS_DICT[val]
def activityMessageToString(val):
    """Translated format string for an activity message key."""
    return ACTIVITY_MESSAGE_DICT[val]
def playWithToString(val):
    """Translated label for a play-style key."""
    return PLAYWITH_DICT[val]
def playWithToIcon(val):
    """Icon name for a play-style key."""
    return PLAYWITH_ICONS_DICT[val]
def storedChoiceToString(stored):
    """Translated label for a stored-location key, or None when unknown.

    Simplified: look up in the precomputed STORED_DICT instead of scanning
    STORED_CHOICES linearly; behavior (including the None fallback) is
    unchanged.
    """
    return STORED_DICT.get(stored)
def linkTypeToString(val):
    """Display name for a profile-link service key."""
    return LINK_DICT[val]
def accountTabToString(val):
    """Translated label for an account tab key."""
    return ACCOUNT_TAB_DICT[val]
def accountTabToIcon(val):
    """Icon name for an account tab key (case-insensitive)."""
    return ACCOUNT_TAB_ICONS_DICT[val.lower()]
def statusToString(val):
    """Translated label for a supporter/staff status key."""
    return STATUS_DICT[val]
def statusToColor(status):
    """CSS color for a supporter/staff status badge ('' when unknown)."""
    colors = {
        'SUPPORTER': '#4a86e8',
        'LOVER': '#ff53a6',
        'AMBASSADOR': '#a8a8a8',
        'PRODUCER': '#c98910',
        'DEVOTEE': '#c98910',
        'STAFF': 'rgb(246, 221, 83)',
        'DATABASE': 'green',
    }
    return colors.get(status, '')
def statusToColorString(status):
    """Translated color name for a status badge ('' when unknown)."""
    names = {
        'SUPPORTER': _('blue'),
        'LOVER': _('pink'),
        'AMBASSADOR': _('shiny Silver'),
        'PRODUCER': _('shiny Gold'),
        'DEVOTEE': _('shiny Gold'),
    }
    return names.get(status, '')
def idolToColor(idol_name):
    """Theme color for an idol from raw_information, '#ccc' when unknown."""
    if idol_name not in raw_information:
        return '#ccc'
    return raw_information[idol_name]['color']
def rarityToString(val):
    """Translated label for a rarity code ('N', 'R', 'SR', 'SSR', 'UR')."""
    return RARITY_DICT[val]
def japanese_attribute(attribute):
    """Japanese (katakana) name for a card attribute; u'❤' for anything else."""
    return {
        'Smile': u'スマイル',
        'Pure': u'ピュア',
        'Cool': u'クール',
    }.get(attribute, u'❤')
def _cardImageName(pattern):
    """Return a namer that fills *pattern* via str.format from a card info
    dict (must contain 'id', and 'firstname' where the pattern uses it)."""
    return lambda info: pattern.format(**info)

# Maps each card image field to a function building its filename from the
# card's info dictionary.
cardsImagesToName = {
    'card_image': _cardImageName(u'{id}{firstname}.png'),
    'card_idolized_image': _cardImageName(u'{id}idolized{firstname}.png'),
    'english_card_image': _cardImageName(u'{id}{firstname}.png'),
    'english_card_idolized_image': _cardImageName(u'{id}idolized{firstname}.png'),
    'round_card_image': _cardImageName(u'{id}Round{firstname}.png'),
    'round_card_idolized_image': _cardImageName(u'{id}RoundIdolized{firstname}.png'),
    'english_round_card_image': _cardImageName(u'{id}Round{firstname}.png'),
    'english_round_card_idolized_image': _cardImageName(u'{id}RoundIdolized{firstname}.png'),
    'transparent_image': _cardImageName(u'{id}Transparent.png'),
    'transparent_idolized_image': _cardImageName(u'{id}idolizedTransparent.png'),
    'clean_ur': _cardImageName(u'{id}{firstname}CleanUR.png'),
    'clean_ur_idolized': _cardImageName(u'{id}{firstname}CleanURIdolized.png'),
}
def event_EN_upload_to(instance, filename):
    """Build the storage path for an event's English (worldwide) image.

    The path combines the event's English name (falling back to the
    Japanese name), a 16-character random suffix, and the uploaded file's
    original extension, under 'events/EN/'.
    """
    extension = os.path.splitext(filename)[1]
    event_name = instance.english_name if instance.english_name else instance.japanese_name
    return 'events/EN/' + event_name + randomString(16) + extension
class Event(ExportModelOperationsMixin('Event'), models.Model):
    """An in-game event with separate Japanese and English (worldwide) runs.

    The Japanese name is the canonical unique identifier.  The unprefixed
    date fields are the Japanese run; "english_*" fields are the worldwide
    run.  Tier fields (t1/t2/t3) store the point and rank cutoffs per region.
    """
    # Names: japanese_name is unique and indexed; the others are optional.
    japanese_name = models.CharField(max_length=100, unique=True, db_index=True)
    romaji_name = models.CharField(max_length=100, blank=True, null=True)
    english_name = models.CharField(max_length=100, blank=True, null=True)
    # Japanese run window.
    beginning = models.DateTimeField(blank=True, null=True)
    end = models.DateTimeField(blank=True, null=True)
    # Worldwide (English) run window.
    english_beginning = models.DateTimeField(blank=True, null=True)
    english_end = models.DateTimeField(blank=True, null=True)
    # Tier cutoffs: points and rank per tier, per region.
    english_t1_points = models.PositiveIntegerField(null=True, blank=True)
    english_t1_rank = models.PositiveIntegerField(null=True, blank=True)
    english_t2_points = models.PositiveIntegerField(null=True, blank=True)
    english_t2_rank = models.PositiveIntegerField(null=True, blank=True)
    english_t3_points = models.PositiveIntegerField(null=True, blank=True)
    english_t3_rank = models.PositiveIntegerField(null=True, blank=True)
    japanese_t1_points = models.PositiveIntegerField(null=True, blank=True)
    japanese_t1_rank = models.PositiveIntegerField(null=True, blank=True)
    japanese_t2_points = models.PositiveIntegerField(null=True, blank=True)
    japanese_t2_rank = models.PositiveIntegerField(null=True, blank=True)
    japanese_t3_points = models.PositiveIntegerField(null=True, blank=True)
    japanese_t3_rank = models.PositiveIntegerField(null=True, blank=True)
    note = models.CharField(max_length=200, null=True, blank=True)
    # Images: the English image gets a randomized path via event_EN_upload_to.
    image = models.ImageField(upload_to='events/', null=True, blank=True)
    english_image = models.ImageField(upload_to=event_EN_upload_to, null=True, blank=True)
    def is_japan_current(self):
        """True while the Japanese run is ongoing (both dates must be set)."""
        return (self.beginning is not None
                and self.end is not None
                and timezone.now() > self.beginning
                and timezone.now() < self.end)
    def is_world_current(self):
        """True while the worldwide run is ongoing (both dates must be set)."""
        return (self.english_beginning is not None
                and self.english_end is not None
                and timezone.now() > self.english_beginning
                and timezone.now() < self.english_end)
    def did_happen_world(self):
        """True once the worldwide run has ended."""
        return (self.english_beginning is not None
                and self.english_end is not None
                and timezone.now() > self.english_end)
    def did_happen_japan(self):
        """True once the Japanese run has ended."""
        return (self.beginning is not None
                and self.end is not None
                and timezone.now() > self.end)
    def soon_happen_world(self):
        """True in the 60 days leading up to the worldwide run's start."""
        return (self.english_beginning is not None
                and self.english_end is not None
                and timezone.now() > (self.english_beginning - relativedelta(days=60))
                and timezone.now() < self.english_beginning)
    def soon_happen_japan(self):
        """True in the 60 days leading up to the Japanese run's start."""
        return (self.beginning is not None
                and self.end is not None
                and timezone.now() > (self.beginning - relativedelta(days=60))
                and timezone.now() < self.beginning)
    def __unicode__(self):
        # Display events by their canonical (Japanese) name.
        return self.japanese_name
# Make events editable through the Django admin site.
admin.site.register(Event)
class Idol(ExportModelOperationsMixin('Idol'), models.Model):
name = models.CharField(max_length=100, unique=True)
japanese_name | |
"""
sparse tables
=============
Might look like this:
level 1 level 2 level 3
columns columns columns
idx a b c d e f idx g h i j k l idx m n o p
___ _ _ _ _ _ _ ___ _ _ _ _ _ _
|_0_|_|_|_|_|_|_||_0_|_|_|_|_|_|_|
|_1_|_|_|_|_|_|_|
|_2_|_|_|_|_|_|_| ___ _ _ _ _ _ _
|_3_|_|_|_|_|_|_||_3_|_|_|_|_|_|_|
|_4_|_|_|_|_|_|_||_4_|_|_|_|_|_|_| ___ _ _ _ _
|_5_|_|_|_|_|_|_||_5_|_|_|_|_|_|_||_5_|_|_|_|_|
|_6_|_|_|_|_|_|_|
|_7_|_|_|_|_|_|_|
|_8_|_|_|_|_|_|_| ___ _ _ _ _ _ _
|_9_|_|_|_|_|_|_||_9_|_|_|_|_|_|_|
|10_|_|_|_|_|_|_||10_|_|_|_|_|_|_|
|11_|_|_|_|_|_|_| ___ _ _ _ _ _ _ ___ _ _ _ _
|12_|_|_|_|_|_|_||12_|_|_|_|_|_|_||12_|_|_|_|_|
|13_|_|_|_|_|_|_| ___ _ _ _ _ _ _
|14_|_|_|_|_|_|_||14_|_|_|_|_|_|_|
Can be represented in memory like this:
level 1 level 2 level 3
columns columns columns
idx a b c d e f idx g h i j k l idx m n o p
___ _ _ _ _ _ _ ___ _ _ _ _ _ _ ___ _ _ _ _
|_0_|_|_|_|_|_|_| |_0_|_|_|_|_|_|_| |_5_|_|_|_|_|
|_1_|_|_|_|_|_|_| |_3_|_|_|_|_|_|_| |12_|_|_|_|_|
|_2_|_|_|_|_|_|_| |_4_|_|_|_|_|_|_|
|_3_|_|_|_|_|_|_| |_9_|_|_|_|_|_|_|
|_4_|_|_|_|_|_|_| |10_|_|_|_|_|_|_|
|_6_|_|_|_|_|_|_| |12_|_|_|_|_|_|_|
|_7_|_|_|_|_|_|_| |14_|_|_|_|_|_|_|
|_8_|_|_|_|_|_|_|
|_9_|_|_|_|_|_|_|
|10_|_|_|_|_|_|_|
|11_|_|_|_|_|_|_|
|12_|_|_|_|_|_|_|
|13_|_|_|_|_|_|_|
|14_|_|_|_|_|_|_|
Written to tape-archive
table.tar
|_ level_1/idx
|_ level_1/column_a
|_ level_1/column_b
|_ level_1/column_c
|_ level_1/column_d
|_ level_1/column_e
|_ level_1/column_f
|_ level_2/idx
|_ level_2/column_g
|_ level_2/column_h
|_ level_2/column_i
|_ level_2/column_j
|_ level_2/column_k
|_ level_2/column_l
|_ level_3/idx
|_ level_3/column_m
|_ level_3/column_n
|_ level_3/column_o
|_ level_3/column_p
"""
import pandas as pd
import numpy as np
import tarfile
import io
import shutil
import tempfile
import os
# Name and on-disk dtype of the mandatory index column present in every level.
IDX = "idx"
IDX_DTYPE = "<u8"
# Separator between level name and column name, used both for flat column
# keys and for the member paths inside the tape-archive.
LEVEL_COLUMN_DELIMITER = "/"
# Tar member name template: "<level>/<column>.<dtype>".
# NOTE(review): "FILEAME" looks like a typo for "FILENAME", but renaming the
# constant would break any code importing it, so it is kept as-is.
FILEAME_TEMPLATE = "{:s}" + LEVEL_COLUMN_DELIMITER + "{:s}.{:s}"
# Little-endian numpy dtype strings permitted for table columns.
DTYPES = [
    "<u1",
    "<u2",
    "<u4",
    "<u8",
    "<i1",
    "<i2",
    "<i4",
    "<i8",
    "<f2",
    "<f4",
    "<f8",
]
# logical operations
# ==================
def intersection(list_of_lists_of_indices):
    """
    Returns the common indices among the lists of indices.

    The result is unique and sorted (the semantics of np.intersect1d).

    Parameters
    ----------
    list_of_lists_of_indices : list of lists
        Must contain at least one list of indices.

    Example
    -------
    [4, 5, 6] = intersection([[1,2,3,4,5,6], [3,4,5,6,7,8], [4,5,6,7,8,9,10]])
    """
    # np.unique reproduces what the old self-intersection of the first list
    # achieved (unique + sorted) without the redundant intersect1d call.
    inter = np.unique(list_of_lists_of_indices[0])
    # Fold in only the remaining lists instead of looping over all indices
    # with range(len(...)), which re-intersected the first list with itself.
    for indices in list_of_lists_of_indices[1:]:
        inter = np.intersect1d(inter, indices)
    return inter
def cut_level_on_indices(level, indices, column_keys=None):
    """
    Returns a level (recarray) only containing the row-indices in 'indices'.

    Parameters
    ----------
    level : recarray
        A level in a sparse table.
    indices : list
        The row-indices to be written to the output-level.
    column_keys : list of strings (None)
        When specified, only these columns will be in the output-level.
        The index column IDX is always included.
    """
    if column_keys is None:
        column_keys = list(level.dtype.names)
    else:
        # Work on a copy: the previous implementation appended IDX to the
        # caller's list, mutating the argument as a side effect.
        column_keys = list(column_keys)
    if IDX not in column_keys:
        column_keys.append(IDX)
    _part = {column_key: level[column_key] for column_key in column_keys}
    part_df = pd.DataFrame(_part)
    del _part
    # Inner-merge on the index column keeps exactly the requested rows.
    common_df = pd.merge(
        part_df,
        pd.DataFrame(dict_to_recarray({IDX: indices})),
        on=IDX,
        how="inner",
    )
    del part_df
    return common_df.to_records(index=False)
def cut_table_on_indices(table, common_indices, level_keys=None):
    """
    Returns table but only with the rows listed in common_indices.

    Parameters
    ----------
    table : dict of recarrays.
        The sparse numeric table.
    common_indices : list of indices
        Only rows with these indices appear in the output-table.
    level_keys : list of strings (None)
        When provided, the output-table will only contain these levels.
    """
    selected_levels = list(table.keys()) if level_keys is None else level_keys
    return {
        level_key: cut_level_on_indices(
            level=table[level_key], indices=common_indices,
        )
        for level_key in selected_levels
    }
def sort_table_on_common_indices(table, common_indices):
    """
    Returns a table with all row-indices ordered same as common_indices.

    Parameters
    ----------
    table : dict of recarrays.
        The table. But must be rectangular, i.e. not sparse.
    common_indices : list of indices
        The row-indices to sort by.
    """
    # Accept plain lists (as documented); .shape below needs an ndarray.
    common_indices = np.asarray(common_indices)
    common_order_args = np.argsort(common_indices)
    # Inverse permutation that maps sorted positions back to the requested
    # order.  np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int (platform default integer) is the drop-in replacement.
    common_inv_order = np.zeros(shape=common_indices.shape, dtype=int)
    common_inv_order[common_order_args] = np.arange(len(common_indices))
    del common_order_args
    out = {}
    for level_key in table:
        level = table[level_key]
        # Sort the level by its index column, then undo the sort with the
        # inverse permutation so rows line up with common_indices' order.
        level_order_args = np.argsort(level[IDX])
        level_sorted = level[level_order_args]
        del level_order_args
        level_same_order_as_common = level_sorted[common_inv_order]
        out[level_key] = level_same_order_as_common
    return out
def cut_and_sort_table_on_indices(table, common_indices, level_keys=None):
    """
    Returns a table (rectangular, not sparse) restricted to the rows in
    common_indices, ordered exactly like common_indices.

    Parameters
    ----------
    table : dict of recarrays.
        The sparse table.
    common_indices : list of indices
        The row-indices to cut on and sort by.
    level_keys : list of strings (None)
        When specified, only these levels will be in the output-table.
    """
    cut = cut_table_on_indices(
        table=table,
        common_indices=common_indices,
        level_keys=level_keys,
    )
    return sort_table_on_common_indices(
        table=cut, common_indices=common_indices
    )
def make_mask_of_right_in_left(left_indices, right_indices):
    """
    Returns a 0/1 mask over left_indices marking which entries also occur
    in right_indices.

    NOTE(review): assumes right_indices has no duplicates — duplicates
    would lengthen the left-merge result; confirm with callers.

    Parameters
    ----------
    left_indices : list of indices
    right_indices : list of indices

    Example
    -------
    [0, 1, 0, 0] = make_mask_of_right_in_left([1,2,3,4], [0,2,9])
    """
    merged_df = pd.merge(
        pd.DataFrame({IDX: left_indices}),
        pd.DataFrame({IDX: right_indices}),
        on=IDX,
        how="left",
        indicator=True,
    )
    # "_merge" is "both" exactly where a left index was matched on the right.
    return np.array(merged_df["_merge"] == "both", dtype=np.int64)
def make_rectangular_DataFrame(table):
    """
    Returns a pandas.DataFrame made from a table.

    The table must already be rectangular (not sparse) and every level's
    rows must share the same index ordering; the shared IDX column is
    cross-checked between levels.  Non-index columns are flattened to
    "<level>/<column>" keys.
    """
    out = {}
    for level_key in table:
        level = table[level_key]
        for column_key in level.dtype.names:
            if column_key == IDX:
                if IDX not in out:
                    out[IDX] = level[IDX]
                else:
                    # All levels must agree on the row ordering.
                    np.testing.assert_array_equal(out[IDX], level[IDX])
            else:
                flat_key = "{:s}{:s}{:s}".format(
                    level_key, LEVEL_COLUMN_DELIMITER, column_key
                )
                out[flat_key] = level[column_key]
    return pd.DataFrame(out)
# assertion
# =========
def _assert_same_keys(keys_a, keys_b):
"""
Asserts that two lists contain the same items, but order does not matter.
"""
uni_keys = list(set(keys_a + keys_b))
for key in uni_keys:
assert key in keys_a and key in keys_b, "Key: {:s}".format(key)
def assert_tables_are_equal(table_a, table_b):
    """Assert both tables have the same levels, columns, dtypes and values."""
    _assert_same_keys(list(table_a.keys()), list(table_b.keys()))
    for level_key in table_a:
        level_a = table_a[level_key]
        level_b = table_b[level_key]
        _assert_same_keys(level_a.dtype.names, level_b.dtype.names)
        for column_key in level_a.dtype.names:
            assert (
                level_a.dtype[column_key] == level_b.dtype[column_key]
            )
            np.testing.assert_array_equal(
                x=level_a[column_key],
                y=level_b[column_key],
                err_msg="table[{:s}][{:s}]".format(level_key, column_key),
                verbose=True,
            )
def assert_table_has_structure(table, structure):
    """
    Asserts that 'table' contains every level and column declared in
    'structure', that every level carries the mandatory IDX column with
    dtype IDX_DTYPE, and that each declared column's dtype matches.

    Parameters
    ----------
    table : dict of recarrays
        The sparse table to check.
    structure : dict
        Maps level-key -> column-key -> {"dtype": numpy-dtype-string}.
    """
    for level_key in structure:
        assert (
            level_key in table
        ), "Expected level '{:s}' in table, but it is not.".format(level_key)
        assert IDX in table[level_key].dtype.names, (
            "Expected table[{:s}] to have column '{:s}', "
            "but it has not.".format(level_key, IDX)
        )
        # NOTE(review): this message is missing a space/newline before
        # "but" — cosmetic only, left unchanged here.
        assert IDX_DTYPE == table[level_key].dtype[IDX], (
            "Expected table[{:s}][{:s}].dtype == {:s}"
            "but actually it is {:s}.".format(
                level_key,
                IDX,
                str(IDX_DTYPE),
                str(table[level_key].dtype[IDX]),
            )
        )
        for column_key in structure[level_key]:
            assert column_key in table[level_key].dtype.names, (
                "Expected column '{:s}' in table's level '{:s}', "
                "but it is not.".format(column_key, level_key)
            )
            expected_dtype = structure[level_key][column_key]["dtype"]
            assert expected_dtype == table[level_key].dtype[column_key], (
                "Expected table[{level_key:s}][{column_key:s}].dtype "
                "== {expected_dtype:s}, "
                "but actually it is {actual_dtype:s}".format(
                    level_key=level_key,
                    column_key=column_key,
                    expected_dtype=str(expected_dtype),
                    actual_dtype=str(table[level_key].dtype[column_key]),
                )
            )
def dict_to_recarray(d):
    """Convert a dict of equal-length columns into a numpy record-array."""
    frame = pd.DataFrame(d)
    return frame.to_records(index=False)
def _assert_no_whitespace(key):
for char in key:
assert not str.isspace(
char
), "Key must not contain spaces, but key = '{:s}'".format(key)
def _assert_no_dot(key):
assert "." not in key, "Key must not contain '.', but key = '{:s}'".format(
key
)
def _assert_no_directory_delimeter(key):
assert "/" not in key, "Key must not contain '/', but key = '{:s}'".format(
key
)
assert (
"\\" not in key
), "Key must not contain '\\', but key = '{:s}'".format(key)
def _assert_key_is_valid(key):
    """Run all key checks: no whitespace, no dots, no path separators."""
    for check in (
        _assert_no_whitespace,
        _assert_no_dot,
        _assert_no_directory_delimeter,
    ):
        check(key)
def assert_structure_keys_are_valid(structure):
    """Assert level/column keys are valid, not IDX, and use allowed dtypes."""
    for level_key in structure:
        _assert_key_is_valid(level_key)
        level_structure = structure[level_key]
        for column_key in level_structure:
            # The index column is implicit; declaring it is an error.
            assert IDX != column_key
            _assert_key_is_valid(column_key)
            declared_dtype = level_structure[column_key]["dtype"]
            assert declared_dtype in DTYPES, (
                "Structure[{:s}][{:s}]['dtype'] = {:s} "
                "is not in DTYPES".format(
                    level_key,
                    column_key,
                    str(declared_dtype),
                )
            )
# input output
# ============
def _append_tar(tarfout, name, payload_bytes):
tarinfo = tarfile.TarInfo()
tarinfo.name = name
tarinfo.size = len(payload_bytes)
with io.BytesIO() as fileobj:
fileobj.write(payload_bytes)
fileobj.seek(0)
tarfout.addfile(tarinfo=tarinfo, fileobj=fileobj)
def write(path, table, structure=None):
    """
    Writes the table to path.

    The archive is first written to '<path>.tmp' and then moved into
    place, so a crash never leaves a half-written table at 'path'.

    parameters
    ----------
    path : string
        Path to be written to.
    table : dict of recarrays
        The sparse table.
    structure : dict (default: None)
        The structure of the table. If provided it is asserted that the
        table written has the provided structure.
    """
    if structure:
        assert_table_has_structure(table=table, structure=structure)
    tmp_path = path + ".tmp"
    with tarfile.open(tmp_path, "w") as tarfout:
        for level_key in table:
            level = table[level_key]
            # Every level must carry the mandatory index column.
            assert IDX in level.dtype.names
            for column_key in level.dtype.names:
                dtype_key = level.dtype[column_key].str
                member_name = FILEAME_TEMPLATE.format(
                    level_key, column_key, dtype_key
                )
                _append_tar(
                    tarfout=tarfout,
                    name=member_name,
                    payload_bytes=level[column_key].tobytes(),
                )
    shutil.move(tmp_path, path)
def _split_level_column_dtype(path):
    """Split a tar member path '<level>/<column>.<dtype>' into its parts."""
    level_key, remainder = path.split(LEVEL_COLUMN_DELIMITER)
    column_key, dtype_key = remainder.split(".")
    return level_key, column_key, dtype_key
def read(path, structure=None):
    """
    Returns table which is read from path.

    parameters
    ----------
    path : string
        Path to tape-archive in filesystem
    structure : dict (default: None)
        The structure of the table. If provided it is asserted that the
        table read has the provided structure.
    """
    columns_by_level = {}
    with tarfile.open(path, "r") as tarfin:
        for tarinfo in tarfin:
            level_key, column_key, dtype_key = _split_level_column_dtype(
                path=tarinfo.name
            )
            # The index column must always carry the canonical dtype.
            if column_key == IDX:
                assert dtype_key == IDX_DTYPE
            payload = tarfin.extractfile(tarinfo).read()
            columns_by_level.setdefault(level_key, {})[
                column_key
            ] = np.frombuffer(payload, dtype=dtype_key)
    out = {
        level_key: dict_to_recarray(columns)
        for level_key, columns in columns_by_level.items()
    }
    if structure:
        assert_table_has_structure(table=out, structure=structure)
    return out
# concatenate
# ===========
def _make_tmp_paths(tmp, structure):
tmp_paths = {}
for level_key in structure:
tmp_paths[level_key] = {}
idx_fname | |
0 0 0]
[8 2 0 0 0 0 0]
[8 4 4 4 4 4 4]]
Output:
[[8 0 0 0 0 0 2]
[8 0 0 0 0 2 0]
[8 0 0 0 2 0 0]
[8 0 0 2 0 0 0]
[8 0 2 0 0 0 0]
[8 2 0 0 0 0 0]
[8 4 4 4 4 4 4]]
Colour Encoding:
Black = 0, Dark Blue = 1, Red =2 , Green = 3 , Yellow = 4 , Grey = 5 , Pink = 6 , Orange = 7 , Sky Blue = 8 , Brown = 9
Algorithm:
The description of the task is, given a matrix with left most column filled completely with any colour (0-9), \
the goal is to fill the last row of the matrix with yellow colour (4), except the first cell in the row and \
fill the diagonal cells of the matrix from left bottom corner to top right corner with red colour (2) except the bottom left corner cell.
Implementation:
First the left most column colour is fetched for reference, then fill the last row of the matrix with yellow (4) \
and to get the indices of the diagonal cells of the matrix, since the diagonal needed is from bottom left corner to top right corner, \
the matrix is flipped upside down and diagonal cell indices (top left corner to bottom right corner) are fetched and filled with red (2) and inverted back to original position, \
now the left bottom most corner cell is filled with the reference colour which is fetched initially.
Results:
All the 3 train test cases and 1 testing test cases passed
"""
assert type(x) == np.ndarray
rows, columns = x.shape # fetching number of rows and columns in the given matrix for matrix manipulation
left_col_color = x[-1:,0:1].copy() # fetching the left most column of the matrix colour for reference
x[-1:,:] = 4 # filling the last row of the matrix with yellow(4)
di = np.diag_indices(rows) # fetching the diagonal cell indices
x_flip = np.flipud(x) # flipping the matrix
x_flip[di] = 2 # filling the digonal cells with red(2) top left corner to bottom right corner
x = np.flipud(x_flip) # on flipping back the array the top left corner would become bottom left corner, similarly botton right corner to top right corner
x[-1:,0:1] = left_col_color[0][0] # filling the left most bottom cell with reference colour which was changed during diagonal colouring
return x
################################################################################################################################
def solve_c1d99e64(x):
    """
    Task Description:
        The rows and/or columns of the input grid whose cells are all
        black (0) must be painted red (2); everything else is unchanged.

        Example input row/column pattern (8 = sky blue, 0 = black):
            all-black rows and all-black columns become all-red rows
            and columns in the output.

    Colour Encoding:
        Black = 0, Dark Blue = 1, Red = 2, Green = 3, Yellow = 4,
        Grey = 5, Pink = 6, Orange = 7, Sky Blue = 8, Brown = 9

    Implementation:
        Fetch all row indices ('rows_black') whose cells are all black(0)
        and likewise all column indices ('columns_black'); then fill those
        rows and those columns with red(2) using vectorised fancy
        indexing instead of per-index Python loops.  (The original
        docstring mistakenly said to iterate 'columns_black' twice.)

    Results:
        All the 3 train test cases and 1 testing test cases passed
    """
    assert type(x) == np.ndarray
    x_copy = x.copy()
    # Row/column is "black" when no cell in it is truthy (non-zero).
    rows_black = np.where(~x_copy.any(axis=1))[0]
    columns_black = np.where(~x_copy.any(axis=0))[0]
    # Vectorised fill: one assignment per axis replaces the index loops.
    x_copy[rows_black, :] = 2
    x_copy[:, columns_black] = 2
    return x_copy
################################################################################################################################
"""
## Summary on python features and libraries used:
Almost every task is solved using python numpy library since it involves several mathematical calculations, and excellent features of numpy is its performance, size and functionality, while performing some mathematical calculations, it is not necessary to loop through rows and columns (because it is vectorised), instead use the numpy built-in functions which facilitates the need with faster approach, some of the functions which are more commonly used in the tasks are (given below are basic syntax and can be extended further based on the requirement)
I. np.where ( x == value), where x is the 2D numpy array and y is the value of the cell.This function will return all the indices of the matrix x for the given value
II. np.argwhere(x) or np.argwhere(<x - condition>), where x is the 2D numpy array.This function will return all the indices whose value is non-zero and takes conditional input also.
III. np.unique(x), where x is the 2D numpy array.This function will return all the unique values available in the given matrix x.
IV. np.multiply(x1,x2), where x1 and x2 are 2D numpy array with same dimensions.This function will multiply the values of similar indices of both the matrix and return a matrix with multiplied values at respective position/cells.
V. np.flip(x, axis=0 or 1), where x is the 2D numpy array.This function flips the array either vertically or horizontally based on the axis value.
VI. np. diag_indices(n), where n is the dimension of n x n matrix.This function will return all the diagonal index for the matrix with size n x n
## Similarities:
1. In ARC, all the tasks are related to matrix manipulation and understanding the pattern or the relationship between the input and output of the | |
med_l:
tr = HTMLgen.TR()
tr.append(empty_data(2)) # cover for no enstore element
self.add_to_row(tr, item.keys()[0], item, outage_d, offline_d)
tr.append(empty_data(4)) # cover for no network & alarm elements
entable.append(tr)
# add any other information we need
if other_d:
cols = 8
num = 0
entable.append(empty_row(cols))
elems = other_d.keys()
elems.sort()
tr = HTMLgen.TR()
for elem in elems:
#fix up dict in order to use already existing function
d = {other_d[elem][0] : other_d[elem][2],
enstore_constants.URL : other_d[elem][1]}
self.add_to_row(tr, other_d[elem][0], d, outage_d, offline_d)
# added ball and link
num = num + 2
if num == cols:
entable.append(tr)
tr = HTMLgen.TR()
num = 0
else:
# fix up the end of the row
if num > 0:
cols_left = cols - num
tr.append(empty_data(cols_left))
entable.append(tr)
return entable
def get_time_row(self, dict):
# if the dictionary has a time key, then use the value in a row. else
# use "???"
theTime = dict.get(enstore_constants.TIME, "???")
return (HTMLgen.TR(HTMLgen.TD("(as of %s)"%(theTime,), colspan=8,
align="CENTER")))
def add_data(self, dict, keys, tr, out_dict, offline_dict):
if len(keys) > 0:
key = keys.pop(0)
tr.append(self.get_color_ball(dict, key, "RIGHT"))
# by putting something in the last parameter, we are telling the
# function to make the element it creates a link to the server status
# page
tr.append(self.get_element(dict, key, out_dict, offline_dict, "", 2))
else:
tr.append(empty_data(2))
return keys
    def make_server_table(self, dict, out_dict, offline_dict):
        """Build the per-server status table: generic servers, library
        managers and media changers side by side, then movers, then
        migrators, each group rendered four entries per row."""
        ignore = [enstore_constants.ENSTORE, enstore_constants.TIME,
                  enstore_constants.URL]
        entable = HTMLgen.TableLite(cellspacing=1, cellpadding=1, border=0,
                                    align="CENTER", width="90%")
        entable.append(HTMLgen.Caption(HTMLgen.Font(HTMLgen.Bold("Enstore Individual Server Status"),
                                                    size="+3", color=BRICKRED)))
        entable.append(empty_row(8))
        # add the individual column headings
        tr = HTMLgen.TR(empty_header())
        tr.append(HTMLgen.TH(HTMLgen.Font(HTMLgen.U("Servers"), size="+1",
                                          color=BRICKRED), align="RIGHT", colspan=2))
        tr.append(empty_header())
        for hdr in ["Library Managers", "Media Changers"]:
            tr.append(empty_header())
            tr.append(HTMLgen.TH(HTMLgen.Font(HTMLgen.U(hdr), size="+1",
                                              color=BRICKRED), align="LEFT"))
        entable.append(tr)
        # now split up the data into each column
        # Classify each server key into library managers (lm), movers (mv),
        # media changers (mc), migrators, or generic servers (gs).
        keys = sort_keys(dict)
        lm = {}
        mv = {}
        migrators = {}
        mc = {}
        gs = {}
        for key in keys:
            if not key in ignore:
                if enstore_functions2.is_library_manager(key):
                    lm[key] = dict[key]
                elif enstore_functions2.is_mover(key):
                    mv[key] = dict[key]
                elif enstore_functions2.is_media_changer(key):
                    mc[key] = dict[key]
                elif enstore_functions2.is_migrator(key):
                    migrators[key] = dict[key]
                else:
                    gs[key] = dict[key]
        # for/else: this branch always runs after the loop completes (no
        # break above); it lays out two generic servers, one library
        # manager and one media changer per table row until all are used.
        else:
            lm_keys = sort_keys(lm)
            mv_keys = sort_keys(mv)
            mc_keys = sort_keys(mc)
            migrator_keys = sort_keys(migrators)
            gs_keys = sort_keys(gs)
            while (len(lm_keys) + len(mc_keys) + len(gs_keys)) > 0:
                tr = HTMLgen.TR()
                gs_keys = self.add_data(gs, gs_keys, tr, out_dict, offline_dict)
                gs_keys = self.add_data(gs, gs_keys, tr, out_dict, offline_dict)
                lm_keys = self.add_data(lm, lm_keys, tr, out_dict, offline_dict)
                mc_keys = self.add_data(mc, mc_keys, tr, out_dict, offline_dict)
                entable.append(tr)
        # now add the mover information
        # Movers get their own section, four per row.
        entable.append(empty_row(8))
        entable.append(empty_row(8))
        tr = HTMLgen.TR(empty_header())
        tr.append(HTMLgen.TH(HTMLgen.Font(HTMLgen.U("Movers"), size="+1",
                                          color=BRICKRED), align="RIGHT", colspan=2))
        entable.append(tr)
        while len(mv_keys) > 0:
            tr = HTMLgen.TR()
            mv_keys = self.add_data(mv, mv_keys, tr, out_dict, offline_dict)
            mv_keys = self.add_data(mv, mv_keys, tr, out_dict, offline_dict)
            mv_keys = self.add_data(mv, mv_keys, tr, out_dict, offline_dict)
            mv_keys = self.add_data(mv, mv_keys, tr, out_dict, offline_dict)
            entable.append(tr)
        # now add the migrator information
        # Migrators likewise get their own four-per-row section.
        tr = HTMLgen.TR(empty_header())
        tr.append(HTMLgen.TH(HTMLgen.Font(HTMLgen.U("Migrators"), size="+1",
                                          color=BRICKRED), align="RIGHT", colspan=2))
        entable.append(tr)
        while len( migrator_keys) > 0:
            tr = HTMLgen.TR()
            migrator_keys = self.add_data(migrators, migrator_keys, tr, out_dict, offline_dict)
            migrator_keys = self.add_data(migrators, migrator_keys, tr, out_dict, offline_dict)
            migrator_keys = self.add_data(migrators, migrator_keys, tr, out_dict, offline_dict)
            migrator_keys = self.add_data(migrators, migrator_keys, tr, out_dict, offline_dict)
            entable.append(tr)
        return entable
def make_legend_table(self):
entable = HTMLgen.TableLite(border=0, align="CENTER")
tr = HTMLgen.TR()
for (ball, txt) in [(self.redball, "Major Problem"),
(self.yellowball, "Minor problem")]:
tr.append(HTMLgen.TD(ball))
tr.append(HTMLgen.TD(HTMLgen.Font(txt, size="-1")))
entable.append(tr)
tr = HTMLgen.TR()
for (ball, txt) in [(self.greenball, "All systems are operational"),
(self.question, "Situation under investigation")]:
tr.append(HTMLgen.TD(ball))
tr.append(HTMLgen.TD(HTMLgen.Font(txt, size="-1")))
entable.append(tr)
tr = HTMLgen.TR()
tr.append(HTMLgen.TD(self.checkmark))
tr.append(HTMLgen.TD(HTMLgen.Font("Scheduled outage", size="-1")))
tr.append(HTMLgen.TD(empty_data()))
tr.append(HTMLgen.TD(HTMLgen.Font(HTMLgen.Strike("Known Down"), size="-1"), colspan=2))
entable.append(tr)
return entable
    def make_node_server_table(self, dict):
        """Build the node -> servers mapping table: each node name is
        followed by a bulleted list of the servers running on it, laid
        out 'cols' cells wide."""
        entable = HTMLgen.TableLite(cellspacing=3, cellpadding=3, align="CENTER",
                                    border=2, width="90%", bgcolor=AQUA)
        entable.append(HTMLgen.Caption(HTMLgen.Font(HTMLgen.Bold("Enstore Node/Server Mapping"),
                                                    size="+3", color=BRICKRED)))
        cols = 4
        # arrange each node in a table on the page
        node_keys = dict.keys()
        node_keys.sort()
        tr = HTMLgen.TR()
        for node in node_keys:
            # check_row_length starts a fresh row once 'cols' cells are used.
            tr = check_row_length(tr, cols, entable)
            tr.append(HTMLgen.TD(HTMLgen.Bold(node)))
            servers = dict[node]
            servers.sort()
            l_list = HTMLgen.List()
            for server in servers:
                l_list.append(server)
            tr.append(HTMLgen.TD(l_list))
        # for/else: always runs after the loop; pad the final (possibly
        # partial) row with empty cells and append it.
        else:
            while len(tr) < cols:
                tr.append(empty_data())
            entable.append(tr)
        return entable
    def body(self, enstat_d, other_d, medstat_d, alarms, nodes, outage_d,
             offline_d, media_tag, status_file_name):
        """Assemble the full status-at-a-glance page: overall status,
        per-server status, node/server mapping and the legend."""
        # name of file that we will create links to
        self.status_file_name = status_file_name
        # create the outer table and its rows
        table = self.table_top()
        # add the table with the general status
        # fake this for now, remove when get this working
        # The passed-in medstat_d is ignored and rebuilt from media_tag,
        # marking each robot down when it appears in offline_d.
        medstat_d = {}
        mtags = media_tag.keys()
        for mtag in mtags:
            if offline_d.has_key(mtag):
                # mark the robot as down
                medstat_d[mtag] = [enstore_constants.DOWN, media_tag[mtag]]
            else:
                medstat_d[mtag] = [enstore_constants.UP, media_tag[mtag]]
        self.check_for_red(enstat_d, table)
        table.append(HTMLgen.TR(HTMLgen.TD(self.make_overall_table(enstat_d, other_d,
                                                                   medstat_d, alarms,
                                                                   outage_d,
                                                                   offline_d))))
        table_spacer(table)
        # add the table with the individual server status
        if enstat_d:
            table.append(HTMLgen.TR(HTMLgen.TD(self.make_server_table(enstat_d,
                                                                      outage_d,
                                                                      offline_d))))
            table_spacer(table)
        # add the table with the media info
        # (media table rendering is deliberately disabled for now)
        #if medstat_d:
            #table.append(HTMLgen.TR(HTMLgen.TD(self.make_media_table(medstat_d, outage_d))))
            #table_spacer(table)
        # add the section where nodes are reported and which servers run on them
        table.append(HTMLgen.TR(HTMLgen.TD(self.make_node_server_table(nodes))))
        table_spacer(table)
        # add the legend table
        table.append(HTMLgen.TR(HTMLgen.TD(self.make_legend_table())))
        self.trailer(table)
        self.append(table)
class EnSaagNetworkPage(EnSaagPage):
    """Status-at-a-glance page showing the state of each network
    interface, four nodes per row, relative to a base node."""
    def __init__(self, title="ENSTORE Network Status-At-A-Glance", gif="ess-aag.gif",
                 system_tag=""):
        EnBaseHtmlDoc.__init__(self, refresh=360, help_file="saagNetworkHelp.html",
                               system_tag=system_tag)
        self.title = title
        self.script_title_gif = gif
        self.source_server = THE_ENSTORE
        self.description = ""
    def make_network_table(self, dict, out_dict, offline_dict):
        """Build the per-interface status table from 'dict', skipping the
        bookkeeping keys in 'ignore'."""
        ignore = [enstore_constants.NETWORK, enstore_constants.BASENODE,
                  enstore_constants.TIME, enstore_constants.URL]
        entable = HTMLgen.TableLite(cellspacing=1, cellpadding=1, border=0,
                                    align="CENTER", width="90%")
        entable.append(HTMLgen.Caption(HTMLgen.Font(HTMLgen.Bold("Enstore Network Interface Status"),
                                                    size="+3", color=BRICKRED)))
        entable.append(empty_row())
        # now add the base node row
        base_node = dict.get(enstore_constants.BASENODE, UNKNOWN)
        entable.append(HTMLgen.TR(HTMLgen.TH(HTMLgen.Font("Base Node : %s"%(base_node,),
                                                          size="+1", color=BRICKRED),
                                             colspan=6, align="LEFT")))
        entable.append(empty_row())
        keys = sort_keys(dict)
        tr = HTMLgen.TR()
        counter = 0
        for key in keys:
            if not key in ignore:
                tr.append(self.get_color_ball(dict, key, "RIGHT"))
                tr.append(self.get_element(dict, key, out_dict, offline_dict))
                counter = counter + 1
                # only allow 4 nodes per row
                if counter == 4:
                    entable.append(tr)
                    counter = 0
                    tr = HTMLgen.TR()
        # for/else: always runs after the loop finishes (no break above).
        else:
            # if we did partly fill in the last row append it to the table
            if counter > 0:
                entable.append(tr)
        return entable
    def body(self, netstat_d, outage_d, offline_d):
        """Assemble the network page: interface table plus legend."""
        # create the outer table and its rows
        table = self.table_top()
        # add the table with the network info
        if netstat_d:
            table.append(HTMLgen.TR(HTMLgen.TD(self.make_network_table(netstat_d,
                                                                       outage_d,
                                                                       offline_d))))
            table_spacer(table)
        # add the legend table
        table.append(HTMLgen.TR(HTMLgen.TD(self.make_legend_table())))
        self.trailer(table)
        self.append(table)
class EnStatusOnlyPage(EnSaagPage):
def __init__(self, title="Mass Storage Production System's Status",
gif="en_all.gif"):
print "in EnStatusOnlyPage"
sys.stdout.flush()
EnBaseHtmlDoc.__init__(self, refresh=370, background="enstore_background.gif")
print "EnBaseHtmlDoc__init__done"
sys.stdout.flush()
self.title = title
self.script_title_gif = gif
self.source_server = THE_ENSTORE
self.description = ""
def table_top(self, cols=1):
# create the outer table and its rows
table = HTMLgen.TableLite(cellspacing=0, cellpadding=0,
align="LEFT", width="800")
tr = empty_row(3)
self.script_title(tr)
table.append(tr)
# only add this info if we know it
if self.source_server:
table.append(self.add_source_server(3))
tr = HTMLgen.TR(empty_data(3))
tr.append(self.add_last_updated())
table.append(tr)
table.append(empty_row(4))
table.append(HTMLgen.TR(HTMLgen.TD(HTMLgen.HR(size=2, noshade=1), colspan=4)))
table.append(empty_row(4))
return table
def add_to_row(self, tr, val, dict, outage_d, offline_d, alt_txt=None):
tr.append(self.get_color_ball(dict, val, "RIGHT"))
if not alt_txt:
txt = HTMLgen.Font(val, size="+2")
else:
txt = HTMLgen.Font(alt_txt, size="+2")
if dict.has_key(enstore_constants.URL):
txt = HTMLgen.Href(dict[enstore_constants.URL], txt)
td = self.get_element(dict, val, outage_d, offline_d, txt)
td.colspan = 2
tr.append(td)
def add_status_row(self, estatus, txt, table):
# this list has the following elements -
# enstore status
# time status obtained
# txt if known down, otherwise a -1
# txt if scheduled outage, otherwise a -1
# value overridden to, otherwise a -1
# web server address
status, eftime, offline, outage, override, web_address = estatus
# first check the time. if it is 10 minutes different than ours, enstore is
# assumed to be down
etime = enstore_functions2.unformat_time(eftime)
if time.time() - etime > 600 and override == enstore_constants.ENONE:
# the time is too far off, make the enstore ball red
status = enstore_constants.DOWN
# use the functions already provided so, need to format things a little
if offline == enstore_constants.ENONE:
offline_d = {}
else:
offline_d = {enstore_constants.ENSTORE : offline}
if outage == enstore_constants.ENONE:
outage_d = {}
else:
outage_d = {enstore_constants.ENSTORE : outage}
enstat_d = {enstore_constants.ENSTORE : status}
if not web_address == enstore_constants.ENONE:
enstat_d[enstore_constants.URL] = "%s/enstore/%s"%(web_address,
enstore_constants.SAAGHTMLFILE)
if not self.check_for_red(enstat_d, table, 0):
tr = HTMLgen.TR(empty_data())
self.add_to_row(tr, enstore_constants.ENSTORE, enstat_d, outage_d, offline_d,
txt)
table.append(tr)
else:
# enstore ball is red need to identify it better on the page as to which one
txt = HTMLgen.Font(txt, size="+2", color=BRICKRED)
if enstat_d.has_key(enstore_constants.URL):
txt = HTMLgen.Href(enstat_d[enstore_constants.URL], txt)
table.append(HTMLgen.TR(HTMLgen.TD(txt, colspan=4, align="center")))
def body(self, status_d, txt_d):
    """Build the page body: one status row per system, then legend/trailer."""
    table = self.table_top()
    # One row per system, in sorted key order, each followed by a spacer.
    for key in sorted(status_d.keys()):
        label = " %s (%s)"%(txt_d[key], status_d[key][1])
        self.add_status_row(status_d[key], label, table)
        table.append(empty_row(4))
    # The legend goes underneath all of the status rows.
    table_spacer(table, 4)
    table.append(HTMLgen.TR(HTMLgen.TD(self.make_legend_table(), colspan=4)))
    self.trailer(table, 4)
    self.append(table)
class EnSGIngestPage(EnBaseHtmlDoc):
def __init__(self, dir, refresh=0, system_tag=""):
    # Page showing tape ingest rates broken down by storage group.
    EnBaseHtmlDoc.__init__(self, refresh=refresh,
                           system_tag=system_tag)
    self.align = NO  # NOTE(review): NO is a module-level constant; presumably disables page alignment — confirm
    self.title = "Tape Ingest Rates by Storage Group"
    self.web_dir = dir  # root of the web tree where the plot images live
def body(self):
plots='burn-rate'
plots_dir = os.path.join(self.web_dir,plots)
if not os.path.exists(plots_dir):
os.makedirs(plots_dir)
os.system("cp ${ENSTORE_DIR}/etc/*.gif %s"%(plots_dir))
stamps=[]
images=[]
files=os.listdir(plots_dir)
for filename in files:
if filename.find("_stamp.jpg") > 0:
stamps.append(filename)
if filename.find(".jpg") > 0 and filename.find("_stamp") == -1:
images.append(filename)
libraries = {}
for stamp in stamps:
sep ="_"
tape_sg = stamp.split("_")[0]
a = stamp.split("_")
b= []
for i in a:
if i.find("stamp") == -1:
b.append(i)
if len(b) > 1:
a=sep.join(b)
else:
a=b[0]
s1 ="."
image = s1.join((a, "jpg"))
if len(image.split(".")) == 3:
lib = image.split(".")[0]
| |
<reponame>onlyrico/AliceMind<gh_stars>1-10
# coding=utf-8
# Copyright 2021 The Alibaba DAMO NLP Team Authors.
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) 2018, <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle
import modeling
import modeling_labert
import optimization
import tokenization
import tokenization_labert
import tensorflow as tf
import numpy as np
import tf_metrics
import random
import shutil
import collections
from loss_logging_hook import LossLoggingHook
from best_checkpoint_copyer import BestCheckpointCopier
# Expected data file names inside FLAGS.data_dir.
TRAIN_FILE_NAME = "train.txt"
DEV_FILE_NAME = "dev.txt"
TEST_FILE_NAME = "test.txt"
UNLABELED_FILE_NAME = "unlabeled.txt"

# TF1-style command-line flag plumbing.
flags = tf.flags
FLAGS = flags.FLAGS

## Required parameters
flags.DEFINE_string(
    "data_dir", None,
    "The input data dir. Should contain the .txt files (or other data files) "
    "for the task.")
flags.DEFINE_string(
    "labert_config_file", None,
    "The config json file corresponding to the pre-trained LaBERT model. "
    "This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
                    "The vocabulary file that the LaBERT model was trained on.")
flags.DEFINE_string("lexicon_file", None,
                    "The lexicon file that the LaBERT model was trained on.")
flags.DEFINE_boolean(
    "use_named_lexicon", False,
    "The lexicon file is named (say in the format of {entry}\t{name}).")
flags.DEFINE_string(
    "output_dir", None,
    "The output directory where the model checkpoints will be written.")

## Other parameters
flags.DEFINE_string(
    "init_checkpoint", None,
    "Initial checkpoint (usually from a pre-trained LaBERT model).")
flags.DEFINE_string(
    "label_file", None,
    "The pickle of labels.")
flags.DEFINE_bool(
    "do_lower_case", True,
    "Whether to lower case the input text. Should be True for uncased "
    "models and False for cased models.")
flags.DEFINE_integer(
    "max_seq_length", 128,
    "The maximum total input sequence length after WordPiece tokenization. "
    "Sequences longer than this will be truncated, and sequences shorter "
    "than this will be padded.")
flags.DEFINE_integer("random_seed", 12345, "Random seed for data generation.")
# Run-mode switches.
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
    "do_predict", False,
    "Whether to run the model in inference mode on the test set.")
# Batch sizes and optimizer hyper-parameters.
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predictions.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 10.0,
                   "Total number of training epochs to perform.")
flags.DEFINE_float(
    "lr_layer_decay_rate", 1.0,
    "Top layer: lr[L] = FLAGS.learning_rate. Lower layers: lr[l-1] = lr[l] * lr_layer_decay_rate.")
flags.DEFINE_float(
    "warmup_proportion", 0.1,
    "Proportion of training to perform linear learning rate warmup for. "
    "E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("iterations_per_loop", 1000,
                     "How many steps to make in each estimator call.")
flags.DEFINE_float("adam_beta1", 0.9, "The beta1 for adam.")
flags.DEFINE_float("adam_beta2", 0.999, "The beta2 for adam.")
flags.DEFINE_float("adam_epsilon", 1e-6, "The epsilon for adam.")
flags.DEFINE_bool("use_as_feature", False, "Specific to use bert as feature.")
flags.DEFINE_bool("do_adversarial_train", False, "Do adversarial training [SMART algorithm].")
flags.DEFINE_string("predict_output", "test_results.tsv",
                    "predict_output file name")
class InputExample(object):
    """A single sequence-labeling example.

    Stores its constructor arguments verbatim.
    """

    def __init__(self, guid, words, labels=None):
        # guid: unique example id, e.g. "train-12" (set by _create_examples).
        # words: list of token strings.
        # labels: list of label strings (one per word), or None for
        #   test/unlabeled examples.
        self.guid = guid
        self.words = words
        self.labels = labels
class PaddingInputExample(object):
    """Marker class with no state of its own.

    NOTE(review): the original docstring was empty. In the standard BERT
    recipe instances of this class mark fake examples used to pad a batch
    to a fixed size — confirm at the call sites (not visible in this chunk).
    """
class InputFeatures(object):
    """A single set of features of data."""

    def __init__(self,
                 input_ids,
                 input_mask,
                 segment_ids,
                 positional_embeddings_start,
                 positional_embeddings_end,
                 label_positions,
                 label_ids,
                 label_weights):
        # All arguments are stored verbatim; no processing happens here.
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        # Start/end position ids consumed by LaBERT's transformer_model via
        # its position_embeddings_ids argument (see compute_adv_loss).
        self.positional_embeddings_start = positional_embeddings_start
        self.positional_embeddings_end = positional_embeddings_end
        # Sequence positions where labels are predicted, the label ids, and
        # per-position weights. NOTE(review): compute_adv_loss uses the
        # weights in a weighted average, which suggests 0 marks padding —
        # confirm where features are built.
        self.label_positions = label_positions
        self.label_ids = label_ids
        self.label_weights = label_weights
def gather_indexes(sequence_tensor, positions):
    """Gathers the vectors at the specific positions over a minibatch."""
    batch_size, seq_length, width = modeling.get_shape_list(
        sequence_tensor, expected_rank=3)
    # Turn per-example positions into offsets within the flattened batch:
    # row b contributes an offset of b * seq_length.
    row_offsets = tf.reshape(
        tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
    flat_positions = tf.reshape(positions + row_offsets, [-1])
    flat_sequence = tf.reshape(sequence_tensor,
                               [batch_size * seq_length, width])
    return tf.gather(flat_sequence, flat_positions)
def stable_ce_kl(logit, target, epsilon=1e-6):
    # stable kl:
    # Input shape:
    #   logit: [d1, d2, ..., num_labels]
    #   target: [d1, d2, .... num_labels]
    # Output shape:
    #   [d1 * d2 * ..., 1]
    #
    # Numerically-stabilized KL-style divergence between two logit tensors,
    # used as the adversarial regularizer in compute_adv_loss.
    #
    # Flatten everything but the label axis so softmax runs over labels.
    logit = tf.reshape(logit, (-1, logit.shape[-1]))
    target = tf.reshape(target, (-1, target.shape[-1]))
    # exp(log_softmax(.)) is a numerically-stable softmax.
    p = tf.math.exp(tf.math.log_softmax(logit, 1))
    y = tf.math.exp(tf.math.log_softmax(target, 1))
    # Negative log-odds of each probability; epsilon keeps the reciprocal
    # and the log finite as probabilities approach 0 or 1.
    rp = -tf.math.log((1.0 / (p + epsilon) - 1 + epsilon))
    ry = -tf.math.log((1.0 / (y + epsilon) - 1 + epsilon))
    return tf.reduce_mean((p * (rp - ry) * 2), axis=-1)
def sym_ce_kl_loss(logit, target):
    # sym_kl_loss:
    # Input shape:
    #   logit: [d1, d2, ..., num_labels]
    #   target: [d1, d2, .... num_labels]
    # Output shape:
    #   [d1 * d2 * ..., 1]
    #
    # Symmetric KL-style penalty between the two predicted distributions.
    # NOTE(review): tf.keras.losses.kld(y_true, y_pred) computes
    # mean(y_true * log(y_true / y_pred)); here the first argument is a
    # log-softmax (log-probabilities) rather than probabilities — confirm
    # this is the intended SMART symmetric-KL formulation and not an
    # argument-order slip.
    loss = tf.reduce_mean(
        tf.keras.losses.kld(tf.math.log_softmax(logit, axis=-1), tf.math.softmax(target, axis=-1)) + \
        tf.keras.losses.kld(tf.math.log_softmax(target, axis=-1), tf.math.softmax(logit, axis=-1)),
        axis=-1
    )
    return loss
def compute_adv_loss(embedding_output, labert_config, input_ids, input_mask,
                     start_positions, end_positions,
                     num_labels,
                     label_positions, label_weights, is_training,
                     target_logits, noise_epsilon, step_size):
    """Adversarial (SMART-style) regularization loss.

    Two extra forward passes through the shared (reuse=True) transformer:
      1. perturb the embeddings with Gaussian noise of scale noise_epsilon,
         measure the divergence of the resulting logits from target_logits,
         and take the gradient of that divergence w.r.t. the perturbed
         embeddings;
      2. step the embeddings by step_size along the normalized gradient and
         penalize the symmetric divergence from target_logits.

    Returns a scalar loss, or 0 when the intermediate gradient norm is
    inf/NaN (fail-safe for corrupted steps).
    """
    # Random perturbation of the embeddings.
    z = tf.random.normal(tf.shape(embedding_output)) * noise_epsilon
    with tf.compat.v1.variable_scope("bert", reuse=True):
        with tf.compat.v1.variable_scope("embeddings"):
            adv_embedding_output = embedding_output + z
        with tf.compat.v1.variable_scope("encoder"):
            attention_mask = modeling.create_attention_mask_from_input_mask(
                input_ids, input_mask)
            # First forward pass on the noise-perturbed embeddings.
            all_encoder_layers = modeling_labert.transformer_model(
                position_embeddings_ids=[start_positions, end_positions],
                input_tensor=adv_embedding_output,
                attention_mask=attention_mask,
                hidden_size=labert_config.hidden_size,
                embedding_size=labert_config.embedding_size,
                num_hidden_layers=labert_config.num_hidden_layers,
                num_attention_heads=labert_config.num_attention_heads,
                intermediate_size=labert_config.intermediate_size,
                intermediate_act_fn=modeling.get_activation(labert_config.hidden_act),
                hidden_dropout_prob=labert_config.hidden_dropout_prob,
                attention_probs_dropout_prob=labert_config.attention_probs_dropout_prob,
                initializer_range=labert_config.initializer_range,
                do_share_parameter_across_layers=False,
                do_return_all_layers=True,
                do_return_attention_maps=False,
                compute_type=tf.float32)
    adv_output_layer = tf.cast(all_encoder_layers[-1], tf.float32)
    adv_output_layer = gather_indexes(adv_output_layer, label_positions)
    hidden_size = adv_output_layer.shape[-1].value
    # Re-open the classifier variables created by the main model graph.
    root_scope = tf.compat.v1.get_variable_scope()
    with tf.compat.v1.variable_scope(root_scope, reuse=True):
        output_weights = tf.compat.v1.get_variable(
            "output_weights", [num_labels, hidden_size],
            initializer=tf.truncated_normal_initializer(stddev=0.02))
        output_bias = tf.compat.v1.get_variable(
            "output_bias", [num_labels], initializer=tf.zeros_initializer())
        with tf.compat.v1.variable_scope("loss", reuse=True):
            if is_training:
                adv_output_layer = tf.nn.dropout(adv_output_layer, rate=0.1)
            adv_logits = tf.matmul(adv_output_layer, output_weights, transpose_b=True)
            adv_logits = tf.nn.bias_add(adv_logits, output_bias)
            label_weights = tf.reshape(label_weights, [-1])
            # Weighted average of the stabilized divergence over labeled
            # positions; target_logits is frozen via stop_gradient.
            adv_loss = stable_ce_kl(adv_logits, tf.stop_gradient(target_logits))
            adv_loss = tf.reshape(adv_loss, [-1])
            adv_loss = tf.reduce_sum(adv_loss * label_weights) / tf.reduce_sum(label_weights)
    # Gradient of the divergence w.r.t. the perturbed embeddings gives the
    # adversarial direction; guard against inf/NaN gradients.
    delta_grad = tf.compat.v1.gradients(adv_loss, adv_embedding_output)[0]
    norm = tf.norm(delta_grad)
    is_corrupted = tf.math.logical_or(tf.math.is_inf(norm), tf.math.is_nan(norm))
    # Normalize per position by the max absolute component (inf-norm-like).
    delta_grad = delta_grad / (tf.math.reduce_max(tf.math.abs(delta_grad), axis=-1, keepdims=True) + 1e-6)
    with tf.compat.v1.variable_scope("bert", reuse=True):
        with tf.compat.v1.variable_scope("embeddings"):
            # Second pass: step along the (stopped) adversarial direction.
            adv_embedding_output2 = embedding_output + tf.stop_gradient(delta_grad * step_size)
        with tf.compat.v1.variable_scope("encoder"):
            all_encoder_layers2 = modeling_labert.transformer_model(
                input_tensor=adv_embedding_output2,
                attention_mask=attention_mask,
                position_embeddings_ids=[start_positions, end_positions],
                hidden_size=labert_config.hidden_size,
                embedding_size=labert_config.embedding_size,
                num_hidden_layers=labert_config.num_hidden_layers,
                num_attention_heads=labert_config.num_attention_heads,
                intermediate_size=labert_config.intermediate_size,
                intermediate_act_fn=modeling.get_activation(labert_config.hidden_act),
                hidden_dropout_prob=labert_config.hidden_dropout_prob,
                attention_probs_dropout_prob=labert_config.attention_probs_dropout_prob,
                initializer_range=labert_config.initializer_range,
                do_share_parameter_across_layers=False,
                do_return_all_layers=True,
                do_return_attention_maps=False,
                compute_type=tf.float32)
    adv_output_layer2 = tf.cast(all_encoder_layers2[-1], tf.float32)
    adv_output_layer2 = gather_indexes(adv_output_layer2, label_positions)
    with tf.compat.v1.variable_scope("loss", reuse=True):
        if is_training:
            adv_output_layer2 = tf.nn.dropout(adv_output_layer2, rate=0.1)
        adv_logits2 = tf.matmul(adv_output_layer2, output_weights, transpose_b=True)
        adv_logits2 = tf.nn.bias_add(adv_logits2, output_bias)
        adv_loss2 = sym_ce_kl_loss(adv_logits2, target_logits)
        adv_loss2 = tf.reshape(adv_loss2, [-1])
        adv_loss2 = tf.reduce_sum(adv_loss2 * label_weights) / tf.reduce_sum(label_weights)
    # Fail open: return 0 instead of propagating a corrupted gradient step.
    return tf.cond(is_corrupted, lambda: tf.constant(0.), lambda: adv_loss2)
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_unlabeled_examples(self, data_dir):
        """Gets a collection of `InputExample`s from the unlabeled set."""
        raise NotImplementedError()

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_test_examples(self, data_dir):
        """Gets a collection of `InputExample`s for prediction."""
        raise NotImplementedError()

    def get_labels(self, data_dir):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    def set_labels(self, labels):
        """Installs an externally-computed label list."""
        assert isinstance(labels, list)
        self._labels = labels

    def get_num_token_stats(self, data_dir):
        """Gets summary statistics of tokenized example lengths."""
        raise NotImplementedError()

    @staticmethod
    def _has_file(data_dir, file_name):
        """True when *file_name* exists under *data_dir*."""
        return os.path.exists(os.path.join(data_dir, file_name))

    def has_train_file(self, data_dir):
        return self._has_file(data_dir, TRAIN_FILE_NAME)

    def has_dev_file(self, data_dir):
        return self._has_file(data_dir, DEV_FILE_NAME)

    def has_test_file(self, data_dir):
        return self._has_file(data_dir, TEST_FILE_NAME)

    def has_unlabeled_file(self, data_dir):
        return self._has_file(data_dir, UNLABELED_FILE_NAME)
class SequenceLabelingProcessor(DataProcessor):
def __init__(self, tokenizer=None):
    # tokenizer: optional tokenizer; only get_num_token_stats uses it (to
    # count sub-tokens). All other methods work without one.
    self._labels = None  # label cache filled by get_labels / set_labels
    self.tokenizer = tokenizer
@staticmethod
def get_raw_data(input_file):
    """Reads a BIO data file block-wise.

    Returns a list of (words, labels) tuples, one per blank-line-separated
    block. Each non-empty line is whitespace-split; the first field is the
    word and the last is the label, so single-field (label-less) lines
    yield word == label — _create_examples later drops labels for
    test/unlabeled set types.
    """
    examples = []
    with tf.io.gfile.GFile(input_file, 'r') as f:
        words, labels = [], []
        for line in f:
            line = line.strip()
            if len(line) == 0:
                # Blank line terminates the current block.
                if len(words) > 0:
                    examples.append((words, labels))
                    words, labels = [], []
            else:
                fields = line.strip().split()
                # If the inputs are a list of words -- it's OK
                # If the inputs are a list of word, label pair -- it's also OK.
                # test/unlabeled data is handled by _create_examples
                word, label = fields[0], fields[-1]
                word = tokenization.convert_to_unicode(word)
                label = tokenization.convert_to_unicode(label)
                words.append(word)
                labels.append(label)
        # Flush the trailing block if the file lacks a final blank line.
        if len(words) > 0:
            examples.append((words, labels))
    return examples
def get_train_examples(self, data_dir):
    """Reads train.txt from *data_dir* as labeled InputExamples."""
    return self._create_examples(
        self.get_raw_data(os.path.join(data_dir, TRAIN_FILE_NAME)), "train")

def get_dev_examples(self, data_dir):
    """Reads dev.txt from *data_dir* as labeled InputExamples."""
    return self._create_examples(
        self.get_raw_data(os.path.join(data_dir, DEV_FILE_NAME)), "dev")

def get_test_examples(self, data_dir):
    """Reads test.txt; labels are dropped by _create_examples."""
    return self._create_examples(
        self.get_raw_data(os.path.join(data_dir, TEST_FILE_NAME)), "test")

def get_unlabeled_examples(self, data_dir):
    """Reads unlabeled.txt; labels are dropped by _create_examples."""
    return self._create_examples(
        self.get_raw_data(os.path.join(data_dir, UNLABELED_FILE_NAME)), "unlabeled")
def _create_examples(self, raw_data, set_type):
    """Wrap raw (words, labels) blocks into InputExample objects.

    Labels are discarded for the "test" and "unlabeled" set types.
    """
    drop_labels = set_type in ("test", "unlabeled")
    examples = []
    for idx, (words, labels) in enumerate(raw_data):
        examples.append(InputExample(
            guid="%s-%s" % (set_type, idx),
            words=words,
            labels=None if drop_labels else labels))
    return examples
def get_labels(self, data_dir):
    """Collect the sorted list of labels seen in the training data.

    Side effect: caches the result in self._labels (the same attribute
    DataProcessor.set_labels writes).
    """
    labels_set = set()
    for example in self.get_train_examples(data_dir):
        labels_set.update(example.labels)
    # sorted() already returns a fresh list; the original wrapped it in a
    # redundant identity list comprehension.
    self._labels = sorted(labels_set)
    return self._labels
def get_num_token_stats(self, data_dir):
examples = []
if self.has_train_file(data_dir):
examples.extend(self.get_train_examples(data_dir))
if self.has_dev_file(data_dir):
examples.extend(self.get_dev_examples(data_dir))
if self.has_test_file(data_dir):
examples.extend(self.get_test_examples(data_dir))
numbers = []
for example in examples:
length = 2
for word in example.words:
if self.tokenizer:
length += len(self.tokenizer.tokenize(word))
else:
length += len(tokenization.convert_to_unicode(word))
numbers.append(length)
numbers = np.array(numbers)
numbers.sort()
token_stats = {
"ave": np.mean(numbers),
"median": np.median(numbers),
"top80": np.percentile(numbers, 80),
"top90": np.percentile(numbers, | |
<filename>agml/_internal/preprocess.py
# Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Preprocessing code for AgML public data sources.
This file stores the preprocessing code used to preprocess a public
dataset when added to AgML's public data sources.
If you want to use this preprocessing code, run `pip install agml[dev]`
to install the necessary preprocessing packages.
"""
import os
import sys
import json
import glob
import shutil
import csv
import cv2
import numpy as np
import pandas as pd
from PIL import Image
from agml.utils.logging import tqdm
from agml.utils.io import create_dir, nested_dir_list, get_dir_list, get_file_list
from agml.utils.data import load_public_sources
from agml._internal.process_utils import (
read_txt_file, get_image_info, get_label2id,
convert_bbox_to_coco, get_coco_annotation_from_obj, convert_xmls_to_cocojson,
mask_annotation_per_bbox, move_segmentation_dataset,
create_sub_masks, create_sub_mask_annotation_per_bbox
)
class PublicDataPreprocessor(object):
"""Internal data preprocessing class.
Parameters
----------
data_dir : str
The directory with a folder `original` and `processed` to hold
the original and processed datasets, respectively.
"""
def __init__(self, data_dir):
    self.data_dir = os.path.abspath(data_dir)
    # Raw downloads live under `original`; converted output under `processed`.
    self.data_original_dir = os.path.join(self.data_dir, 'original')
    self.data_processed_dir = os.path.join(self.data_dir, 'processed')
    # Metadata for all public datasets (dataset name -> info dict).
    self.data_sources = load_public_sources()
def preprocess(self, dataset_name):
    """Preprocesses the provided dataset.

    Dispatches to the method of this class whose name equals
    *dataset_name* (an AttributeError is raised for unknown datasets).

    Parameters
    ----------
    dataset_name : str
        name of dataset to preprocess
    """
    handler = getattr(self, dataset_name)
    handler(dataset_name)
def bean_disease_uganda(self, dataset_name):
    """Flatten the train/validation/test splits into one folder per class."""
    base_path = os.path.join(self.data_original_dir, dataset_name)
    splits = ['train', 'validation', 'test']
    # Class names come from the subdirectories of the first split; the
    # first sorted entry is skipped (matches the original dataset layout).
    classes = sorted(os.listdir(os.path.join(base_path, splits[0])))[1:]

    # Create one output directory per class.
    output = os.path.join(self.data_processed_dir, dataset_name)
    os.makedirs(output, exist_ok = True)
    for cls in classes:
        os.makedirs(os.path.join(output, cls), exist_ok = True)

    # Copy every jpg/png of every split into its class folder.
    for split in splits:
        for cls in classes:
            src_dir = os.path.join(base_path, split, cls)
            for fname in os.listdir(src_dir):
                if fname.endswith(('jpg', 'png')):
                    shutil.copyfile(os.path.join(src_dir, fname),
                                    os.path.join(output, cls, fname))
def leaf_counting_denmark(self, dataset_name):
    # Intentional no-op: no preprocessing implemented for this dataset.
    pass

def plant_seedlings_aarhus(self, dataset_name):
    # Intentional no-op: no preprocessing implemented for this dataset.
    pass

def crop_weeds_greece(self, dataset_name):
    # Intentional no-op: no preprocessing implemented for this dataset.
    pass
def rangeland_weeds_australia(self, dataset_name):
    """Sort the images into one folder per (title-cased) species,
    using the Filename -> Species mapping in labels.csv."""
    # Get the file information.
    dataset_dir = os.path.join(self.data_original_dir, dataset_name)
    images = get_file_list(os.path.join(dataset_dir, 'images'))
    df = pd.read_csv(os.path.join(dataset_dir, 'labels.csv'))
    # Construct the new structure: one directory per unique species.
    processed_dir = os.path.join(self.data_processed_dir, dataset_name)
    unique_labels = np.unique(df['Species'])
    for unique_label in unique_labels:
        os.makedirs(os.path.join(
            processed_dir, unique_label.title()), exist_ok = True)
    for file in tqdm(images, desc = "Moving Images", file = sys.stdout):
        # Look up each image's species in the CSV to choose its folder.
        save_dir = df.loc[df['Filename'] == file]['Species'].values[0].title()
        shutil.copyfile(
            os.path.join(dataset_dir, 'images', file),
            os.path.join(processed_dir, save_dir, file)
        )
def fruit_detection_worldwide(self, dataset_name):
    """Convert the multi-folder fruit detection dataset to COCO format.

    Reads the train/test RGB annotation text files from every dataset
    folder, rewrites image paths to absolute paths, and emits a single
    COCO `instances.json` plus the copied images.
    """
    # Get the dataset directory
    dataset_dir = os.path.join(self.data_original_dir, dataset_name, 'datasets')
    # Get folder list
    dataset_folders = get_dir_list(dataset_dir)
    label2id = get_label2id(dataset_folders)
    anno_data_all = []
    for folder in dataset_folders:
        annotations = ['test_RGB.txt', 'train_RGB.txt']
        dataset_path = os.path.join(dataset_dir, folder)
        # @TODO: Make separate json files for train and test?
        for anno_file_name in annotations:
            # Read annotations. Some folders store the file with a doubled
            # extension (`*.txt.txt`), so retry with that name on failure.
            # Bug fix: the original used a bare `except:` (which also
            # swallows SystemExit/KeyboardInterrupt) and an inner
            # `except Exception as e: raise e` that only re-raised. Narrow
            # the catch and let a failing fallback propagate naturally.
            try:
                anno_data = read_txt_file(os.path.join(dataset_path, anno_file_name))
            except Exception:
                anno_data = read_txt_file(
                    os.path.join(dataset_path, anno_file_name + '.txt'))
            # Concat fruit name at head of line
            for i, anno in enumerate(anno_data):
                # Change to test path if the text file is test
                if "test" in anno_file_name and "TRAIN" in anno[0]:
                    anno_data[i][0] = anno[0].replace("TRAIN", "TEST")
                # Update image file path to an absolute path.
                anno_data[i][0] = os.path.join(dataset_path, anno_data[i][0])
            anno_data_all += anno_data

    # Process annotation files
    save_dir_anno = os.path.join(self.data_processed_dir, dataset_name, 'annotations')
    create_dir(save_dir_anno)
    output_json_file = os.path.join(save_dir_anno, 'instances.json')
    general_info = {
        "description": "fruits dataset",
        "url": "https://drive.google.com/drive/folders/1CmsZb1caggLRN7ANfika8WuPiywo4mBb",
        "version": "1.0",
        "year": 2018,
        "contributor": "<NAME>",
        "date_created": "2018/11/12"
    }
    # Process image files
    output_img_path = os.path.join(
        self.data_processed_dir, dataset_name, 'images')
    create_dir(output_img_path)
    convert_bbox_to_coco(
        anno_data_all, label2id, output_json_file,
        output_img_path, general_info)
def apple_detection_usa(self, dataset_name, fix = False):
    """Convert the WSU apple detection dataset to COCO format.

    With fix=True, only clips previously-generated over-sized COCO
    bounding boxes to the image bounds (in-place, then returns).
    """
    # Just a quick fix to clip over-sized bounding boxes.
    if fix:
        # Load in the annotations.
        dataset_dir = os.path.join(self.data_original_dir, dataset_name)
        with open(os.path.join(dataset_dir, 'annotations.json'), 'r') as f:
            annotations = json.load(f)
        # Get the images and all of their heights/widths.
        images = annotations['images']
        image_id_content_map = {}
        for image in images:
            image_id_content_map[image['id']] = (image['height'], image['width'])
        # Load all of the annotations.
        new_annotations = []
        for a in annotations['annotations']:
            new_a = a.copy()
            height, width = image_id_content_map[a['image_id']]
            # COCO bbox is (x, y, w, h); clip the corner coordinates to
            # the image, then convert back to (x, y, w, h).
            (x, y, w, h) = a['bbox']
            x1, y1, x2, y2 = x, y, x + w, y + h
            x1 = np.clip(x1, 0, width)
            x2 = np.clip(x2, 0, width)
            y1 = np.clip(y1, 0, height)
            y2 = np.clip(y2, 0, height)
            new_a['bbox'] = [int(i) for i in [x1, y1, x2 - x1, y2 - y1]]
            new_annotations.append(new_a)
        # Save the annotations.
        annotations['annotations'] = new_annotations
        with open(os.path.join(dataset_dir, 'annotations.json'), 'w') as f:
            json.dump(annotations, f)
        return

    # resize the dataset (1.0 = keep original resolution)
    resize = 1.0
    # Read public_datasources.json to get class information
    category_info = self.data_sources[dataset_name]['classes']
    labels_str = []
    labels_ids = []
    for info in category_info:
        labels_str.append(category_info[info])
        labels_ids.append(int(info))
    label2id = dict(zip(labels_str, labels_ids))
    # Task 1: Image classification
    dataset_dir = os.path.join(self.data_original_dir, dataset_name)
    obj_Detection_data = os.path.join(dataset_dir, 'Dataset')
    # get folders
    plant_folders = nested_dir_list(obj_Detection_data)
    # do tasks along folders
    anno_data_all = []
    for folder in plant_folders:
        # Get image file and xml file
        full_path = os.path.join(obj_Detection_data,folder)
        all_files = get_file_list(full_path)
        anno_files = [x for x in all_files if "txt" in x]
        for anno_file in anno_files:
            anno_line = []
            anno_path = os.path.join(full_path, anno_file)
            # Opening annotation file
            anno_data = read_txt_file(anno_path, delimiter=',')[0]
            for i, anno in enumerate(anno_data):
                # Update image file path to abs path
                new_anno = [os.path.join(dataset_dir, anno_data[i][0])]
                # Add bbox count; boxes are flat (xmin, ymin, w, h)
                # quadruples after the path field.
                bbox_cnt = int((len(anno_data[i]) - 1) / 4)
                new_anno.append(str(bbox_cnt))
                for idx in range(bbox_cnt):
                    # Convert each (xmin, ymin, w, h) to corner form.
                    xmin = int(anno[1 + 4 * idx])
                    ymin = int(anno[1 + 4 * idx+1])
                    w = int(anno[1 + 4 * idx+2])
                    h = int(anno[1 + 4 * idx+3])
                    new_anno.append(str(xmin)) # xmin
                    new_anno.append(str(ymin)) # ymin
                    new_anno.append(str(xmin + w)) # xmax
                    new_anno.append(str(ymin + h)) # ymax
                    new_anno.append(str(1)) # label (single apple class)
                anno_data[i] = new_anno
            anno_data_all += anno_data
    # Process annotation files
    save_dir_anno = os.path.join(self.data_processed_dir, dataset_name, 'annotations')
    create_dir(save_dir_anno)
    output_json_file = os.path.join(save_dir_anno, 'instances.json')
    general_info = {
        "description": "apple dataset",
        "url": "https://research.libraries.wsu.edu:8443/xmlui/handle/2376/17721",
        "version": "1.0",
        "year": 2019,
        "contributor": "<NAME>, Karkee, <NAME>",
        "date_created": "2019/04/20"
    }
    # Process image files
    output_img_path = os.path.join(self.data_processed_dir, dataset_name, 'images')
    create_dir(output_img_path)
    convert_bbox_to_coco(
        anno_data_all,
        label2id,
        output_json_file,
        output_img_path,
        general_info, None, None,
        get_label_from_folder=False,
        resize=resize, add_foldername=True)
def mango_detection_australia(self, dataset_name):
    """Convert the MangoYOLO VOC-format dataset to COCO format."""
    # resize the dataset (1.0 = keep original resolution)
    resize = 1.0
    # Read public_datasources.json to get class information
    # NOTE(review): unlike apple_detection_usa this re-reads the JSON file
    # directly instead of using self.data_sources — presumably because it
    # needs the 'crop_types' key; confirm the two stay in sync.
    datasource_file = os.path.join(
        os.path.dirname(__file__), "../_assets/public_datasources.json")
    with open(datasource_file) as f:
        data = json.load(f)
    category_info = data[dataset_name]['crop_types']
    labels_str = []
    labels_ids = []
    for info in category_info:
        labels_str.append(category_info[info])
        labels_ids.append(int(info))
    # Map the single-letter source class "M" onto the readable name.
    name_converter = dict(zip(["M"], ["mango"])) # src -> dst
    label2id = dict(zip(labels_str, labels_ids))
    dataset_dir = os.path.join(self.data_original_dir, dataset_name)
    ann_dir = os.path.join(dataset_dir, "VOCDevkit/VOC2007/Annotations")
    # Get image file and xml file; images mirror the annotation names.
    all_files = get_file_list(ann_dir)
    anno_files = [os.path.join(ann_dir, x) for x in all_files if "xml" in x]
    img_files = [x.replace(".xml", ".jpg").replace(
        "Annotations", "JPEGImages") for x in anno_files]
    # Process annotation files
    save_dir_anno = os.path.join(
        self.data_processed_dir, dataset_name, 'annotations')
    create_dir(save_dir_anno)
    output_json_file = os.path.join(
        save_dir_anno, 'instances.json')
    # Process image files
    output_img_path = os.path.join(
        self.data_processed_dir, dataset_name, 'images')
    create_dir(output_img_path)
    general_info = {
        "description": "MangoYOLO data set",
        "url": "https://researchdata.edu.au/mangoyolo-set/1697505",
        "version": "1.0",
        "year": 2019,
        "contributor": "<NAME>, <NAME>, <NAME>, <NAME>",
        "date_created": "2019/02/25"
    }
    convert_xmls_to_cocojson(
        general_info,
        annotation_paths=anno_files,
        img_paths=img_files,
        label2id=label2id,
        name_converter = name_converter,
        output_jsonpath=output_json_file,
        output_imgpath = output_img_path,
        extract_num_from_imgid=True
    )
def cotton_seedling_counting(self, dataset_name):
# Get all of the relevant data
dataset_dir = os.path.join(self.data_original_dir, dataset_name)
image_dir = os.path.join(dataset_dir, 'Images')
images = sorted([os.path.join(image_dir, i) for i in os.listdir(image_dir)])
with open(os.path.join(dataset_dir, 'Images.json'), 'r') as f:
annotations = json.load(f)
# Get all of the unique labels
labels = []
for label_set in annotations['frames'].values():
for individual_set in label_set:
labels.extend(individual_set['tags'])
labels = np.unique(labels).tolist()
label2id = get_label2id(labels) # noqa
# Extract all of the bounding boxes and images
image_data = []
annotation_data = []
valid_paths = [] # some paths are not in the annotations, track the ones which are
for indx, (img_path, annotation) in enumerate(
zip(tqdm(images, file = sys.stdout, desc = "Generating Data"),
annotations['frames'].values())):
image_data.append(get_image_info(img_path, indx))
valid_paths.append(img_path)
for a_set in annotation:
formatted_set = [
a_set['x1'], a_set['y1'], a_set['x2'], a_set['y2'],
label2id[a_set['tags'][0]]]
base_annotation_data = get_coco_annotation_from_obj(formatted_set, a_set['name'])
base_annotation_data['image_id'] = indx + 1
annotation_data.append(base_annotation_data)
# Set up the annotation dictionary
all_annotation_data = {
"images": [], "type": "instances",
"annotations": [], "categories": [],
"info": {
"description": "cotton seedling counting dataset",
"url": "https://figshare.com/s/616956f8633c17ceae9b",
"version": "1.0",
"year": 2019,
"contributor": "<NAME>",
"date_created": "2019/11/23"
}
| |
0.3474924642544, 0.9999999999729492, 2.7050825080149146e-11),
(100, 0.434365580318, 1.0, 1.1316805400125278e-17),
(110, 0.016566053235566584, 5.418491679241947e-15, 0.9999999999999946),
(110, 0.02070756654445823, 9.015166010325563e-10, 0.9999999990984834),
(110, 0.027610088725944303, 1.2616166581384716e-05, 0.9999873838334186),
(110, 0.04141513308891646, 0.012260676886138374, 0.9877393231138616),
(110, 0.08283026617783291, 0.5851128879792205, 0.4148871120207796),
(110, 0.16566053235566583, 0.9958334390971256, 0.004166560902874364),
(110, 0.24849079853349876, 0.9999981940540781, 1.8059459218939107e-06),
(110, 0.33132106471133166, 0.9999999999708863, 2.911366993335675e-11),
(110, 0.41415133088916456, 1.0, 1.360598090677705e-17),
(120, 0.01586078843626604, 4.6172675254482685e-15, 0.9999999999999953),
(120, 0.01982598554533255, 8.209916165276452e-10, 0.9999999991790084),
(120, 0.026434647393776732, 1.206852433035564e-05, 0.9999879314756697),
(120, 0.0396519710906651, 0.012086513441053544, 0.9879134865589465),
(120, 0.0793039421813302, 0.5841935646900176, 0.4158064353099824),
(120, 0.1586078843626604, 0.9958045317953282, 0.004195468204671804),
(120, 0.23791182654399062, 0.9999981547040431, 1.8452959569129297e-06),
(120, 0.3172157687253208, 0.9999999999690551, 3.094487216616905e-11),
(120, 0.396519710906651, 1.0, 1.5825362299251e-17),
(130, 0.015238552621154138, 3.981506971376443e-15, 0.999999999999996),
(130, 0.01904819077644267, 7.54742270424985e-10, 0.9999999992452577),
(130, 0.02539758770192356, 1.1599452167384703e-05, 0.9999884005478327),
(130, 0.03809638155288534, 0.011933357410775438, 0.9880666425892246),
(130, 0.07619276310577068, 0.5833799866886901, 0.41662001331130993),
(130, 0.15238552621154136, 0.9957792304905194, 0.00422076950948053),
(130, 0.22857828931731206, 0.9999981204045577, 1.8795954422729057e-06),
(130, 0.3047710524230827, 0.99999999996742, 3.25800610004164e-11),
(130, 0.3809638155288534, 1.0, 1.7955255730886202e-17),
(140, 0.014684236732535156, 3.467961744829668e-15, 0.9999999999999966),
(140, 0.018355295915668944, 6.99239619609546e-10, 0.9999999993007603),
(140, 0.02447372788755859, 1.1191396098096952e-05, 0.9999888086039019),
(140, 0.03671059183133789, 0.011797290695073515, 0.9882027093049265),
(140, 0.07342118366267578, 0.5826532396068723, 0.4173467603931277),
(140, 0.14684236732535155, 0.995756852946992, 0.004243147053008),
(140, 0.22026355098802733, 0.999998090204374, 1.909795625975354e-06),
(140, 0.2936847346507031, 0.9999999999659516, 3.404848092024851e-11),
(140, 0.3671059183133789, 1.0, 1.9986084638001886e-17),
(150, 0.014186320448093382, 3.0580781142008757e-15, 0.9999999999999969),
(150, 0.017732900560116727, 6.520586846457575e-10, 0.9999999993479414),
(150, 0.023643867413488968, 1.0832191336423938e-05, 0.9999891678086635),
(150, 0.03546580112023345, 0.011675320822642561, 0.9883246791773574),
(150, 0.0709316022404669, 0.5819988398054936, 0.4180011601945064),
(150, 0.1418632044809338, 0.9957368837260597, 0.004263116273940285),
(150, 0.21279480672140072, 0.9999980633804098, 1.9366195902653106e-06),
(150, 0.2837264089618676, 0.9999999999646259, 3.5374145080912415e-11),
(150, 0.35465801120233453, 1.0, 2.191463384552106e-17),
(160, 0.01373584570985685, 2.7263588495283635e-15, 0.9999999999999972),
(160, 0.017169807137321063, 6.114209920922329e-10, 0.999999999388579),
(160, 0.02289307618309475, 1.0513079035720533e-05, 0.9999894869209642),
(160, 0.034339614274642126, 0.011565192341321192, 0.9884348076586789),
(160, 0.06867922854928425, 0.5814054821769084, 0.41859451782309165),
(160, 0.1373584570985685, 0.9957189253833221, 0.004281074616677893),
(160, 0.20603768564785274, 0.9999980393727677, 1.9606272322951503e-06),
(160, 0.274716914197137, 0.9999999999634231, 3.657684300080896e-11),
(160, 0.3433961427464213, 1.0, 2.3741603934688108e-17),
(170, 0.013325727698571058, 2.4533724810548926e-15, 0.9999999999999976),
(170, 0.016657159623213822, 5.760416604228301e-10, 0.9999999994239583),
(170, 0.022209546164285096, 1.0227551704735477e-05, 0.9999897724482952),
(170, 0.033314319246427644, 0.011465135234063905, 0.9885348647659361),
(170, 0.06662863849285529, 0.5808641947136667, 0.41913580528633326),
(170, 0.13325727698571058, 0.9957026660083748, 0.004297333991625284),
(170, 0.19988591547856588, 0.9999980177408346, 1.9822591653617204e-06),
(170, 0.26651455397142115, 0.999999999962327, 3.76729689338356e-11),
(170, 0.3331431924642764, 1.0, 2.5470032163040355e-17),
(180, 0.012950279529029236, 2.225110507195331e-15, 0.9999999999999978),
(180, 0.016187849411286544, 5.449936355876676e-10, 0.9999999994550064),
(180, 0.021583799215048723, 9.970581871390147e-06, 0.9999900294181286),
(180, 0.03237569882257309, 0.011373717001139049, 0.9886262829988609),
(180, 0.06475139764514617, 0.5803677530995353, 0.4196322469004648),
(180, 0.12950279529029235, 0.9956878570219521, 0.004312142978047954),
(180, 0.19425419293543852, 0.9999979981328798, 2.001867120222344e-06),
(180, 0.2590055905805847, 0.9999999999613238, 3.8676185401027866e-11),
(180, 0.32375698822573085, 1.0, 2.710427853720645e-17),
(190, 0.012604876495438469, 2.0315954982057123e-15, 0.999999999999998),
(190, 0.015756095619298086, 5.175952232560829e-10, 0.9999999994824048),
(190, 0.021008127492397447, 9.73808777931441e-06, 0.9999902619122207),
(190, 0.03151219123859617, 0.011289763983004028, 0.9887102360169959),
(190, 0.06302438247719234, 0.5799102654619068, 0.4200897345380932),
(190, 0.1260487649543847, 0.9956742976148064, 0.004325702385193604),
(190, 0.18907314743157705, 0.9999979802645377, 2.0197354622726807e-06),
(190, 0.2520975299087694, 0.999999999960402, 3.959794913718855e-11),
(190, 0.3151219123859617, 1.0, 2.864937871112668e-17),
(200, 0.012285713894275511, 1.8657051000674983e-15, 0.9999999999999981),
(200, 0.015357142367844388, 4.933130219749461e-10, 0.9999999995066869),
(200, 0.020476189823792516, 9.526580286986155e-06, 0.999990473419713),
(200, 0.030714284735688775, 0.011212313840533674, 0.9887876861594663),
(200, 0.06142856947137755, 0.5794868717804904, 0.4205131282195096),
(200, 0.1228571389427551, 0.995661823605658, 0.004338176394341921),
(200, 0.18428570841413267, 0.9999979639032807, 2.0360967193278076e-06),
(200, 0.2457142778855102, 0.999999999959552, 4.0447926822309473e-11),
(200, 0.30714284735688774, 1.0, 3.011063480783186e-17),
(210, 0.01198962908564828, 1.7222468212993649e-15, 0.9999999999999982),
(210, 0.014987036357060349, 4.716495119236842e-10, 0.9999999995283505),
(210, 0.01998271514274713, 9.333100255542873e-06, 0.9999906668997445),
(210, 0.029974072714120698, 0.01114057369556164, 0.9888594263044383),
(210, 0.059948145428241395, 0.5790935223151144, 0.42090647768488565),
(210, 0.11989629085648279, 0.9956502993106433, 0.004349700689356743),
(210, 0.17984443628472419, 0.9999979488570164, 2.051142983575276e-06),
(210, 0.23979258171296558, 0.9999999999587657, 4.123432413049335e-11),
(210, 0.299740727141207, 1.0, 3.1493360655890154e-17),
(220, 0.011713968580366477, 1.597253820133587e-15, 0.9999999999999984),
(220, 0.014642460725458096, 4.5218876343752577e-10, 0.9999999995478113),
(220, 0.01952328096727746, 9.155257709706761e-06, 0.9999908447422903),
(220, 0.029284921450916192, 0.011073880810536216, 0.9889261191894638),
(220, 0.058569842901832384, 0.578726811579811, 0.421273188420189),
(220, 0.11713968580366477, 0.9956396115091443, 0.004360388490855694),
(220, 0.17570952870549716, 0.9999979349655841, 2.065034415900981e-06),
(220, 0.23427937160732953, 0.9999999999580359, 4.1964147106247866e-11),
(220, 0.2928492145091619, 1.0, 3.2802727275448543e-17),
(230, 0.011456487279076742, 1.4875629925071446e-15, 0.9999999999999986),
(230, 0.014320609098845928, 4.3460119911200134e-10, 0.9999999995653988),
(230, 0.0190941454651279, 8.991108881784716e-06, 0.9999910088911182),
(230, 0.028641218197691855, 0.0110116741169428, 0.9889883258830572),
(230, 0.05728243639538371, 0.5783838521279262, 0.4216161478720738),
(230, 0.11456487279076742, 0.995629664896983, 0.00437033510301704),
(230, 0.17184730918615113, 0.9999979220943207, 2.07790567932335e-06),
(230, 0.22912974558153484, 0.9999999999573566, 4.26434107907295e-11),
(230, 0.28641218197691853, 1.0, 3.404367337034118e-17),
(240, 0.011215271058248894, 1.3906448045848905e-15, 0.9999999999999986),
(240, 0.014019088822811117, 4.1862247361792736e-10, 0.9999999995813775),
(240, 0.018692118430414822, 8.83904794417799e-06, 0.9999911609520559),
(240, 0.028038177645622234, 0.010953474765533827, 0.9890465252344661),
(240, 0.05607635529124447, 0.5780621773247729, 0.42193782267522706),
(240, 0.11215271058248893, 0.9956203786130886, 0.0043796213869113365),
(240, 0.1682290658737334, 0.9999979101291312, 2.089870868772373e-06),
(240, 0.22430542116497787, 0.9999999999567227, 4.327730665901717e-11),
(240, 0.2803817764562223, 1.0, 3.522085780829751e-17),
(250, 0.010988676567885483, 1.3044830459729999e-15, 0.9999999999999987),
(250, 0.013735845709856852, 4.040376222081045e-10, 0.9999999995959624),
(250, 0.0183144609464758, 8.697728404554118e-06, 0.9999913022715955),
(250, 0.027471691419713704, 0.010898871937950695, 0.9891011280620493),
(250, 0.05494338283942741, 0.5777596655495548, 0.4222403344504452),
(250, 0.10988676567885482, 0.9956116835530071, 0.004388316446992907),
(250, 0.16483014851828223, 0.9999978989726668, 2.101027333222321e-06),
(250, 0.21977353135770963, 0.9999999999561296, 4.3870337779742716e-11),
(250, 0.27471691419713706, 1.0, 3.633863900395765e-17))
# Values produced by the Marsaglia & Tsang code (without its approximation).
# With about 14 digits of accuracy. See doi:10.18637/jss.v008.i18.
# The statistics are 1/5, 1/4, ..., 4, 5 times the mean for each sample count.
marsaglia_values = (
(10, .054943382839427402, .32382132520535754e-13),
(10, .068679228549284252, .19215529170715173e-7),
(10, .091572304732378998, .57293322784834989e-4),
(10, .1373584570985685, .021523322622349104),
(10, .27471691419713701, .63156658894271622),
(10, .54943382839427402, .99768559162369053),
(10, .82415074259141097, .9999999420800233),
(10, 1.098867656788548, 1),
(10, 1.3735845709856851, 1),
(20, .038850838587087703, .17228437581696657e-12),
(20, .048563548233859631, .71045779730765533e-8),
(20, .064751397645146175, .3667930830876966e-4),
(20, .097127096467719262, .017985829271761268),
(20, .19425419293543852, .6127380411231369),
(20, .38850838587087705, .996854534409372),
(20, .58276257880631555, .99999944503234328),
(20, .7770167717417541, .99999999999970091),
(20, .97127096467719265, 1.0000000000000004),
(30, .031721576872532083, .60940362155938466e-13),
(30, .039651971090665102, .45636258941820899e-8),
(30, .052869294787553471, .27728653822523069e-4),
(30, .079303942181330203, .016061192511760566),
(30, .15860788436266041, .6041862525220193),
(30, .31721576872532081, .99650770235258646),
(30, .47582365308798125, .9999990779543303),
(30, .63443153745064162, .99999999999695954),
(30, .793039421813302, .99999999999999878),
(40, .027471691419713701, .44890656943321606e-13),
(40, .034339614274642126, .29755417909055264e-8),
(40, .045786152366189499, .2305894059904745e-4),
(40, .068679228549284252, .015007081301604349),
(40, .1373584570985685, .59896461546470081),
(40, .27471691419713701, .9963091952001456),
(40, .41207537129570548, .99999883398748979),
(40, .54943382839427402, .99999999999274536),
(40, .68679228549284255, 1),
(50, .024571427788551019, .26278753848717178e-13),
(50, .030714284735688775, .22809622413858878e-8),
(50, .040952379647585031, .19914256263711656e-4),
(50, .06142856947137755, .014261793427981397),
(50, .1228571389427551, .59534554170231924),
(50, .2457142778855102, .99617770116110449),
(50, .36857141682826533, .99999866231263879),
(50, .4914285557710204, .99999999998842937),
(50, .61428569471377548, .99999999999999911),
(60, .022430542116497788, .17306629728992625e-13),
(60, .028038177645622234, .18255819850919381e-8),
(60, .037384236860829645, .17723225033102087e-4),
(60, .056076355291244467, .01372402561964047),
(60, .11215271058248893, .59264451465977663),
(60, .22430542116497787, .99608285271783381),
(60, .3364581317474668, .99999853494355739),
(60, .44861084232995574, .99999999998448241),
(60, .56076355291244462, .99999999999999933),
(70, .020766646740248397, .12832664352687508e-13),
(70, .025958308425310498, .15216690215385908e-8),
(70, .034611077900413997, .16175881497136119e-4),
(70, .051916616850620996, .013307259395076043),
(70, .10383323370124199, .59052770891543271),
(70, .20766646740248398, .99601049965278143),
(70, .31149970110372599, .99999843646804842),
(70, .41533293480496797, .99999999998099642),
(70, .51916616850621, 1),
(80, .019425419293543852, .99850706766473548e-14),
(80, .024281774116929816, .13025671037776924e-8),
(80, .032375698822573087, .14993252388149402e-4),
(80, .048563548233859631, .012970770619725519),
(80, .097127096467719262, .58880994038898993),
(80, .19425419293543852, .99595307627034479),
(80, .29138128940315777, .99999835785791991),
(80, .38850838587087705, .99999999997795064),
(80, .48563548233859632, 1.0000000000000004),
(90, .018314460946475802, .7927920543272791e-14),
(90, .022893076183094753, .11317177474311404e-8),
(90, .030524101577459672, .14047212094273758e-4),
(90, .045786152366189506, .012693962870558333),
(90, .091572304732379012, .58737920927504295),
(90, .18314460946475802, .99590613589573118),
(90, .27471691419713706, .9999982935017897),
(90, .36628921892951605, .99999999997528766),
(90, .45786152366189503, .99999999999999933),
(100, .01737462321272, .64737036819694295e-14),
(100, .0217182790159, .10020188620082422e-8),
(100, .028957705354533333, .13267307943689059e-4),
(100, .0434365580318, .012460859367854047),
(100, .0868731160636, .58616322010719946),
(100, .1737462321272, .99586687722384593),
(100, .26061934819080002, .99999823973299984),
(100, .3474924642544, .99999999997294875),
(100, .43436558031799999, .99999999999999956),
(110, .016566053235566584, .54184916792419637e-14),
(110, .020707566544458229, .90151660103256042e-9),
(110, .027610088725944306, .12616166581384756e-4),
(110, .041415133088916457, .012260676886138374),
(110, .082830266177832915, .58511288797921979),
(110, .16566053235566583, .99583343909712463),
(110, .24849079853349876, .99999819405407697),
(110, .33132106471133166, .99999999997088418),
(110, .41415133088916456, .99999999999999856),
(120, .015860788436266041, .4617267525448274e-14),
(120, .019825985545332551, .82099161652764474e-9),
(120, .026434647393776736, .12068524330355696e-4),
(120, .039651971090665102, .012086513441053538),
(120, .079303942181330203, .58419356469001682),
(120, .15860788436266041, .99580453179532702),
(120, .23791182654399062, .99999815470404096),
(120, .31721576872532081, .99999999996905387),
(120, .396519710906651, .999999999999999),
(130, .015238552621154136, .39815069713764172e-14),
(130, .01904819077644267, .75474227042498141e-9),
(130, .02539758770192356, .11599452167384669e-4),
(130, .03809638155288534, .011933357410775435),
(130, .076192763105770681, .58337998668868973),
(130, .15238552621154136, .9957792304905192),
(130, .22857828931731206, .99999812040455627),
(130, .30477105242308272, .99999999996741806),
(130, .38096381552885339, 1.0000000000000004),
(140, .014684236732535156, .34679617448296338e-14),
(140, .018355295915668944, .69923961960954503e-9),
(140, .024473727887558593, .11191396098097002e-4),
(140, .036710591831337888, .011797290695073522),
(140, .073421183662675776, .58265323960687143),
(140, .14684236732535155, .99575685294699101),
(140, .22026355098802733, .99999809020437347),
(140, .29368473465070311, .99999999996595057),
(140, .36710591831337891, .99999999999999856),
(150, .014186320448093381, .30580781142008584e-14),
(150, .017732900560116727, .65205868464575603e-9),
(150, .023643867413488968, .10832191336423951e-4),
(150, .035465801120233453, .011675320822642552),
(150, .070931602240466907, .5819988398054935),
(150, .14186320448093381, .9957368837260604),
(150, .21279480672140072, .99999806338040875),
(150, .28372640896186763, .99999999996462352),
(150, .35465801120233453, .99999999999999922),
(160, .01373584570985685, .27263588495283679e-14),
(160, .017169807137321063, .6114209920922327e-9),
(160, .022893076183094749, .10513079035720496e-4),
(160, .034339614274642126, .011565192341321183),
(160, .068679228549284252, .58140548217690802),
(160, .1373584570985685, .99571892538332352),
(160, .20603768564785274, .99999803937276766),
(160, .27471691419713701, .99999999996342326),
(160, .34339614274642127, 1.0000000000000002),
(170, .013325727698571058, .24533724810549112e-14),
(170, .016657159623213822, .57604166042282698e-9),
(170, .022209546164285096, .10227551704735462e-4),
(170, .033314319246427644, .011465135234063907),
(170, .066628638492855288, .58086419471366657),
(170, .13325727698571058, .99570266600837387),
(170, .19988591547856588, .99999801774083474),
(170, .26651455397142115, .99999999996232791),
(170, .33314319246427643, .99999999999999867),
(180, .012950279529029236, .2225110507195345e-14),
(180, .016187849411286544, .54499363558766639e-9),
(180, .021583799215048726, .99705818713901797e-5),
(180, .032375698822573087, .011373717001139049),
(180, .064751397645146175, .58036775309953559),
(180, .12950279529029235, .99568785702195206),
(180, .19425419293543852, .99999799813287893),
(180, .2590055905805847, .99999999996132227),
(180, .32375698822573085, .99999999999999822),
(190, .012604876495438469, .2031595498205708e-14),
(190, .015756095619298086, .51759522325608342e-9),
(190, .021008127492397447, .97380877793144015e-5),
(190, .031512191238596172, .011289763983004037),
(190, .063024382477192345, .57991026546190738),
(190, .12604876495438469, .99567429761480519),
(190, .18907314743157705, .99999798026453857),
(190, .25209752990876938, .99999999996040223),
(190, .31512191238596171, .99999999999999878),
(200, .01228571389427551, .18657051000674754e-14),
(200, .015357142367844388, .49331302197494576e-9),
(200, .020476189823792516, .95265802869861482e-5),
(200, .030714284735688775, .011212313840533648),
(200, .06142856947137755, .57948687178048996),
(200, .1228571389427551, .99566182360565525),
(200, .18428570841413267, .99999796390327944),
(200, .2457142778855102, .99999999995954969),
(200, .30714284735688774, .999999999999997),
(200, .01228571389427551, .18657051000674754e-14),
(200, .015357142367844388, .49331302197494576e-9),
(200, .020476189823792516, .95265802869861482e-5),
(200, .030714284735688775, .011212313840533648),
(200, .06142856947137755, .57948687178048996),
(200, .1228571389427551, .99566182360565525),
(200, .18428570841413267, .99999796390327944),
(200, .2457142778855102, .99999999995954969),
(200, .30714284735688774, .999999999999997),
(210, .011989629085648278, .17222468212993471e-14),
(210, .014987036357060349, .47164951192368483e-9),
(210, .019982715142747132, .93331002555428238e-5),
(210, .029974072714120698, .011140573695561642),
(210, .059948145428241395, .57909352231511191),
(210, .11989629085648279, .99565029931064097),
(210, .17984443628472419, .99999794885701365),
(210, .23979258171296558, .9999999999587631),
(210, .29974072714120698, .99999999999999811),
(220, .011713968580366477, .15972538201335833e-14),
(220, .014642460725458096, .45218876343752385e-9),
(220, .01952328096727746, .91552577097067593e-5),
(220, .029284921450916192, .011073880810536183),
(220, .058569842901832384, .57872681157980899),
(220, .11713968580366477, .9956396115091426),
(220, .17570952870549716, .99999793496558276),
(220, .23427937160732953, .99999999995803535),
(220, .2928492145091619, .99999999999999789),
(230, .011456487279076742, .14875629925071405e-14),
(230, .014320609098845928, .4346011991119999e-9),
(230, .019094145465127905, .89911088817847063e-5),
(230, .028641218197691855, .011011674116942765),
(230, .057282436395383711, .57838385212792487),
(230, .11456487279076742, .99562966489697979),
(230, .17184730918615113, .99999792209431815),
(230, .22912974558153484, .99999999995735223),
(230, .28641218197691853, .99999999999999789),
(240, .011215271058248894, .13906448045848864e-14),
(240, .014019088822811117, .41862247361792715e-9),
(240, .018692118430414822, .88390479441779598e-5),
(240, .028038177645622234, .010953474765533813),
(240, .056076355291244467, .57806217732477083),
(240, .11215271058248893, .99562037861308472),
(240, .1682290658737334, .99999791012912809),
(240, .22430542116497787, .99999999995671951),
(240, .28038177645622231, .99999999999999722),
(250, .010988676567885481, .13044830459729825e-14),
(250, .013735845709856852, .40403762220810372e-9),
(250, .018314460946475802, .86977284045540992e-5),
(250, .027471691419713704, .010898871937950664),
(250, .054943382839427408, .5777596655495536),
(250, .10988676567885482, .99561168355300456),
(250, .16483014851828223, .99999789897266367),
(250, | |
# File: rpc/train_eval.py
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for training and evaluating RPC agents."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import os
import time
from absl import app
from absl import flags
from absl import logging
import gin
import numpy as np
import rpc_agent
import rpc_utils
from six.moves import range
import tensorflow as tf
import tensorflow_probability as tfp
from tf_agents.agents import data_converter
from tf_agents.drivers import dynamic_step_driver
from tf_agents.environments import suite_gym
from tf_agents.environments import tf_py_environment
from tf_agents.eval import metric_utils
from tf_agents.metrics import tf_metrics
from tf_agents.policies import greedy_policy
from tf_agents.policies import random_tf_policy
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.utils import common
# Command-line flags configuring a training run (absl.flags).
flags.DEFINE_string('root_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),
                    'Root directory for writing logs/summaries/checkpoints.')
flags.DEFINE_multi_string('gin_file', None, 'Path to the trainer config files.')
flags.DEFINE_multi_string('gin_bindings', None, 'Gin binding to pass through.')
# Global flag container; values are populated when absl parses argv.
FLAGS = flags.FLAGS
# NOTE(review): these defaults are defined here but not referenced in the
# visible portion of this file — confirm where they are consumed.
DEFAULT_KL_CONSTRAINT = 1.0
DEFAULT_DUAL_LR = 1.0
@gin.configurable
def train_eval(
root_dir,
env_name='HalfCheetah-v2',
num_iterations=3000000,
actor_fc_layers=(),
critic_obs_fc_layers=None,
critic_action_fc_layers=None,
critic_joint_fc_layers=(256, 256),
initial_collect_steps=10000,
collect_steps_per_iteration=1,
replay_buffer_capacity=1000000,
# Params for target update
target_update_tau=0.005,
target_update_period=1,
# Params for train
train_steps_per_iteration=1,
batch_size=256,
actor_learning_rate=3e-4,
critic_learning_rate=3e-4,
alpha_learning_rate=3e-4,
dual_learning_rate=3e-4,
td_errors_loss_fn=tf.math.squared_difference,
gamma=0.99,
reward_scale_factor=0.1,
gradient_clipping=None,
use_tf_functions=True,
# Params for eval
num_eval_episodes=30,
eval_interval=10000,
# Params for summaries and logging
train_checkpoint_interval=50000,
policy_checkpoint_interval=50000,
rb_checkpoint_interval=50000,
log_interval=1000,
summary_interval=1000,
summaries_flush_secs=10,
debug_summaries=False,
summarize_grads_and_vars=False,
eval_metrics_callback=None,
latent_dim=10,
log_prob_reward_scale=0.0,
predictor_updates_encoder=False,
predict_prior=True,
use_recurrent_actor=False,
rnn_sequence_length=20,
clip_max_stddev=10.0,
clip_min_stddev=0.1,
clip_mean=30.0,
predictor_num_layers=2,
use_identity_encoder=False,
identity_encoder_single_stddev=False,
kl_constraint=1.0,
eval_dropout=(),
use_residual_predictor=True,
gym_kwargs=None,
predict_prior_std=True,
random_seed=0,):
"""A simple train and eval for SAC."""
np.random.seed(random_seed)
tf.random.set_seed(random_seed)
if use_recurrent_actor:
batch_size = batch_size // rnn_sequence_length
root_dir = os.path.expanduser(root_dir)
train_dir = os.path.join(root_dir, 'train')
eval_dir = os.path.join(root_dir, 'eval')
train_summary_writer = tf.compat.v2.summary.create_file_writer(
train_dir, flush_millis=summaries_flush_secs * 1000)
train_summary_writer.set_as_default()
eval_summary_writer = tf.compat.v2.summary.create_file_writer(
eval_dir, flush_millis=summaries_flush_secs * 1000)
global_step = tf.compat.v1.train.get_or_create_global_step()
with tf.compat.v2.summary.record_if(
lambda: tf.math.equal(global_step % summary_interval, 0)):
_build_env = functools.partial(suite_gym.load, environment_name=env_name, # pylint: disable=invalid-name
gym_env_wrappers=(), gym_kwargs=gym_kwargs)
tf_env = tf_py_environment.TFPyEnvironment(_build_env())
eval_vec = [] # (name, env, metrics)
eval_metrics = [
tf_metrics.AverageReturnMetric(buffer_size=num_eval_episodes),
tf_metrics.AverageEpisodeLengthMetric(buffer_size=num_eval_episodes)
]
eval_tf_env = tf_py_environment.TFPyEnvironment(_build_env())
name = ''
eval_vec.append((name, eval_tf_env, eval_metrics))
time_step_spec = tf_env.time_step_spec()
observation_spec = time_step_spec.observation
action_spec = tf_env.action_spec()
if latent_dim == 'obs':
latent_dim = observation_spec.shape[0]
def _activation(t):
t1, t2 = tf.split(t, 2, axis=1)
low = -np.inf if clip_mean is None else -clip_mean
high = np.inf if clip_mean is None else clip_mean
t1 = rpc_utils.squash_to_range(t1, low, high)
if clip_min_stddev is None:
low = -np.inf
else:
low = tf.math.log(tf.exp(clip_min_stddev) - 1.0)
if clip_max_stddev is None:
high = np.inf
else:
high = tf.math.log(tf.exp(clip_max_stddev) - 1.0)
t2 = rpc_utils.squash_to_range(t2, low, high)
return tf.concat([t1, t2], axis=1)
if use_identity_encoder:
assert latent_dim == observation_spec.shape[0]
obs_input = tf.keras.layers.Input(observation_spec.shape)
zeros = 0.0 * obs_input[:, :1]
stddev_dim = 1 if identity_encoder_single_stddev else latent_dim
pre_stddev = tf.keras.layers.Dense(stddev_dim, activation=None)(zeros)
ones = zeros + tf.ones((1, latent_dim))
pre_stddev = pre_stddev * ones # Multiply to broadcast to latent_dim.
pre_mean_stddev = tf.concat([obs_input, pre_stddev], axis=1)
output = tfp.layers.IndependentNormal(latent_dim)(pre_mean_stddev)
encoder_net = tf.keras.Model(inputs=obs_input,
outputs=output)
else:
encoder_net = tf.keras.Sequential([
tf.keras.layers.Dense(256, activation='relu'),
tf.keras.layers.Dense(256, activation='relu'),
tf.keras.layers.Dense(
tfp.layers.IndependentNormal.params_size(latent_dim),
activation=_activation,
kernel_initializer='glorot_uniform'),
tfp.layers.IndependentNormal(latent_dim),
])
# Build the predictor net
obs_input = tf.keras.layers.Input(observation_spec.shape)
action_input = tf.keras.layers.Input(action_spec.shape)
class ConstantIndependentNormal(tfp.layers.IndependentNormal):
"""A keras layer that always returns N(0, 1) distribution."""
def call(self, inputs):
loc_scale = tf.concat([
tf.zeros((latent_dim,)),
tf.fill((latent_dim,), tf.math.log(tf.exp(1.0) - 1))
],
axis=0)
# Multiple by [B x 1] tensor to broadcast batch dimension.
loc_scale = loc_scale * tf.ones_like(inputs[:, :1])
return super(ConstantIndependentNormal, self).call(loc_scale)
if predict_prior:
z = encoder_net(obs_input)
if not predictor_updates_encoder:
z = tf.stop_gradient(z)
za = tf.concat([z, action_input], axis=1)
if use_residual_predictor:
za_input = tf.keras.layers.Input(za.shape[1])
loc_scale = tf.keras.Sequential(
predictor_num_layers * [tf.keras.layers.Dense(256, activation='relu')] + [ # pylint: disable=line-too-long
tf.keras.layers.Dense(
tfp.layers.IndependentNormal.params_size(latent_dim),
activation=_activation,
kernel_initializer='zeros'),
])(za_input)
if predict_prior_std:
combined_loc_scale = tf.concat([
loc_scale[:, :latent_dim] + za_input[:, :latent_dim],
loc_scale[:, latent_dim:]
],
axis=1)
else:
# Note that softplus(log(e - 1)) = 1.
combined_loc_scale = tf.concat([
loc_scale[:, :latent_dim] + za_input[:, :latent_dim],
tf.math.log(np.e - 1) * tf.ones_like(loc_scale[:, latent_dim:])
],
axis=1)
dist = tfp.layers.IndependentNormal(latent_dim)(combined_loc_scale)
output = tf.keras.Model(inputs=za_input, outputs=dist)(za)
else:
assert predict_prior_std
output = tf.keras.Sequential(
predictor_num_layers * [tf.keras.layers.Dense(256, activation='relu')] + # pylint: disable=line-too-long
[tf.keras.layers.Dense(
tfp.layers.IndependentNormal.params_size(latent_dim),
activation=_activation,
kernel_initializer='zeros'),
tfp.layers.IndependentNormal(latent_dim),
])(za)
else:
# scale is chosen by inverting the softplus function to equal 1.
if len(obs_input.shape) > 2:
input_reshaped = tf.reshape(
obs_input, [-1, tf.math.reduce_prod(obs_input.shape[1:])])
# Multiply by [B x 1] tensor to broadcast batch dimension.
za = tf.zeros(latent_dim + action_spec.shape[0],) * tf.ones_like(input_reshaped[:, :1]) # pylint: disable=line-too-long
else:
# Multiple by [B x 1] tensor to broadcast batch dimension.
za = tf.zeros(latent_dim + action_spec.shape[0],) * tf.ones_like(obs_input[:, :1]) # pylint: disable=line-too-long
output = tf.keras.Sequential([
ConstantIndependentNormal(latent_dim),
])(
za)
predictor_net = tf.keras.Model(inputs=(obs_input, action_input),
outputs=output)
if use_recurrent_actor:
ActorClass = rpc_utils.RecurrentActorNet # pylint: disable=invalid-name
else:
ActorClass = rpc_utils.ActorNet # pylint: disable=invalid-name
actor_net = ActorClass(
input_tensor_spec=observation_spec,
output_tensor_spec=action_spec,
encoder=encoder_net,
predictor=predictor_net,
fc_layers=actor_fc_layers)
critic_net = rpc_utils.CriticNet(
(observation_spec, action_spec),
observation_fc_layer_params=critic_obs_fc_layers,
action_fc_layer_params=critic_action_fc_layers,
joint_fc_layer_params=critic_joint_fc_layers,
kernel_initializer='glorot_uniform',
last_kernel_initializer='glorot_uniform')
critic_net_2 = None
target_critic_net_1 = None
target_critic_net_2 = None
tf_agent = rpc_agent.RpAgent(
time_step_spec,
action_spec,
actor_network=actor_net,
critic_network=critic_net,
critic_network_2=critic_net_2,
target_critic_network=target_critic_net_1,
target_critic_network_2=target_critic_net_2,
actor_optimizer=tf.compat.v1.train.AdamOptimizer(
learning_rate=actor_learning_rate),
critic_optimizer=tf.compat.v1.train.AdamOptimizer(
learning_rate=critic_learning_rate),
alpha_optimizer=tf.compat.v1.train.AdamOptimizer(
learning_rate=alpha_learning_rate),
target_update_tau=target_update_tau,
target_update_period=target_update_period,
td_errors_loss_fn=td_errors_loss_fn,
gamma=gamma,
reward_scale_factor=reward_scale_factor,
gradient_clipping=gradient_clipping,
debug_summaries=debug_summaries,
summarize_grads_and_vars=summarize_grads_and_vars,
train_step_counter=global_step)
dual_optimizer = tf.compat.v1.train.AdamOptimizer(
learning_rate=dual_learning_rate)
tf_agent.initialize()
# Make the replay buffer.
replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
data_spec=tf_agent.collect_data_spec,
batch_size=tf_env.batch_size,
max_length=replay_buffer_capacity)
replay_observer = [replay_buffer.add_batch]
train_metrics = [
tf_metrics.NumberOfEpisodes(),
tf_metrics.EnvironmentSteps(),
tf_metrics.AverageReturnMetric(
buffer_size=num_eval_episodes, batch_size=tf_env.batch_size),
tf_metrics.AverageEpisodeLengthMetric(
buffer_size=num_eval_episodes, batch_size=tf_env.batch_size),
]
kl_metric = rpc_utils.AverageKLMetric(
encoder=encoder_net,
predictor=predictor_net,
batch_size=tf_env.batch_size)
eval_policy = greedy_policy.GreedyPolicy(tf_agent.policy)
initial_collect_policy = random_tf_policy.RandomTFPolicy(
tf_env.time_step_spec(), tf_env.action_spec())
collect_policy = tf_agent.collect_policy
checkpoint_items = {
'ckpt_dir': train_dir,
'agent': tf_agent,
'global_step': global_step,
'metrics': metric_utils.MetricsGroup(train_metrics, 'train_metrics'),
'dual_optimizer': dual_optimizer,
}
train_checkpointer = common.Checkpointer(**checkpoint_items)
policy_checkpointer = common.Checkpointer(
ckpt_dir=os.path.join(train_dir, 'policy'),
policy=eval_policy,
global_step=global_step)
rb_checkpointer = common.Checkpointer(
ckpt_dir=os.path.join(train_dir, 'replay_buffer'),
max_to_keep=1,
replay_buffer=replay_buffer)
train_checkpointer.initialize_or_restore()
rb_checkpointer.initialize_or_restore()
initial_collect_driver = dynamic_step_driver.DynamicStepDriver(
tf_env,
initial_collect_policy,
observers=replay_observer + train_metrics,
num_steps=initial_collect_steps,
transition_observers=[kl_metric])
collect_driver = dynamic_step_driver.DynamicStepDriver(
tf_env,
collect_policy,
observers=replay_observer + train_metrics,
num_steps=collect_steps_per_iteration,
transition_observers=[kl_metric])
if use_tf_functions:
initial_collect_driver.run = common.function(initial_collect_driver.run)
collect_driver.run = common.function(collect_driver.run)
tf_agent.train = common.function(tf_agent.train)
if replay_buffer.num_frames() == 0:
# Collect initial replay data.
logging.info(
'Initializing replay buffer by collecting experience for %d steps '
'with a random policy.', initial_collect_steps)
initial_collect_driver.run()
for name, eval_tf_env, eval_metrics in eval_vec:
results = metric_utils.eager_compute(
eval_metrics,
eval_tf_env,
eval_policy,
num_episodes=num_eval_episodes,
train_step=global_step,
summary_writer=eval_summary_writer,
summary_prefix='Metrics-%s' % name,
)
if eval_metrics_callback is not None:
eval_metrics_callback(results, global_step.numpy())
metric_utils.log_metrics(eval_metrics, prefix=name)
time_step = None
policy_state = collect_policy.get_initial_state(tf_env.batch_size)
timed_at_step = global_step.numpy()
time_acc = 0
train_time_acc = 0
env_time_acc = 0
if use_recurrent_actor: # default from sac/train_eval_rnn.py
num_steps = rnn_sequence_length + 1
def _filter_invalid_transition(trajectories, unused_arg1):
return tf.reduce_all(~trajectories.is_boundary()[:-1])
tf_agent._as_transition = data_converter.AsTransition( # pylint: disable=protected-access
tf_agent.data_context, squeeze_time_dim=False)
else:
num_steps = 2
def _filter_invalid_transition(trajectories, unused_arg1):
return ~trajectories.is_boundary()[0]
dataset = replay_buffer.as_dataset(
sample_batch_size=batch_size,
num_steps=num_steps).unbatch().filter(_filter_invalid_transition)
dataset = dataset.batch(batch_size).prefetch(5)
# Dataset generates trajectories with shape [Bx2x...]
iterator = iter(dataset)
  @tf.function
  def train_step():
    """Runs one training step: dual-variable update, reward shaping, agent update.

    Samples a batch from the replay iterator, computes the KL between the
    encoder posterior over the next observation and the predictor's prior,
    performs one optimizer step on the KL Lagrange multiplier (held on
    actor_net), shapes the reward with the KL penalty, and finally calls the
    agent's train op. Returns the agent's LossInfo.
    """
    experience, _ = next(iterator)

    # Prior over the next latent from (s_t, a_t) vs. posterior from s_{t+1}.
    prior = predictor_net((experience.observation[:, 0],
                           experience.action[:, 0]), training=False)
    z_next = encoder_net(experience.observation[:, 1], training=False)
    # predictor_kl is a vector of size batch_size.
    predictor_kl = tfp.distributions.kl_divergence(z_next, prior)

    # Dual (Lagrange multiplier) step: only the log KL coefficient is
    # trained here; the KL itself is stop-gradiented.
    with tf.GradientTape() as tape:
      tape.watch(actor_net._log_kl_coefficient)  # pylint: disable=protected-access
      dual_loss = -1.0 * actor_net._log_kl_coefficient * (  # pylint: disable=protected-access
          tf.stop_gradient(tf.reduce_mean(predictor_kl)) - kl_constraint)
    dual_grads = tape.gradient(dual_loss, [actor_net._log_kl_coefficient])  # pylint: disable=protected-access
    grads_and_vars = list(zip(dual_grads, [actor_net._log_kl_coefficient]))  # pylint: disable=protected-access
    dual_optimizer.apply_gradients(grads_and_vars)

    # Clip the dual variable so exp(log_kl_coef) <= 1e6.
    log_kl_coef = tf.clip_by_value(actor_net._log_kl_coefficient,  # pylint: disable=protected-access
                                   -1.0 * np.log(1e6), np.log(1e6))
    actor_net._log_kl_coefficient.assign(log_kl_coef)  # pylint: disable=protected-access

    with tf.name_scope('dual_loss'):
      tf.compat.v2.summary.scalar(name='dual_loss',
                                  data=tf.reduce_mean(dual_loss),
                                  step=global_step)
      tf.compat.v2.summary.scalar(name='log_kl_coefficient',
                                  data=actor_net._log_kl_coefficient,  # pylint: disable=protected-access
                                  step=global_step)

    # Diagnostics on the latent distributions.
    z_entropy = z_next.entropy()
    log_prob = prior.log_prob(z_next.sample())
    with tf.name_scope('rp-metrics'):
      common.generate_tensor_summaries('predictor_kl', predictor_kl,
                                       global_step)
      common.generate_tensor_summaries('z_entropy', z_entropy, global_step)
      common.generate_tensor_summaries('log_prob', log_prob, global_step)
      common.generate_tensor_summaries('z_mean', z_next.mean(), global_step)
      common.generate_tensor_summaries('z_stddev', z_next.stddev(),
                                       global_step)
      common.generate_tensor_summaries('prior_mean', prior.mean(),
                                       global_step)
      common.generate_tensor_summaries('prior_stddev', prior.stddev(),
                                       global_step)

    # 'auto' ties the reward penalty scale to the (clipped) dual variable.
    if log_prob_reward_scale == 'auto':
      coef = tf.stop_gradient(tf.exp(actor_net._log_kl_coefficient))  # pylint: disable=protected-access
    else:
      coef = log_prob_reward_scale
    tf.debugging.check_numerics(
        tf.reduce_mean(predictor_kl), 'predictor_kl is inf or nan.')
    tf.debugging.check_numerics(coef, 'coef is inf or nan.')

    # Shape the reward with the per-sample KL penalty before training.
    new_reward = experience.reward - coef * predictor_kl[:, None]
    experience = experience._replace(reward=new_reward)
    return tf_agent.train(experience)
if use_tf_functions:
train_step = common.function(train_step)
# Save the hyperparameters
operative_filename = os.path.join(root_dir, 'operative.gin')
with tf.compat.v1.gfile.Open(operative_filename, 'w') as f:
f.write(gin.operative_config_str())
print(gin.operative_config_str())
global_step_val = global_step.numpy()
while global_step_val < num_iterations:
start_time = time.time()
time_step, policy_state = collect_driver.run(
time_step=time_step,
policy_state=policy_state,
)
env_time_acc += time.time() - start_time
train_start_time = time.time()
for _ in range(train_steps_per_iteration):
train_loss = train_step()
train_time_acc += time.time() - train_start_time
time_acc += time.time() - start_time
global_step_val = global_step.numpy()
if global_step_val % log_interval == 0:
logging.info('step = %d, loss | |
files extracted and stored by this packager
In this case ["filesandjats_jats.xml", "filesandjats_epmc.xml"]
:return: list of metadata files
"""
return ["filesandjats_jats.xml", "filesandjats_epmc.xml"]
def url_name(self):
"""
Get the name of the package as it should appear in any content urls
In this case FilesAndJATS
:return: url name
"""
return "FilesAndJATS"
################################################
## Overrids of methods for retriving data from the actual package
def metadata_streams(self):
"""
A generator which yields tuples of metadata file names and data streams
In this handler, this will yield up to 2 metadata streams; for "filesandjats_jats.xml" and "filesandjats_epmc.xml",
in that order, where there is a stream present for that file.
:return: generator for file names/data streams
"""
sources = [("filesandjats_jats.xml", self.jats), ("filesandjats_epmc.xml", self.epmc)]
for n, x in sources:
if x is not None:
yield n, StringIO(x.tostring().decode('utf-8'))
def notification_metadata(self):
"""
Get the notification metadata as extracted from the package
This will extract metadata from both of the JATS XML and the EPMC XML, whichever is present
and merge them before responding.
:return: NotificationMetadata populated
"""
emd = None
jmd = None
# extract all the relevant data from epmc
if self.epmc is not None:
emd = self._epmc_metadata()
# extract all the relevant data from jats
if self.jats is not None:
jmd = self._jats_metadata()
return self._merge_metadata(emd, jmd)
def match_data(self):
"""
Get the match data as extracted from the package
This will extract match data from both of the JATS XML and the EPMC XML, whichever is present
and merge them before responding.
:return: RoutingMetadata populated
"""
match = models.RoutingMetadata()
# extract all the relevant match data from epmc
if self.epmc is not None:
self._epmc_match_data(match)
# extract all the relevant match data from jats
if self.jats is not None:
self._jats_match_data(match)
return match
def convertible(self, target_format):
"""
Checks whether this handler can do the conversion to the target format.
This handler currently supports the following conversion formats:
* http://purl.org/net/sword/package/SimpleZip
* http://purl.org/net/sword/package/OPUS4Zip
* http://purl.org/net/sword/package/ESciDoc
* http://purl.org/net/sword/package/METSDSpaceSIP
* http://purl.org/net/sword/package/METSMODS
:param target_format: target format
:return: True if in the above list, else False
"""
# 2017-03-21 TD : added another zip format (here: OPUS4Zip)
# 2017-05-15 TD : added another two zip formats (here: ESciDoc and METSDSpaceSIP)
# 2017-07-11 TD : added another zip format (here: METSMODS)
return target_format in ["http://purl.org/net/sword/package/SimpleZip",
"http://purl.org/net/sword/package/OPUS4Zip",
"http://purl.org/net/sword/package/ESciDoc",
"http://purl.org/net/sword/package/METSDSpaceSIP",
"http://purl.org/net/sword/package/METSMODS"]
def convert(self, in_path, target_format, out_path):
"""
Convert the file at the specified in_path to a package file of the
specified target_format at the out_path.
You should check first that this target_format is supported via convertible()
This handler currently supports the following conversion formats:
* http://purl.org/net/sword/package/SimpleZip
* http://purl.org/net/sword/package/OPUS4Zip
* http://purl.org/net/sword/package/ESciDoc
* http://purl.org/net/sword/package/METSDSpaceSIP
* http://purl.org/net/sword/package/METSMODS
:param in_path: locally accessible file path to the source package
:param target_format: the format identifier for the format we want to convert to
:param out_path: locally accessible file path for the output to be written
:return: True/False on success/fail
"""
# 2017-03-21 TD : additional handling of a new format (here: OPUS4Zip)
# 2017-05-15 TD : added another two zip formats (here: ESciDoc and METSDSpaceSIP)
# 2017-07-11 TD : added another zip format (here: METSMODS)
if target_format == "http://purl.org/net/sword/package/SimpleZip":
self._simple_zip(in_path, out_path)
return True
elif target_format == "http://purl.org/net/sword/package/OPUS4Zip":
self._opus4_zip(in_path, out_path)
return True
elif target_format == "http://purl.org/net/sword/package/ESciDoc":
self._escidoc_zip(in_path, out_path)
return True
elif target_format == "http://purl.org/net/sword/package/METSDSpaceSIP":
self._metsdspace_zip(in_path, out_path)
return True
elif target_format == "http://purl.org/net/sword/package/METSMODS":
self._metsmods_zip(in_path, out_path)
return True
return False
################################################
## Internal methods
def _simple_zip(self, in_path, out_path):
"""
convert to simple zip
:param in_path:
:param out_path:
:return:
"""
# files and jats are already basically a simple zip, so a straight copy
shutil.copyfile(in_path, out_path)
# 2017-03-21 TD : added an internal method converting to OPUS4 zip format;
# basically by invoking an xslt transformation of the xml metadata
    def _opus4_zip(self, in_path, out_path):
        """
        Convert the package at in_path into an OPUS4 zip at out_path.

        The single .xml member of the source zip is transformed to OPUS4
        metadata via the jats2opus4 XSLT; every non-xml member is copied
        through, MD5-hashed, and appended to the metadata via the
        addfiles2opus4 XSLT; the result is written as "opus.xml".

        :param in_path: locally accessible path of the source package
        :param out_path: locally accessible path for the OPUS4 zip
        :raises PackageException: if the source zip is corrupt or the XML
            cannot be parsed/transformed
        :return:
        """
        # 2017-03-21 TD :
        # files and jats are already basically a OPUS4 zip, so a straight copy
        # well, almost...
        #shutil.copyfile(in_path, out_path)
        app.logger.debug("PackageHandler FilesAndJATS._opus4_zip(): ... converting {x} into {y}.".format(x=in_path,y=out_path))
        try:
            zin = zipfile.ZipFile(in_path, "r", allowZip64=True)
        except zipfile.BadZipfile as e:
            raise PackageException("Zip file is corrupt - cannot read.")
        # 2017-03-22 TD : still missing [Done: correct 'document()' handling in XSLT string]
        #                 MD5 calculation of all the wonderfull payload plus the
        #                 corres. '<files/>' appendum as of 'add_files2opus_xml.xsl'
        #
        # 2017-04-21 TD : all of the above missing list done!! (-:
        #
        # Compile the two stylesheets: one for the main JATS->OPUS4 transform,
        # one for appending per-file entries to the transformed metadata.
        xslt_root = etree.XML(models.XSLT.jats2opus4)
        transform = etree.XSLT(xslt_root)
        xslt_addf = etree.XML(models.XSLT.addfiles2opus4)
        addfile = etree.XSLT(xslt_addf)
        # NOTE(review): load_dtd=True with no_network=False permits network
        # DTD fetches during parsing -- confirm this is intended.
        parser = etree.XMLParser(load_dtd=True, no_network=False)
        try:
            with zipfile.ZipFile(out_path, "w") as zout:
                for item in zin.infolist():
                    if item.filename.endswith(".xml"):
                        data = zin.read(item.filename)
                        opus4xml = transform( etree.fromstring(data, parser) )
                        break # only *one* .xml allowed per .zip
                # NOTE(review): if the source zip contains no .xml member,
                # 'opus4xml' is never bound and the code below raises
                # NameError, surfaced as the generic PackageException.
                for item in zin.infolist():
                    if not item.filename.endswith(".xml"):
                        data = zin.read(item.filename)
                        md5sum = hashlib.md5(data).hexdigest()
                        opus4xml = addfile( opus4xml,
                                            md5=etree.XSLT.strparam(md5sum),
                                            file=etree.XSLT.strparam(item.filename) )
                        zout.writestr(item, data)
                zout.writestr("opus.xml", str(opus4xml))
            zin.close()
        except Exception:
            # Any parse/transform/IO failure is reported uniformly; ensure the
            # input zip handle is released either way.
            zin.close()
            raise PackageException("Unable to parse and/or transform XML file in package {x}".format(x=in_path))
# 2017-05-15 TD : added an internal method converting to ESciDoc zip format;
# basically by invoking an xslt transformation of the xml metadata
    def _escidoc_zip(self, in_path, out_path):
        """
        Convert the package at in_path into an ESciDoc zip at out_path.

        The single .xml member of the source zip is transformed to ESciDoc
        metadata via the jats2escidoc XSLT and written as "escidoc.xml";
        every non-xml member is copied through unchanged. (The per-file
        metadata appendum is currently disabled -- see commented code.)

        :param in_path: locally accessible path of the source package
        :param out_path: locally accessible path for the ESciDoc zip
        :raises PackageException: if the source zip is corrupt or the XML
            cannot be parsed/transformed
        :return:
        """
        # 2017-05-15 TD :
        # files and jats are already basically a ESciDoc zip, so a straight copy
        # well, almost...
        #shutil.copyfile(in_path, out_path)
        app.logger.debug("PackageHandler FilesAndJATS._escidoc_zip(): ... converting {x} into {y}.".format(x=in_path,y=out_path))
        try:
            zin = zipfile.ZipFile(in_path, "r", allowZip64=True)
        except zipfile.BadZipfile as e:
            raise PackageException("Zip file is corrupt - cannot read.")
        # 2017-03-22 TD : still missing [Done: correct 'document()' handling in XSLT string]
        # 2017-05-15 TD : all of the above missing list done!! (-:
        #
        xslt_root = etree.XML(models.XSLT.jats2escidoc)
        transform = etree.XSLT(xslt_root)
        # xslt_addf = etree.XML(models.XSLT.addfiles2escidoc)
        # addfile = etree.XSLT(xslt_addf)
        # NOTE(review): load_dtd=True with no_network=False permits network
        # DTD fetches during parsing -- confirm this is intended.
        parser = etree.XMLParser(load_dtd=True, no_network=False)
        try:
            with zipfile.ZipFile(out_path, "w") as zout:
                for item in zin.infolist():
                    if item.filename.endswith(".xml"):
                        data = zin.read(item.filename)
                        escidoc = transform( etree.fromstring(data, parser) )
                        break # only *one* .xml allowed per .zip
                # NOTE(review): if the source zip contains no .xml member,
                # 'escidoc' is never bound and the writestr below raises
                # NameError, surfaced as the generic PackageException.
                for item in zin.infolist():
                    if not item.filename.endswith(".xml"):
                        data = zin.read(item.filename)
                        # md5sum = hashlib.md5(data).hexdigest()
                        # escidoc = addfile( escidoc,
                        #                    md5=etree.XSLT.strparam(md5sum),
                        #                    file=etree.XSLT.strparam(item.filename) )
                        zout.writestr(item, data)
                zout.writestr("escidoc.xml", str(escidoc))
            zin.close()
        except Exception:
            # Any parse/transform/IO failure is reported uniformly; ensure the
            # input zip handle is released either way.
            zin.close()
            raise PackageException("Unable to parse and/or transform XML file in package {x}".format(x=in_path))
# 2017-05-15 TD : added an internal method converting to METSDSpaceSIP zip format;
# basically by invoking an xslt transformation of the xml metadata
def _metsdspace_zip(self, in_path, out_path):
"""
convert to METSDSpaceSIP zip
:param in_path:
:param out_path:
:return:
"""
# 2017-05-15 TD :
# files and jats are already basically a METSDSpaceSIP, so a straight copy
# well, almost...
#shutil.copyfile(in_path, out_path)
app.logger.debug("PackageHandler FilesAndJATS._metsdspace_zip(): ... converting {x} into {y}.".format(x=in_path,y=out_path))
try:
zin = zipfile.ZipFile(in_path, "r", allowZip64=True)
except zipfile.BadZipfile as e:
raise PackageException("Zip file is corrupt - cannot read.")
# 2017-03-22 TD : still missing [Done: correct 'document()' handling in XSLT string]
# MD5 calculation of all the wonderfull payload plus the
# corres. '<filesGrp/>' appendum as of 'add_files2METSDSpaceSIP_xml.xsl'
#
# 2017-05-15 TD : all of the above missing list done!! (-:
#
xslt_root = etree.XML(models.XSLT.jats2metsdspace)
transform = etree.XSLT(xslt_root)
xslt_addf = etree.XML(models.XSLT.addfiles2mets)
addfile = etree.XSLT(xslt_addf)
# 2018-03-20 TD : Separate structMap part necessary in case of /no/ file addition
xslt_adds = etree.XML(models.XSLT.addstruct2mets)
addstruct = etree.XSLT(xslt_adds)
parser = etree.XMLParser(load_dtd=True, no_network=False)
try:
with zipfile.ZipFile(out_path, "w") as zout:
for item in zin.infolist():
if item.filename.endswith(".xml"):
data = zin.read(item.filename)
now = datetime.now().strftime("%FT%T.%f")
mets = transform( etree.fromstring(data, parser),
currdatetime=etree.XSLT.strparam(now) )
break # only *one* .xml allowed per .zip
count = 0
mimetypes.init()
for item in zin.infolist():
if not item.filename.endswith(".xml"):
count = count + 1
data = zin.read(item.filename)
md5sum = hashlib.md5(data).hexdigest()
mimetype = mimetypes.MimeTypes().guess_type(item.filename)
if mimetype[0] is None:
mimetype = ("application/octet-stream", None)
mets = addfile( mets,
md5=etree.XSLT.strparam(md5sum),
file=etree.XSLT.strparam(item.filename),
mime=etree.XSLT.strparam(mimetype[0]),
cnt=etree.XSLT.strparam(str(count)) )
zout.writestr(item, data)
# 2018-03-20 TD : closing the mets xml by adding the (final) structMap
mets = addstruct( mets )
# 2018-02-21 TD : Strictly needs to be 'mets.xml' due to DSPACE requirements.
| |
<reponame>naacl943/simpletransformers
#!/usr/bin/env python
# coding: utf-8
from __future__ import absolute_import, division, print_function
import gc
import io
import json
import logging
import math
import os
import pickle as pkl
import random
import warnings
from dataclasses import asdict
from multiprocessing import cpu_count
from typing import Dict, List
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn
import torch
import torch.nn as nn
import torch.nn.functional as F
from simpletransformers.config.global_args import global_args
from simpletransformers.config.model_args import LanguageModelingArgs
from simpletransformers.custom_models.models import (
ElectraForLanguageModelingModel, ElectraForPreTraining)
from simpletransformers.language_modeling.language_modeling_utils import (
DocumentDataset, SimpleDataset, calculate_acc, get_metrics, mask_tokens,
mask_tokens_vanilla, merge_batches, mp_score, neg_entropy, pl_score,
plot_confusion_matrix, plot_to_image)
from sklearn.dummy import DummyClassifier
from sklearn.metrics import (PrecisionRecallDisplay, RocCurveDisplay, auc,
average_precision_score, confusion_matrix,
label_ranking_average_precision_score,
matthews_corrcoef, mean_squared_error,
plot_roc_curve, precision_recall_curve, roc_curve)
from tensorboardX import SummaryWriter
from tokenizers import BertWordPieceTokenizer, ByteLevelBPETokenizer
from tokenizers.processors import BertProcessing
from torch.nn.utils.rnn import pad_sequence
from torch.optim import SGD, AdamW
from torch.utils.data import (DataLoader, Dataset, RandomSampler,
SequentialSampler)
from torch.utils.data.distributed import DistributedSampler
# from tqdm.auto import tqdm, trange
from tqdm import tqdm, trange
from transformers import (WEIGHTS_NAME, AutoConfig, AutoModelWithLMHead,
AutoTokenizer, BertConfig, BertForMaskedLM,
BertTokenizer, CamembertConfig, CamembertForMaskedLM,
CamembertTokenizer, DistilBertConfig,
DistilBertForMaskedLM, DistilBertTokenizer,
ElectraConfig, ElectraForMaskedLM, ElectraTokenizer,
GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
LongformerConfig, LongformerForMaskedLM,
LongformerTokenizer, OpenAIGPTConfig,
OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
PreTrainedModel, PreTrainedTokenizer, RobertaConfig,
RobertaForMaskedLM, RobertaTokenizer,
get_linear_schedule_with_warmup)
from transformers.data.datasets.language_modeling import (
LineByLineTextDataset, TextDataset)
from transformers.modeling_electra import ElectraEmbeddings
# wandb is an optional dependency: experiment tracking is simply disabled
# when the client library is not installed.
try:
    import wandb
    wandb_available = True
except ImportError:
    wandb_available = False

logger = logging.getLogger(__name__)

# Maps the user-facing ``model_type`` string to the (config class,
# model class, tokenizer class) triple used to build that architecture.
MODEL_CLASSES = {
    "auto": (AutoConfig, AutoModelWithLMHead, AutoTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "camembert": (CamembertConfig, CamembertForMaskedLM, CamembertTokenizer),
    "distilbert":
    (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "electra":
    (ElectraConfig, ElectraForLanguageModelingModel, ElectraTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
    "longformer":
    (LongformerConfig, LongformerForMaskedLM, LongformerTokenizer),
    "openai-gpt": (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
}
class LanguageModelingModel:
    def __init__(
        self,
        model_type,
        model_name,
        masks,
        generator_name=None,
        discriminator_name=None,
        train_files=None,
        args=None,
        use_cuda=True,
        cuda_device=-1,
        **kwargs,
    ):
        """
        Initializes a LanguageModelingModel.

        Args:
            model_type: The type of model (gpt2, openai-gpt, bert, roberta, distilbert, camembert)
            model_name: Default Transformer model name or path to a directory containing Transformer model file (pytorch_nodel.bin).
            masks: Mask definitions for the ELECTRA language modeling head; len(masks) is passed as the head's output size. NOTE(review): the exact element semantics are not visible in this file -- confirm with callers.
            generator_name (optional): A pretrained model name or path to a directory containing an ELECTRA generator model.
            discriminator_name (optional): A pretrained model name or path to a directory containing an ELECTRA discriminator model.
            args (optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args.
            train_files (optional): List of files to be used when training the tokenizer.
            use_cuda (optional): Use GPU if available. Setting to False will force model to use CPU only.
            cuda_device (optional): Specific GPU that should be used. Will use the first available GPU by default.
            **kwargs (optional): For providing proxies, force_download, resume_download, cache_dir and other options specific to the 'from_pretrained' implementation where this will be supplied.
        """  # noqa: ignore flake8"

        self.args = self._load_model_args(model_name)
        self.extra_args = args
        self.current_epoch = 0
        # NOTE(review): this membership test assumes ``args`` is a dict;
        # passing args=None raises TypeError here -- confirm callers always
        # supply a dict.
        if not ("vanilla_electra" in self.extra_args and self.extra_args["vanilla_electra"] != False):
            self.CELoss = nn.CrossEntropyLoss()
            self.masks = masks  # should be in the if
            logger.info(f'# MASKS: {len(self.masks)}')

        if isinstance(args, dict):
            self.args.update_from_dict(args)
        elif isinstance(args, LanguageModelingArgs):
            self.args = args

        # Hyperparameter sweep support: sweep values override model args.
        if "sweep_config" in kwargs:
            sweep_config = kwargs.pop("sweep_config")
            sweep_values = {
                key: value["value"]
                for key, value in sweep_config.as_dict().items()
                if key != "_wandb"
            }
            self.args.update_from_dict(sweep_values)

        # Seed every RNG source used downstream for reproducibility.
        if self.args.manual_seed:
            random.seed(self.args.manual_seed)
            np.random.seed(self.args.manual_seed)
            torch.manual_seed(self.args.manual_seed)
            if self.args.n_gpu > 0:
                torch.cuda.manual_seed_all(self.args.manual_seed)

        # Distributed training: each process binds to its local GPU.
        if self.args.local_rank != -1:
            logger.info(f"local_rank: {self.args.local_rank}")
            torch.distributed.init_process_group(backend="nccl")
            cuda_device = self.args.local_rank

        if use_cuda:
            if torch.cuda.is_available():
                if cuda_device == -1:
                    self.device = torch.device("cuda")
                else:
                    self.device = torch.device(f"cuda:{cuda_device}")
            else:
                raise ValueError(
                    "'use_cuda' set to True when cuda is unavailable."
                    " Make sure CUDA is available or set use_cuda=False.")
        else:
            self.device = "cpu"

        self.results = {}

        # fp16 requires CUDA; force it off on CPU.
        if not use_cuda:
            self.args.fp16 = False

        self.args.model_name = model_name
        self.args.model_type = model_type

        config_class, model_class, tokenizer_class = MODEL_CLASSES[model_type]
        self.tokenizer_class = tokenizer_class
        new_tokenizer = False

        # Tokenizer resolution order: explicit tokenizer_name, then
        # model_name, then training a fresh tokenizer from train_files.
        if self.args.tokenizer_name:
            self.tokenizer = tokenizer_class.from_pretrained(
                self.args.tokenizer_name, cache_dir=self.args.cache_dir)
        elif self.args.model_name:
            if self.args.model_name == "electra":
                # ELECTRA built from parts uses the generator's tokenizer.
                self.tokenizer = tokenizer_class.from_pretrained(
                    generator_name, cache_dir=self.args.cache_dir, **kwargs)
                self.args.tokenizer_name = self.args.model_name
            else:
                self.tokenizer = tokenizer_class.from_pretrained(
                    model_name, cache_dir=self.args.cache_dir, **kwargs)
                self.args.tokenizer_name = self.args.model_name
        else:
            if not train_files:
                raise ValueError(
                    "model_name and tokenizer_name are not specified."
                    "You must specify train_files to train a Tokenizer.")
            else:
                print("train_files", train_files)
                self.train_tokenizer(train_files)
                new_tokenizer = True

        # Config resolution mirrors the tokenizer logic above.
        if self.args.config_name:
            self.config = config_class.from_pretrained(
                self.args.config_name, cache_dir=self.args.cache_dir)
        elif self.args.model_name and self.args.model_name != "electra":
            self.config = config_class.from_pretrained(
                model_name, cache_dir=self.args.cache_dir, **kwargs)
        else:
            self.config = config_class(**self.args.config, **kwargs)
        if self.args.vocab_size:
            self.config.vocab_size = self.args.vocab_size
        if new_tokenizer:
            # A freshly trained tokenizer dictates the vocabulary size.
            self.config.vocab_size = len(self.tokenizer)

        # ELECTRA keeps separate generator and discriminator configs.
        if self.args.model_type == "electra":
            if generator_name:
                self.generator_config = ElectraConfig.from_pretrained(
                    generator_name)
            elif self.args.model_name:
                self.generator_config = ElectraConfig.from_pretrained(
                    os.path.join(self.args.model_name, "generator_config"),
                    **kwargs,
                )
            else:
                self.generator_config = ElectraConfig(
                    **self.args.generator_config, **kwargs)
                if new_tokenizer:
                    self.generator_config.vocab_size = len(self.tokenizer)

            if discriminator_name:
                self.discriminator_config = ElectraConfig.from_pretrained(
                    discriminator_name)
            elif self.args.model_name:
                self.discriminator_config = ElectraConfig.from_pretrained(
                    os.path.join(self.args.model_name, "discriminator_config"),
                    **kwargs,
                )
            else:
                self.discriminator_config = ElectraConfig(
                    **self.args.discriminator_config, **kwargs)
                if new_tokenizer:
                    self.discriminator_config.vocab_size = len(self.tokenizer)

        # Clamp block_size to what the tokenizer and args allow.
        if self.args.block_size <= 0:
            self.args.block_size = min(self.args.max_seq_length,
                                       self.tokenizer.max_len)
        else:
            self.args.block_size = min(self.args.block_size,
                                       self.tokenizer.max_len,
                                       self.args.max_seq_length)

        if self.args.model_name:
            if self.args.model_type == "electra":
                if self.args.model_name == "electra":
                    # Assemble ELECTRA from separately pretrained
                    # generator/discriminator checkpoints.
                    generator_model = ElectraForMaskedLM.from_pretrained(
                        generator_name)
                    discriminator_model = ElectraForPreTraining.from_pretrained(
                        discriminator_name)
                    self.model = ElectraForLanguageModelingModel(
                        config=self.config,
                        output_size=len(self.masks),
                        generator_model=generator_model,
                        discriminator_model=discriminator_model,
                        generator_config=self.generator_config,
                        discriminator_config=self.discriminator_config,
                        tie_generator_and_discriminator_embeddings=self.args.
                        tie_generator_and_discriminator_embeddings,
                        random_generator=self.extra_args['random_generator'])
                    # Resize embeddings on both submodels (unwrap a possible
                    # DataParallel/DDP '.module' wrapper first).
                    model_to_resize = (self.model.generator_model.module
                                       if hasattr(self.model.generator_model,
                                                  "module") else
                                       self.model.generator_model)
                    model_to_resize.resize_token_embeddings(len(
                        self.tokenizer))

                    model_to_resize = (self.model.discriminator_model.module if
                                       hasattr(self.model.discriminator_model,
                                               "module") else
                                       self.model.discriminator_model)
                    model_to_resize.resize_token_embeddings(len(
                        self.tokenizer))
                    self.model.generator_model = generator_model
                    self.model.discriminator_model = discriminator_model
                else:
                    # Reload a previously saved ELECTRA language model,
                    # restoring its combined state dict from disk.
                    self.model = model_class.from_pretrained(
                        model_name,
                        config=self.config,
                        cache_dir=self.args.cache_dir,
                        generator_config=self.generator_config,
                        discriminator_config=self.discriminator_config,
                        **kwargs,
                    )
                    self.model.load_state_dict(
                        torch.load(
                            os.path.join(self.args.model_name,
                                         "pytorch_model.bin")))
            else:
                self.model = model_class.from_pretrained(
                    model_name,
                    config=self.config,
                    cache_dir=self.args.cache_dir,
                    **kwargs,
                )
        else:
            logger.info(" Training language model from scratch")
            if self.args.model_type == "electra":
                generator_model = ElectraForMaskedLM(
                    config=self.generator_config)
                discriminator_model = ElectraForPreTraining(
                    config=self.discriminator_config,
                    extra_args=self.extra_args)
                self.model = ElectraForLanguageModelingModel(
                    config=self.config,
                    output_size=len(self.masks),
                    extra_args=self.extra_args,
                    generator_model=generator_model,
                    discriminator_model=discriminator_model,
                    generator_config=self.generator_config,
                    discriminator_config=self.discriminator_config,
                    tie_generator_and_discriminator_embeddings=self.args.
                    tie_generator_and_discriminator_embeddings,
                    random_generator=self.extra_args['random_generator'])
                model_to_resize = (self.model.generator_model.module
                                   if hasattr(self.model.generator_model,
                                              "module") else
                                   self.model.generator_model)
                model_to_resize.resize_token_embeddings(len(self.tokenizer))

                model_to_resize = (self.model.discriminator_model.module
                                   if hasattr(self.model.discriminator_model,
                                              "module") else
                                   self.model.discriminator_model)
                model_to_resize.resize_token_embeddings(len(self.tokenizer))
            else:
                self.model = model_class(config=self.config)
                model_to_resize = self.model.module if hasattr(
                    self.model, "module") else self.model
                model_to_resize.resize_token_embeddings(len(self.tokenizer))

        # These tokenizers are known to break with multiprocessing.
        if model_type in ["camembert", "xlmroberta"]:
            warnings.warn(
                f"use_multiprocessing automatically disabled as {model_type}"
                " fails when using multiprocessing for feature conversion.")
            self.args.use_multiprocessing = False

        if self.args.wandb_project and not wandb_available:
            warnings.warn(
                "wandb_project specified but wandb is not available. Wandb disabled."
            )
            self.args.wandb_project = None
def train_model(
self,
train_file,
output_dir=None,
show_running_loss=True,
args=None,
eval_file=None,
verbose=True,
**kwargs,
):
"""
Trains the model using 'train_file'
Args:
train_file: Path to text file containing the text to train the language model on.
output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.
show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.
args (optional): Optional changes to the args dict of the model. Any changes made will persist for the model.
eval_file (optional): Path to eval file containing the text to evaluate the language model on.
Returns:
None
""" # noqa: ignore flake8"
if args:
self.args.update_from_dict(args)
if self.args.silent:
show_running_loss = False
if self.args.evaluate_during_training and eval_file is None:
raise ValueError(
"evaluate_during_training is enabled but eval_file is not specified."
" Pass eval_file to model.train_model() if using evaluate_during_training."
)
if not output_dir:
output_dir = self.args.output_dir
if os.path.exists(output_dir) and os.listdir(
output_dir) and not self.args.overwrite_output_dir:
raise ValueError(
"Output directory ({}) already exists and is not empty."
" Set args.overwrite_output_dir = True to overcome.".format(
output_dir))
self._move_model_to_device()
train_dataset = self.load_and_cache_examples(train_file,
verbose=verbose)
os.makedirs(output_dir, exist_ok=True)
global_step, tr_loss = self.train(
train_dataset,
output_dir,
show_running_loss=show_running_loss,
eval_file=eval_file,
verbose=verbose,
**kwargs,
)
self._save_model(output_dir, model=self.model)
if self.args.model_type == "electra":
self.save_discriminator()
self.save_generator()
if verbose:
logger.info(" Training of {} model complete. Saved to {}.".format(
self.args.model_type, output_dir))
def build_text_samples(self, inlier_file, outlier_file):
self.lines = []
with open(inlier_file) as fp:
line = fp.readline()
while line:
if line != '\n':
self.lines.append(line)
line = fp.readline()
self.inlier_len = len(self.lines)
with open(outlier_file) as fp:
line = fp.readline()
while line:
if line != '\n':
self.lines.append(line)
line = fp.readline()
self.outlier_len = len(self.lines) - self.inlier_len
logger.info(
f" [LM CLASS - EVAL] inliers: {self.inlier_len} / outliers: {self.outlier_len}"
)
def train_model_anomaly(
self,
train_file,
output_dir=None,
show_running_loss=True,
args=None,
eval_file=None,
eval_file_outlier=None,
verbose=True,
**kwargs,
):
"""
Trains the model using 'train_file'
Args:
train_file: Path to text file containing the text to train the language model on.
output_dir: The directory where model files will be saved. If not given, self.args['output_dir'] will be used.
show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.
args (optional): Optional changes to the args dict of the model. Any changes made will persist for the model.
eval_file (optional): Path to eval file containing the text to evaluate the language model on.
Returns:
None
""" # noqa: ignore flake8"
if args:
self.args.update(args)
if not output_dir:
output_dir = | |
(кубометр)':0.12,
'Раствор кладочный М200 (кубометр)':0.08,
'Раствор асбестоцементный (кубометр)':0.044,
'Вода для строительства (кубометр)':62.8,
'Использование электростанции передвижной 4 кВт (часов)':2.9,
'Использование установки для гидравлических испытаний трубопрводов (часов)':25,
'Использование трубоукладчика для труб диаметром до 400 мм (часов)':18.01,
'Использование бортового автомобиля до 5 тонн (часов)':0.3,
}
metadict_model['Прокладка 100-мм водопровода (1000 метров)'] = {
    # Table GESN 22-01-006: laying cast-iron pressure socket water pipes,
    # sockets sealed with asbestos cement.
    # http://www.norm-load.ru/SNiP/Data1/54/54313/index.htm#i127040
    # 01. Lowering and laying the pipes.
    # 02. Sealing the sockets with tarred strand and asbestos-cement mortar.
    # 03. Hydraulic testing of the pipeline, including erecting and removing
    #     temporary thrust supports.
    # Unit of measure: 1 km of pipeline.
    # Norm item 22-01-006-03: diameter 100 mm.
    # "22-01-006-03" (workers): 371 man-hours
    # "22-01-006-03" (machine operators): 3.63 man-hours
    # NOTE(review): this entry is for a 100-mm line but the pipe material
    # below is 200-mm — verify against the source table.
    '_-Работа строительной бригады (нормо-часов)':371 + 3.63,
    'Чугунная труба напорная раструбная 200-мм (метр)':1000,
    'Каболка (килограмм)':33,
    'Бруски обрезные хвойные 50x100x6000 мм (кубометр)':0.06,
    'Раствор кладочный М200 (кубометр)':0.08,
    'Раствор асбестоцементный (кубометр)':0.0235,
    'Вода для строительства (кубометр)':15.7,
    'Использование электростанции передвижной 4 кВт (часов)':1.39,
    'Использование установки для гидравлических испытаний трубопрводов (часов)':12,
    'Использование трубоукладчика для труб диаметром до 400 мм (часов)':2.24,
    'Использование бортового автомобиля до 5 тонн (часов)':0.17,
}
metadict_model['Прокладка 50-мм водопровода (100 метров)'] = {
    # Table GESN 16-03-002: laying water-supply pipelines made of
    # multilayer metal-polymer pipes.
    # http://www.norm-load.ru/SNiP/Data1/56/56040/index.htm#i282367
    # 01. Laying the metal-polymer pipelines.
    # 02. Installing and embedding the fixings.
    # 03. Flushing the pipeline with water.
    # Unit of measure: 100 m of pipeline.
    # Norm item 16-03-002-03: diameter 25 mm.
    # "16-03-002-01" (workers): 103 man-hours
    # NOTE(review): the cited norm covers 25-mm metal-polymer pipe, while
    # the materials below are 50-mm cast-iron — verify the mapping.
    '_-Работа строительной бригады (нормо-часов)':103,
    'Чугунная труба напорная 50-мм (метр)':100,
    'Крепления для труб диаметром 50-мм (штук)':50,
    'Арматура запорная для труб диаметром 50-мм (штук)':10,
    'Муфты надвижные для чугунных труб диаметром 50-мм (штук)':10,
    'Известь негашёная (килограмм)':0.0026,
    'Вода для строительства (кубометр)':0.4,
    'Использование башенного крана на 8 тонн (часов)':0.04,
    'Использование крана на автомобильном ходу 10 тонн (часов)':0.03,
    'Использование дрели электрической (часов)':4.9,
    'Использование бортового автомобиля до 5 тонн (часов)':0.37,
}
metadict_model['Прокладка 50-мм трубопровода канализации (100 метров)'] = {
    # Table GESN 16-01-005: laying cast-iron sewer pipes along building
    # walls and in channels.
    # http://www.norm-load.ru/SNiP/Data1/56/56040/index.htm#i106890
    # 01. Laying pipework from prefabricated assemblies, sealing sockets.
    # 02. Installing and embedding the fixings.
    # 03. Installing gate valves.
    # 04. Hydraulic testing of the pipeline.
    # Unit of measure: 100 m of pipeline.
    # Norm item 16-01-005-01: diameter 50 mm.
    # "16-01-005-01" (workers): 77.7 man-hours
    '_-Работа строительной бригады (нормо-часов)':77.7,
    'Каболка (килограмм)':8,
    'Цемент гипсоглинозёмный расширяющийся (килограмм)':22,
    'Чугунная труба безнапорная 50-мм (метр)':100,
    'Муфты надвижные для чугунных труб диаметром 50-мм (штук)':10,
    'Вода для строительства (кубометр)':0.39,
    'Использование башенного крана на 8 тонн (часов)':0.24,
    'Использование бортового автомобиля до 5 тонн (часов)':0.66,
}
metadict_model['Прокладка 150-мм трубопровода канализации (100 метров)'] = {
    # Table GESN 16-01-005: laying cast-iron sewer pipes along building
    # walls and in channels.
    # http://www.norm-load.ru/SNiP/Data1/56/56040/index.htm#i106890
    # 01. Laying pipework from prefabricated assemblies, sealing sockets.
    # 02. Installing and embedding the fixings.
    # 03. Installing gate valves.
    # 04. Hydraulic testing of the pipeline.
    # Unit of measure: 100 m of pipeline.
    # Norm item 16-01-005-03: diameter 150 mm.
    # "16-01-005-03" (workers): 101 man-hours
    '_-Работа строительной бригады (нормо-часов)':101,
    'Каболка (килограмм)':12,
    'Цемент гипсоглинозёмный расширяющийся (килограмм)':31,
    'Чугунная труба безнапорная 150-мм (метр)':100,
    'Муфты надвижные для чугунных труб диаметром 150-мм (штук)':10,
    'Вода для строительства (кубометр)':3.53,
    'Использование башенного крана на 8 тонн (часов)':0.6,
    'Использование бортового автомобиля до 5 тонн (часов)':1.66,
}
metadict_model['Установка фасонных частей 400-мм водопровода (тонн)'] = {
    # Table GESN 22-03-001: installation of cast-iron / welded-steel
    # fittings.
    # http://www.norm-load.ru/SNiP/Data1/54/54313/index.htm#i478144
    # 01. Lowering and installing fittings onto a prepared base.
    # 02. Joining to the pipeline by welding, flange bolting, or socket
    #     sealing.
    # Unit of measure: 1 t of fittings.
    # Norm item 22-03-001-03: diameter 250-400 mm.
    # "22-03-001-03" (workers): 34.3 man-hours
    # "22-03-001-03" (machine operators): 1.78 man-hours
    '_-Работа строительной бригады (нормо-часов)':34.3 + 1.78,
    'Каболка (килограмм)':14.15,
    'Фасонные соединительные части для 400-мм водопровода (килограмм)':1000,
    'Раствор асбестоцементный (кубометр)':0.0081,
    'Использование крана на автомобильном ходу 10 тонн (часов)':0.25,
    'Использование трубоукладчика для труб диаметром до 400 мм (часов)':1.53,
    'Использование бортового автомобиля до 5 тонн (часов)':0.75,
}
metadict_model['Установка фасонных частей 200-мм водопровода (тонн)'] = {
    # Table GESN 22-03-001: installation of cast-iron / welded-steel
    # fittings.
    # http://www.norm-load.ru/SNiP/Data1/54/54313/index.htm#i478144
    # 01. Lowering and installing fittings onto a prepared base.
    # 02. Joining to the pipeline by welding, flange bolting, or socket
    #     sealing.
    # Unit of measure: 1 t of fittings.
    # Norm item 22-03-001-02: diameter 125-200 mm.
    # "22-03-001-02" (workers): 37.74 man-hours
    # "22-03-001-02" (machine operators): 2.03 man-hours
    '_-Работа строительной бригады (нормо-часов)':37.74 + 2.03,
    'Каболка (килограмм)':18.49,
    'Фасонные соединительные части для 200-мм водопровода (килограмм)':1000,
    'Раствор асбестоцементный (кубометр)':0.012,
    'Использование крана на автомобильном ходу 10 тонн (часов)':0.28,
    'Использование трубоукладчика для труб диаметром до 400 мм (часов)':1.75,
    'Использование бортового автомобиля до 5 тонн (часов)':0.85,
}
metadict_model['Установка фасонных частей 100-мм водопровода (тонн)'] = {
    # Table GESN 22-03-001: installation of cast-iron / welded-steel
    # fittings.
    # http://www.norm-load.ru/SNiP/Data1/54/54313/index.htm#i478144
    # 01. Lowering and installing fittings onto a prepared base.
    # 02. Joining to the pipeline by welding, flange bolting, or socket
    #     sealing.
    # Unit of measure: 1 t of fittings.
    # Norm item 22-03-001-01: diameter 50-100 mm.
    # "22-03-001-01" (workers): 47 man-hours
    '_-Работа строительной бригады (нормо-часов)':47,
    'Каболка (килограмм)':23.27,
    'Фасонные соединительные части для 100-мм водопровода (килограмм)':1000,
    'Раствор асбестоцементный (кубометр)':0.014,
    'Использование бортового автомобиля до 5 тонн (часов)':1.05,
}
metadict_model['Установка фасонных частей 50-мм водопровода (тонн)'] = {
    # Table GESN 22-03-001: installation of cast-iron / welded-steel
    # fittings.
    # http://www.norm-load.ru/SNiP/Data1/54/54313/index.htm#i478144
    # 01. Lowering and installing fittings onto a prepared base.
    # 02. Joining to the pipeline by welding, flange bolting, or socket
    #     sealing.
    # Unit of measure: 1 t of fittings.
    # Norm item 22-03-001-01: diameter 50-100 mm (same item as the
    # 100-mm entry above; only the fitting material key differs).
    # "22-03-001-01" (workers): 47 man-hours
    '_-Работа строительной бригады (нормо-часов)':47,
    'Каболка (килограмм)':23.27,
    'Фасонные соединительные части для 50-мм водопровода (килограмм)':1000,
    'Раствор асбестоцементный (кубометр)':0.014,
    'Использование бортового автомобиля до 5 тонн (часов)':1.05,
}
metadict_model['Сборка дощатых перегородок (100 квадратных метров)'] = {
    # TODO: fix.
    # This covers assembly only, without preparation of the board panels.
    # http://www.norm-load.ru/SNiP/Data1/56/56034/index.htm#i1143783
    # Table GESN 10-02-031: assembly of partitions.
    # 01. Assembly of panel partitions.
    # Unit of measure: 100 m2 of panels/partitions (openings not deducted).
    # Norm item 10-02-031-03: board panel partitions.
    # "10-02-031-03" (workers): 46.89 man-hours (rounded to 47 below)
    '_-Работа строительной бригады (нормо-часов)':47,
    'Гвозди строительные (килограмм)':8,
    'Использование крана на автомобильном ходу 10 тонн (часов)':0.66,
    'Использование бортового автомобиля до 5 тонн (часов)':0.99,
}
metadict_model['Разборка булыжной мостовой (100 кубометров)'] = {
    # Table GESN 27-03-008: dismantling of pavements and bases.
    # http://www.norm-load.ru/SNiP/Data1/56/56051/index.htm#i422750
    # 01. Cleaning the pavement or base.
    # 02. Dismantling the pavement and base.
    # 03. Raking up the reclaimed material.
    # 04. Sorting the stone, selecting usable setts.
    # 05. Stacking the reclaimed material.
    # Unit of measure: 100 m3 of structures.
    # Norm item 27-03-008-01: cobblestone pavement.
    # "27-03-008-01" (workers): 204 man-hours
    # "27-03-008-01" (machine operators): 1.12 man-hours
    '_-Работа строительной бригады (нормо-часов)':204 + 1.12,
    'Использование трактора на гусеничном ходу 59 кВт (часов)':0.9,
    'Использование рыхлителя прицепного (часов)':0.9,
    'Использование машины поливомоечной 6000 литров (часов)':0.22,
}
metadict_model['Разборка щебёночного шоссе (100 кубометров)'] = {
    # Table GESN 27-03-008: dismantling of pavements and bases.
    # http://www.norm-load.ru/SNiP/Data1/56/56051/index.htm#i422750
    # 01. Cleaning the pavement or base.
    # 02. Dismantling the pavement and base.
    # 03. Raking up the reclaimed material.
    # 04. Stacking the reclaimed material.
    # Unit of measure: 100 m3 of structures.
    # Norm item 27-03-008-02: crushed-stone pavements.
    # "27-03-008-02" (workers): 13.22 man-hours
    # "27-03-008-02" (machine operators): 3.79 man-hours
    '_-Работа строительной бригады (нормо-часов)':13.22 + 3.79,
    'Использование трактора на гусеничном ходу 59 кВт (часов)':1.39,
    'Использование рыхлителя прицепного (часов)':1.39,
    'Использование автогрейдера среднего типа 99 кВт (часов)':1.94,
    'Использование машины поливомоечной 6000 литров (часов)':0.46,
}
metadict_model['Устройство булыжной мостовой (1000 квадратных метров)'] = {
# Таблица ГЭСН 27-05-001 Устройство мостовых из колотого и булыжного камня по готовому основанию
# http://www.norm-load.ru/SNiP/Data1/56/56051/index.htm#i854760
# 01. Укладка версты из крупного камня с подсыпкой грунтом.
# 02. Мощение подбором камня и трамбованием.
# 03. Россыпь расклинцовки.
# 04. Укладка с проверкой профиля и засыпкой песком.
# 05. Уход за мостовой.
# Измеритель: 1000 м2 покрытия
| |
#
## Copyright (c) 2018-2020, <NAME>
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## 1. Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## 2. Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
## ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
## LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
## CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
## SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
## INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
## CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
## ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
## POSSIBILITY OF SUCH DAMAGE.
#
import tkinter as tk
import numpy as np
import math
import codecs
class tkplot:
class curve:
def __init__(self, **kwargs):
self.data_x = kwargs.get('data_x', np.array([]))
self.data_y = kwargs.get('data_y', np.array([]))
self.points_x = kwargs.get('points_x', [np.array([])])
self.points_y = kwargs.get('points_y', [np.array([])])
self.name = kwargs.get('name', '')
self.yaxis = kwargs.get('yaxis', 'left')
self.marker_color = kwargs.get('marker_color', '')
self.marker = kwargs.get('marker', '')
self.curve_color = kwargs.get('curve_color', '')
self.curve_style = kwargs.get('curve_style', '')
class y_axis:
def __init__(self, **kwargs):
self.name = kwargs.get('name', 'left')
self.color = kwargs.get('color', '#000000')
self.yaxis_mode = kwargs.get('yaxis_mode', 'linear')
self.yaxis_sign = 1.
self.ylimits_mode = kwargs.get('ylimits_mode', 'auto')
self.ylim = kwargs.get('ylim', [0., 1.])
self.ymin = self.ylim[0]
self.ymax = self.ylim[1]
self.ylabel_value = kwargs.get('ylabel', '')
    def __init__(self, **kwargs):
        """Build the plot: style defaults, axis state, and the Tk canvas.

        All geometry, color, font, and lineweight settings can be overridden
        via keyword arguments; 'parent' supplies an existing Tk root
        (a new one is created when omitted).
        """
        # --- canvas geometry and drawing-style defaults (all overridable) ---
        self.canvas_left = float(kwargs.get('left', 0.))
        self.canvas_top = float(kwargs.get('top', 0.))
        self.canvas_width = float(kwargs.get('width', 560.))
        self.canvas_height = float(kwargs.get('height', 420.))
        self.marker_radius = float(kwargs.get('marker_radius', 4.))
        self.marker_lineweight = float(kwargs.get('marker_lineweight', 1.))
        self.curve_lineweight = float(kwargs.get('curve_lineweight', 1.))
        self.tick_length = float(kwargs.get('tick_length', 6.))
        self.tick_lineweight = float(kwargs.get('tick_lineweight', 1.))
        self.canvas_background_color = kwargs.get('background', '#CDCDCD')
        self.axes_background_color = kwargs.get('axes_background', '#FFFFFF')
        self.axes_color = kwargs.get('axes_color', '#000000')
        self.axes_lineweight = kwargs.get('axes_lineweight', 1.)
        self.label_font_baseline = float(kwargs.get('baseline', 0.6))
        self.label_fontsize = int(kwargs.get('fontsize', 12))
        self.label_font = kwargs.get('font', 'Helvetica')
        self.linear_minor_ticks = kwargs.get('linear_minor_ticks', 'off')
        # Precompute marker outline coordinates at the chosen radius.
        self.init_markers(self.marker_radius)
        # Lookup tables: (code, human-readable name) pairs used by the UI.
        self.marker_names = [[' ', 'No marker'], ['.', 'Point'], ['o', 'Circle'], ['x', 'Ex'],
                             ['+', 'Plus'], ['*', 'Star'], ['s', 'Square'], ['d', 'Diamond'],
                             ['v', 'Triangle (down)'], ['^', 'Triangle (up)'], ['<', 'Triangle (left)'],
                             ['>', 'Triangle (right)'], ['p', 'Pentagram'], ['h', 'Hexagram']]
        self.colors = {'b': '#0000FF', 'g': '#00FF00', 'r': '#FF0000',
                       'c': '#00FFFF', 'm': '#FF00FF', 'y': '#FFFF00',
                       'k': '#000000', 'w': '#FFFFFF'}
        self.color_names = [['b', 'Blue'], ['r', 'Red'], ['g', 'Green'], ['c', 'Cyan'],
                            ['m', 'Magenta'], ['y', 'Yellow'], ['k', 'Black'], ['w', 'White']]
        # Dash patterns in Tk canvas dash-tuple form.
        self.linestyles = {'-': (), ':': (1, 4), '--': (10, 4), '-.': (10, 4, 1, 4), '-:': (10, 4, 1, 4, 1, 4)}
        self.linestyle_names = [[' ', 'No line'], ['-', 'Solid'], [':', 'Dotted'],
                                ['-.', 'Dash-dot'], ['-:', 'Dash-dot-dot'], ['--', 'Dashed']]
        # SI engineering-notation scale factors paired with their prefixes
        # (index i of multipliers corresponds to index i of prefixes).
        self.multipliers = (1., 1e-3, 1e-6, 1e-9, 1e-12, 1e-15, 1e-18, 1e-21, 1e-24,
                            1e24, 1e21, 1e18, 1e15, 1e12, 1e9, 1e6, 1e3)
        self.prefixes = (u'', u'k', u'M', u'G', u'T', u'P', u'E', u'Z', u'Y',
                         u'y', u'z', u'a', u'f', u'p', u'n', u'\xB5', u'm')
        # Color cycle applied to curves added without an explicit color.
        self.default_color_order = ('b', 'g', 'r', 'c', 'm', 'y')
        self.default_color_index = 0
        self.default_marker = '.'
        self.default_curve_style = '-'
        # Curve registry: integer id -> curve object.
        self.curve_id = 0
        self.curves = {}
        # --- x-axis state ---
        self.xaxis_mode = 'linear'
        self.xaxis_sign = 1.
        self.xlimits_mode = 'auto'
        self.xlim = [0., 1.]
        self.xmin = 0.
        self.xmax = 1.
        self.xlabel_value = ''
        # --- y-axis state: a left axis always exists; right is optional ---
        self.yaxes = {}
        self.yaxes['left'] = self.y_axis()
        self.left_yaxis = 'left'
        self.right_yaxis = ''
        self.update_sizes()
        self.find_x_ticks()
        self.find_y_ticks()
        self.grid_state = 'off'
        # Canvas border compensation, filled in after the widget exists.
        self.dw = 0.
        self.dh = 0.
        self.x0 = 0.
        self.y0 = 0.
        # SVG export state (no export in progress initially).
        self.svg_file = None
        self.svg_indent_level = 1
        self.tk_backend()
        # Reuse the caller's Tk root if provided, else create our own.
        self.root = kwargs.get('parent')
        if self.root == None:  # NOTE(review): 'is None' would be idiomatic
            self.root = tk.Tk()
            self.root.title('tkplot')
        self.canvas = tk.Canvas(self.root, width = self.canvas_width, height = self.canvas_height, background = self.canvas_background_color, highlightbackground = self.canvas_background_color)
        self.draw_background()
        self.draw_axes()
        self.draw_x_ticks()
        self.draw_y_ticks()
        self.draw_axis_labels()
        self.canvas.pack(fill = 'both', expand = 'yes')
        # Account for the highlight border on both sides when resizing.
        self.dw = 2.*float(self.canvas.cget('highlightthickness'))
        self.dh = self.dw
        self.canvas.bind('<Configure>', self.resize)
def init_markers(self, r = 4.):
r_over_sqrt2 = r / math.sqrt(2.)
r_over_sqrt3 = r / math.sqrt(3.)
pi_over_180 = math.pi / 180.
r2 = r * math.sin(pi_over_180 * 18.) / math.sin(pi_over_180 * 54.)
self.marker_coords = {}
self.marker_coords['.'] = ((-0.5 * r, -0.5 * r), (0.5 * r, 0.5 * r))
self.marker_coords['o'] = ((-r, -r), (r, r))
self.marker_coords['x'] = ((0., 0.), (r_over_sqrt2, -r_over_sqrt2),
(0., 0.), (r_over_sqrt2, r_over_sqrt2),
(0., 0.), (-r_over_sqrt2, r_over_sqrt2),
(0., 0.), (-r_over_sqrt2, -r_over_sqrt2),
(0., 0.))
self.marker_coords['+'] = ((0., 0.), (0., -r),
(0., 0.), (r, 0.),
(0., 0.), (0., r),
(0., 0.), (-r, 0.),
(0., 0.))
self.marker_coords['*'] = ((0., 0.), (0., -r),
(0., 0.), (r_over_sqrt2, -r_over_sqrt2),
(0., 0.), (r, 0.),
(0., 0.), (r_over_sqrt2, r_over_sqrt2),
(0., 0.), (0., r),
(0., 0.), (-r_over_sqrt2, r_over_sqrt2),
(0., 0.), (-r, 0.),
(0., 0.), (-r_over_sqrt2, -r_over_sqrt2),
(0., 0.))
self.marker_coords['s'] = ((-r_over_sqrt2, -r_over_sqrt2), (r_over_sqrt2, -r_over_sqrt2),
(r_over_sqrt2, r_over_sqrt2), (-r_over_sqrt2, r_over_sqrt2),
(-r_over_sqrt2, -r_over_sqrt2))
self.marker_coords['d'] = ((0., -1.25 * r), (r, 0.), (0., 1.25 * r), (-r, 0.), (0., -1.25 * r))
self.marker_coords['v'] = ((0., r),
(r * math.cos(pi_over_180 * 150.), -r * math.sin(pi_over_180 * 150.)),
(r * math.cos(pi_over_180 * 30.), -r * math.sin(pi_over_180 * 30.)),
(0., r))
self.marker_coords['^'] = ((0., -r),
(r * math.cos(pi_over_180 * 330.), -r * math.sin(pi_over_180 * 330.)),
(r * math.cos(pi_over_180 * 210.), -r * math.sin(pi_over_180 * 210.)),
(0., -r))
self.marker_coords['<'] = ((-r, 0.),
(r * math.cos(pi_over_180 * 60.), -r * math.sin(pi_over_180 * 60.)),
(r * math.cos(pi_over_180 * 300.), -r * math.sin(pi_over_180 * 300.)),
(-r, 0.))
self.marker_coords['>'] = ((r, 0.),
(r * math.cos(pi_over_180 * 240.), -r * math.sin(pi_over_180 * 240.)),
(r * math.cos(pi_over_180 * 120.), -r * math.sin(pi_over_180 * 120.)),
(r, 0.))
self.marker_coords['p'] = ((0., -r),
(r2 * math.cos(pi_over_180 * 54.), -r2 * math.sin(pi_over_180 * 54.)),
(r * math.cos(pi_over_180 * 18.), -r * math.sin(pi_over_180 * 18.)),
(r2 * math.cos(pi_over_180 * 342.), -r2 * math.sin(pi_over_180 * 342.)),
(r * math.cos(pi_over_180 * 306.), -r * math.sin(pi_over_180 * 306.)),
(0., r2),
(r * math.cos(pi_over_180 * 234.), -r * math.sin(pi_over_180 * 234.)),
(r2 * math.cos(pi_over_180 * 198.), -r2 * math.sin(pi_over_180 * 198.)),
(r * math.cos(pi_over_180 * 162.), -r * math.sin(pi_over_180 * 162.)),
(r2 * math.cos(pi_over_180 * 126.), -r2 * math.sin(pi_over_180 * 126.)),
(0., -r))
self.marker_coords['h'] = ((0., -r),
(r_over_sqrt3 * math.cos(pi_over_180 * 60.), -r_over_sqrt3 * math.sin(pi_over_180 * 60.)),
(r * math.cos(pi_over_180 * 30.), -r * math.sin(pi_over_180 * 30.)),
(r_over_sqrt3, 0.),
(r * math.cos(pi_over_180 * 330.), -r * math.sin(pi_over_180 * 330.)),
(r_over_sqrt3 * math.cos(pi_over_180 * 300.), -r_over_sqrt3 * math.sin(pi_over_180 * 300.)),
(0., r),
(r_over_sqrt3 * math.cos(pi_over_180 * 240.), -r_over_sqrt3 * math.sin(pi_over_180 * 240.)),
(r * math.cos(pi_over_180 * 210.), -r * math.sin(pi_over_180 * 210.)),
(-r_over_sqrt3, 0.),
(r * math.cos(pi_over_180 * 150.), -r * math.sin(pi_over_180 * 150.)),
(r_over_sqrt3 * math.cos(pi_over_180 * 120.), -r_over_sqrt3 * math.sin(pi_over_180 * 120.)),
(0., -r))
def update_sizes(self):
self.axes_left = self.canvas_left + 6. * self.label_fontsize
self.axes_top = self.canvas_top + 3. * self.label_fontsize
self.axes_right = self.canvas_left + self.canvas_width - 6. * self.label_fontsize
self.axes_bottom = self.canvas_top + self.canvas_height - 4. * self.label_fontsize
self.axes_width = self.axes_right - self.axes_left
self.axes_height = self.axes_bottom - self.axes_top
self.xrange = self.xlim[1] - self.xlim[0]
self.x_pix_per_unit = self.axes_width / self.xrange
self.x_epsilon = self.xrange / self.axes_width
for yaxis in self.yaxes.keys():
self.yaxes[yaxis].yrange = self.yaxes[yaxis].ylim[1] - self.yaxes[yaxis].ylim[0]
self.yaxes[yaxis].y_pix_per_unit = self.axes_height / self.yaxes[yaxis].yrange
self.yaxes[yaxis].y_epsilon = self.yaxes[yaxis].yrange / self.axes_height
def resize(self, event):
self.canvas_width = max(event.width - self.dw, 17. * self.label_fontsize)
self.canvas_height = max(event.height - self.dh, 12. * self.label_fontsize)
self.refresh_plot()
def configure(self, **kwargs):
self.canvas_left = float(kwargs.get('left', self.canvas_left))
self.canvas_top = float(kwargs.get('top', self.canvas_top))
self.canvas_width = float(kwargs.get('width', self.canvas_width))
self.canvas_height = float(kwargs.get('height', self.canvas_height))
self.marker_radius = float(kwargs.get('marker_radius', self.marker_radius))
self.marker_lineweight = float(kwargs.get('marker_lineweight', self.marker_lineweight))
self.curve_lineweight = | |
# Training script with LazyLoader
#
# Instead of dumping all input into memory, we lazy load on the fly.
# This can create an IO bound where slow training down but helping to training large dataset such as MetaVideoLazy
import os
from tqdm.auto import tqdm
from opt import config_parser
import logging
import ruamel.yaml
yaml2 = ruamel.yaml.YAML()
from utils import set_logger, printlog
from collections import OrderedDict
import json, random
from renderer import *
from utils import *
from torch.utils.tensorboard import SummaryWriter
from torch.cuda.amp import autocast, GradScaler
import datetime
from torch.utils.data import DataLoader
from dataLoader import dataset_dict
import sys
import pdb
# Run on GPU when available; models and rays are moved to this device below.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Rendering entry point used by all evaluation/training code in this script.
renderer = OctreeRender_trilinear_fast
@torch.no_grad()
def evaluation_lazy(test_dataset,tensorf, args, renderer, savePath=None, N_vis=5, prtx='', N_samples=-1,
                    white_bg=False, ndc_ray=False, compute_extra_metrics=True, device='cuda'):
    """Render the test views through a DataLoader, save images/videos, and
    return per-view PSNRs (plus SSIM/LPIPS when compute_extra_metrics).

    When N_vis > 0, only every N_vis-th view is rendered; N_vis <= 0 renders
    all views. Images go to savePath/img and savePath/img/rgbd, videos and
    a mean-metrics text file to savePath.
    """
    PSNRs, rgb_maps, depth_maps = [], [], []
    ssims,l_alex,l_vgg=[],[],[]
    #os.makedirs(savePath+'/img', exist_ok=True)
    # makedirs on the deepest path also creates savePath/img.
    os.makedirs(savePath+"/img/rgbd", exist_ok=True)
    # Clear stale tqdm instances so nested progress bars render cleanly.
    try:
        tqdm._instances.clear()
    except Exception:
        pass
    near_far = test_dataset.near_far
    #img_eval_interval = 1 if N_vis < 0 else test_dataset.all_rays.shape[0] // N_vis
    # Lazy loading: one image worth of rays per batch, loaded in workers.
    test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=int(os.cpu_count() * args.dataloader_thread_ratio))
    for idx, samples in tqdm(enumerate(test_dataloader), file=sys.stdout):
        if N_vis > 0 and idx % N_vis != 0: continue
        W, H = test_dataset.img_wh
        # Flatten to (H*W, ray_dim) for chunked rendering.
        rays = samples['rays'].view(-1,samples['rays'].shape[-1])
        rgb_map, _, depth_map, _, _ = renderer(rays, tensorf, chunk=512, N_samples=N_samples, ndc_ray=ndc_ray, white_bg = white_bg, device=device)
        rgb_map = rgb_map.clamp(0.0, 1.0)
        rgb_map, depth_map = rgb_map.reshape(H, W, 3).cpu(), depth_map.reshape(H, W).cpu()
        depth_map, min_max = visualize_depth_numpy(depth_map.numpy(),near_far)
        if True: #temporary predict -- NOTE(review): always-on debug toggle
            gt_rgb = samples['rgbs'].view(H, W, 3)
            loss = torch.mean((rgb_map - gt_rgb) ** 2)
            # PSNR = -10 * log10(MSE) for images in [0, 1].
            PSNRs.append(-10.0 * np.log(loss.item()) / np.log(10.0))
            if compute_extra_metrics:
                ssim = rgb_ssim(rgb_map, gt_rgb, 1)
                l_a = rgb_lpips(gt_rgb.numpy(), rgb_map.numpy(), 'alex', tensorf.device)
                l_v = rgb_lpips(gt_rgb.numpy(), rgb_map.numpy(), 'vgg', tensorf.device)
                ssims.append(ssim)
                l_alex.append(l_a)
                l_vgg.append(l_v)
        rgb_map = (rgb_map.numpy() * 255).astype('uint8')
        # rgb_map = np.concatenate((rgb_map, depth_map), axis=1)
        rgb_maps.append(rgb_map)
        depth_maps.append(depth_map)
        if savePath is not None:
            imageio.imwrite(f'{savePath}/img/{prtx}{idx:03d}.png', rgb_map)
            # Side-by-side RGB + depth visualization.
            rgb_map = np.concatenate((rgb_map, depth_map), axis=1)
            imageio.imwrite(f'{savePath}/img/rgbd/{prtx}{idx:03d}.png', rgb_map)
    imageio.mimwrite(f'{savePath}/{prtx}video.mp4', np.stack(rgb_maps), fps=30, quality=10)
    imageio.mimwrite(f'{savePath}/{prtx}depthvideo.mp4', np.stack(depth_maps), fps=30, quality=10)
    if PSNRs:
        psnr = np.mean(np.asarray(PSNRs))
        if compute_extra_metrics:
            ssim = np.mean(np.asarray(ssims))
            l_a = np.mean(np.asarray(l_alex))
            l_v = np.mean(np.asarray(l_vgg))
            np.savetxt(f'{savePath}/{prtx}mean.txt', np.asarray([psnr, ssim, l_a, l_v]))
        else:
            np.savetxt(f'{savePath}/{prtx}mean.txt', np.asarray([psnr]))
    return PSNRs
@torch.no_grad()
def evaluation_path_lazy(test_dataset,tensorf, c2ws, renderer, savePath=None, N_vis=5, prtx='', N_samples=-1,
                         white_bg=False, ndc_ray=False, compute_extra_metrics=True, device='cuda'):
    """Render a novel-view camera path given by the c2w matrices in `c2ws`,
    saving per-frame images and RGB/depth videos under savePath.

    Returns PSNRs; note nothing is ever appended to PSNRs in this function
    (there is no ground truth on a free path), so the metrics-summary branch
    at the end never executes and the return value is always [].
    """
    PSNRs, rgb_maps, depth_maps = [], [], []
    ssims,l_alex,l_vgg=[],[],[]
    os.makedirs(savePath, exist_ok=True)
    os.makedirs(savePath+"/img/rgbd", exist_ok=True)
    # Clear stale tqdm instances so nested progress bars render cleanly.
    try:
        tqdm._instances.clear()
    except Exception:
        pass
    near_far = test_dataset.near_far
    for idx, c2w in enumerate(tqdm(c2ws)):
        W, H = test_dataset.img_wh
        c2w = torch.FloatTensor(c2w)
        rays_o, rays_d = get_rays(test_dataset.directions, c2w) # both (h*w, 3)
        if ndc_ray:
            rays_o, rays_d = ndc_rays_blender(H, W, test_dataset.focal[0], 1.0, rays_o, rays_d)
        # Time-conditioned datasets get the frame index appended to each ray.
        if hasattr(test_dataset, 'max_t'):
            rays = torch.cat([rays_o, rays_d, torch.ones_like(rays_o[:, :1]) * idx], 1)
        else:
            rays = torch.cat([rays_o, rays_d], 1) # (h*w, 6)
        rgb_map, _, depth_map, _, _ = renderer(rays, tensorf, chunk=512, N_samples=N_samples,
                                        ndc_ray=ndc_ray, white_bg = white_bg, device=device)
        rgb_map = rgb_map.clamp(0.0, 1.0)
        rgb_map, depth_map = rgb_map.reshape(H, W, 3).cpu(), depth_map.reshape(H, W).cpu()
        depth_map, _ = visualize_depth_numpy(depth_map.numpy(),near_far)
        rgb_map = (rgb_map.numpy() * 255).astype('uint8')
        # rgb_map = np.concatenate((rgb_map, depth_map), axis=1)
        rgb_maps.append(rgb_map)
        depth_maps.append(depth_map)
        if savePath is not None:
            imageio.imwrite(f'{savePath}/img/{prtx}{idx:03d}.png', rgb_map)
            # Side-by-side RGB + depth visualization.
            rgb_map = np.concatenate((rgb_map, depth_map), axis=1)
            imageio.imwrite(f'{savePath}/img/rgbd/{prtx}{idx:03d}.png', rgb_map)
    imageio.mimwrite(f'{savePath}/{prtx}video.mp4', np.stack(rgb_maps), fps=30, quality=8)
    imageio.mimwrite(f'{savePath}/{prtx}depthvideo.mp4', np.stack(depth_maps), fps=30, quality=8)
    # Dead branch: PSNRs is never filled above (see docstring).
    if PSNRs:
        psnr = np.mean(np.asarray(PSNRs))
        if compute_extra_metrics:
            ssim = np.mean(np.asarray(ssims))
            l_a = np.mean(np.asarray(l_alex))
            l_v = np.mean(np.asarray(l_vgg))
            np.savetxt(f'{savePath}/{prtx}mean.txt', np.asarray([psnr, ssim, l_a, l_v]))
        else:
            np.savetxt(f'{savePath}/{prtx}mean.txt', np.asarray([psnr]))
    return PSNRs
@torch.no_grad()
def render_test(args):
    """Load a checkpoint and render test views and the novel-view path.

    Several branches are hard-toggled with `if True or ...` / `if False and ...`
    — only the test-set evaluation and the path rendering actually run.
    """
    # init dataset
    dataset = dataset_dict[args.dataset_name]  # NOTE(review): unused; get_dataset() below does the lookup itself
    test_dataset = get_dataset(args, 'test')
    white_bg = test_dataset.white_bg
    ndc_ray = args.ndc_ray
    if not os.path.exists(args.ckpt):
        print('the ckpt path does not exists!!')
        return
    # Restore the model constructor kwargs saved with the checkpoint.
    ckpt = torch.load(args.ckpt, map_location=device)
    kwargs = ckpt['kwargs']
    kwargs.update({'device': device})
    if args.num_frames > 1 or args.model_name == 'TensoRFVideo': #only some models support max_t, so pass it when num_frames is provided
        kwargs.update({'max_t': args.num_frames})
        kwargs.update({'t_keyframe': args.t_keyframe})
        kwargs.update({'upsamp_list': args.upsamp_list})
    # NOTE(review): eval() resolves the model class by name — safe only as
    # long as args.model_name comes from a trusted config, never user input.
    tensorf = eval(args.model_name)(**kwargs)
    tensorf.load(ckpt)
    #pdb.set_trace()
    if args.model_name in ['TensorSph']:
        tensorf.set_origin(test_dataset.origin,test_dataset.sph_box,test_dataset.sph_frontback)
    # Optionally wrap for multi-GPU rendering.
    tensorf_for_renderer = tensorf
    if args.data_parallel:
        tensorf_for_renderer = torch.nn.DataParallel(tensorf)
    logfolder = os.path.dirname(args.ckpt)
    # Disabled branch: train-set rendering (toggle left off deliberately).
    if False and args.render_train:
        os.makedirs(f'{logfolder}/imgs_train_all', exist_ok=True)
        train_dataset = get_dataset(args, 'train')
        train_dataset.is_sampling = False
        PSNRs_test = evaluation_lazy(train_dataset,tensorf_for_renderer, args, renderer, f'{logfolder}/imgs_train_all/',
                                N_vis=-1, N_samples=-1, white_bg = white_bg, ndc_ray=ndc_ray,device=device)
        printlog(f'======> {args.expname} test all psnr: {np.mean(PSNRs_test)} <========================')
    # Always-on branch: full test-set evaluation.
    if True or args.render_test:
        test_dataset = get_dataset(args, 'test')
        os.makedirs(f'{logfolder}/imgs_test_all', exist_ok=True)
        PSNRs_test = evaluation_lazy(test_dataset,tensorf_for_renderer, args, renderer, f'{logfolder}/imgs_test_all/',
                                N_vis=-1, N_samples=-1, white_bg = white_bg, ndc_ray=ndc_ray,device=device)
        printlog(f'======> {args.expname} test all psnr: {np.mean(PSNRs_test)} <========================')
    # Disabled branch: DyNeRF-style sparse-frame evaluation.
    if False and args.render_dynerf:
        test_dataset = get_dataset(args, 'test', hold_every_frame=10)
        os.makedirs(f'{logfolder}/imgs_test_dynerf', exist_ok=True)
        PSNRs_test = evaluation_lazy(test_dataset,tensorf_for_renderer, args, renderer, f'{logfolder}/imgs_test_dynerf/',
                                N_vis=-1, N_samples=-1, white_bg = white_bg, ndc_ray=ndc_ray,device=device)
        printlog(f'======> {args.expname} test_dynerf psnr: {np.mean(PSNRs_test)} <========================')
    # Always-on branch: render the novel-view camera path.
    if True or args.render_path:
        c2ws = test_dataset.render_path
        print('========>',c2ws.shape)
        os.makedirs(f'{logfolder}/imgs_path_all', exist_ok=True)
        evaluation_path_lazy(test_dataset,tensorf_for_renderer, c2ws, renderer, f'{logfolder}/imgs_path_all/',
                                N_vis=-1, N_samples=-1, white_bg = white_bg, ndc_ray=ndc_ray,device=device)
def get_dataset(args, split, hold_every_frame=1, psudo_length=-1):
    """Instantiate the dataset selected by `args.dataset_name` for `split`.

    :param args: parsed command-line namespace with dataset options.
    :param split: dataset split name (e.g. 'train' or 'test').
    :param hold_every_frame: keep only every n-th frame (1 keeps all).
    :param psudo_length: artificial dataset length, or -1 for the real one.
    """
    dataset_cls = dataset_dict[args.dataset_name]
    # NOTE(review): `split == False` compares a string against a bool, so
    # is_stack is always False here. This looks like it was meant to be a
    # split-name test (e.g. split == 'test') — confirm with the dataset code.
    return dataset_cls(
        args.datadir,
        split=split,
        downsample=args.downsample_train,
        is_stack=(split == False),
        ndc_ray=args.ndc_ray,
        max_t=args.num_frames,
        hold_every=args.hold_every,
        num_rays=args.batch_size,
        hold_every_frame=hold_every_frame,
        psudo_length=psudo_length,
    )
def reconstruction(args):
    """Train a radiance-field model and run the renders requested in `args`.

    Loads the model from `args.ckpt` when given, otherwise builds a fresh one,
    runs a coarse (key-frame) then fine (all-frame) training pass, saves the
    weights, and finally renders whichever of the train/test/dynerf/
    first-frame/camera-path evaluations were enabled on the command line.
    """
    train_dataset = get_dataset(args, 'train')
    white_bg = train_dataset.white_bg
    near_far = train_dataset.near_far
    ndc_ray = args.ndc_ray

    # init resolution
    upsamp_list = args.upsamp_list
    update_AlphaMask_list = args.update_AlphaMask_list
    n_lamb_sigma = args.n_lamb_sigma
    n_lamb_sh = args.n_lamb_sh

    if args.add_timestamp:
        logfolder = f'{args.basedir}/{args.expname}{datetime.datetime.now().strftime("-%Y%m%d-%H%M%S")}'
    else:
        logfolder = f'{args.basedir}/{args.expname}'

    # init log file and output folders
    os.makedirs(logfolder, exist_ok=True)
    os.makedirs(f'{logfolder}/imgs_vis', exist_ok=True)
    os.makedirs(f'{logfolder}/imgs_vis_train', exist_ok=True)
    os.makedirs(f'{logfolder}/imgs_rgba', exist_ok=True)
    os.makedirs(f'{logfolder}/rgba', exist_ok=True)
    gfile_stream = open(os.path.join(logfolder, 'stdout.txt'), 'w')
    set_logger(gfile_stream)
    printlog('Start Training')
    summary_writer = SummaryWriter(logfolder)
    with open(os.path.join(logfolder, "config.yml"), "w") as f:
        yaml2.dump(vars(args), f)

    # init parameters
    aabb = train_dataset.scene_bbox.to(device)
    reso_cur = N_to_reso(args.N_voxel_init, aabb)
    if args.ckpt is not None:
        ckpt = torch.load(args.ckpt, map_location=device)
        kwargs = ckpt['kwargs']
        kwargs.update({'device': device})
        tensorf = eval(args.model_name)(**kwargs)
        tensorf.load(ckpt)
    else:
        # Pure: Dynamic ordered dict for easily designing a model without conflict.
        kwargs = OrderedDict([
            ("aabb", aabb),
            ("gridSize", reso_cur),
            ("device", device),
            ("density_n_comp", n_lamb_sigma),
            ("appearance_n_comp", n_lamb_sh),
            ("app_dim", args.data_dim_color),
            ("near_far", near_far),
            ("shadingMode", args.shadingMode),
            ("alphaMask_thres", args.alpha_mask_thre),
            ("density_shift", args.density_shift),
            ("distance_scale", args.distance_scale),
            ("pos_pe", args.pos_pe),
            ("view_pe", args.view_pe),
            ("fea_pe", args.fea_pe),
            ("featureC", args.featureC),
            ("step_ratio", args.step_ratio),
            ("fea2denseAct", args.fea2denseAct)
        ])
        # Only some models support max_t, so pass it when num_frames is provided.
        if args.num_frames > 1 or args.model_name == 'TensoRFVideo':
            kwargs["max_t"] = args.num_frames
            kwargs["t_keyframe"] = args.t_keyframe
            kwargs["upsamp_list"] = args.upsamp_list
        if args.model_name == 'TensoRF5dSigma':
            kwargs['train_dataset'] = train_dataset
        tensorf = eval(args.model_name)(**kwargs)

    if args.model_name in ['TensorSph']:
        tensorf.set_origin(train_dataset.origin, train_dataset.sph_box, train_dataset.sph_frontback)

    grad_vars = tensorf.get_optparam_groups(args.lr_init, args.lr_basis)
    optimizer = torch.optim.Adam(grad_vars, betas=(0.9, 0.99))
    scaler = GradScaler()
    training_loop(tensorf, optimizer, scaler, summary_writer, logfolder, args=args, hierarchy_type='coarse')  # key-frame training
    training_loop(tensorf, optimizer, scaler, summary_writer, logfolder, args=args, hierarchy_type='fine')  # all-frame training
    tensorf.save(f'{logfolder}/{args.expname}.th')

    # Created lazily so the render_path branch works even when no earlier
    # branch ran (previously a NameError in that case).
    test_dataset = None
    if args.render_train:
        os.makedirs(f'{logfolder}/imgs_train_all', exist_ok=True)
        train_dataset = get_dataset(args, 'train')
        train_dataset.is_sampling = False
        PSNRs_test = evaluation_lazy(train_dataset, tensorf, args, renderer, f'{logfolder}/imgs_train_all/',
                                     N_vis=-1, N_samples=-1, white_bg=white_bg, ndc_ray=ndc_ray, device=device)
        printlog(f'======> {args.expname} test all psnr: {np.mean(PSNRs_test)} <========================')
    if args.render_test:
        test_dataset = get_dataset(args, 'test')
        os.makedirs(f'{logfolder}/imgs_test_all', exist_ok=True)
        PSNRs_test = evaluation_lazy(test_dataset, tensorf, args, renderer, f'{logfolder}/imgs_test_all/',
                                     N_vis=-1, N_samples=-1, white_bg=white_bg, ndc_ray=ndc_ray, device=device)
        printlog(f'======> {args.expname} test all psnr: {np.mean(PSNRs_test)} <========================')
        summary_writer.add_scalar('test/psnr_all', np.mean(PSNRs_test), global_step=args.n_iters)
    if args.render_dynerf:
        test_dataset = get_dataset(args, 'test', hold_every_frame=10)
        os.makedirs(f'{logfolder}/imgs_test_dynerf', exist_ok=True)
        PSNRs_test = evaluation_lazy(test_dataset, tensorf, args, renderer, f'{logfolder}/imgs_test_dynerf/',
                                     N_vis=-1, N_samples=-1, white_bg=white_bg, ndc_ray=ndc_ray, device=device)
        printlog(f'======> {args.expname} test_dynerf psnr: {np.mean(PSNRs_test)} <========================')
        summary_writer.add_scalar('test_dynerf/psnr_all', np.mean(PSNRs_test), global_step=args.n_iters)
    if args.render_firstframe:
        test_dataset = get_dataset(args, 'test', hold_every_frame=args.num_frames)
        # Bug fix: the directory created must match the one rendered into
        # (was imgs_test_dynerf while rendering to imgs_test_firstframe).
        os.makedirs(f'{logfolder}/imgs_test_firstframe', exist_ok=True)
        PSNRs_test = evaluation_lazy(test_dataset, tensorf, args, renderer, f'{logfolder}/imgs_test_firstframe/',
                                     N_vis=-1, N_samples=-1, white_bg=white_bg, ndc_ray=ndc_ray, device=device)
        printlog(f'======> {args.expname} test_firstframe psnr: {np.mean(PSNRs_test)} <========================')
        # Bug fix: was logged under the test_dynerf tag, clobbering that metric.
        summary_writer.add_scalar('test_firstframe/psnr_all', np.mean(PSNRs_test), global_step=args.n_iters)
    if args.render_path:
        if test_dataset is None:
            test_dataset = get_dataset(args, 'test')
        c2ws = test_dataset.render_path
        print('========>', c2ws.shape)
        os.makedirs(f'{logfolder}/imgs_path_all', exist_ok=True)
        # Bug fix: was `tensorf_for_renderer`, which is undefined in this
        # function (NameError whenever --render_path was set).
        evaluation_path_lazy(test_dataset, tensorf, c2ws, renderer, f'{logfolder}/imgs_path_all/',
                             N_vis=-1, N_samples=-1, white_bg=white_bg, ndc_ray=ndc_ray, device=device)
def training_loop(tensorf, optimizer, scaler, summary_writer, logfolder, args, hierarchy_type='coarse'):
test_dataset = get_dataset(args, 'test')
train_viz_dataset = get_dataset(args, 'train')
train_viz_dataset.is_sampling = False
white_bg = test_dataset.white_bg
ndc_ray = args.ndc_ray
n_iters = args.keyframe_iters if hierarchy_type=='coarse' else args.n_iters
hold_every_frame = 1# args.t_keyframe if hierarchy_type=='coarse' else 1
train_dataset = get_dataset(args, 'train', hold_every_frame= hold_every_frame, psudo_length=n_iters)
train_dataloader = DataLoader(train_dataset, batch_size=1, shuffle=True, num_workers=int(os.cpu_count() * args.dataloader_thread_ratio))
TV_weight_density, TV_weight_app = args.TV_weight_density, args.TV_weight_app
if hierarchy_type=='coarse' or args.keyframe_iters < 0:
if args.lr_decay_iters > 0:
lr_factor = args.lr_decay_target_ratio**(1/args.lr_decay_iters)
lr_decay_iters = args.lr_decay_iters
else:
lr_decay_iters = n_iters
lr_factor = args.lr_decay_target_ratio**(1/n_iters)
printlog(f"lr decay {args.lr_decay_target_ratio} {lr_decay_iters}")
else:
printlog(f"continue tuning without decay")
# continue training without further more deacy in fine step
lr_factor = 1.0
TV_weight_density *= args.lr_decay_target_ratio
TV_weight_app *= args.lr_decay_target_ratio
reso_mask = None
#linear in logrithmic space, note that we can upsampling only coarse
upsamp_list = args.upsamp_list
update_AlphaMask_list = args.update_AlphaMask_list
N_voxel_list = (torch.round(torch.exp(torch.linspace(np.log(args.N_voxel_init), np.log(args.N_voxel_final), len(upsamp_list)+1))).long()).tolist()[1:]
ndc_ray = args.ndc_ray
torch.cuda.empty_cache()
PSNRs,PSNRs_test = [],[0]
if not args.ndc_ray:
raise NotImplementError('haven\'t implement filter ray to support non-ndc mode yet')
allrays, allrgbs = train_dataset.all_rays, train_dataset.all_rgbs
allrays, allrgbs = tensorf.filtering_rays(allrays, allrgbs, bbox_only=True)
Ortho_reg_weight = args.Ortho_weight
L1_reg_weight = args.L1_weight_inital
tvreg = TVLoss()
aabb = train_dataset.scene_bbox.to(device)
reso_cur = N_to_reso(args.N_voxel_init if (hierarchy_type=='coarse' or args.keyframe_iters < 0) else args.N_voxel_final, aabb)
nSamples = min(args.nSamples, cal_n_samples(reso_cur,args.step_ratio))
if hierarchy_type == 'coarse':
print("==== | |
<filename>src/v5.1/resources/swagger_client/models/ed_fi_staff_address.py
# coding: utf-8
"""
Ed-Fi Operational Data Store API
The Ed-Fi ODS / API enables applications to read and write education data stored in an Ed-Fi ODS through a secure REST interface. *** > *Note: Consumers of ODS / API information should sanitize all data for display and storage. The ODS / API provides reasonable safeguards against cross-site scripting attacks and other malicious content, but the platform does not and cannot guarantee that the data it contains is free of all potentially harmful content.* *** # noqa: E501
OpenAPI spec version: 3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.configuration import Configuration
class EdFiStaffAddress(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'address_type_descriptor': 'str',
'state_abbreviation_descriptor': 'str',
'city': 'str',
'postal_code': 'str',
'street_number_name': 'str',
'locale_descriptor': 'str',
'apartment_room_suite_number': 'str',
'building_site_number': 'str',
'congressional_district': 'str',
'county_fips_code': 'str',
'do_not_publish_indicator': 'bool',
'latitude': 'str',
'longitude': 'str',
'name_of_county': 'str',
'periods': 'list[EdFiStaffAddressPeriod]'
}
attribute_map = {
'address_type_descriptor': 'addressTypeDescriptor',
'state_abbreviation_descriptor': 'stateAbbreviationDescriptor',
'city': 'city',
'postal_code': 'postalCode',
'street_number_name': 'streetNumberName',
'locale_descriptor': 'localeDescriptor',
'apartment_room_suite_number': 'apartmentRoomSuiteNumber',
'building_site_number': 'buildingSiteNumber',
'congressional_district': 'congressionalDistrict',
'county_fips_code': 'countyFIPSCode',
'do_not_publish_indicator': 'doNotPublishIndicator',
'latitude': 'latitude',
'longitude': 'longitude',
'name_of_county': 'nameOfCounty',
'periods': 'periods'
}
def __init__(self, address_type_descriptor=None, state_abbreviation_descriptor=None, city=None, postal_code=None, street_number_name=None, locale_descriptor=None, apartment_room_suite_number=None, building_site_number=None, congressional_district=None, county_fips_code=None, do_not_publish_indicator=None, latitude=None, longitude=None, name_of_county=None, periods=None, _configuration=None): # noqa: E501
"""EdFiStaffAddress - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._address_type_descriptor = None
self._state_abbreviation_descriptor = None
self._city = None
self._postal_code = None
self._street_number_name = None
self._locale_descriptor = None
self._apartment_room_suite_number = None
self._building_site_number = None
self._congressional_district = None
self._county_fips_code = None
self._do_not_publish_indicator = None
self._latitude = None
self._longitude = None
self._name_of_county = None
self._periods = None
self.discriminator = None
self.address_type_descriptor = address_type_descriptor
self.state_abbreviation_descriptor = state_abbreviation_descriptor
self.city = city
self.postal_code = postal_code
self.street_number_name = street_number_name
if locale_descriptor is not None:
self.locale_descriptor = locale_descriptor
if apartment_room_suite_number is not None:
self.apartment_room_suite_number = apartment_room_suite_number
if building_site_number is not None:
self.building_site_number = building_site_number
if congressional_district is not None:
self.congressional_district = congressional_district
if county_fips_code is not None:
self.county_fips_code = county_fips_code
if do_not_publish_indicator is not None:
self.do_not_publish_indicator = do_not_publish_indicator
if latitude is not None:
self.latitude = latitude
if longitude is not None:
self.longitude = longitude
if name_of_county is not None:
self.name_of_county = name_of_county
if periods is not None:
self.periods = periods
@property
def address_type_descriptor(self):
"""Gets the address_type_descriptor of this EdFiStaffAddress. # noqa: E501
The type of address listed for an individual or organization. For example: Physical Address, Mailing Address, Home Address, etc.) # noqa: E501
:return: The address_type_descriptor of this EdFiStaffAddress. # noqa: E501
:rtype: str
"""
return self._address_type_descriptor
@address_type_descriptor.setter
def address_type_descriptor(self, address_type_descriptor):
"""Sets the address_type_descriptor of this EdFiStaffAddress.
The type of address listed for an individual or organization. For example: Physical Address, Mailing Address, Home Address, etc.) # noqa: E501
:param address_type_descriptor: The address_type_descriptor of this EdFiStaffAddress. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and address_type_descriptor is None:
raise ValueError("Invalid value for `address_type_descriptor`, must not be `None`") # noqa: E501
if (self._configuration.client_side_validation and
address_type_descriptor is not None and len(address_type_descriptor) > 306):
raise ValueError("Invalid value for `address_type_descriptor`, length must be less than or equal to `306`") # noqa: E501
self._address_type_descriptor = address_type_descriptor
@property
def state_abbreviation_descriptor(self):
"""Gets the state_abbreviation_descriptor of this EdFiStaffAddress. # noqa: E501
The abbreviation for the state (within the United States) or outlying area in which an address is located. # noqa: E501
:return: The state_abbreviation_descriptor of this EdFiStaffAddress. # noqa: E501
:rtype: str
"""
return self._state_abbreviation_descriptor
@state_abbreviation_descriptor.setter
def state_abbreviation_descriptor(self, state_abbreviation_descriptor):
"""Sets the state_abbreviation_descriptor of this EdFiStaffAddress.
The abbreviation for the state (within the United States) or outlying area in which an address is located. # noqa: E501
:param state_abbreviation_descriptor: The state_abbreviation_descriptor of this EdFiStaffAddress. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and state_abbreviation_descriptor is None:
raise ValueError("Invalid value for `state_abbreviation_descriptor`, must not be `None`") # noqa: E501
if (self._configuration.client_side_validation and
state_abbreviation_descriptor is not None and len(state_abbreviation_descriptor) > 306):
raise ValueError("Invalid value for `state_abbreviation_descriptor`, length must be less than or equal to `306`") # noqa: E501
self._state_abbreviation_descriptor = state_abbreviation_descriptor
@property
def city(self):
"""Gets the city of this EdFiStaffAddress. # noqa: E501
The name of the city in which an address is located. # noqa: E501
:return: The city of this EdFiStaffAddress. # noqa: E501
:rtype: str
"""
return self._city
@city.setter
def city(self, city):
"""Sets the city of this EdFiStaffAddress.
The name of the city in which an address is located. # noqa: E501
:param city: The city of this EdFiStaffAddress. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and city is None:
raise ValueError("Invalid value for `city`, must not be `None`") # noqa: E501
if (self._configuration.client_side_validation and
city is not None and len(city) > 30):
raise ValueError("Invalid value for `city`, length must be less than or equal to `30`") # noqa: E501
self._city = city
@property
def postal_code(self):
"""Gets the postal_code of this EdFiStaffAddress. # noqa: E501
The five or nine digit zip code or overseas postal code portion of an address. # noqa: E501
:return: The postal_code of this EdFiStaffAddress. # noqa: E501
:rtype: str
"""
return self._postal_code
@postal_code.setter
def postal_code(self, postal_code):
"""Sets the postal_code of this EdFiStaffAddress.
The five or nine digit zip code or overseas postal code portion of an address. # noqa: E501
:param postal_code: The postal_code of this EdFiStaffAddress. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and postal_code is None:
raise ValueError("Invalid value for `postal_code`, must not be `None`") # noqa: E501
if (self._configuration.client_side_validation and
postal_code is not None and len(postal_code) > 17):
raise ValueError("Invalid value for `postal_code`, length must be less than or equal to `17`") # noqa: E501
self._postal_code = postal_code
@property
def street_number_name(self):
"""Gets the street_number_name of this EdFiStaffAddress. # noqa: E501
The street number and street name or post office box number of an address. # noqa: E501
:return: The street_number_name of this EdFiStaffAddress. # noqa: E501
:rtype: str
"""
return self._street_number_name
@street_number_name.setter
def street_number_name(self, street_number_name):
"""Sets the street_number_name of this EdFiStaffAddress.
The street number and street name or post office box number of an address. # noqa: E501
:param street_number_name: The street_number_name of this EdFiStaffAddress. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and street_number_name is None:
raise ValueError("Invalid value for `street_number_name`, must not be `None`") # noqa: E501
if (self._configuration.client_side_validation and
street_number_name is not None and len(street_number_name) > 150):
raise ValueError("Invalid value for `street_number_name`, length must be less than or equal to `150`") # noqa: E501
self._street_number_name = street_number_name
@property
def locale_descriptor(self):
"""Gets the locale_descriptor of this EdFiStaffAddress. # noqa: E501
A general geographic indicator that categorizes U.S. territory (e.g., City, Suburban). # noqa: E501
:return: The locale_descriptor of this EdFiStaffAddress. # noqa: E501
:rtype: str
"""
return self._locale_descriptor
@locale_descriptor.setter
def locale_descriptor(self, locale_descriptor):
"""Sets the locale_descriptor of this EdFiStaffAddress.
A general geographic indicator that categorizes U.S. territory (e.g., City, Suburban). # noqa: E501
:param locale_descriptor: The locale_descriptor of this EdFiStaffAddress. # noqa: E501
:type: str
"""
if (self._configuration.client_side_validation and
locale_descriptor is not None and len(locale_descriptor) > 306):
raise ValueError("Invalid value for `locale_descriptor`, length must be less than or equal to `306`") # noqa: E501
self._locale_descriptor = locale_descriptor
    @property
    def apartment_room_suite_number(self):
        """Gets the apartment_room_suite_number of this EdFiStaffAddress.  # noqa: E501

        The apartment, room, or suite number of an address.  # noqa: E501

        :return: The apartment_room_suite_number of this EdFiStaffAddress.  # noqa: E501
        :rtype: str
        """
        return self._apartment_room_suite_number
@apartment_room_suite_number.setter
def apartment_room_suite_number(self, apartment_room_suite_number):
"""Sets the apartment_room_suite_number of this EdFiStaffAddress.
The apartment, room, or suite number of an address. # noqa: E501
:param apartment_room_suite_number: The apartment_room_suite_number of this EdFiStaffAddress. # noqa: E501
:type: str
"""
if (self._configuration.client_side_validation and
apartment_room_suite_number is not None and len(apartment_room_suite_number) > 50):
raise ValueError("Invalid value for `apartment_room_suite_number`, length must | |
<gh_stars>1-10
#!/usr/bin/env python
from CMS import CMS
import sys
import os
# Initiate the CMS Class
manager = CMS(hostname="localhost", username="test", password="<PASSWORD>", db="CMS")
# Misc strings
tchar = ": "
working = tchar + "Working..."
# Checks if user input is an in-program command
def verify_input(cmd):
new_cmd = str(cmd).lower()
if new_cmd == "exit":
print tchar + "Exiting CMS..."
sys.exit()
os.system("clear")
elif new_cmd == "help" or new_cmd == "?":
print tchar + "\n" + tchar + "clear :: Clear the screen\n" + tchar + "exit :: Exit the program\n" + tchar + "help or ?:: Display this help text"
print tchar + "get <arg>\n" + tchar + "\tall :: Display all entries\n" + tchar + "\tpost :: Display entry that matches specified Selection Query"
print tchar + "new <arg>\n" + tchar + "\tpost :: Create a new entry"
print tchar + "update <arg>\n" + tchar + "\tpost :: Update specified feilds of an entry"
print tchar + "delete <arg>\n" + tchar + "\tpost :: Delete an entry"
print tchar
main()
elif new_cmd == "clear":
os.system("clear")
main()
elif new_cmd == "get":
print tchar + "Get what?\n" + tchar + "\tget <arg>\n" + tchar + "\t\tall :: Display all entries\n" + tchar + "\t\tpost :: Display entry that matches specified Selection Query"
main()
elif new_cmd == "new":
print tchar + "New what?\n" + tchar + "\tnew <arg>\n" + tchar + "\t\tpost :: Create a new entry"
main()
elif new_cmd == "update":
print tchar + "Update what?\n" + tchar + "\tupdate <arg>\n" + tchar + "\t\tpost :: Update specified feilds of an entry"
main()
elif new_cmd == "delete":
print tchar + "Delete what?\n" + tchar + "\tdelete <arg>\n" + tchar + "\t\tpost :: Deletes an entry"
main()
else:
if new_cmd not in commands:
default()
else:
commands[new_cmd]()
# Interactively gets a post using CMS.get_post_by_*(), then prints formatted results
def get_post():
print tchar + "Select by pid, title, or author?"
method = str(raw_input(tchar)).lower()
if method == "pid":
print tchar + "Enter Post ID (pid)"
pid = raw_input(tchar)
try:
pid = int(pid)
except ValueError:
print tchar+ "Value entered was not a number."
get_post()
print working
post = manager.get_post_by_id(pid)
print organize_post_data(post)
main()
elif method == "title":
print tchar + "Enter Post Title (title)"
title = str(raw_input(tchar))
print working
posts = manager.get_posts_by_title(title)
print organize_post_data(posts)
main()
elif method == "author":
print tchar + "Enter Post Author (author)"
author = str(raw_input(tchar))
print working
posts = manager.get_posts_by_author(author)
print organize_post_data(posts)
main()
elif method != "pid" and method != "title" and method != "author":
print "Invalid Selection Method."
get_post()
# Prints all post entries in the Post table with JSON like formatting
def get_all():
count = manager.get_entry_count()
if count == 1:
print tchar + "There is " + str(count) + " total entry.\n" + tchar + "Are you sure you want to list them all? (y/n)"
else:
print tchar + "There are " + str(count) + " total entries\n" + tchar + "Are you sure you want to list them all? (y/n)"
choice = raw_input(tchar).lower()
if choice == "y":
print working
print organize_post_data(manager.get_all_posts())
main()
elif choice == "n":
print tchar + "Okay, exiting \"get all\" command."
main()
else:
print tchar + "There was an error... Exiting \"get all\" command."
main()
# Interactively creates a new post using CMS.new_post()
def new_post():
print tchar + "Your are about to create a new post! A series of prompts are going to ask you to enter some information.\n Continue? (y/n)"
choice = raw_input(tchar).lower()
if choice == "y":
print tchar + "Enter the title of this Post"
title = raw_input(tchar + "\ttitle: ")
print tchar + "Enter author of this Post"
author = raw_input(tchar + "\tauthor: ")
print tchar + "Enter path to markdown file"
path = raw_input(tchar + "\tpath to md: ")
f = open(path, 'r')
content = f.read()
print tchar + "You are about to create the post " + title + ". \n Continue? (y/n)"
choice = raw_input(tchar).lower()
if choice == "y":
print working
if manager.new_post(title, author, content):
f.close()
print tchar + "New Post created. To view it, use \"get post\" with pid: " + str(manager.get_entry_count())
main()
else:
print tchar + "Failed to create new post."
main()
elif choice == "n":
print tchar + "Okay, exiting \"new post\" command."
else:
print tchar + "There was an error... Exiting \"new post\" command."
elif choice == "n":
print tchar + "Okay, exiting \"new post\" command."
main()
else:
print tchar + "There was an error... Exiting \"new post\" command."
main()
# Interactively updates specified values of a post using CMS.update_post_*()
def update_post():
print tchar + "You're about to update a post! A series of prompts will ask you for update information.\n Continue? (y/n)"
choice = raw_input(tchar).lower()
if choice == "y":
print tchar + "Enter the Post ID (pid) of the post to update"
pid = raw_input(tchar)
print tchar + "What attribute do you want to update: title, author, or content?"
attr = raw_input(tchar).lower()
if attr == "title":
print tchar + "Enter the new title for Post with pid: " + str(pid)
title = raw_input(tchar + "\ttitle: ")
print tchar + "You are about to update Post with pid: " + str(pid) + " with the new title:\"" + title + "\". \nContinue? (y/n)"
choice = raw_input(tchar).lower()
if choice == "y":
print working
if manager.update_post_title(pid, title):
print tchar + "Updated post. Use \"get post\" with pid: " + str(pid) + " to view changes."
main()
else:
print tchar + "Failed to update post."
elif choice == "n":
print tchar + "Okay, exiting \"update post\" command."
main()
else:
print tchar + "There was an error... Exiting \"update post\" command."
main()
elif attr == "author":
print tchar + "Enter the new author for Post with pid: " + str(pid)
author = raw_input(tchar + "\tauthor: ")
print tchar + "You are about to update Post with pid: " + str(pid) + " with the new author:\"" + author + "\". \nContinue? (y/n)"
choice = raw_input(tchar).lower()
if choice == "y":
print working
if manager.update_post_author(pid, author):
print tchar + "Updated post. Use \"get post\" with pid: " + str(pid) + " to view changes."
main()
else:
print tchar + "Failed to update post."
elif choice == "n":
print tchar + "Okay, exiting \"update post\" command."
main()
else:
print tchar + "There was an error... Exiting \"update post\" command."
main()
elif attr == "content":
print tchar + "Enter the path to the markdown file containing the new post content for post with pid: " + str(pid)
path = raw_input(tchar + "\tpath to md: ")
f = open(path, 'r')
content = f.read()
print tchar + "You are about to update Post with pid: " + str(pid) + " with the new content.\nContinue? (y/n)"
choice = raw_input(tchar).lower()
if choice == "y":
print working
if manager.update_post_content(pid, content):
f.close()
print tchar + "Post content updated. Use \"get post\" with pid: " + str(pid) + " to view changes."
main()
else:
print thcar + "Failed to update content."
main()
elif choice == "n":
print tchar + "Okay, exiting \"update post\" command."
main()
else:
print tchar + "There was an error... Exiting \"update post\" command."
main()
elif attr != "title" or attr != "author" or attr != "content":
print tchar + "Invalid attribute."
update_post()
elif choice == "n":
print tchar + "Okay, exiting \"update post\" command."
main()
else:
print tchar + "There was an error... Exiting \"update post\" command."
main()
# Interactively removes a specified post entry
def delete_post():
print tchar + "You are about to delete a post! This action can not be reversed. \nContinue? (y/n)"
choice = raw_input(tchar).lower()
if choice == "y":
print tchar + "Enter Post ID (pid) of post to delete"
pid = raw_input(tchar)
print tchar + "Are you sure you want to delete Post with pid:\"" + str(pid) + "\"? (y/n)"
choice = raw_input(tchar)
if choice == "y":
if manager.remove_post(pid):
print tchar + "Post with pid:\"" + str(pid) + "\" deleted."
main()
else:
print tchar + "Failed | |
<filename>telemetry/telemetry/internal/backends/chrome/desktop_browser_backend.py
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import datetime
import hashlib
import logging
import os
import os.path
import random
import re
import shutil
import signal
import subprocess as subprocess
import sys
import tempfile
import py_utils
from py_utils import cloud_storage
from py_utils import exc_util
from telemetry.core import exceptions
from telemetry.internal.backends.chrome import chrome_browser_backend
from telemetry.internal.backends.chrome import minidump_finder
from telemetry.internal.backends.chrome import desktop_minidump_symbolizer
from telemetry.internal.util import format_for_logging
DEVTOOLS_ACTIVE_PORT_FILE = 'DevToolsActivePort'
class DesktopBrowserBackend(chrome_browser_backend.ChromeBrowserBackend):
"""The backend for controlling a locally-executed browser instance, on Linux,
Mac or Windows.
"""
def __init__(self, desktop_platform_backend, browser_options,
browser_directory, profile_directory,
executable, flash_path, is_content_shell,
build_dir=None):
super(DesktopBrowserBackend, self).__init__(
desktop_platform_backend,
browser_options=browser_options,
browser_directory=browser_directory,
profile_directory=profile_directory,
supports_extensions=not is_content_shell,
supports_tab_control=not is_content_shell,
build_dir=build_dir)
self._executable = executable
self._flash_path = flash_path
self._is_content_shell = is_content_shell
# Initialize fields so that an explosion during init doesn't break in Close.
self._proc = None
self._tmp_output_file = None
# pylint: disable=invalid-name
self._minidump_path_crashpad_retrieval = {}
# pylint: enable=invalid-name
if not self._executable:
raise Exception('Cannot create browser, no executable found!')
if self._flash_path and not os.path.exists(self._flash_path):
raise RuntimeError('Flash path does not exist: %s' % self._flash_path)
if self.is_logging_enabled:
self._log_file_path = os.path.join(tempfile.mkdtemp(), 'chrome.log')
else:
self._log_file_path = None
@property
def is_logging_enabled(self):
return self.browser_options.logging_verbosity in [
self.browser_options.NON_VERBOSE_LOGGING,
self.browser_options.VERBOSE_LOGGING,
self.browser_options.SUPER_VERBOSE_LOGGING]
  @property
  def log_file_path(self):
    """Path of the Chrome log file, or None when logging is disabled."""
    return self._log_file_path
@property
def supports_uploading_logs(self):
return (self.browser_options.logs_cloud_bucket and self.log_file_path and
os.path.isfile(self.log_file_path))
  def _GetDevToolsActivePortPath(self):
    """Returns the path of the DevToolsActivePort file in the profile dir."""
    return os.path.join(self.profile_directory, DEVTOOLS_ACTIVE_PORT_FILE)
def _FindDevToolsPortAndTarget(self):
devtools_file_path = self._GetDevToolsActivePortPath()
if not os.path.isfile(devtools_file_path):
raise EnvironmentError('DevTools file doest not exist yet')
# Attempt to avoid reading the file until it's populated.
# Both stat and open may raise IOError if not ready, the caller will retry.
lines = None
if os.stat(devtools_file_path).st_size > 0:
with open(devtools_file_path) as f:
lines = [line.rstrip() for line in f]
if not lines:
raise EnvironmentError('DevTools file empty')
devtools_port = int(lines[0])
browser_target = lines[1] if len(lines) >= 2 else None
return devtools_port, browser_target
def Start(self, startup_args):
assert not self._proc, 'Must call Close() before Start()'
self._dump_finder = minidump_finder.MinidumpFinder(
self.browser.platform.GetOSName(), self.browser.platform.GetArchName())
# macOS displays a blocking crash resume dialog that we need to suppress.
if self.browser.platform.GetOSName() == 'mac':
# Default write expects either the application name or the
# path to the application. self._executable has the path to the app
# with a few other bits tagged on after .app. Thus, we shorten the path
# to end with .app. If this is ineffective on your mac, please delete
# the saved state of the browser you are testing on here:
# /Users/.../Library/Saved\ Application State/...
# http://stackoverflow.com/questions/20226802
dialog_path = re.sub(r'\.app\/.*', '.app', self._executable)
subprocess.check_call([
'defaults', 'write', '-app', dialog_path, 'NSQuitAlwaysKeepsWindows',
'-bool', 'false'
])
cmd = [self._executable]
if self.browser.platform.GetOSName() == 'mac':
cmd.append('--use-mock-keychain') # crbug.com/865247
cmd.extend(startup_args)
cmd.append('about:blank')
env = os.environ.copy()
env['CHROME_HEADLESS'] = '1' # Don't upload minidumps.
env['BREAKPAD_DUMP_LOCATION'] = self._tmp_minidump_dir
if self.is_logging_enabled:
sys.stderr.write(
'Chrome log file will be saved in %s\n' % self.log_file_path)
env['CHROME_LOG_FILE'] = self.log_file_path
# Make sure we have predictable language settings that don't differ from the
# recording.
for name in ('LC_ALL', 'LC_MESSAGES', 'LANG'):
encoding = 'en_US.UTF-8'
if env.get(name, encoding) != encoding:
logging.warn('Overriding env[%s]=="%s" with default value "%s"',
name, env[name], encoding)
env[name] = 'en_US.UTF-8'
self.LogStartCommand(cmd, env)
if not self.browser_options.show_stdout:
self._tmp_output_file = tempfile.NamedTemporaryFile('w', 0)
self._proc = subprocess.Popen(
cmd, stdout=self._tmp_output_file, stderr=subprocess.STDOUT, env=env)
else:
self._proc = subprocess.Popen(cmd, env=env)
self.BindDevToolsClient()
# browser is foregrounded by default on Windows and Linux, but not Mac.
if self.browser.platform.GetOSName() == 'mac':
subprocess.Popen([
'osascript', '-e',
('tell application "%s" to activate' % self._executable)
])
if self._supports_extensions:
self._WaitForExtensionsToLoad()
  def LogStartCommand(self, command, env):
    """Log the command used to start Chrome.
    In order to keep the length of logs down (see crbug.com/943650),
    we sometimes trim the start command depending on browser_options.
    The command may change between runs, but usually in innocuous ways like
    --user-data-dir changes to a new temporary directory. Some benchmarks
    do use different startup arguments for different stories, but this is
    discouraged. This method could be changed to print arguments that are
    different since the last run if need be.

    Args:
      command: Sequence of argv strings used to launch the browser.
      env: Mapping of environment variables passed to the browser; only
        logged when log trimming is disabled.
    """
    formatted_command = format_for_logging.ShellFormat(
        command, trim=self.browser_options.trim_logs)
    logging.info('Starting Chrome: %s\n', formatted_command)
    if not self.browser_options.trim_logs:
      logging.info('Chrome Env: %s', env)
  def BindDevToolsClient(self):
    """Connects the DevTools client, verifying the browser is still alive.

    Raises:
      exceptions.ProcessGoneException: If the browser process has exited.
    """
    # In addition to the work performed by the base class, quickly check if
    # the browser process is still alive.
    if not self.IsBrowserRunning():
      raise exceptions.ProcessGoneException(
          'Return code: %d' % self._proc.returncode)
    super(DesktopBrowserBackend, self).BindDevToolsClient()
def GetPid(self):
if self._proc:
return self._proc.pid
return None
def IsBrowserRunning(self):
return self._proc and self._proc.poll() is None
def GetStandardOutput(self):
if not self._tmp_output_file:
if self.browser_options.show_stdout:
# This can happen in the case that loading the Chrome binary fails.
# We print rather than using logging here, because that makes a
# recursive call to this function.
print("Can't get standard output with --show-stdout", file=sys.stderr)
return ''
self._tmp_output_file.flush()
try:
with open(self._tmp_output_file.name) as f:
return f.read()
except IOError:
return ''
def _IsExecutableStripped(self):
if self.browser.platform.GetOSName() == 'mac':
try:
symbols = subprocess.check_output(['/usr/bin/nm', self._executable])
except subprocess.CalledProcessError as err:
logging.warning(
'Error when checking whether executable is stripped: %s',
err.output)
# Just assume that binary is stripped to skip breakpad symbol generation
# if this check failed.
return True
num_symbols = len(symbols.splitlines())
# We assume that if there are more than 10 symbols the executable is not
# stripped.
return num_symbols < 10
else:
return False
  def _GetStackFromMinidump(self, minidump):
    """Symbolizes |minidump|; returns whatever the symbolizer produces
    (treated as a failure by callers when empty)."""
    # Create an executable-specific directory if necessary to store symbols
    # for re-use. We purposefully don't clean this up so that future
    # tests can continue to use the same symbols that are unique to the
    # executable.
    symbols_dir = self._CreateExecutableUniqueDirectory('chrome_symbols_')
    dump_symbolizer = desktop_minidump_symbolizer.DesktopMinidumpSymbolizer(
        self.browser.platform.GetOSName(),
        self.browser.platform.GetArchName(),
        self._dump_finder, self.build_dir, symbols_dir=symbols_dir)
    return dump_symbolizer.SymbolizeMinidump(minidump)
def _CreateExecutableUniqueDirectory(self, prefix):
"""Creates a semi-permanent directory unique to the browser executable.
This directory will persist between different tests, and potentially
be available between different test suites, but is liable to be cleaned
up by the OS at any point outside of a test suite's run.
Args:
prefix: A string to include before the unique identifier in the
directory name.
Returns:
A string containing an absolute path to the created directory.
"""
hashfunc = hashlib.sha1()
with open(self._executable, 'rb') as infile:
hashfunc.update(infile.read())
symbols_dirname = prefix + hashfunc.hexdigest()
# We can't use mkdtemp() directly since that will result in the directory
# being different, and thus not shared. So, create an unused directory
# and use the same parent directory.
unused_dir = tempfile.mkdtemp().rstrip(os.path.sep)
symbols_dir = os.path.join(os.path.dirname(unused_dir), symbols_dirname)
if not os.path.exists(symbols_dir) or not os.path.isdir(symbols_dir):
os.makedirs(symbols_dir)
shutil.rmtree(unused_dir)
return symbols_dir
  def _UploadMinidumpToCloudStorage(self, minidump_path):
    """ Upload minidump_path to cloud storage and return the cloud storage url.

    Returns the literal '<Missing link>' when the upload fails.
    """
    # Timestamp plus a random suffix keeps concurrent uploads from colliding.
    remote_path = ('minidump-%s-%i.dmp' %
                   (datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'),
                    random.randint(0, 1000000)))
    try:
      return cloud_storage.Insert(cloud_storage.TELEMETRY_OUTPUT, remote_path,
                                  minidump_path)
    except cloud_storage.CloudStorageError as err:
      logging.error('Cloud storage error while trying to upload dump: %s',
                    repr(err))
      return '<Missing link>'
  def SymbolizeMinidump(self, minidump_path):
    """Symbolizes the given minidump; returns (succeeded, stack_or_error)."""
    return self._InternalSymbolizeMinidump(minidump_path)
def _InternalSymbolizeMinidump(self, minidump_path):
cloud_storage_link = self._UploadMinidumpToCloudStorage(minidump_path)
stack = self._GetStackFromMinidump(minidump_path)
if not stack:
error_message = ('Failed to symbolize minidump. Raw stack is uploaded to'
' cloud storage: %s.' % cloud_storage_link)
return (False, error_message)
self._symbolized_minidump_paths.add(minidump_path)
return (True, stack)
  def __del__(self):
    # Best-effort cleanup on garbage collection; Close() is decorated with
    # exc_util.BestEffort, so failures here should not propagate.
    self.Close()
  def _TryCooperativeShutdown(self):
    """Asks the browser to shut itself down and waits for it to exit.

    A no-op on platforms without cooperative-shutdown support; on timeout
    the caller proceeds to terminate the process.
    """
    if self.browser.platform.IsCooperativeShutdownSupported():
      # Ideally there would be a portable, cooperative shutdown
      # mechanism for the browser. This seems difficult to do
      # correctly for all embedders of the content API. The only known
      # problem with unclean shutdown of the browser process is on
      # Windows, where suspended child processes frequently leak. For
      # now, just solve this particular problem. See Issue 424024.
      if self.browser.platform.CooperativelyShutdown(self._proc, "chrome"):
        try:
          # Use a long timeout to handle slow Windows debug
          # (see crbug.com/815004)
          # Allow specifying a custom shutdown timeout via the
          # 'CHROME_SHUTDOWN_TIMEOUT' environment variable.
          # TODO(sebmarchand): Remove this now that there's an option to shut
          # down Chrome via Devtools.
          py_utils.WaitFor(lambda: not self.IsBrowserRunning(),
                           timeout=int(os.getenv('CHROME_SHUTDOWN_TIMEOUT', 15))
                          )
          logging.info('Successfully shut down browser cooperatively')
        except py_utils.TimeoutException as e:
          logging.warning('Failed to cooperatively shutdown. ' +
                          'Proceeding to terminate: ' + str(e))
  def Background(self):
    """Backgrounding the browser is not implemented for this backend."""
    raise NotImplementedError
@exc_util.BestEffort
def Close(self):
super(DesktopBrowserBackend, self).Close()
# First, try to cooperatively shutdown.
if self.IsBrowserRunning():
self._TryCooperativeShutdown()
# Second, try to politely shutdown with SIGINT. Use SIGINT instead of
# SIGTERM (or terminate()) here since the browser treats SIGTERM as a more
# urgent shutdown signal and | |
# <gh_stars>10-100  (dataset artifact; commented out so the module parses)
import os
import time
from datetime import datetime
from pandac.PandaModules import *
from direct.distributed.MsgTypes import *
from direct.gui.DirectGui import *
from direct.fsm import StateData
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.directnotify import DirectNotifyGlobal
from direct.task import Task
from otp.otpgui import OTPDialog
from otp.otpbase import OTPLocalizer
from otp.otpbase import OTPGlobals
from otp.uberdog.AccountDetailRecord import AccountDetailRecord, SubDetailRecord
import TTAccount
import GuiScreen
class LoginScreen(StateData.StateData, GuiScreen.GuiScreen):
    """Username/password login UI driven by a ClassicFSM.

    Collects credentials (or takes them from a token / auto-login source)
    and hands them to the connection repository's login interface.
    """
    # Optional auto-login credentials read from the config at class-load
    # time; when set, enter() skips the login form entirely.
    AutoLoginName = base.config.GetString('%s-auto-login%s' % (game.name, os.getenv('otp_client', '')), '')
    AutoLoginPassword = base.config.GetString('%s-auto-password%s' % (game.name, os.getenv('otp_client', '')), '')
    notify = DirectNotifyGlobal.directNotify.newCategory('LoginScreen')
    # Highlight colors for the text-entry fields.
    ActiveEntryColor = Vec4(1, 1, 1, 1)
    InactiveEntryColor = Vec4(0.8, 0.8, 0.8, 1)
    def __init__(self, cr, doneEvent):
        """Build the login state machine.

        Args:
            cr: Client connection repository; supplies loginInterface.
            doneEvent: Messenger event fired when this screen finishes.
        """
        self.notify.debug('__init__')
        StateData.StateData.__init__(self, doneEvent)
        GuiScreen.GuiScreen.__init__(self)
        self.cr = cr
        self.loginInterface = self.cr.loginInterface
        self.userName = ''
        self.password = ''
        # States: login (form shown), waitForLoginResponse (credentials
        # submitted), showLoginFailDialog / showConnectionProblemDialog
        # (error UI), off (idle).
        self.fsm = ClassicFSM.ClassicFSM('LoginScreen', [
            State.State('off', self.enterOff, self.exitOff, [
                'login', 'waitForLoginResponse']),
            State.State('login', self.enterLogin, self.exitLogin, [
                'waitForLoginResponse', 'login', 'showLoginFailDialog']),
            State.State('showLoginFailDialog', self.enterShowLoginFailDialog, self.exitShowLoginFailDialog, [
                'login', 'showLoginFailDialog']),
            State.State('waitForLoginResponse', self.enterWaitForLoginResponse, self.exitWaitForLoginResponse, [
                'login', 'showLoginFailDialog', 'showConnectionProblemDialog']),
            State.State('showConnectionProblemDialog', self.enterShowConnectionProblemDialog, self.exitShowConnectionProblemDialog, [
                'login'])], 'off', 'off')
        self.fsm.enterInitialState()
    def load(self):
        """Create all GUI widgets: entry fields, buttons and dialogs.

        Everything created here is torn down again by unload().
        """
        self.notify.debug('load')
        # All layout metrics scale off a single master factor.
        masterScale = 0.8
        textScale = 0.1 * masterScale
        entryScale = 0.08 * masterScale
        lineHeight = 0.21 * masterScale
        buttonScale = 1.15 * masterScale
        buttonLineHeight = 0.14 * masterScale
        self.frame = DirectFrame(parent=aspect2d, relief=None, sortOrder=20)
        self.frame.hide()
        # linePos walks downward as rows are added.
        linePos = -0.26
        self.nameLabel = DirectLabel(parent=self.frame, relief=None, pos=(-0.21, 0, linePos), text=OTPLocalizer.LoginScreenUserName, text_scale=textScale, text_align=TextNode.ARight)
        self.nameEntry = DirectEntry(parent=self.frame, relief=DGG.SUNKEN, borderWidth=(0.1,
            0.1), scale=entryScale, pos=(-0.125, 0.0, linePos), width=OTPGlobals.maxLoginWidth, numLines=1, focus=0, cursorKeys=1)
        linePos -= lineHeight
        self.passwordLabel = DirectLabel(parent=self.frame, relief=None, pos=(-0.21, 0, linePos), text=OTPLocalizer.LoginScreenPassword, text_scale=textScale, text_align=TextNode.ARight)
        self.passwordEntry = DirectEntry(parent=self.frame, relief=DGG.SUNKEN, borderWidth=(0.1,
            0.1), scale=entryScale, pos=(-0.125, 0.0, linePos), width=OTPGlobals.maxLoginWidth, numLines=1, focus=0, cursorKeys=1, obscured=1, command=self.__handleLoginPassword)
        linePos -= lineHeight
        buttonImageScale = (1.7, 1.1, 1.1)
        self.loginButton = DirectButton(parent=self.frame, relief=DGG.RAISED, borderWidth=(0.01,
            0.01), pos=(0, 0, linePos), scale=buttonScale, text=OTPLocalizer.LoginScreenLogin, text_scale=0.06, text_pos=(0, -0.02), command=self.__handleLoginButton)
        linePos -= buttonLineHeight
        self.createAccountButton = DirectButton(parent=self.frame, relief=DGG.RAISED, borderWidth=(0.01,
            0.01), pos=(0, 0, linePos), scale=buttonScale, text=OTPLocalizer.LoginScreenCreateAccount, text_scale=0.06, text_pos=(0, -0.02), command=self.__handleCreateAccount)
        linePos -= buttonLineHeight
        self.quitButton = DirectButton(parent=self.frame, relief=DGG.RAISED, borderWidth=(0.01,
            0.01), pos=(0, 0, linePos), scale=buttonScale, text=OTPLocalizer.LoginScreenQuit, text_scale=0.06, text_pos=(0, -0.02), command=self.__handleQuit)
        linePos -= buttonLineHeight
        # Simple acknowledge dialog used for the "enter a name" prompt.
        self.dialogDoneEvent = 'loginDialogAck'
        dialogClass = OTPGlobals.getGlobalDialogClass()
        self.dialog = dialogClass(dialogName='loginDialog', doneEvent=self.dialogDoneEvent, message='', style=OTPDialog.Acknowledge, sortOrder=NO_FADE_SORT_INDEX + 100)
        self.dialog.hide()
        # Failure dialog with "try again" / "create account" buttons.
        self.failDialog = DirectFrame(parent=aspect2dp, relief=DGG.RAISED, borderWidth=(0.01,
            0.01), pos=(0,
            0.1,
            0), text='', text_scale=0.08, text_pos=(0.0,
            0.3), text_wordwrap=15, sortOrder=NO_FADE_SORT_INDEX)
        linePos = -0.05
        self.failTryAgainButton = DirectButton(parent=self.failDialog, relief=DGG.RAISED, borderWidth=(0.01,
            0.01), pos=(0, 0, linePos), scale=0.9, text=OTPLocalizer.LoginScreenTryAgain, text_scale=0.06, text_pos=(0,
            -0.02), command=self.__handleFailTryAgain)
        linePos -= buttonLineHeight
        self.failCreateAccountButton = DirectButton(parent=self.failDialog, relief=DGG.RAISED, borderWidth=(0.01,
            0.01), pos=(0, 0, linePos), scale=0.9, text=OTPLocalizer.LoginScreenCreateAccount, text_scale=0.06, text_pos=(0,
            -0.02), command=self.__handleFailCreateAccount)
        linePos -= buttonLineHeight
        self.failDialog.hide()
        # Acknowledge dialog shown when the login service cannot be reached.
        self.connectionProblemDialogDoneEvent = 'loginConnectionProblemDlgAck'
        dialogClass = OTPGlobals.getGlobalDialogClass()
        self.connectionProblemDialog = dialogClass(dialogName='connectionProblemDialog', doneEvent=self.connectionProblemDialogDoneEvent, message='', style=OTPDialog.Acknowledge, sortOrder=NO_FADE_SORT_INDEX + 100)
        self.connectionProblemDialog.hide()
        return
def unload(self):
self.notify.debug('unload')
self.nameEntry.destroy()
self.passwordEntry.destroy()
self.failTryAgainButton.destroy()
self.failCreateAccountButton.destroy()
self.createAccountButton.destroy()
self.loginButton.destroy()
self.quitButton.destroy()
self.dialog.cleanup()
del self.dialog
self.failDialog.destroy()
del self.failDialog
self.connectionProblemDialog.cleanup()
del self.connectionProblemDialog
self.frame.destroy()
del self.fsm
del self.loginInterface
del self.cr
    def enter(self):
        """Choose a login path; token and auto-login sources skip the form."""
        if self.cr.blue:
            self.userName = 'blue'
            # NOTE(review): "<PASSWORD>" is a placeholder left by a
            # data-scrubbing tool and is not valid Python; the original
            # literal must be restored before this module can run.
            self.password = <PASSWORD>
            self.fsm.request('waitForLoginResponse')
        elif self.cr.playToken:
            self.userName = '*'
            self.password = self.cr.playToken
            self.fsm.request('waitForLoginResponse')
        elif hasattr(self.cr, 'DISLToken') and self.cr.DISLToken:
            self.userName = '*'
            self.password = self.cr.DISLToken
            self.fsm.request('waitForLoginResponse')
        elif self.AutoLoginName:
            self.userName = self.AutoLoginName
            self.password = self.AutoLoginPassword
            self.fsm.request('waitForLoginResponse')
        else:
            self.fsm.request('login')
    def exit(self):
        """Hide the UI and drive the FSM to its final state."""
        self.frame.hide()
        self.ignore(self.dialogDoneEvent)
        self.fsm.requestFinalState()
    def enterOff(self):
        # 'off' is the FSM's idle state; nothing to do on entry.
        pass
    def exitOff(self):
        # Nothing to tear down when leaving the idle state.
        pass
def enterLogin(self):
self.cr.resetPeriodTimer(None)
self.userName = ''
self.password = ''
self.userName = launcher.getLastLogin()
if self.userName and self.nameEntry.get():
if self.userName != self.nameEntry.get():
self.userName = ''
self.frame.show()
self.nameEntry.enterText(self.userName)
self.passwordEntry.enterText(self.password)
self.focusList = [
self.nameEntry, self.passwordEntry]
focusIndex = 0
if self.userName:
focusIndex = 1
self.startFocusMgmt(startFocus=focusIndex)
return
    def exitLogin(self):
        """Stop keyboard-focus management for the entry fields."""
        self.stopFocusMgmt()
    def enterShowLoginFailDialog(self, msg):
        """Dim the screen and show the login-failure dialog with `msg`."""
        base.transitions.fadeScreen(0.5)
        self.failDialog['text'] = msg
        self.failDialog.show()
    def __handleFailTryAgain(self):
        # Return to the login form so the user can retry.
        self.fsm.request('login')
    def __handleFailCreateAccount(self):
        # Hand control back to the owner to run the account-creation flow.
        messenger.send(self.doneEvent, [{'mode': 'createAccount'}])
    def __handleFailNoNewAccountsAck(self):
        # Dismiss the ack dialog and re-show the original failure text.
        self.dialog.hide()
        self.fsm.request('showLoginFailDialog', [self.failDialog['text']])
    def exitShowLoginFailDialog(self):
        """Remove the screen fade and hide the failure dialog."""
        base.transitions.noTransitions()
        self.failDialog.hide()
def __handleLoginPassword(self, password):
if password != '':
if self.nameEntry.get() != '':
self.__handleLoginButton()
def __handleLoginButton(self):
self.removeFocus()
self.userName = self.nameEntry.get()
self.password = self.passwordEntry.get()
if self.userName == '':
self.dialog.setMessage(OTPLocalizer.LoginScreenLoginPrompt)
self.dialog.show()
self.acceptOnce(self.dialogDoneEvent, self.__handleEnterLoginAck)
else:
self.fsm.request('waitForLoginResponse')
    def __handleQuit(self):
        # Tell the owner this screen finished with a quit request.
        self.removeFocus()
        messenger.send(self.doneEvent, [{'mode': 'quit'}])
    def __handleCreateAccount(self):
        # Tell the owner to switch into the account-creation flow.
        self.removeFocus()
        messenger.send(self.doneEvent, [{'mode': 'createAccount'}])
def enterWaitForLoginResponse(self):
self.cr.handler = self.handleWaitForLoginResponse
self.cr.userName = self.userName
self.cr.password = <PASSWORD>
try:
error = self.loginInterface.authorize(self.userName, self.password)
except TTAccount.TTAccountException, e:
self.fsm.request('showConnectionProblemDialog', [str(e)])
return
if error:
self.notify.info(error)
freeTimeExpired = self.loginInterface.getErrorCode() == 10
if freeTimeExpired:
self.cr.logAccountInfo()
messenger.send(self.doneEvent, [{'mode': 'freeTimeExpired'}])
else:
self.fsm.request('showLoginFailDialog', [error])
else:
self.loginInterface.sendLoginMsg()
self.waitForDatabaseTimeout(requestName='WaitForLoginResponse')
    def exitWaitForLoginResponse(self):
        """Cancel the database-timeout watcher and detach the msg handler."""
        self.cleanupWaitingForDatabase()
        self.cr.handler = None
        return
    def enterShowConnectionProblemDialog(self, msg):
        """Show the connection-problem dialog and await acknowledgement."""
        self.connectionProblemDialog.setMessage(msg)
        self.connectionProblemDialog.show()
        self.acceptOnce(self.connectionProblemDialogDoneEvent, self.__handleConnectionProblemAck)
    def __handleConnectionProblemAck(self):
        # User acknowledged the problem; return to the login form.
        self.connectionProblemDialog.hide()
        self.fsm.request('login')
    def exitShowConnectionProblemDialog(self):
        # The ack handler already hides the dialog; nothing further to do.
        pass
def handleWaitForLoginResponse(self, msgType, di):
if msgType == CLIENT_LOGIN_2_RESP:
self.handleLoginResponseMsg2(di)
elif msgType == CLIENT_LOGIN_RESP:
self.handleLoginResponseMsg(di)
elif msgType == CLIENT_LOGIN_3_RESP:
self.handleLoginResponseMsg3(di)
elif msgType == CLIENT_LOGIN_TOONTOWN_RESP:
self.handleLoginToontownResponse(di)
else:
self.cr.handleMessageType(msgType, di)
def getExtendedErrorMsg(self, errorString):
prefix = 'Bad DC Version Compare'
if len(errorString) < len(prefix):
return errorString
if errorString[:len(prefix)] == prefix:
return '%s%s' % (errorString, ', address=%s' % base.cr.getServerAddress())
return errorString
def handleLoginResponseMsg3(self, di):
now = time.time()
returnCode = di.getInt8()
errorString = self.getExtendedErrorMsg(di.getString())
self.notify.info('Login response return code %s' % returnCode)
if returnCode != 0:
self.notify.info('Login failed: %s' % errorString)
messenger.send(self.doneEvent, [{'mode': 'reject'}])
return
accountDetailRecord = AccountDetailRecord()
accountDetailRecord.openChatEnabled = di.getString() == 'YES'
accountDetailRecord.createFriendsWithChat = di.getString() == 'YES'
chatCodeCreation = di.getString()
accountDetailRecord.chatCodeCreation = chatCodeCreation == 'YES'
parentControlledChat = chatCodeCreation == 'PARENT'
access = di.getString()
if access == 'VELVET':
access = OTPGlobals.AccessVelvetRope
else:
if access == 'FULL':
access = OTPGlobals.AccessFull
else:
self.notify.warning('Unknown access: %s' % access)
access = OTPGlobals.AccessUnknown
accountDetailRecord.piratesAccess = access
accountDetailRecord.familyAccountId = di.getInt32()
accountDetailRecord.playerAccountId = di.getInt32()
accountDetailRecord.playerName = di.getString()
accountDetailRecord.playerNameApproved = di.getInt8()
accountDetailRecord.maxAvatars = di.getInt32()
self.cr.openChatAllowed = accountDetailRecord.openChatEnabled
self.cr.secretChatAllowed = accountDetailRecord.chatCodeCreation or parentControlledChat
self.cr.setIsPaid(accountDetailRecord.piratesAccess)
self.userName = accountDetailRecord.playerName
self.cr.userName = accountDetailRecord.playerName
accountDetailRecord.numSubs = di.getUint16()
for i in range(accountDetailRecord.numSubs):
subDetailRecord = SubDetailRecord()
subDetailRecord.subId = di.getUint32()
subDetailRecord.subOwnerId = di.getUint32()
subDetailRecord.subName = di.getString()
subDetailRecord.subActive = di.getString()
access = di.getString()
if access == 'VELVET':
access = OTPGlobals.AccessVelvetRope
elif access == 'FULL':
access = OTPGlobals.AccessFull
else:
access = OTPGlobals.AccessUnknown
subDetailRecord.subAccess = access
subDetailRecord.subLevel = di.getUint8()
subDetailRecord.subNumAvatars = di.getUint8()
subDetailRecord.subNumConcur = di.getUint8()
subDetailRecord.subFounder = di.getString() == 'YES'
accountDetailRecord.subDetails[subDetailRecord.subId] = subDetailRecord
accountDetailRecord.WLChatEnabled = di.getString() == 'YES'
if accountDetailRecord.WLChatEnabled:
self.cr.whiteListChatEnabled = 1
self.cr.whiteListChatEnabled = 0
self.notify.info('End of DISL token parse')
self.notify.info('accountDetailRecord: %s' % accountDetailRecord)
self.cr.accountDetailRecord = accountDetailRecord
self.__handleLoginSuccess()
    def handleLoginResponseMsg2(self, di):
        """Parse a CLIENT_LOGIN_2_RESP datagram and record session state."""
        self.notify.debug('handleLoginResponseMsg2')
        if self.notify.getDebug():
            dgram = di.getDatagram()
            dgram.dumpHex(ostream)
        now = time.time()
        returnCode = di.getUint8()
        errorString = self.getExtendedErrorMsg(di.getString())
        self.userName = di.getString()
        self.cr.userName = self.userName
        accountDetailRecord = AccountDetailRecord()
        self.cr.accountDetailRecord = accountDetailRecord
        canChat = di.getUint8()
        self.cr.secretChatAllowed = canChat
        self.notify.info('Chat from game server login: %s' % canChat)
        # Server clock sync: compute the delta between server and client time.
        sec = di.getUint32()
        usec = di.getUint32()
        serverTime = sec + usec / 1000000.0
        self.cr.serverTimeUponLogin = serverTime
        self.cr.clientTimeUponLogin = now
        self.cr.globalClockRealTimeUponLogin = globalClock.getRealTime()
        if hasattr(self.cr, 'toontownTimeManager'):
            self.cr.toontownTimeManager.updateLoginTimes(serverTime, now, self.cr.globalClockRealTimeUponLogin)
        serverDelta = serverTime - now
        self.cr.setServerDelta(serverDelta)
        self.notify.setServerDelta(serverDelta, 28800)
        self.isPaid = di.getUint8()
        self.cr.setIsPaid(self.isPaid)
        if self.isPaid:
            launcher.setPaidUserLoggedIn()
        self.notify.info('Paid from game server login: %s' % self.isPaid)
        self.cr.resetPeriodTimer(None)
        # Optional trailing fields: minutes remaining, family info, whitelist
        # flag, account age and last-login timestamp.
        if di.getRemainingSize() >= 4:
            minutesRemaining = di.getInt32()
            self.notify.info('Minutes remaining from server %s' % minutesRemaining)
            if minutesRemaining >= 0:
                self.notify.info('Spawning period timer')
                self.cr.resetPeriodTimer(minutesRemaining * 60)
            elif self.isPaid:
                self.notify.warning('Negative minutes remaining for paid user (?)')
            else:
                self.notify.warning('Not paid, but also negative minutes remaining (?)')
        else:
            self.notify.info('Minutes remaining not returned from server; not spawning period timer')
        familyStr = di.getString()
        WhiteListResponse = di.getString()
        if WhiteListResponse == 'YES':
            self.cr.whiteListChatEnabled = 1
        else:
            self.cr.whiteListChatEnabled = 0
        if di.getRemainingSize() > 0:
            self.cr.accountDays = self.parseAccountDays(di.getInt32())
        else:
            self.cr.accountDays = 100000
        if di.getRemainingSize() > 0:
            self.lastLoggedInStr = di.getString()
            self.notify.info('last logged in = %s' % self.lastLoggedInStr)
        else:
            self.lastLoggedInStr = ''
        self.cr.lastLoggedIn = datetime.now()
        if hasattr(self.cr, 'toontownTimeManager'):
            self.cr.lastLoggedIn = self.cr.toontownTimeManager.convertStrToToontownTime(self.lastLoggedInStr)
        self.cr.withParentAccount = False
        self.notify.info('Login response return code %s' % returnCode)
        if returnCode == 0:
            self.__handleLoginSuccess()
        elif returnCode == -13:
            self.notify.info('Period Time Expired')
            self.fsm.request('showLoginFailDialog', [
                OTPLocalizer.LoginScreenPeriodTimeExpired])
        else:
            self.notify.info('Login failed: %s' % errorString)
            messenger.send(self.doneEvent, [{'mode': 'reject'}])
        return
def handleLoginResponseMsg(self, di):
self.notify.debug('handleLoginResponseMsg1')
if self.notify.getDebug():
dgram = di.getDatagram()
dgram.dumpHex(ostream)
now = time.time()
accountDetailRecord = AccountDetailRecord()
self.cr.accountDetailRecord = accountDetailRecord
returnCode = di.getUint8()
accountCode = di.getUint32()
errorString = self.getExtendedErrorMsg(di.getString())
sec = di.getUint32()
usec = di.getUint32()
serverTime = sec + usec / 1000000.0
serverDelta = serverTime - now
self.cr.serverTimeUponLogin = serverTime
self.cr.clientTimeUponLogin = now
self.cr.globalClockRealTimeUponLogin = globalClock.getRealTime()
if hasattr(self.cr, 'toontownTimeManager'):
self.cr.toontownTimeManager.updateLoginTimes(serverTime, now, self.cr.globalClockRealTimeUponLogin)
self.cr.setServerDelta(serverDelta)
self.notify.setServerDelta(serverDelta, 28800)
if di.getRemainingSize() > 0:
self.cr.accountDays = self.parseAccountDays(di.getInt32())
else:
self.cr.accountDays = 100000
if di.getRemainingSize() > 0:
WhiteListResponse = di.getString()
else:
WhiteListResponse = 'NO'
if WhiteListResponse == 'YES':
self.cr.whiteListChatEnabled = 1
else:
self.cr.whiteListChatEnabled = 0
self.lastLoggedInStr = base.config.GetString('last-logged-in', '')
self.cr.lastLoggedIn = datetime.now()
if hasattr(self.cr, 'toontownTimeManager'):
self.cr.lastLoggedIn = self.cr.toontownTimeManager.convertStrToToontownTime(self.lastLoggedInStr)
self.cr.withParentAccount = base.config.GetBool('dev-with-parent-account', 0)
self.notify.info('Login response return code %s' % returnCode)
if returnCode == 0:
self.__handleLoginSuccess()
elif returnCode == 12:
self.notify.info('Bad password')
self.fsm.request('showLoginFailDialog', [
OTPLocalizer.LoginScreenBadPassword])
elif returnCode == 14:
self.notify.info('Bad word in user | |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import os
import shutil
import sys
import time
import traceback
from dataclasses import replace
from pathlib import Path
from typing import Iterable, List, Optional
import click
from . import (
buck,
command_arguments,
commands,
configuration as configuration_module,
filesystem,
log,
recently_used_configurations,
statistics as statistics_module,
)
from .commands import Command, ExitCode, v2
from .commands.analyze import MissingFlowsKind
from .exceptions import EnvironmentException
from .version import __version__
LOG: logging.Logger = logging.getLogger(__name__)
def _log_statistics(
    command: Command,
    start_time: float,
    client_exception_message: str,
    error_message: Optional[str],
    exit_code: int,
    should_log: bool = True,
) -> None:
    """Report a usage sample to the configured remote logger, if any.

    No-op when logging is disabled, the command has no configuration, or the
    configuration does not define a logger.
    """
    configuration = command.configuration
    if not (should_log and configuration and configuration.logger):
        return
    elapsed_ms = int((time.time() - start_time) * 1000)
    statistics_module.log_with_configuration(
        category=statistics_module.LoggerCategory.USAGE,
        configuration=configuration,
        integers={
            "exit_code": exit_code,
            "runtime": elapsed_ms,
        },
        normals={
            "project_root": configuration.project_root,
            "root": configuration.relative_local_root,
            "cwd": os.getcwd(),
            "client_version": __version__,
            "command": command.NAME,
            "client_exception": client_exception_message,
            "error_message": error_message,
        },
    )
def _show_pyre_version_as_text(
    binary_version: Optional[str], client_version: str
) -> None:
    """Write version information to stdout as human-readable text.

    Args:
        binary_version: Version of the Pyre binary, if it could be resolved.
        client_version: Version of this client.
    """
    if binary_version:
        log.stdout.write(f"Binary version: {binary_version}\n")
    # Bug fix: this previously printed the module-level `__version__` and
    # ignored its `client_version` parameter, unlike the JSON counterpart
    # `_show_pyre_version_as_json`; use the parameter.
    log.stdout.write(f"Client version: {client_version}\n")
def _show_pyre_version_as_json(
    binary_version: Optional[str], client_version: str
) -> None:
    """Write version information to stdout as a single-line JSON object."""
    # Insertion order is preserved by json.dumps, so keep "binary" (when
    # present) ahead of "client" to match the original output exactly.
    payload = {}
    if binary_version is not None:
        payload["binary"] = binary_version
    payload["client"] = client_version
    log.stdout.write(f"{json.dumps(payload)}\n")
def _show_pyre_version(arguments: command_arguments.CommandArguments) -> None:
    """Print the client and (when resolvable) binary versions of Pyre."""
    client_version: str = __version__
    binary_version: Optional[str] = None
    try:
        configuration = configuration_module.create_configuration(arguments, Path("."))
        binary_version = configuration.get_binary_version()
    except Exception:
        # Version display is best-effort: a broken configuration just means
        # the binary version cannot be resolved.
        pass
    if arguments.output != command_arguments.JSON:
        _show_pyre_version_as_text(binary_version, client_version)
    else:
        _show_pyre_version_as_json(binary_version, client_version)
def run_pyre_command(
    command: Command,
    configuration: configuration_module.Configuration,
    noninteractive: bool,
) -> ExitCode:
    """Run a v1 command object, logging usage statistics and mapping
    exceptions to exit codes."""
    start_time = time.time()
    client_exception_message = ""
    should_log_statistics = True
    # Having this as a fails-by-default helps flag unexpected exit
    # from exception flows.
    exit_code = ExitCode.FAILURE
    try:
        configuration_module.check_nested_local_configuration(configuration)
        log.start_logging_to_directory(noninteractive, configuration.log_directory)
        LOG.debug(f"Running cli command `{' '.join(sys.argv)}`...")
        exit_code = command.run().exit_code()
    except (buck.BuckException, EnvironmentException) as error:
        client_exception_message = str(error)
        exit_code = ExitCode.FAILURE
        if isinstance(error, buck.BuckException):
            exit_code = ExitCode.BUCK_ERROR
    except (
        commands.ClientException,
        configuration_module.InvalidConfiguration,
    ) as error:
        client_exception_message = str(error)
        exit_code = ExitCode.FAILURE
    except Exception:
        client_exception_message = traceback.format_exc()
        exit_code = ExitCode.FAILURE
    except KeyboardInterrupt:
        # Reachable despite the `except Exception` above: KeyboardInterrupt
        # derives from BaseException, not Exception.
        LOG.warning("Interrupted by user")
        LOG.debug(traceback.format_exc())
        exit_code = ExitCode.SUCCESS
    finally:
        # Always emit the error, clean up, and record a usage sample,
        # whichever path set the exit code.
        if len(client_exception_message) > 0:
            LOG.error(client_exception_message)
        result = command.result()
        error_message = result.error if result else None
        command.cleanup()
        _log_statistics(
            command,
            start_time,
            client_exception_message,
            error_message,
            exit_code,
            should_log_statistics,
        )
    return exit_code
def _run_check_command(arguments: command_arguments.CommandArguments) -> ExitCode:
    """Run a one-shot (non-incremental) Pyre check."""
    configuration = _create_configuration_with_retry(arguments, Path("."))
    check_command = commands.Check(
        arguments, original_directory=os.getcwd(), configuration=configuration
    )
    return run_pyre_command(check_command, configuration, arguments.noninteractive)
def _run_incremental_command(
    arguments: command_arguments.CommandArguments,
    nonblocking: bool,
    incremental_style: commands.IncrementalStyle,
    no_start_server: bool,
    no_watchman: bool,
) -> ExitCode:
    """Run an incremental check via the v2 command when enabled.

    Falls back to the legacy `commands.Incremental` path when
    `use_command_v2` is off.
    """
    configuration = _create_configuration_with_retry(arguments, Path("."))
    if not arguments.use_command_v2:
        # Legacy path: delegate to the v1 Incremental command object.
        return run_pyre_command(
            commands.Incremental(
                arguments,
                original_directory=os.getcwd(),
                configuration=configuration,
                nonblocking=nonblocking,
                incremental_style=incremental_style,
                no_start_server=no_start_server,
                no_watchman=no_watchman,
            ),
            configuration,
            arguments.noninteractive,
        )
    server_start_arguments = command_arguments.StartArguments(
        changed_files_path=arguments.changed_files_path,
        debug=arguments.debug,
        load_initial_state_from=arguments.load_initial_state_from,
        no_saved_state=arguments.no_saved_state,
        no_watchman=no_watchman,
        save_initial_state_to=arguments.save_initial_state_to,
        saved_state_project=arguments.saved_state_project,
        sequential=arguments.sequential,
        show_error_traces=arguments.show_error_traces,
        store_type_check_resolution=False,
        terminal=False,
        wait_on_initialization=True,
    )
    return v2.incremental.run(
        configuration,
        command_arguments.IncrementalArguments(
            output=arguments.output,
            no_start=no_start_server,
            start_arguments=server_start_arguments,
        ),
    )
def _run_default_command(arguments: command_arguments.CommandArguments) -> ExitCode:
    """Run an incremental check when watchman is available, else a full check."""
    if shutil.which("watchman") is None:
        # Without watchman we cannot track file changes, so fall back to a
        # one-shot check after pointing the user at the install docs.
        watchman_link = "https://facebook.github.io/watchman/docs/install"
        LOG.warning(
            "No watchman binary found. \n"
            "To enable pyre incremental, "
            "you can install watchman: {}".format(watchman_link)
        )
        LOG.warning("Defaulting to non-incremental check.")
        return _run_check_command(arguments)
    return _run_incremental_command(
        arguments=arguments,
        nonblocking=False,
        incremental_style=commands.IncrementalStyle.FINE_GRAINED,
        no_start_server=False,
        no_watchman=False,
    )
def _create_configuration_with_retry(
    arguments: command_arguments.CommandArguments, base_directory: Path
) -> configuration_module.Configuration:
    """Create a configuration; when it has nothing to analyze, interactively
    offer to rerun under a recently-used local configuration root.

    Raises ``configuration_module.InvalidConfiguration`` when no analyzable
    sources can be determined (no recent roots, user declined the prompt, or
    the rerun configuration is also empty).
    """
    configuration = configuration_module.create_configuration(arguments, base_directory)
    if len(configuration.source_directories) > 0 or len(configuration.targets) > 0:
        return configuration
    # Heuristic: If neither `source_directories` nor `targets` is specified,
    # and if there exists recently-used local configurations, we guess that
    # the user may have forgotten to specify `-l`.
    error_message = "No buck targets or source directories to analyze."
    recently_used_local_roots = recently_used_configurations.Cache(
        configuration.dot_pyre_directory
    ).get_all_items()
    if len(recently_used_local_roots) == 0:
        raise configuration_module.InvalidConfiguration(error_message)
    LOG.warning(error_message)
    local_root_for_rerun = recently_used_configurations.prompt_user_for_local_root(
        recently_used_local_roots
    )
    if local_root_for_rerun is None:
        raise configuration_module.InvalidConfiguration(
            "Cannot determine which recent local root to rerun. "
        )
    LOG.warning(f"Running pyre under local root `{local_root_for_rerun}`...")
    LOG.warning(
        f"Hint: To avoid this prompt, run `pyre -l {local_root_for_rerun}` "
        f"or `cd {local_root_for_rerun} && pyre`."
    )
    # Retry configuration creation with the chosen local root substituted in.
    new_configuration = configuration_module.create_configuration(
        replace(arguments, local_configuration=local_root_for_rerun), base_directory
    )
    if (
        len(new_configuration.source_directories) > 0
        or len(new_configuration.targets) > 0
    ):
        return new_configuration
    raise configuration_module.InvalidConfiguration(error_message)
# Top-level `pyre` command group. The options declared below are global and
# are bundled into a CommandArguments object shared with every subcommand;
# running bare `pyre` falls through to `_run_default_command`.
@click.group(invoke_without_command=True)
@click.pass_context
@click.option(
    "-l",
    "--local-configuration",
    type=str,
    help="Specify a path where Pyre could find a local configuration.",
)
@click.option(
    "--version",
    is_flag=True,
    default=False,
    help="Print the client and binary versions of Pyre.",
)
@click.option("--debug/--no-debug", default=False, hidden=True)
@click.option(
    "--sequential/--no-sequential",
    default=None,
    help="Run Pyre in single-threaded mode.",
)
@click.option(
    "--strict/--no-strict",
    default=None,
    help="Check all file in strict mode by default.",
)
@click.option("--additional-check", type=str, multiple=True, hidden=True)
@click.option("--show-error-traces/--no-show-error-traces", default=False, hidden=True)
@click.option(
    "--output",
    type=click.Choice(
        [command_arguments.TEXT, command_arguments.JSON], case_sensitive=False
    ),
    default=command_arguments.TEXT,
    help="How to format output.",
)
@click.option("--enable-profiling/--no-enable-profiling", default=False, hidden=True)
@click.option(
    "--enable-memory-profiling/--no-enable-memory-profiling", default=False, hidden=True
)
@click.option(
    "-n", "--noninteractive", is_flag=True, help="Disable interactive logging."
)
@click.option("--logging-sections", type=str, hidden=True)
@click.option("--log-identifier", type=str, default=None, hidden=True)
@click.option("--dot-pyre-directory", type=str, hidden=True)
@click.option("--logger", type=str, hidden=True)
@click.option("--formatter", type=str, hidden=True)
@click.option(
    "--target",
    type=str,
    multiple=True,
    help=(
        "The buck target to check. "
        "Can be specified multiple times to include multiple directories."
    ),
)
@click.option(
    "--use-buck-builder/--use-legacy-buck-builder",
    default=None,
    help="Use Pyre's own Java builder for Buck projects.",
)
@click.option("--buck-mode", type=str, help="Mode to pass to `buck query`")
@click.option(
    "--use-buck-source-database/--no-use-buck-source-database",
    default=None,
    hidden=True,
)
@click.option(
    "--source-directory",
    type=str,
    multiple=True,
    help=(
        "The source directory to check. "
        "Can be specified multiple times to include multiple directories."
    ),
)
@click.option("--filter-directory", type=str, hidden=True)
@click.option(
    "--no-saved-state",
    is_flag=True,
    hidden=True,
    help="Do not attempt loading Pyre from saved state.",
)
@click.option(
    "--search-path",
    type=str,
    multiple=True,
    help=(
        "Additional directory of modules and stubs to include in the type environment. "
        "Can be specified multiple times to include multiple directories."
    ),
)
@click.option(
    "--binary", type=str, show_envvar=True, help="Override location of the Pyre binary."
)
@click.option(
    "--buck-builder-binary",
    type=str,
    show_envvar=True,
    help="Override location of the buck builder binary.",
)
@click.option("--exclude", type=str, multiple=True, hidden=True)
@click.option(
    "--typeshed",
    type=str,
    show_envvar=True,
    help="Override location of the typeshed stubs.",
)
@click.option("--save-initial-state-to", type=str, hidden=True)
@click.option("--load-initial-state-from", type=str, hidden=True)
@click.option("--changed-files-path", type=str, hidden=True)
@click.option("--saved-state-project", type=str, hidden=True)
@click.option("--features", type=str, hidden=True)
@click.option("--use-command-v2", is_flag=True, default=False, hidden=True)
@click.option("--isolation-prefix", type=str, hidden=True)
def pyre(
    context: click.Context,
    local_configuration: Optional[str],
    version: bool,
    debug: bool,
    sequential: Optional[bool],
    strict: Optional[bool],
    additional_check: Iterable[str],
    show_error_traces: bool,
    output: str,
    enable_profiling: bool,
    enable_memory_profiling: bool,
    noninteractive: bool,
    logging_sections: Optional[str],
    log_identifier: Optional[str],
    dot_pyre_directory: Optional[str],
    logger: Optional[str],
    formatter: Optional[str],
    target: Iterable[str],
    use_buck_builder: Optional[bool],
    buck_mode: Optional[str],
    use_buck_source_database: Optional[bool],
    source_directory: Iterable[str],
    filter_directory: Optional[str],
    no_saved_state: bool,
    search_path: Iterable[str],
    binary: Optional[str],
    buck_builder_binary: Optional[str],
    exclude: Iterable[str],
    typeshed: Optional[str],
    save_initial_state_to: Optional[str],
    load_initial_state_from: Optional[str],
    changed_files_path: Optional[str],
    saved_state_project: Optional[str],
    features: Optional[str],
    use_command_v2: bool,
    isolation_prefix: Optional[str],
) -> int:
    # NOTE: intentionally no docstring -- click would surface it as the
    # group's --help text.
    # Bundle every raw click option into a single argument object that the
    # subcommands consume via context.obj.
    arguments = command_arguments.CommandArguments(
        local_configuration=local_configuration,
        version=version,
        debug=debug,
        # Tri-state flags (None = "not specified") normalize to plain bools.
        sequential=sequential or False,
        strict=strict or False,
        additional_checks=list(additional_check),
        show_error_traces=show_error_traces,
        output=output,
        enable_profiling=enable_profiling,
        enable_memory_profiling=enable_memory_profiling,
        noninteractive=noninteractive,
        logging_sections=logging_sections,
        log_identifier=log_identifier,
        logger=logger,
        formatter=formatter,
        targets=list(target),
        use_buck_builder=use_buck_builder,
        use_buck_source_database=use_buck_source_database,
        source_directories=list(source_directory),
        filter_directory=filter_directory,
        buck_mode=buck_mode,
        no_saved_state=no_saved_state,
        search_path=list(search_path),
        binary=binary,
        buck_builder_binary=buck_builder_binary,
        exclude=list(exclude),
        typeshed=typeshed,
        save_initial_state_to=save_initial_state_to,
        load_initial_state_from=load_initial_state_from,
        changed_files_path=changed_files_path,
        saved_state_project=saved_state_project,
        dot_pyre_directory=Path(dot_pyre_directory)
        if dot_pyre_directory is not None
        else None,
        features=features,
        use_command_v2=use_command_v2,
        isolation_prefix=isolation_prefix,
    )
    if arguments.version:
        # `pyre --version` short-circuits any subcommand.
        _show_pyre_version(arguments)
        return ExitCode.SUCCESS
    # Stash parsed arguments so subcommands can fetch them via context.obj.
    context.ensure_object(dict)
    context.obj["arguments"] = arguments
    if context.invoked_subcommand is None:
        return _run_default_command(arguments)
    # This return value is not used anywhere.
    return ExitCode.SUCCESS
@pyre.command()
@click.argument("analysis", type=str, default="taint")
@click.option(
    "--taint-models-path",
    type=filesystem.readable_directory,
    multiple=True,
    help="Location of taint models.",
)
@click.option(
    "--no-verify",
    is_flag=True,
    default=False,
    help="Do not verify models for the taint analysis.",
)
@click.option(
    "--save-results-to",
    type=filesystem.writable_directory,
    help="Directory to write analysis results to.",
)
@click.option("--dump-call-graph", is_flag=True, default=False, hidden=True)
@click.option("--repository-root", type=os.path.abspath)
@click.option("--rule", type=int, multiple=True, hidden=True)
@click.option(
    "--find-missing-flows",
    type=click.Choice([kind.value for kind in MissingFlowsKind]),
    help="Perform a taint analysis to find flows through obscure models.",
)
@click.option(
    "--dump-model-query-results",
    is_flag=True,
    default=False,
    help="Provide model query debugging output.",
)
@click.option(
    "--use-cache",
    is_flag=True,
    default=False,
    help="Store information in .pyre/pysa.cache for faster runs.",
)
@click.pass_context
def analyze(
    context: click.Context,
    analysis: str,
    taint_models_path: Iterable[str],
    no_verify: bool,
    save_results_to: Optional[str],
    dump_call_graph: bool,
    repository_root: Optional[str],
    rule: Iterable[int],
    find_missing_flows: Optional[str],
    dump_model_query_results: bool,
    use_cache: bool,
) -> int:
    """
    Run Pysa, the inter-procedural static analysis tool.
    """
    # Retrieve the global options parsed by the top-level `pyre` group.
    command_argument: command_arguments.CommandArguments = context.obj["arguments"]
    configuration = _create_configuration_with_retry(command_argument, Path("."))
    rules = list(rule)
    return run_pyre_command(
        commands.Analyze(
            command_argument,
            original_directory=os.getcwd(),
            configuration=configuration,
            analysis=analysis,
            taint_models_path=list(taint_models_path),
            no_verify=no_verify,
            save_results_to=save_results_to,
            dump_call_graph=dump_call_graph,
            repository_root=repository_root,
            # An empty --rule list is passed as None (i.e. "all rules").
            rules=list(rules) if len(rules) > 0 else None,
            find_missing_flows=(
                MissingFlowsKind(find_missing_flows)
                if find_missing_flows is not None
                else None
            ),
            dump_model_query_results=dump_model_query_results,
            use_cache=use_cache,
        ),
        configuration,
        command_argument.noninteractive,
    )
@pyre.command()
@click.pass_context
def check(context: click.Context) -> int:
    """
    Runs a one-time type check of a Python project.
    """
    # Delegate to the shared check runner with the group-level arguments.
    return _run_check_command(context.obj["arguments"])
@pyre.command()
@click.option(
    "--nonblocking",
    is_flag=True,
    default=False,
    help=(
        "[DEPRECATED] Ask the server to return partial results immediately, "
        "even if analysis is still in progress."
    ),
)
@click.option(
    "--incremental-style",
    type=click.Choice(
        [
            str(commands.IncrementalStyle.SHALLOW),
            str(commands.IncrementalStyle.FINE_GRAINED),
        ]
    ),
    default=str(commands.IncrementalStyle.FINE_GRAINED),
    help="[DEPRECATED] How to approach doing incremental checks.",
)
@click.option("--no-start", is_flag=True, default=False, hidden=True)
# This is mostly to allow `restart` to pass on the flag to `start`.
@click.option("--no-watchman", is_flag=True, default=False, hidden=True)
@click.pass_context
def incremental(
    context: click.Context,
    nonblocking: bool,
    incremental_style: str,
    no_start: bool,
    no_watchman: bool,
) -> int:
    """
    Connects to a running Pyre server and returns the current type errors for your
    project. If no server exists for your projects, starts a new one. Running `pyre`
    implicitly runs `pyre incremental`.
    By default, incremental checks ensure that all dependencies of changed files are
    analyzed before returning results. If you'd like to get partial type checking
    results eagerly, you can run `pyre incremental --nonblocking`.
    """
    command_argument: command_arguments.CommandArguments = context.obj["arguments"]
    return _run_incremental_command(
        arguments=command_argument,
        nonblocking=nonblocking,
        # Map the string choice from click back onto the IncrementalStyle enum.
        incremental_style=commands.IncrementalStyle.SHALLOW
        if incremental_style == str(commands.IncrementalStyle.SHALLOW)
        else commands.IncrementalStyle.FINE_GRAINED,
        no_start_server=no_start,
        no_watchman=no_watchman,
    )
@pyre.command()
@click.argument("modify_paths", type=filesystem.file_or_directory_exists, nargs=-1)
@click.option(
| |
from __future__ import print_function
#
# General
#
# Process exit codes, mirroring the conventional Unix values.
EXIT_SUCCESS = 0
EXIT_FAILURE = 1
def quit():
  """Exit the application with a success status code."""
  exit(EXIT_SUCCESS)
def exit(status=EXIT_SUCCESS):
  """Exit the application with the given status code.

  NOTE: intentionally shadows the builtin ``exit``; this module is accessed
  as ``slicer.util``, so callers write ``slicer.util.exit()``.
  """
  from slicer import app
  # NOTE(review): clearing runPythonAndExit presumably prevents a second
  # exit triggered by the corresponding startup option -- confirm.
  app.commandOptions().runPythonAndExit = False
  app.exit(status)
def restart():
  """Restart the application."""
  from slicer import app
  app.restart()
def _readCMakeCache(var):
  """Return the value of CMake cache variable ``var``, or ``None``.

  Looks ``var`` up in the ``CMakeCache.txt`` of the Slicer home directory.
  Returns ``None`` when the cache file is missing or unreadable (e.g. for
  an installed Slicer) or does not define the variable.
  """
  import os
  from slicer import app
  # Cache entries look like "NAME:TYPE=value"; match on "NAME:" so that
  # variables sharing a common prefix are not confused with each other.
  prefix = var + ":"
  try:
    with open(os.path.join(app.slicerHome, "CMakeCache.txt")) as cache:
      for line in cache:
        if line.startswith(prefix):
          return line.split("=", 1)[1].rstrip()
  except EnvironmentError:
    # BUGFIX: was a bare `except:` that also swallowed KeyboardInterrupt,
    # SystemExit and programming errors; only I/O failures are expected here.
    pass
  return None
def sourceDir():
  """Location of the Slicer source directory.

  :type: :class:`str` or ``None``

  This provides the location of the Slicer source directory, if Slicer is being
  run from a CMake build directory. If the Slicer home directory does not
  contain a ``CMakeCache.txt`` (e.g. for an installed Slicer), the property
  will have the value ``None``.
  """
  return _readCMakeCache('Slicer_SOURCE_DIR')
#
# Custom Import
#
def importVTKClassesFromDirectory(directory, dest_module_name, filematch = '*'):
  """Import VTK classes (subclasses of ``vtkObjectBase``) found in the python
  modules of ``directory`` into the module named ``dest_module_name``."""
  from vtk import vtkObjectBase
  importClassesFromDirectory(directory, dest_module_name, vtkObjectBase, filematch)
def importQtClassesFromDirectory(directory, dest_module_name, filematch = '*'):
  """Import PythonQt-wrapped classes found in the python modules of
  ``directory`` into the module named ``dest_module_name``."""
  importClassesFromDirectory(directory, dest_module_name, 'PythonQtClassWrapper', filematch)
# To avoid globbing multiple times the same directory, successful
# call to ``importClassesFromDirectory()`` will be indicated by
# adding an entry to the ``__import_classes_cache`` set.
#
# Each entry is a comma-joined string of the call's arguments:
# "directory,dest_module_name,type_info,filematch"
__import_classes_cache = set()
def importClassesFromDirectory(directory, dest_module_name, type_info, filematch = '*'):
  """Import classes matching ``type_info`` from every python module found in
  ``directory`` whose filename matches ``filematch``.

  ``type_info`` may be a type object or a type-name string (see
  ``importModuleObjects``). Import errors of individual modules are reported
  on stderr and do not abort the scan.
  """
  # Create entry for __import_classes_cache
  cache_key = ",".join([str(arg) for arg in [directory, dest_module_name, type_info, filematch]])
  # Check if function has already been called with this set of parameters
  if cache_key in __import_classes_cache:
    return
  import glob, os, re, fnmatch
  # fnmatch.translate yields an exact-match regex for the basename; glob may
  # match more loosely, so each candidate is filtered again below.
  re_filematch = re.compile(fnmatch.translate(filematch))
  for fname in glob.glob(os.path.join(directory, filematch)):
    if not re_filematch.match(os.path.basename(fname)):
      continue
    try:
      from_module_name = os.path.splitext(os.path.basename(fname))[0]
      importModuleObjects(from_module_name, dest_module_name, type_info)
    except ImportError as detail:
      import sys
      print(detail, file=sys.stderr)
  # Mark this parameter combination as processed.
  __import_classes_cache.add(cache_key)
def importModuleObjects(from_module_name, dest_module_name, type_info):
  """Import object of type 'type_info' (str or type) from module identified
  by 'from_module_name' into the module identified by 'dest_module_name'.

  When ``type_info`` is a type, matching uses ``issubclass``; when it is a
  string, the name of each candidate's type is compared against it.
  """
  # Obtain a reference to the module identified by 'dest_module_name'
  import sys
  dest_module = sys.modules[dest_module_name]
  # Skip if module has already been loaded
  if from_module_name in sys.modules:
    return
  # Obtain a reference to the module identified by 'from_module_name'
  import imp
  fp, pathname, description = imp.find_module(from_module_name)
  try:
    module = imp.load_module(from_module_name, fp, pathname, description)
  finally:
    # BUGFIX: imp.find_module() may return an open file object and, per the
    # `imp` module documentation, the caller is responsible for closing it;
    # it was previously leaked.
    if fp:
      fp.close()
  # Loop over content of the python module associated with the given python library
  for item_name in dir(module):
    # Obtain a reference associated with the current object
    item = getattr(module, item_name)
    # Check type match by type or type name
    match = False
    if isinstance(type_info, type):
      try:
        match = issubclass(item, type_info)
      except TypeError:
        # issubclass() raises when 'item' is not a class: simply no match.
        match = False
    else:
      match = type(item).__name__ == type_info
    if match:
      setattr(dest_module, item_name, item)
#
# UI
#
def lookupTopLevelWidget(objectName, verbose = True):
  """Return the top level widget of 'slicer.app' whose ``objectName``
  matches the given name, or ``None`` when there is no match (reporting an
  error on stderr unless ``verbose`` is false)."""
  from slicer import app
  for widget in app.topLevelWidgets():
    if hasattr(widget, 'objectName') and widget.objectName == objectName:
      return widget
  if verbose:
    import sys
    print("Failed to obtain reference to '%s'" % objectName, file=sys.stderr)
  return None
def mainWindow(verbose = True):
  """Return the Slicer main window widget, or ``None`` if unavailable."""
  return lookupTopLevelWidget('qSlicerAppMainWindow', verbose)
def pythonShell(verbose = True):
  """Return the application python console, or ``None`` if unavailable."""
  # BUGFIX: 'slicer' and 'sys' were referenced without being imported,
  # raising NameError whenever this function was called (every sibling in
  # this module imports its dependencies locally).
  import slicer
  console = slicer.app.pythonConsole()
  if not console and verbose:
    import sys
    print("Failed to obtain reference to python shell", file=sys.stderr)
  return console
def showStatusMessage(message, duration = 0):
  """Display 'message' in the main window status bar for 'duration' ms
  (0 keeps the message until the next one replaces it)."""
  mw = mainWindow(verbose=False)
  if mw:
    mw.statusBar().showMessage(message, duration)
def findChildren(widget=None, name="", text="", title="", className=""):
  """ Return a list of child widgets that meet all the given criteria.
  If no criteria are provided, the function will return all widgets descendants.
  If no widget is provided, slicer.util.mainWindow() is used.
  :param widget: parent widget where the widgets will be searched
  :param name: name attribute of the widget
  :param text: text attribute of the widget
  :param title: title attribute of the widget
  :param className: className() attribute of the widget
  :return: list with all the widgets that meet all the given criteria.
  """
  # TODO: figure out why the native QWidget.findChildren method does not seem to work from PythonQt
  import slicer, fnmatch
  if not widget:
    widget = mainWindow()
  if not widget:
    return []
  children = []
  parents = [widget]
  kwargs = {'name': name, 'text': text, 'title': title, 'className': className}
  # BUGFIX: dict.iterkeys() is Python 2 only and broke this module on
  # Python 3; items() works on both. Only non-empty patterns participate.
  expected_matches = [attribute for attribute, pattern in kwargs.items() if pattern]
  while parents:
    p = parents.pop()
    # sometimes, p is null, f.e. when using --python-script or --python-code
    if not p:
      break
    if not hasattr(p, 'children'):
      continue
    parents += p.children()
    matched_filter_criteria = 0
    for attribute in expected_matches:
      if hasattr(p, attribute):
        attr_name = getattr(p, attribute)
        if attribute == 'className':
          # className is a method, not a direct attribute. Invoke the method
          attr_name = attr_name()
        if fnmatch.fnmatchcase(attr_name, kwargs[attribute]):
          matched_filter_criteria += 1
    # A widget is kept only when every requested criterion matched (with no
    # criteria requested, every descendant matches).
    if matched_filter_criteria == len(expected_matches):
      children.append(p)
  return children
#
# IO
#
def loadNodeFromFile(filename, filetype, properties=None, returnNode=False):
  """Load a node of the given 'filetype' from 'filename'.

  :param properties: optional dict of reader properties; 'fileName' is set
    from 'filename'.
  :param returnNode: when true, return a (success, node) pair instead of
    just the success flag.
  """
  from slicer import app
  from vtk import vtkCollection
  # BUGFIX: the previous mutable default argument ({}) was shared between
  # calls and mutated below, leaking 'fileName' (and callers' entries)
  # across otherwise independent loads.
  if properties is None:
    properties = {}
  properties['fileName'] = filename
  if returnNode:
    loadedNodes = vtkCollection()
    success = app.coreIOManager().loadNodes(filetype, properties, loadedNodes)
    return success, loadedNodes.GetItemAsObject(0)
  else:
    success = app.coreIOManager().loadNodes(filetype, properties)
    return success
def loadColorTable(filename, returnNode=False):
  """Load a color table node from 'filename'."""
  filetype = 'ColorTableFile'
  return loadNodeFromFile(filename, filetype, {}, returnNode)
def loadFiberBundle(filename, returnNode=False):
  """Load a fiber bundle node from 'filename'."""
  filetype = 'FiberBundleFile'
  return loadNodeFromFile(filename, filetype, {}, returnNode)
def loadFiducialList(filename, returnNode=False):
  """Load a fiducial list node from 'filename'."""
  filetype = 'FiducialListFile'
  return loadNodeFromFile(filename, filetype, {}, returnNode)
def loadAnnotationFiducial(filename, returnNode=False):
  """Load an annotation fiducial node from 'filename'."""
  filetype = 'AnnotationFile'
  properties = {}
  properties['fiducial'] = 1
  return loadNodeFromFile(filename, filetype, properties, returnNode)
def loadMarkupsFiducialList(filename, returnNode=False):
  """Load a markups fiducial list node from 'filename'."""
  filetype = 'MarkupsFiducials'
  properties = {}
  return loadNodeFromFile(filename, filetype, properties, returnNode)
def loadModel(filename, returnNode=False):
  """Load a model node from 'filename'."""
  filetype = 'ModelFile'
  return loadNodeFromFile(filename, filetype, {}, returnNode)
def loadScalarOverlay(filename, returnNode=False):
  """Load a scalar overlay node from 'filename'."""
  filetype = 'ScalarOverlayFile'
  return loadNodeFromFile(filename, filetype, {}, returnNode)
def loadSegmentation(filename, returnNode=False):
  """Load a segmentation node from 'filename'."""
  filetype = 'SegmentationFile'
  return loadNodeFromFile(filename, filetype, {}, returnNode)
def loadTransform(filename, returnNode=False):
  """Load a transform node from 'filename'."""
  filetype = 'TransformFile'
  return loadNodeFromFile(filename, filetype, {}, returnNode)
def loadLabelVolume(filename, properties=None, returnNode=False):
  """Load a label map volume from 'filename'.

  BUGFIX: 'properties' previously defaulted to a shared mutable dict that
  was mutated here ('labelmap') and in loadNodeFromFile ('fileName'),
  leaking state across calls.
  """
  filetype = 'VolumeFile'
  if properties is None:
    properties = {}
  properties['labelmap'] = True
  return loadNodeFromFile(filename, filetype, properties, returnNode)
def loadVolume(filename, properties=None, returnNode=False):
  """Load a volume from 'filename'.

  BUGFIX: mutable default argument replaced with a None sentinel (the shared
  default dict was mutated inside loadNodeFromFile).
  """
  filetype = 'VolumeFile'
  if properties is None:
    properties = {}
  return loadNodeFromFile(filename, filetype, properties, returnNode)
def loadScene(filename, properties=None):
  """Load a MRML scene from 'filename'.

  BUGFIX: mutable default argument replaced with a None sentinel (the shared
  default dict was mutated inside loadNodeFromFile).
  """
  filetype = 'SceneFile'
  if properties is None:
    properties = {}
  return loadNodeFromFile(filename, filetype, properties, returnNode=False)
def openAddDataDialog():
  """Open the generic 'Add Data' dialog."""
  from slicer import app
  return app.coreIOManager().openAddDataDialog()
def openAddVolumeDialog():
  """Open the 'Add Volume' dialog."""
  from slicer import app
  return app.coreIOManager().openAddVolumeDialog()
def openAddModelDialog():
  """Open the 'Add Model' dialog."""
  from slicer import app
  return app.coreIOManager().openAddModelDialog()
def openAddScalarOverlayDialog():
  """Open the 'Add Scalar Overlay' dialog."""
  from slicer import app
  return app.coreIOManager().openAddScalarOverlayDialog()
def openAddSegmentationDialog():
  """Open a read dialog for segmentation files."""
  from slicer import app, qSlicerFileDialog
  return app.coreIOManager().openDialog('SegmentationFile', qSlicerFileDialog.Read)
def openAddTransformDialog():
  """Open the 'Add Transform' dialog."""
  from slicer import app
  return app.coreIOManager().openAddTransformDialog()
def openAddColorTableDialog():
  """Open the 'Add Color Table' dialog."""
  from slicer import app
  return app.coreIOManager().openAddColorTableDialog()
def openAddFiducialDialog():
  """Open the 'Add Fiducial' dialog."""
  from slicer import app
  return app.coreIOManager().openAddFiducialDialog()
def openAddFiberBundleDialog():
  """Open the 'Add Fiber Bundle' dialog."""
  from slicer import app
  return app.coreIOManager().openAddFiberBundleDialog()
def openSaveDataDialog():
  """Open the 'Save Data' dialog."""
  from slicer import app
  return app.coreIOManager().openSaveDataDialog()
def saveNode(node, filename, properties=None):
  """Save 'node' data into 'filename'.

  It is the user responsibility to provide the appropriate file extension.
  User has also the possibility to overwrite the fileType internally retrieved using
  method 'qSlicerCoreIOManager::fileWriterFileType(vtkObject*)'. This can be done
  by specifying a 'fileType' attribute to the optional 'properties' dictionary.
  """
  from slicer import app
  # BUGFIX: the mutable default argument ({}) was shared across calls and
  # mutated below, leaking 'nodeID'/'fileName' between invocations.
  if properties is None:
    properties = {}
  properties["nodeID"] = node.GetID()
  properties["fileName"] = filename
  # BUGFIX: hasattr() inspects object attributes, not dictionary keys, so
  # the documented 'fileType' override could never trigger; use a key
  # membership test instead.
  if "fileType" in properties:
    filetype = properties["fileType"]
  else:
    filetype = app.coreIOManager().fileWriterFileType(node)
  return app.coreIOManager().saveNodes(filetype, properties)
def saveScene(filename, properties=None):
  """Save the current scene.

  Based on the value of 'filename', the current scene is saved either
  as a MRML file, MRB file or directory.
  If filename ends with '.mrml', the scene is saved as a single file
  without associated data.
  If filename ends with '.mrb', the scene is saved as a MRML bundle (Zip
  archive with scene and data files).
  In every other case, the scene is saved in the directory
  specified by 'filename'. Both MRML scene file and data
  will be written to disk. If needed, directories and sub-directories
  will be created.
  """
  from slicer import app
  filetype = 'SceneFile'
  # BUGFIX: the mutable default argument ({}) was shared across calls and
  # mutated below, leaking the previous 'fileName' between invocations.
  if properties is None:
    properties = {}
  properties['fileName'] = filename
  return app.coreIOManager().saveNodes(filetype, properties)
#
# Module
#
def moduleSelector():
  """Return the module selector of the main window, or ``None`` when the
  main window cannot be found (an error is reported on stderr)."""
  window = mainWindow()
  if window:
    return window.moduleSelector()
  import sys
  print("Could not find main window", file=sys.stderr)
  return None
def selectModule(module):
  """Set the currently selected module.

  'module' may be a module object (its ``name`` attribute is used) or a
  module name string.
  """
  moduleName = module
  if not isinstance(module, basestring):
    moduleName = module.name
  selector = moduleSelector()
  if not selector:
    import sys
    print("Could not find moduleSelector in the main window", file=sys.stderr)
    return None
  # BUGFIX: reuse the selector already looked up above instead of calling
  # moduleSelector() a second time (which redid the lookup and could report
  # the error twice).
  selector.selectModule(moduleName)
def selectedModule():
  """Return the currently selected module, or ``None`` when the module
  selector cannot be found (an error is reported on stderr)."""
  selector = moduleSelector()
  if selector:
    return selector.selectedModule
  import sys
  print("Could not find moduleSelector in the main window", file=sys.stderr)
  return None
def moduleNames():
  """Return the names of all loaded Slicer modules."""
  from slicer import app
  return app.moduleManager().factoryManager().loadedModuleNames()
def getModule(moduleName):
  """Return the module object named 'moduleName', or ``None`` (reporting an
  error on stderr when no such module is loaded)."""
  from slicer import app
  module = app.moduleManager().module(moduleName)
  if not module:
    import sys
    # BUGFIX: the error message was missing its closing quote.
    print("Could not find module with name '%s'" % moduleName, file=sys.stderr)
    return None
  return module
def getModuleGui(module):
  """Return the widget representation of 'module' (object or name)."""
  if isinstance(module, basestring):
    module = getModule(module)
  widgetRepr = module.widgetRepresentation()
  if not widgetRepr:
    import sys
    # BUGFIX: the error message was missing its closing quote.
    print("Could not find module widget representation with name '%s'" % module.name, file=sys.stderr)
  return widgetRepr
def getNewModuleGui(module):
  """Create and return a new widget representation of 'module' (object or
  name)."""
  if isinstance(module, basestring):
    module = getModule(module)
  widgetRepr = module.createNewWidgetRepresentation()
  if not widgetRepr:
    import sys
    # BUGFIX: the error message was missing its closing quote.
    print("Could not find module widget representation with name '%s'" % module.name, file=sys.stderr)
  return widgetRepr
def modulePath(moduleName):
| |
# <gh_stars>0  (repository-scrape artifact, kept as a comment so the module parses)
# Compare ray by ray tracing to Zemax
import os
import pytest
import galsim
import numpy as np
from scipy.optimize import least_squares
import batoid
from test_helpers import timer, init_gpu
directory = os.path.dirname(__file__)
@timer
def test_HSC_trace():
    """Ray-by-ray comparison of batoid's HSC trace against Zemax output."""
    telescope = batoid.Optic.fromYaml("HSC_old.yaml")
    # Zemax has a number of virtual surfaces that we don't trace in batoid.
    # Also, the HSC.yaml above includes Baffle surfaces not in Zemax. The
    # following lists select out the surfaces in common to both models.
    HSC_surfaces = [
        3, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17, 18, 19, 20, 21, 24, 25, 28, 29,
        31
    ]
    surface_names = ['PM', 'G1_entrance', 'G1_exit', 'G2_entrance', 'G2_exit',
                     'ADC1_entrance', 'ADC1_exit', 'ADC2_entrance', 'ADC2_exit',
                     'G3_entrance', 'G3_exit', 'G4_entrance', 'G4_exit',
                     'G5_entrance', 'G5_exit', 'F_entrance', 'F_exit',
                     'W_entrance', 'W_exit', 'D']
    for fn in [
        "HSC_raytrace_1.txt", "HSC_raytrace_2.txt", "HSC_raytrace_3.txt"
    ]:
        filename = os.path.join(directory, "testdata", fn)
        with open(filename) as f:
            arr = np.loadtxt(f, skiprows=22, usecols=list(range(0, 12)))
        arr0 = arr[0]
        # Initial ray built from the first Zemax row; positions are divided
        # by 1000 (presumably mm -> m -- confirm) and the z direction cosine
        # is sign-flipped.
        rv = batoid.RayVector(
            arr0[1]/1000, arr0[2]/1000, 16.0,
            arr0[4], arr0[5], -arr0[6],
            t=0, wavelength=750e-9
        )
        tf = telescope.traceFull(rv)
        i = 0
        for name in surface_names:
            surface = tf[name]
            srv = surface['out']
            # NOTE(review): return value ignored -- assumes toCoordSys
            # mutates srv in place; confirm against the batoid API.
            srv.toCoordSys(batoid.CoordSys())
            bt_isec = np.array([srv.x, srv.y, srv.z-16.0]).T[0]
            zx_isec = arr[HSC_surfaces[i]-1][1:4]/1000
            # nanometer agreement
            np.testing.assert_allclose(bt_isec, zx_isec, rtol=0, atol=1e-9)
            v = srv.v/np.linalg.norm(srv.v)
            bt_angle = v[0]
            zx_angle = arr[HSC_surfaces[i]-1][4:7]
            # direction cosines agree to 1e-9
            np.testing.assert_allclose(bt_angle, zx_angle, rtol=0, atol=1e-9)
            i += 1
@timer
def test_HSC_huygensPSF():
    """Compare a batoid Huygens PSF for HSC against a Zemax reference image,
    after aligning the two images with a least-squares x/y-shift + flux fit."""
    fn = os.path.join(directory, "testdata", "HSC_huygensPSF.txt")
    with open(fn) as f:
        Zarr = np.loadtxt(f, skiprows=21)
    Zarr = Zarr[::-1]  # Need to invert, probably just a Zemax convention...
    telescope = batoid.Optic.fromYaml("HSC_no_obsc.yaml")
    thx = np.deg2rad(0.0)
    thy = np.deg2rad(0.75)
    wavelength = 750e-9
    nx = 128
    dx = 0.25e-6
    print("computing Huygens PSF")
    hPSF = batoid.huygensPSF(
        telescope,
        thx, thy, projection='zemax',
        wavelength=wavelength,
        nx=nx, dx=dx, nxOut=256,
        reference='mean'
    )
    print("Done")
    # Normalize images
    Zarr /= np.sum(Zarr)
    hPSF.array /= np.sum(hPSF.array)
    # Both images are rescaled by the Zemax peak, so the comparison below is
    # in units of the reference peak intensity.
    Zmax = np.max(Zarr)
    Zarr /= Zmax
    hPSF.array /= Zmax
    # Use GalSim InterpolateImage to align and subtract
    ii = galsim.InterpolatedImage(
        galsim.Image(hPSF.array, scale=0.25),
        normalization='sb'
    )
    # Now setup an optimizer to fit for x/y shift
    def modelimg(params, ii=ii):
        dx, dy, dlogflux = params
        model = ii.shift(dx, dy)*np.exp(dlogflux)
        return model.drawImage(method='sb', scale=0.25, nx=256, ny=256)
    def resid(params, ii=ii, Zarr=Zarr):
        img = modelimg(params, ii=ii)
        r = (img.array - Zarr).ravel()
        return r
    kwargs = dict(ii=ii, Zarr=Zarr)
    print("Aligning")
    result = least_squares(resid, np.array([0.0, 0.0, 0.0]), kwargs=kwargs)
    optImg = modelimg(result.x, ii=ii)
    print("Done")
    # Pixel-level agreement to 3% of peak, then adaptive-moment shape checks.
    np.testing.assert_allclose(Zarr, optImg.array, rtol=0, atol=3e-2)
    Zmom = galsim.hsm.FindAdaptiveMom(galsim.Image(Zarr, scale=0.25))
    bmom = galsim.hsm.FindAdaptiveMom(optImg)
    np.testing.assert_allclose(
        Zmom.observed_shape.g1,
        bmom.observed_shape.g1,
        rtol=0, atol=0.01
    )
    np.testing.assert_allclose(
        Zmom.observed_shape.g2,
        bmom.observed_shape.g2,
        rtol=0, atol=1e-7
    )
    np.testing.assert_allclose(
        Zmom.moments_sigma,
        bmom.moments_sigma,
        rtol=0, atol=0.1
    )
@timer
def test_HSC_wf():
    """Compare a batoid HSC wavefront against Zemax via fitted Zernike
    coefficients (ignoring piston/tip/tilt)."""
    fn = os.path.join(directory, "testdata", "HSC_wavefront.txt")
    with open(fn) as f:
        Zwf = np.loadtxt(f, skiprows=17)
    Zwf = Zwf[::-1]  # Need to invert, probably just a Zemax convention...
    telescope = batoid.Optic.fromYaml("HSC_no_obsc.yaml")
    thx = np.deg2rad(0.0)
    thy = np.deg2rad(0.75)
    wavelength = 750e-9
    nx = 512
    bwf = batoid.wavefront(telescope, thx, thy, wavelength, nx=nx)
    Zwf = np.ma.MaskedArray(data=Zwf, mask=Zwf==0)  # Turn Zwf into masked array
    # There are unimportant differences in piston, tip, and tilt terms. So
    # instead of comparing the wavefront directly, we'll compare Zernike
    # coefficients for j >= 4.
    x = np.linspace(-1, 1, nx, endpoint=False)
    x, y = np.meshgrid(x, x)
    w = ~Zwf.mask  # Use the same mask for both Zemax and batoid
    basis = galsim.zernike.zernikeBasis(37, x[w], y[w])
    # Least-squares fit of Zernike coefficients over the unmasked pupil points.
    Zcoefs, _, _, _ = np.linalg.lstsq(basis.T, Zwf[w], rcond=-1)
    Bcoefs, _, _, _ = np.linalg.lstsq(basis.T, bwf.array[w], rcond=-1)
    for j in range(1, 38):
        print("{:<4d} {:8.4f} {:8.4f}".format(j, Zcoefs[j], Bcoefs[j]))
    np.testing.assert_allclose(Zcoefs[4:], Bcoefs[4:], rtol=0, atol=0.01)
    # higher order Zernikes match even better
    np.testing.assert_allclose(Zcoefs[11:], Bcoefs[11:], rtol=0, atol=0.01)
@timer
def test_HSC_zernike():
    """Compare batoid.zernike coefficients for HSC against Zemax values."""
    # Pad index 0 so the coefficient array is 1-indexed like the j loop below.
    ZZernike = [0]
    with open(os.path.join(directory, "testdata", "HSC_Zernike.txt")) as f:
        for i, line in enumerate(f):
            if i > 38:
                # NOTE(review): coefficients parsed from fixed columns 9:20 of
                # each line past the header -- confirm file layout.
                ZZernike.append(float(line[9:20]))
    ZZernike = np.array(ZZernike)
    telescope = batoid.Optic.fromYaml("HSC_no_obsc.yaml")
    thx = np.deg2rad(0.0)
    thy = np.deg2rad(0.75)
    wavelength = 750e-9
    nx = 256
    bZernike = batoid.zernike(
        telescope, thx, thy, wavelength, jmax=37, nx=nx,
        projection='zemax', reference='chief'
    )
    print()
    print("j Zemax batoid diff")
    print("------------------------------")
    for j in range(1, 38):
        print(
            f"{j:<4d} {ZZernike[j]:8.4f} {bZernike[j]:8.4f} "
            f"{ZZernike[j]-bZernike[j]:8.4f}"
        )
    # Don't care about piston, tip, or tilt.
    np.testing.assert_allclose(ZZernike[4:], bZernike[4:], rtol=0, atol=1e-2)
    np.testing.assert_allclose(ZZernike[11:], bZernike[11:], rtol=0, atol=3e-3)
@timer
def test_LSST_wf(plot=False):
    """Compare batoid LSST wavefronts directly against Zemax at several
    field angles (10 picometer tolerance)."""
    thxs = [0.0, 0.0, 0.0, 1.176]
    thys = [0.0, 1.225, 1.75, 1.176]
    fns = ["LSST_wf_0.0_0.0.txt",
           "LSST_wf_0.0_1.225.txt",
           "LSST_wf_0.0_1.75.txt",
           "LSST_wf_1.176_1.176.txt"]
    for thx, thy, fn in zip(thxs, thys, fns):
        fn = os.path.join(directory, "testdata", fn)
        with open(fn, encoding='utf-16-le') as f:
            Zwf = np.loadtxt(f, skiprows=16)
        Zwf = Zwf[::-1]  # Need to invert, probably just a Zemax convention...
        telescope = batoid.Optic.fromYaml("LSST_g_500.yaml")
        thx = np.deg2rad(thx)
        thy = np.deg2rad(thy)
        wavelength = 500e-9
        nx = 32
        bwf = batoid.wavefront(
            telescope, thx, thy, wavelength, nx=nx,
            reference='chief', projection='zemax'
        )
        # Turn Zwf into masked array
        Zwf = np.ma.MaskedArray(data=Zwf, mask=Zwf==0)
        if plot:
            import matplotlib.pyplot as plt
            fig, axes = plt.subplots(ncols=3, figsize=(10,3))
            i0 = axes[0].imshow(bwf.array)
            i1 = axes[1].imshow(Zwf)
            i2 = axes[2].imshow(bwf.array-Zwf)
            axes[0].set_title("batoid")
            axes[1].set_title("Zemax")
            axes[2].set_title("difference")
            plt.colorbar(i0, ax=axes[0], label='waves')
            plt.colorbar(i1, ax=axes[1], label='waves')
            plt.colorbar(i2, ax=axes[2], label='waves')
            plt.tight_layout()
            plt.show()
        # Wavefronts are in waves; multiply by wavelength to compare in meters.
        np.testing.assert_allclose(
            Zwf*wavelength,
            bwf.array*wavelength,
            atol=1e-11, rtol=0)  # 10 picometer tolerance!
@timer
def test_LSST_fftPSF(plot=False):
    """Run batoid FFT PSFs for LSST at several field angles and align them
    against Zemax reference images.

    NOTE(review): this test contains no numerical assertion -- it only
    verifies the pipeline runs end-to-end (plus an optional visual check via
    ``plot=True``).
    """
    thxs = [0.0, 0.0, 0.0, 1.176]
    thys = [0.0, 1.225, 1.75, 1.176]
    fns = ["LSST_fftpsf_0.0_0.0.txt",
           "LSST_fftpsf_0.0_1.225.txt",
           "LSST_fftpsf_0.0_1.75.txt",
           "LSST_fftpsf_1.176_1.176.txt"]
    for thx, thy, fn in zip(thxs, thys, fns):
        fn = os.path.join(directory, "testdata", fn)
        with open(fn, encoding='utf-16-le') as f:
            Zpsf = np.loadtxt(f, skiprows=18)
        Zpsf = Zpsf[::-1]  # Need to invert, probably just a Zemax convention...
        Zpsf /= np.max(Zpsf)
        telescope = batoid.Optic.fromYaml("LSST_g_500.yaml")
        thx = np.deg2rad(thx)
        thy = np.deg2rad(thy)
        wavelength = 500e-9
        nx = 32
        bpsf = batoid.fftPSF(
            telescope, thx, thy, wavelength, nx=nx,
            reference='chief', projection='zemax'
        )
        bpsf.array = bpsf.array[::-1,::-1]  # b/c primitives are negative
        bpsf.array /= np.max(bpsf.array)
        # Use GalSim InterpolateImage to align and subtract
        ii = galsim.InterpolatedImage(
            galsim.Image(bpsf.array, scale=1.0),
            normalization='sb'
        )
        # Now setup an optimizer to fit for x/y shift
        def modelimg(params, ii=ii):
            dx, dy, dlogflux = params
            model = ii.shift(dx, dy)*np.exp(dlogflux)
            return model.drawImage(method='sb', scale=1.0, nx=64, ny=64)
        def resid(params, ii=ii, Zpsf=Zpsf):
            img = modelimg(params, ii=ii)
            r = (img.array - Zpsf).ravel()
            return r
        kwargs = dict(ii=ii, Zpsf=Zpsf)
        result = least_squares(resid, np.array([0.0, 0.0, 0.0]), kwargs=kwargs)
        optImg = modelimg(result.x, ii=ii)
        if plot:
            import matplotlib.pyplot as plt
            fig, axes = plt.subplots(ncols=3, figsize=(10,3))
            i0 = axes[0].imshow(optImg.array)
            i1 = axes[1].imshow(Zpsf)
            i2 = axes[2].imshow(optImg.array-Zpsf)
            plt.colorbar(i0, ax=axes[0])
            plt.colorbar(i1, ax=axes[1])
            plt.colorbar(i2, ax=axes[2])
            plt.tight_layout()
            plt.show()
@pytest.mark.slow
@timer
def test_LSST_huygensPSF(plot=False):
    """Compare batoid's Huygens PSF for the LSST g-band telescope against
    Zemax Huygens-PSF reference output.

    NOTE(review): like the FFT-PSF test above, this makes no numerical
    assertion; after least-squares alignment the comparison is visual only.
    """
    # Field angles in degrees, paired with the Zemax reference file names.
    thxs = [0.0, 0.0, 0.0, 1.176]
    thys = [0.0, 1.225, 1.75, 1.176]
    fns = ["LSST_hpsf_0.0_0.0.txt",
           "LSST_hpsf_0.0_1.225.txt",
           "LSST_hpsf_0.0_1.75.txt",
           "LSST_hpsf_1.176_1.176.txt"]
    # Under pytest (not run as a script) only one field angle is kept,
    # since this test is slow.
    if __name__ != "__main__":
        thxs = thxs[2:3]
        thys = thys[2:3]
        fns = fns[2:3]
    for thx, thy, fn in zip(thxs, thys, fns):
        fn = os.path.join(directory, "testdata", fn)
        # Zemax text exports are UTF-16-LE encoded.
        with open(fn, encoding='utf-16-le') as f:
            Zpsf = np.loadtxt(f, skiprows=21)
        Zpsf = Zpsf[::-1]  # Need to invert, probably just a Zemax convention...
        # Normalize both PSFs to unit peak before comparing.
        Zpsf /= np.max(Zpsf)
        telescope = batoid.Optic.fromYaml("LSST_g_500.yaml")
        thx = np.deg2rad(thx)
        thy = np.deg2rad(thy)
        wavelength = 500e-9
        bpsf = batoid.huygensPSF(
            telescope, thx, thy, wavelength, nx=128,
            # telescope, thx, thy, wavelength, nx=1024,
            reference='chief', projection='zemax',
            dx=0.289e-6, nxOut=64
        )
        bpsf.array /= np.max(bpsf.array)
        # Use GalSim InterpolateImage to align and subtract
        ii = galsim.InterpolatedImage(
            galsim.Image(bpsf.array, scale=1.0),
            normalization='sb'
        )
        # Now setup an optimizer to fit for x/y shift
        def modelimg(params, ii=ii):
            # params = (x shift, y shift, log flux scale)
            dx, dy, dlogflux = params
            model = ii.shift(dx, dy)*np.exp(dlogflux)
            return model.drawImage(method='sb', scale=1.0, nx=64, ny=64)
        def resid(params, ii=ii, Zpsf=Zpsf):
            # Flattened pixel residuals between shifted model and reference.
            img = modelimg(params, ii=ii)
            r = (img.array - Zpsf).ravel()
            return r
        kwargs = dict(ii=ii, Zpsf=Zpsf)
        print("Aligning")
        result = least_squares(resid, np.array([0.0, 0.0, 0.0]), kwargs=kwargs)
        optImg = modelimg(result.x, ii=ii)
        print("Done")
        if plot:
            import matplotlib.pyplot as plt
            fig, axes = plt.subplots(ncols=3, figsize=(10,3))
            i0 = axes[0].imshow(optImg.array)
            i1 = axes[1].imshow(Zpsf)
            i2 = axes[2].imshow(optImg.array-Zpsf)
            plt.colorbar(i0, ax=axes[0])
            plt.colorbar(i1, ax=axes[1])
            plt.colorbar(i2, ax=axes[2])
            plt.tight_layout()
            plt.show()
            # Extra central-column cut for the intermediate field angles.
            if thy not in [0.0, 1.176]:
                fig, ax = plt.subplots(figsize=(6, 4))
                ax.plot(optImg.array[:,32], c='g')
                ax.plot(Zpsf[:,32], c='b')
                ax.plot((optImg.array-Zpsf)[:,32], c='r')
                plt.show()
@timer
def test_LSST_trace(verbose=False):
# The g_500 file uses vacuum instead of air, which is important to match
# Zemax for this test.
telescope = batoid.Optic.fromYaml("LSST_g_500.yaml")
zSurfaces = [4, 5, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17]
for fn in ["LSST_trace_0.txt", "LSST_trace_1.txt", "LSST_trace_2.txt"]:
filename = os.path.join(directory, "testdata", fn)
# Get normalized coordinates
with open(filename, encoding='utf-16-le') as f:
Hx, Hy, Px, Py = np.genfromtxt(f, skip_header=13, max_rows=4, usecols=(6,))
with open(filename, encoding='utf-16-le') as f:
arr = np.genfromtxt(f, skip_header=22, max_rows=18, usecols=list(range(1, 12)))
ray = batoid.RayVector.fromStop(
Px*4.18, Py*4.18,
optic=telescope,
wavelength=500e-9,
theta_x=np.deg2rad(Hx*1.75), theta_y=np.deg2rad(Hy*1.75),
projection='zemax'
)
tf = telescope.traceFull(ray)
for surface, iz in zip(tf.values(), zSurfaces):
r = surface['out'].toCoordSys(batoid.globalCoordSys)
n = 1./np.sqrt(np.sum(r.v**2))
# Note | |
msh.Mesh(os.path.join(directories["muscles"], f[:-5] + ".scaled.o.mesh"))
half1, half2 = cutMeshInHalf(mesh)
half1.write(os.path.join(directories["muscles"], f[:-5]+".R.mesh"))
half2.write(os.path.join(directories["muscles"], f[:-5]+".L.mesh"))
os.remove(os.path.join(directories["muscles"], f[:-5] + ".scaled.mesh"))
os.remove(os.path.join(directories["muscles"], f[:-5] + ".scaled.o.mesh"))
os.remove(os.path.join(directories["muscles"], f[:-5] + ".scaled.o.sol"))
def alignToTemplate(f):
    """Rigidly align mesh file ``f`` onto the matching template via ICP.

    The template (masseter or mandible) is selected from the file name;
    on success the aligned mesh overwrites ``f`` in place.
    Returns 0. (Python 2 module: uses print statements.)
    """
    # Three-digit subject number extracted from the file name.
    num = f.split("/")[-1][:3]
    err=0
    # Disabled coarse-alignment step (Super4PCS), kept for reference.
    """
    if "mass" in f:
        err = os.system(exe.align + " -i " + f + " " + templates["masseter"] + " -d 0.1 -o 0.95 > "+num+"_mass.txt")#/dev/null 2>&1")
    elif "mand" in f:
        err = os.system(exe.align + " -i " + f + " " + templates["mandible"] + " -d 0.1 -o 0.95 > "+num+"_mand.txt")#/dev/null 2>&1")
    if err:
        print "-- Error aligning " + boneFile
    """
    mesh = msh.Mesh(f)
    #mesh.applyMatrix(matFile = "mat_Super4PCS.txt")
    #mesh.applyMatrix(matFile = "mat_ICP.txt")
    #mesh.write(f[:-5]+".aligned.mesh")
    todo = f#f[:-5]+".aligned.mesh"
    # Fine alignment: the external ICP tool writes its transform to
    # mat_pyICP.txt and appends its log to the per-subject text file.
    if "mass" in f:
        err = os.system(exe.pythonICP + " -s " + todo + " -t " + templates["masseter"] + " -m mat_pyICP.txt >> " + num + "_mass.txt")
    elif "mand" in f:
        err = os.system(exe.pythonICP + " -s " + todo + " -t " + templates["mandible"] + " -m mat_pyICP.txt >> " + num + "_mand.txt")
    if err:
        print "-- Error with ICP for " + f
    else:
        # Apply the computed transform and overwrite the input file.
        mesh.applyMatrix(matFile = "mat_pyICP.txt")
        mesh.write(f)
        print "Successfully aligned " + f
    return 0
@debug()
def warpWithBlender(f, resolution=41):
    """Warp mesh ``f`` onto a carved bounding mesh via Blender, then remesh.

    Writes ``<f-stem>.rewarped.mesh`` next to the input file; raises
    FacileError if any of the three external steps fails.
    """
    # Each stage shells out to an external tool; a nonzero exit status
    # from any of them is fatal.
    carve_status = os.system(
        exe.boundingMesh + " -i " + f + " -o carved.mesh -r " + str(resolution) + " > /dev/null 2>&1")
    warp_status = os.system(
        "blender --background --python blender_warp.py -- carved.mesh " + f + " warped.mesh > /dev/null 2>&1")
    remesh_status = os.system(
        exe.mmgs + " warped.mesh -o warped.o.mesh -hausd 0.002 > /dev/null 2>&1")
    if carve_status or warp_status or remesh_status:
        raise FacileError("blender warping failure")
    shutil.copyfile("warped.o.mesh", f[:-5] + ".rewarped.mesh")
def cleanWithMeshlab(f, new=None):
    """Run the meshlab cleaning script on mesh file ``f``.

    The cleaned mesh overwrites ``f`` unless ``new`` is given, in which
    case the result is written there instead. Raises FacileError when
    meshlabserver fails.
    """
    # meshlabserver only consumes OBJ, so round-trip through a temp file.
    msh.Mesh(f).writeOBJ("tmp.obj")
    status = os.system("LC_ALL=C meshlabserver -i tmp.obj -o cleaned.obj -s " + intScript + " > /dev/null 2>&1" )
    if status:
        raise FacileError("meshlab failure")
    cleaned = obj2Mesh("cleaned.obj")
    cleaned.write(f if new is None else new)
def intersects(f):
    """Return True if the surface mesh file ``f`` has intersecting faces.

    Runs ``tetgen -d`` (intersection detection) on the file and parses the
    captured log output; tetgen prints the sentence checked below when the
    mesh is clean. (Fixes: the original shadowed the ``f`` parameter with
    the log file handle and returned True/False through an if/else instead
    of the boolean expression itself.)
    """
    os.system(exe.tetgen + " -d " + f + " > log.txt")
    with open("log.txt", "r") as log:
        return "No faces are intersecting" not in log.read()
@debug()
def generateMask(f):
    """Build the tetrahedral "mask" volume between a bone and a face mesh,
    plus the initial morphing displacement field.

    f[0] = path to the bone mesh, f[1] = path to the face mesh.
    Writes <num>_mask.mesh, <num>_mask.o.mesh and <num>_mask.o.sol into
    directories["masked"]. Raises FacileError when either input (or their
    union) self-intersects, or when an external tool fails.
    """
    num = f[0].split("/")[-1][:3]
    # tetgen cannot tetrahedralize self-intersecting surfaces, so bail early.
    if intersects(f[0]):
        raise FacileError("Bone intersect")
    bone = msh.Mesh(f[0])
    if intersects(f[1]):
        raise FacileError("Face intersect")
    face = msh.Mesh(f[1])
    # Keep only the outer skin of the face (drop ref-2 triangles), then
    # retag the surfaces: face = 2, bone = 1.
    face.tris = face.tris[face.tris[:,-1]!=2]
    face.tris[:,3] = 2
    face.discardUnused()
    bone.tris[:,3] = 1
    bone.fondre(face)  # merge both surfaces into a single mesh
    bone.write("mask.mesh")
    if intersects("mask.mesh"):
        raise FacileError('Bone and Face intersect')
    err = os.system(exe.tetgen + " -pgaAYNEF mask.mesh > /dev/null 2>&1")
    if err:
        raise FacileError("tetgen error")
    mesh = msh.Mesh("mask.1.mesh")
    # The exterior region is the one containing the vertex closest to the
    # origin (vectorized norm instead of a per-vertex Python loop).
    ext_point_ind = np.argmin(np.linalg.norm(mesh.verts[:,:3], axis=1))
    ext_ref = None
    for t in mesh.tets:
        if ext_point_ind in t[:4]:
            ext_ref = t[-1]
            break
    # Keep only the exterior tetrahedra and retag them.
    mesh.tets = mesh.tets[mesh.tets[:,-1]==ext_ref]
    mesh.tets[:,4] = 2
    for t in mesh.tris:
        if ext_point_ind in t[:3]:
            ext_ref = t[-1]
            break
    mesh.tris = mesh.tris[mesh.tris[:,3]>0]
    M = mesh.tris[:,-1]==ext_ref
    # BUG FIX: the original wrote `mesh.tris[M==1][:,3] = 1`, which assigns
    # into a temporary copy (boolean indexing returns a copy) and was
    # silently discarded. Index rows and column in a single operation so
    # the retagging actually sticks.
    mesh.tris[M, 3] = 1
    mesh.tris[~M, 3] = 0
    mesh.discardUnused()
    # Flag every vertex lying on the exterior (ref-1) surface.
    for t in mesh.tris:
        if t[-1]==1:
            for i in t[:3]:
                mesh.verts[i,-1]=1
    mask_path = os.path.join(directories["masked"], num + "_mask.mesh")
    mask_o_path = os.path.join(directories["masked"], num + "_mask.o.mesh")
    mesh.write(mask_path)
    err = os.system(exe.mmg3d + " " + mask_path + " -o " + mask_o_path + " -hausd 0.0005 -nosurf -hgrad 1.15 > /dev/null 2>&1")
    if err:
        raise FacileError("mmg3d error")
    os.system("rm " + os.path.join(directories["masked"], num + "_mask.o.sol"))
    # Template skull surface (ref 1) seeds the morphing displacement field.
    template = msh.Mesh(templates["morphingSkull"])
    template.tets = np.array([])
    template.tris = template.tris[template.tris[:,-1]==1]
    template.discardUnused()
    mesh = msh.Mesh(mask_o_path)
    n = len(mesh.verts)
    mesh.tris = mesh.tris[mesh.tris[:,-1]==1]
    mesh.tets = np.array([])
    mesh.discardUnused()
    # Displacement field template -> subject; to be replaced by the actual
    # morphing result downstream.
    mesh.vectors = np.zeros((n,3))
    mesh.vectors[:len(template.verts)] = mesh.verts[:,:3] - template.verts[:,:3]
    mesh.writeSol(os.path.join(directories["masked"], num + "_mask.o.sol"))
@debug()
def morph(g, nit=400):
    """Morph the template mesh onto a subject via the ``morphing`` tool.

    g[0] = path to the signed-distance file
    g[1] = path to the template mesh
    nit  = number of morphing iterations

    The morphed surface and its displacement field are moved into
    directories["morphed"] as <name>.mesh / <name>.sol.
    Raises FacileError when the morphing tool fails.
    (Fixes: removed a dead ``if True:`` wrapper, an unused ``bRef`` local
    and commented-out code.)
    """
    signedDist = g[0]
    templateMesh = g[1]
    os.system("cp " + templateMesh + " template.mesh")
    dRef = [2]   # fixed surface inside the template
    elRef = [2]  # elements inside the fixed surface
    cmd = " ".join((
        "morphing",
        " -dref " + str(len(dRef)) + " " + " ".join([str(d) for d in dRef]),
        " -elref " + str(len(elRef)) + " " + " ".join([str(d) for d in elRef]),
        " -nit " + str(nit),
        " " + signedDist,
        " template.mesh",
        " > /dev/null 2>&1"
    ))
    err = os.system(cmd)
    if err:
        raise FacileError("morphing failure")
    name = signedDist.split("/")[-1].split(".")[0]
    newMesh = os.path.join(directories["morphed"], name+".mesh")
    newSol = os.path.join(directories["morphed"], name+".sol")
    os.system("mv " + signedDist[:-5] + ".1.mesh " + newMesh)
    os.system("mv " + signedDist[:-5] + ".1.depl.sol " + newSol)
    # Keep only the morphed surface: drop tetrahedra and unused vertices.
    mesh = msh.Mesh(newMesh)
    mesh.readSol(newSol)
    mesh.tets = np.array([])
    mesh.discardUnused()
    mesh.write(newMesh)
    mesh.writeSol(newSol)
import numpy as np
import time
def scalar(d1, d2):
    """Euclidean inner product of two equally-shaped arrays."""
    return np.multiply(d1, d2).sum()
def read_data(filename, max_rows=4859):
    """Read a whitespace-delimited numeric text file into a 2-D array.

    Only the first ``max_rows`` lines are kept; the default of 4859
    preserves the original hard-coded truncation, now exposed as a
    backward-compatible parameter. (Also removed an unreachable
    ``return None`` that followed the return statement.)
    """
    with open(filename, "r") as f:
        rows = [[float(x) for x in line.split()] for line in f.readlines()[:max_rows]]
    return np.array(rows)
def center_data(d):
    """Return ``d`` with its per-column (axis-0) mean subtracted."""
    mean_row = np.mean(d, axis=0)
    return d - mean_row
def cov(d):
    """Return the Gram ("covariance") matrix C with C[i, j] = <d[i], d[j]>.

    Each sample of ``d`` is flattened first, so samples may be arrays of
    any shape. Computed with a single matrix product instead of the
    original O(n^2) Python double loop over ``scalar``; the result is
    symmetric, matching the original's ``scalar(x, y)`` fill order.
    """
    flat = np.reshape(np.asarray(d), (len(d), -1))
    return np.dot(flat, flat.T)
def eig(c):
    """Eigendecomposition of ``c`` with eigenvalues sorted descending.

    Returns (eVal, eVec) where row eVec[i] is the eigenvector belonging
    to eVal[i].
    """
    values, vectors = np.linalg.eig(c)
    order = np.argsort(values)[::-1]
    return values[order], vectors.T[order]
def get_principal_components(v, d):
    """Map eigenvectors ``v`` back into data space.

    pc[i] = sum_j v[i, j] * d[j], vectorized with ``np.tensordot`` instead
    of the original O(n^2) Python loops; ``pcn`` holds the unit-normalized
    components. Returns ``(pc, pcn)`` with ``pcn`` as a list, matching the
    original interface.
    """
    pc = np.tensordot(v, np.asarray(d), axes=(1, 0))
    pcn = [x / np.sqrt(np.sum(x * x)) for x in pc]
    return pc, pcn
def reconstruct(pcn, d, n=None):
    """Reconstruct each sample of ``d`` from the basis ``pcn``.

    Coefficients alpha[i, j] = <d[i], pcn[j]> are computed with one matrix
    product, and the reconstruction sums the first ``n`` components (all
    ``len(d)`` of them when ``n`` is falsy, matching the original
    behavior). Replaces the original O(n^2) Python double loops.
    """
    basis = np.asarray(pcn)
    flat_basis = basis.reshape((len(basis), -1))
    flat_data = np.reshape(np.asarray(d), (len(d), -1))
    alpha = np.dot(flat_data, flat_basis.T)
    k = n if n else len(d)
    # sum_j alpha[i, j] * pcn[j] over the first k components
    return np.tensordot(alpha[:, :k], basis[:k], axes=(1, 0))
def PCA(d, u, n, debug=False):
    """Project sample ``u`` onto the first ``n`` principal components of ``d``.

    Builds the Gram matrix of ``d``, extracts sorted eigenvectors, forms
    normalized principal components, then reconstructs ``d`` plus the
    extra sample ``u`` and returns only the reconstruction of ``u``.
    With ``debug=True``, per-stage timings are printed.
    (Fixes: print statements converted to single-argument ``print(...)``
    calls, valid in both Python 2 and 3; dead trailing timer assignment
    and commented-out code removed. Debug message text is unchanged.)
    """
    if debug:
        t0 = time.time()
    A = cov(d)
    if debug:
        print("Calcul de A: " + str(time.time() - t0))
        t0 = time.time()
    eVal, eVec = eig(A)
    if debug:
        print("Calcul des valeurs/vecteurs propres: " + str(time.time() - t0))
        t0 = time.time()
    PC, PCN = get_principal_components(eVec, d)
    if debug:
        print("Calcul des composantes principales: " + str(time.time() - t0))
        t0 = time.time()
    # Reconstruct d and the new sample u together; only the reconstruction
    # of u (the last row) is returned.
    N = reconstruct(PCN, np.append(d, [u], axis=0), n)
    if debug:
        print("Calcul de la reconstruction: " + str(time.time() - t0))
    return N[-1]
if __name__=="__main__":
#Folders and static files
csvFile = "/home/norgeot/dev/own/FaciLe/pipeline/liste.csv"
mshScript = "/home/norgeot/dev/own/FaciLe/pipeline/cleanSTL.mlx"
intScript = "/home/norgeot/dev/own/FaciLe/pipeline/cleanIntersections.mlx"
#templateFiles
templates = {}
templates["masseter"] = "/home/norgeot/dev/own/FaciLe/MassTemplate.mesh"
templates["mandible"] = "/home/norgeot/dev/own/FaciLe/MandTemplate.mesh"
templates["bone"] = "/home/norgeot/dev/own/FaciLe/OsTemplate.mesh"
templates["sphere"] = "/home/norgeot/dev/own/FaciLe/projects/warping/demo/sphere.o1.mesh"
templates["morphing"] = "/home/norgeot/templateMorphing.mesh"
templates["morphingSkull"] = "/home/norgeot/templateMorphingSkull.mesh"
templates["box"] = "/home/norgeot/box.mesh"
#Output folders
outFolder = "/Data/Facile3"
dirNames = [
"raw",
"scaled",
"remeshed",
"merged",
"aligned",
"warped",
"signed",
"masked",
"morphed",
"muscles",
"reconstruction"
]
directories = {}
for d in dirNames:
directories[d] = os.path.join(outFolder, d)
if not os.path.exists(directories[d]):
os.makedirs(directories[d])
#FTP info
ftpUsr = raw_input("Enter your ftp username:")
ftpPwd = getpass.getpass()
IPadress = "172.16.31.10"
ftpDir = "Projets/FaciLe/Data/AllDataRaw"
############################################################################
# I - COPY AND CLEAN
############################################################################
# I.1 - Copy from ftp to rawFolder
ftp = FTP(IPadress, ftpUsr, ftpPwd)
ftp.cwd(ftpDir)
f_ftp = [ f for f in ftp.nlst() if ".mesh" in f or ".stl" in f ]
f_ftp = [ f for f in f_ftp if not os.path.exists(os.path.join(directories["raw"],f.split(".")[1].zfill(3) + "_" + newName(f) + ".mesh")) ]
f_ftp.sort(key=lambda f: int(f.split(".")[1]))
run(ftpCopy, f_ftp)
# I.2 - Convert from .stl to .mesh in rawFolder
files = [os.path.join(directories["raw"], f) for f in os.listdir(directories["raw"]) if ".stl" in f]
files.sort()
run(convertToMesh, files)
# I.3 - Clean the meshes in rawFolder
files = [os.path.join(directories["raw"], f) for f in os.listdir(directories["raw"]) if ".mesh" in f] if len(files) else []
files.sort()
run(cleanMesh, files)
############################################################################
# II - UNIFORMIZE (SCALE, REMESH, MERGE AND ALIGN)
############################################################################
# II.1 - Scale the files
files = [ f for f in os.listdir(directories["raw"]) if ".mesh" in f ]
files.sort()
groups = group(files, condition2)
groups = [g for g in groups if not os.path.exists(os.path.join(directories["scaled"], g[0][:3] +"_bone.mesh")) ]
run(scale, groups)
# II.2 - Remesh the files
files = [ f for f in os.listdir(directories["scaled"]) if ".mesh" in f ]
files = [ f for f in files if not os.path.exists(os.path.join(directories["remeshed"],f)) ]
good = readCSV(csvFile)
files = [f for f in files if good[int(f[:3])-1]]
files.sort()
run(remesh, files)
# II.3 - merge the bones together
files = [f for f in os.listdir(directories["remeshed"]) if ".mesh" in f]
files.sort()
groups = group(files, condition2)
groups = [g for g in groups if not | |
""" Module containing `~halotools.mock_observables.RectangularDoubleMesh`,
the primary data structure used to optimize pairwise
calculations throughout the `~halotools.mock_observables` sub-package.
"""
import numpy as np
from math import floor
__all__ = ('RectangularDoubleMesh', )
__author__ = ('<NAME>', )
default_max_cells_per_dimension_cell1 = 50
default_max_cells_per_dimension_cell2 = 50
def digitized_position(p, cell_size, num_divs):
    """ Function returns a discretized spatial position of input point(s).

    Floor-divides ``p`` by ``cell_size`` and clamps the result so points
    sitting exactly on the upper box edge land in the top cell.
    (Fix: the original applied ``np.floor`` on top of ``//``, which
    already floors; ``np.floor_divide`` does it in one operation while
    still accepting scalars and arrays alike.)
    """
    ip = np.floor_divide(p, cell_size).astype(int)
    return np.where(ip >= num_divs, num_divs-1, ip)
def sample1_cell_size(period, search_length, approx_cell_size,
        max_cells_per_dimension=default_max_cells_per_dimension_cell1):
    """ Function determines the size of the cells of mesh1.

    Starting from ``approx_cell_size``, the number of divisions is clamped
    so that it (a) never exceeds ``max_cells_per_dimension``, (b) keeps
    cells at least as large as ``search_length``, and (c) is at least 3.
    The returned cell size always divides ``period`` evenly.
    """
    if search_length > period/3.:
        msg = ("Input ``search_length`` cannot exceed period/3")
        raise ValueError(msg)
    # Divisions implied by the requested cell size, capped by the maximum.
    requested_divs = max(int(floor(period/float(approx_cell_size))), 1)
    requested_divs = min(max_cells_per_dimension, requested_divs)
    # Divisions cannot be so fine that cells shrink below the search length.
    search_limited_divs = max(int(floor(period/float(search_length))), 1)
    ndivs = max(3, min(requested_divs, search_limited_divs))
    return period/float(ndivs)
def sample2_cell_sizes(period, sample1_cell_size, approx_cell_size,
        max_cells_per_dimension=default_max_cells_per_dimension_cell2):
    """ Function determines the size of the cells of mesh2.

    The number of mesh2 cells per dimension is an integer multiple of the
    number of mesh1 cells (so every mesh1 cell is evenly subdivided) and
    is capped at ``max_cells_per_dimension``. The returned cell size
    always divides ``period`` evenly.
    """
    num_sample1_cells = int(np.round(period / sample1_cell_size))
    # Sub-divisions of each mesh1 cell implied by the requested size.
    subdivs = min(max_cells_per_dimension,
                  max(1, int(np.round(sample1_cell_size/float(approx_cell_size)))))
    num_sample2_cells = num_sample1_cells * subdivs
    if num_sample2_cells > max_cells_per_dimension:
        # Fall back to the largest multiple of num_sample1_cells under the cap.
        num_sample2_cells = (max_cells_per_dimension // num_sample1_cells) * num_sample1_cells
    return period/float(num_sample2_cells)
class RectangularMesh(object):
    """ Mesh that bins a set of 3-d points into rectanguloid cells of a
    periodic simulation box.

    The box is divided into cells whose edges and faces align with the
    Cartesian axes; every point belongs to exactly one cell. A cell is
    addressed either by its tuple indices (ix, iy, iz) or by the unique
    integer *cell_id* given by the dictionary ordering of those tuples:

    * (0, 0, 0) <--> 0
    * (0, 0, 1) <--> 1
    * ...
    * (0, 1, 0) <--> num_zdivs
    * (0, 1, 1) <--> num_zdivs + 1

    and so forth. After construction, the points belonging to cell ``i``
    are retrieved through ``idx_sorted``::

        first, last = mesh.cell_id_indices[i], mesh.cell_id_indices[i+1]
        xs = x[mesh.idx_sorted][first:last]
    """

    def __init__(self, x1in, y1in, z1in, xperiod, yperiod, zperiod,
            approx_xcell_size, approx_ycell_size, approx_zcell_size):
        """
        Parameters
        ----------
        x1in, y1in, z1in : arrays
            Length-*Npts* arrays containing the coordinates of the points.

        xperiod, yperiod, zperiod : floats
            Length scales defining the periodic boundary conditions in
            each dimension; in virtually all realistic cases these are equal.

        approx_xcell_size, approx_ycell_size, approx_zcell_size : float
            Requested cell sizes. They are only approximate because the
            actual size in each dimension is adjusted so an integer number
            of cells exactly tiles the box.
        """
        self.npts = x1in.shape[0]
        self.xperiod = xperiod
        self.yperiod = yperiod
        self.zperiod = zperiod

        # Round to the nearest whole number of divisions, never fewer than 1.
        self.num_xdivs = max(int(np.round(xperiod / approx_xcell_size)), 1)
        self.num_ydivs = max(int(np.round(yperiod / approx_ycell_size)), 1)
        self.num_zdivs = max(int(np.round(zperiod / approx_zcell_size)), 1)
        self.ncells = self.num_xdivs * self.num_ydivs * self.num_zdivs

        # Actual cell sizes after forcing an even division of the box.
        self.xcell_size = self.xperiod / float(self.num_xdivs)
        self.ycell_size = self.yperiod / float(self.num_ydivs)
        self.zcell_size = self.zperiod / float(self.num_zdivs)

        def _digitize(p, cell_size, ndivs):
            # Same logic as the module-level digitized_position helper:
            # floor-divide, then clamp points on the upper edge into the
            # top cell.
            cell_index = np.floor(p // cell_size).astype(int)
            return np.where(cell_index >= ndivs, ndivs - 1, cell_index)

        ix = _digitize(x1in, self.xcell_size, self.num_xdivs)
        iy = _digitize(y1in, self.ycell_size, self.num_ydivs)
        iz = _digitize(z1in, self.zcell_size, self.num_zdivs)
        point_cell_ids = self.cell_id_from_cell_tuple(ix, iy, iz)

        # idx_sorted reorders the points by cell id; cell_id_indices[i] is
        # then the offset of cell i's first point in that ordering, with a
        # trailing sentinel equal to npts.
        self.idx_sorted = np.ascontiguousarray(np.argsort(point_cell_ids))
        boundaries = np.searchsorted(point_cell_ids, np.arange(self.ncells),
            sorter=self.idx_sorted)
        self.cell_id_indices = np.ascontiguousarray(
            np.append(boundaries, self.npts))

    def cell_id_from_cell_tuple(self, ix, iy, iz):
        """Dictionary-ordered integer id of the cell tuple (ix, iy, iz)."""
        return (ix * self.num_ydivs + iy) * self.num_zdivs + iz
class RectangularDoubleMesh(object):
""" Fundamental data structure of the `~halotools.mock_observables` sub-package.
`~halotools.mock_observables.RectangularDoubleMesh` is built up from two instances
of `~halotools.mock_observables.pair_counters.rectangular_mesh.RectangularMesh`.
"""
def __init__(self, x1, y1, z1, x2, y2, z2,
approx_x1cell_size, approx_y1cell_size, approx_z1cell_size,
approx_x2cell_size, approx_y2cell_size, approx_z2cell_size,
search_xlength, search_ylength, search_zlength,
xperiod, yperiod, zperiod, PBCs=True,
max_cells_per_dimension_cell1=default_max_cells_per_dimension_cell1,
max_cells_per_dimension_cell2=default_max_cells_per_dimension_cell2):
"""
Parameters
----------
x1, y1, z1 : arrays
Length-*Npts1* arrays containing the spatial position of the *Npts1* points.
x2, y2, z2 : arrays
Length-*Npts2* arrays containing the spatial position of the *Npts2* points.
approx_x1cell_size, approx_y1cell_size, approx_z1cell_size : float
approximate cell sizes into which the simulation box will be divided.
These are only approximate because in each dimension,
the actual cell size must be evenly divide the box size.
approx_x2cell_size, approx_y2cell_size, approx_z2cell_size : float
An entirely separate tree is built for the *Npts2* points, the structure of
which is dependent on the struture of the *Npts1* tree as described below.
search_xlength, search_ylength, search_zlength, floats, optional
Maximum length over which a pair of points will searched for.
For example, if using `~halotools.mock_observables.pair_counters.RectangularDoubleMesh`
to compute a 3-D correlation function with radial separation bins
*rbins = [0.1, 1, 10, 25]*, then in this case
all the search lengths will equal 25.
If using `~halotools.mock_observables.pair_counters.RectangularDoubleMesh`
in a projected correlation function with *rp_bins = [0.1, 1, 10, 25]* and
*pi_max = 40*, then *search_xlength = search_ylength = 25* and
*search_zlength = 40*.
xperiod, yperiod, zperiod : floats
Length scale defining the periodic boundary conditions in each dimension.
In virtually all realistic cases, these are all equal.
PBCs : bool, optional
Boolean specifying whether or not the box has periodic boundary conditions.
Default is True.
max_cells_per_dimension_cell1 : int, optional
Maximum number of cells per dimension. Default is 50.
max_cells_per_dimension_cell2 : int, optional
Maximum number of cells per dimension. Default is 50.
"""
self.xperiod = xperiod
self.yperiod = yperiod
self.zperiod = zperiod
self.search_xlength = search_xlength
self.search_ylength = search_ylength
self.search_zlength = search_zlength
self._PBCs = PBCs
self._check_sensible_constructor_inputs()
approx_x1cell_size = sample1_cell_size(xperiod, search_xlength, approx_x1cell_size,
max_cells_per_dimension=max_cells_per_dimension_cell1)
approx_y1cell_size = sample1_cell_size(yperiod, search_ylength, approx_y1cell_size,
max_cells_per_dimension=max_cells_per_dimension_cell1)
approx_z1cell_size = sample1_cell_size(zperiod, search_zlength, approx_z1cell_size,
max_cells_per_dimension=max_cells_per_dimension_cell1)
self.mesh1 = RectangularMesh(x1, y1, z1, xperiod, yperiod, zperiod,
approx_x1cell_size, approx_y1cell_size, approx_z1cell_size)
approx_x2cell_size = sample2_cell_sizes(xperiod, self.mesh1.xcell_size, approx_x2cell_size,
max_cells_per_dimension=max_cells_per_dimension_cell2)
approx_y2cell_size = sample2_cell_sizes(yperiod, self.mesh1.ycell_size, approx_y2cell_size,
max_cells_per_dimension=max_cells_per_dimension_cell2)
approx_z2cell_size = sample2_cell_sizes(zperiod, self.mesh1.zcell_size, approx_z2cell_size,
max_cells_per_dimension=max_cells_per_dimension_cell2)
self.mesh2 = RectangularMesh(x2, y2, z2, xperiod, yperiod, zperiod,
approx_x2cell_size, approx_y2cell_size, approx_z2cell_size)
self.num_xcell2_per_xcell1 = self.mesh2.num_xdivs // self.mesh1.num_xdivs
self.num_ycell2_per_ycell1 = self.mesh2.num_ydivs // self.mesh1.num_ydivs
self.num_zcell2_per_zcell1 = self.mesh2.num_zdivs // self.mesh1.num_zdivs
def _check_sensible_constructor_inputs(self):
try:
assert self.search_xlength <= self.xperiod/3.
except AssertionError:
msg = ("\n The maximum length over which you search for pairs of points \n"
"cannot be larger than Lbox/3 in any dimension. \n"
"You tried to search for pairs out to a length of search_xlength = %.2f,\n"
"but the size of your box in this dimension is xperiod = %.2f.\n"
"If you need to count pairs on these length scales, \n"
"you should use a larger simulation.\n" % (self.search_xlength, self.xperiod))
raise ValueError(msg)
try:
assert self.search_ylength <= self.yperiod/3.
except AssertionError:
msg = ("\n The maximum length over which you search for pairs of points \n"
"cannot be larger than Lbox/3 in any dimension. \n"
"You tried to search for pairs out to a length of search_ylength = %.2f,\n"
"but the size of your box in this dimension is yperiod = %.2f.\n"
"If you need to count pairs on these length scales, \n"
"you should use a larger simulation.\n" | |
The
default is ``en-US``
return_value_only (bool):
``True`` will return only the value for the policy, without the
name of the policy. ``return_full_policy_names`` and
``hierarchical_return`` will be ignored. Default is ``True``
return_full_policy_names (bool):
Returns the full policy name regardless of what was passed in
``policy_name``
.. note::
This setting applies to sub-elements of the policy if they
exist. The value passed in ``policy_name`` will always be used
as the policy name when this setting is ``False``
hierarchical_return (bool):
Returns a hierarchical view of the policy showing its parents
Returns:
dict: A dictionary containing the policy settings
CLI Example:
.. code-block:: bash
# Using the policy id
salt * lgpo.get_policy LockoutDuration machine
salt * lgpo.get_policy AutoUpdateCfg machine
# Using the full name
salt * lgpo.get_policy "Account lockout duration" machine
salt * lgpo.get_policy "Configure Automatic Updates" machine
# Using full path and name
salt * lgpo.get_policy "Windows Components\Windows Update\Configure Automatic Updates" machine
"""
if not policy_name:
raise SaltInvocationError("policy_name must be defined")
if not policy_class:
raise SaltInvocationError("policy_class must be defined")
policy_class = policy_class.title()
policy_data = _policy_info()
if policy_class not in policy_data.policies.keys():
policy_classes = ", ".join(policy_data.policies.keys())
raise CommandExecutionError(
'The requested policy class "{}" is invalid, policy_class should '
"be one of: {}".format(policy_class, policy_classes)
)
# Look in the _policy_data object first
policy_definition = None
if policy_name in policy_data.policies[policy_class]["policies"]:
policy_definition = policy_data.policies[policy_class]["policies"][policy_name]
else:
# Case-sensitive search first
for pol in policy_data.policies[policy_class]["policies"]:
_p = policy_data.policies[policy_class]["policies"][pol]["Policy"]
if _p == policy_name:
policy_definition = policy_data.policies[policy_class]["policies"][pol]
break
if policy_definition is None:
# Still not found, case-insensitive search
for pol in policy_data.policies[policy_class]["policies"]:
_p = policy_data.policies[policy_class]["policies"][pol]["Policy"]
if _p.lower() == policy_name.lower():
policy_definition = policy_data.policies[policy_class]["policies"][
pol
]
break
if policy_definition:
if return_value_only:
return _get_policy_info_setting(policy_definition)
if return_full_policy_names:
key_name = policy_definition["Policy"]
else:
key_name = policy_name
setting = {key_name: _get_policy_info_setting(policy_definition)}
if hierarchical_return:
if "lgpo_section" in policy_definition:
first_item = True
t_dict = {}
for level in reversed(policy_definition["lgpo_section"]):
new_dict = {}
if first_item:
new_dict[level] = {key_name: setting.pop(key_name)}
first_item = False
else:
new_dict[level] = t_dict
t_dict = new_dict
if t_dict:
setting = t_dict
return setting
success, policy_obj, _, _ = _lookup_admin_template(
policy_name=policy_name, policy_class=policy_class, adml_language=adml_language
)
if success:
setting = _get_policy_adm_setting(
admx_policy=policy_obj,
policy_class=policy_class,
adml_language=adml_language,
return_full_policy_names=return_full_policy_names,
hierarchical_return=hierarchical_return,
)
if return_value_only:
for key in setting:
return setting[key]
return setting
def set_computer_policy(
    name, setting, cumulative_rights_assignments=True, adml_language="en-US"
):
    """
    Set a single computer policy

    Thin convenience wrapper around :py:func:`set_` for the machine-side
    policy class.

    Args:

        name (str):
            The name of the policy to configure

        setting (str):
            The setting to configure the named policy with

        cumulative_rights_assignments (bool): Determine how user rights
            assignment policies are configured. If True, user right assignment
            specifications are simply added to the existing policy. If False,
            only the users specified will get the right (any existing will have
            the right revoked)

        adml_language (str): The language files to use for looking up
            Administrative Template policy data (i.e. how the policy is
            displayed in the GUI). Defaults to 'en-US' (U.S. English).

    Returns:
        bool: True if successful, otherwise False

    CLI Example:

    .. code-block:: bash

        salt '*' lgpo.set_computer_policy LockoutDuration 1440
    """
    return set_(
        computer_policy={name: setting},
        user_policy=None,
        cumulative_rights_assignments=cumulative_rights_assignments,
        adml_language=adml_language,
    )
def set_user_policy(name, setting, adml_language="en-US"):
    """
    Set a single user policy

    Thin convenience wrapper around :py:func:`set_` for the user-side
    policy class.

    Args:

        name (str):
            The name of the policy to configure

        setting (str):
            The setting to configure the named policy with

        adml_language (str):
            The language files to use for looking up Administrative Template
            policy data (i.e. how the policy is displayed in the GUI). Defaults
            to 'en-US' (U.S. English).

    Returns:
        bool: True if successful, otherwise False

    CLI Example:

    .. code-block:: bash

        salt '*' lgpo.set_user_policy "Control Panel\\Display\\Disable the Display Control Panel" Enabled
    """
    return set_(
        user_policy={name: setting},
        computer_policy=None,
        cumulative_rights_assignments=True,
        adml_language=adml_language,
    )
def set_(
computer_policy=None,
user_policy=None,
cumulative_rights_assignments=True,
adml_language="en-US",
):
"""
Set a local server policy.
Args:
computer_policy (dict):
A dictionary of "policyname: value" pairs of computer policies to
set. 'value' should be how it is displayed in the gpedit GUI, i.e.
if a setting can be 'Enabled'/'Disabled', then that should be passed
Administrative Template data may require dicts within dicts, to
specify each element of the Administrative Template policy.
Administrative Templates policies are always cumulative.
Policy names can be specified in a number of ways based on the type
of policy:
Windows Settings Policies:
These policies can be specified using the GUI display name
or the key name from the _policy_info class in this module.
The GUI display name is also contained in the _policy_info
class in this module.
Administrative Template Policies:
These can be specified using the policy name as displayed in
the GUI (case sensitive). Some policies have the same name,
but a different location (for example, "Access data sources
across domains"). These can be differentiated by the "path"
in the GUI (for example, "Windows Components\\Internet
Explorer\\Internet Control Panel\\Security Page\\Internet
Zone\\Access data sources across domains").
Additionally, policies can be specified using the "name" and
"id" attributes from the ADMX files.
For Administrative Templates that have policy elements, each
element can be specified using the text string as seen in
the GUI or using the ID attribute from the ADMX file. Due to
the way some of the GUI text is laid out, some policy
element names could include descriptive text that appears
before the policy element in the GUI.
Use the get_policy_info function for the policy name to view
the element ID/names that the module will accept.
user_policy (dict):
The same setup as the computer_policy, except with data to configure
the local user policy.
cumulative_rights_assignments (bool):
Determine how user rights assignment policies are configured.
If True, user right assignment specifications are simply added to
the existing policy
If False, only the users specified will get the right (any existing
will have the right revoked)
adml_language (str):
The language files to use for looking up Administrative Template
policy data (i.e. how the policy is displayed in the GUI). Defaults
to 'en-US' (U.S. English).
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' lgpo.set computer_policy="{'LockoutDuration': 2, 'RestrictAnonymous': 'Enabled', 'AuditProcessTracking': 'Succes, Failure'}"
"""
if computer_policy and not isinstance(computer_policy, dict):
raise SaltInvocationError("computer_policy must be specified as a dict")
if user_policy and not isinstance(user_policy, dict):
raise SaltInvocationError("user_policy must be specified as a dict")
policies = {}
policies["User"] = user_policy
policies["Machine"] = computer_policy
if policies:
adml_policy_resources = _get_policy_resources(language=adml_language)
for p_class in policies:
_secedits = {}
_netshs = {}
_advaudits = {}
_modal_sets = {}
_admTemplateData = {}
_regedits = {}
_lsarights = {}
_policydata = _policy_info()
if policies[p_class]:
for policy_name in policies[p_class]:
_pol = None
policy_key_name = policy_name
if policy_name in _policydata.policies[p_class]["policies"]:
_pol = _policydata.policies[p_class]["policies"][policy_name]
else:
# Case-sensitive search first
for policy in _policydata.policies[p_class]["policies"]:
_p = _policydata.policies[p_class]["policies"][policy][
"Policy"
]
if _p == policy_name:
_pol = _policydata.policies[p_class]["policies"][policy]
policy_key_name = policy
if _pol is None:
# Still not found, case-insensitive search
for policy in _policydata.policies[p_class]["policies"]:
_p = _policydata.policies[p_class]["policies"][policy][
"Policy"
]
# Case-sensitive search first
if _p.lower() == policy_name.lower():
_pol = _policydata.policies[p_class]["policies"][
policy
]
policy_key_name = policy
if _pol:
# transform and validate the setting
_value = _transform_value(
value=policies[p_class][policy_name],
policy=_policydata.policies[p_class]["policies"][
policy_key_name
],
transform_type="Put",
)
if not _validateSetting(
value=_value,
policy=_policydata.policies[p_class]["policies"][
policy_key_name
],
):
raise SaltInvocationError(
"The specified value {} is not an acceptable setting"
" for policy {}.".format(
policies[p_class][policy_name], policy_name
)
)
if "Registry" in _pol:
# set value in registry
log.trace("%s is a registry policy", policy_name)
_regedits[policy_name] = {"policy": _pol, "value": _value}
elif "Secedit" in _pol:
# set value with secedit
log.trace("%s is a Secedit policy", policy_name)
if _pol["Secedit"]["Section"] not in _secedits:
_secedits[_pol["Secedit"]["Section"]] = []
_secedits[_pol["Secedit"]["Section"]].append(
" ".join([_pol["Secedit"]["Option"], "=", str(_value)])
)
elif "NetSH" in _pol:
# set value with netsh
log.trace("%s is a NetSH policy", policy_name)
_netshs.setdefault(
policy_name,
{
"profile": _pol["NetSH"]["Profile"],
"section": _pol["NetSH"]["Section"],
"option": _pol["NetSH"]["Option"],
"value": str(_value),
},
)
elif "AdvAudit" in _pol:
# set value with advaudit
_advaudits.setdefault(
policy_name,
{
"option": _pol["AdvAudit"]["Option"],
"value": str(_value),
},
)
elif "NetUserModal" in _pol:
# set value via NetUserModal
log.trace("%s is a NetUserModal policy", policy_name)
if _pol["NetUserModal"]["Modal"] not in _modal_sets:
_modal_sets[_pol["NetUserModal"]["Modal"]] = {}
_modal_sets[_pol["NetUserModal"]["Modal"]][
_pol["NetUserModal"]["Option"]
| |
0))) + self.W_x_b(sent_output_backward[0])
new_output_backward = torch.tanh(new_output_backward)
final_output = torch.cat([new_output_forward, new_output_backward], dim = 1)
return final_output
def sememe_sum(self, input_s):
emb_sememe = self.emb_sememe.weight
input_sememe = []
for i in range(input_s.size()[0]):
input_sememe.append(torch.mm(input_s[i].float(), emb_sememe))
input_sememe = torch.stack(input_sememe, dim = 0)
sememe_c, sememe_h = self.sememesumlstm(input_sememe)
return sememe_c, sememe_h
class GRU_baseline(nn.Module):
    """Plain GRU sentence encoder; sememe inputs are accepted but unused."""

    def __init__(self, config):
        super(GRU_baseline, self).__init__()
        self.enc_lstm_dim = config['enc_lstm_dim']
        self.sememe_dim = config['sememe_dim']
        self.sememesumlstm = SememeSumLstm(self.sememe_dim, self.enc_lstm_dim)
        self.sememesumGRU = SememeSumGRU(self.sememe_dim, self.enc_lstm_dim)
        self.in_dim = config['word_emb_dim']
        self.mem_dim = config['enc_lstm_dim']
        # A single linear maps to 2 * mem_dim so the result can later be
        # split into the update (z) and reset (r) gate pre-activations.
        self.ioux = nn.Linear(self.in_dim, 2 * self.mem_dim)
        self.iouh = nn.Linear(self.mem_dim, 2 * self.mem_dim)
        self.ious = nn.Linear(self.mem_dim, 2 * self.mem_dim)
        self.fx = nn.Linear(self.in_dim, self.mem_dim)
        self.Uh = nn.Linear(self.mem_dim, self.mem_dim)
        self.Uh_s = nn.Linear(self.mem_dim, self.mem_dim)
        self.reset_parameters()

    def reset_parameters(self):
        """Kaiming-initialise every linear layer; zero the biases."""
        for layer in (self.ioux, self.iouh, self.ious, self.fx, self.Uh,
                      self.Uh_s):
            init.kaiming_normal_(layer.weight)
            if layer.bias is not None:
                init.constant_(layer.bias, val=0)

    def node_forward(self, inputs, hx):
        """One GRU step combining the current input with hidden state hx."""
        prev_h = hx
        gates = self.ioux(inputs) + self.iouh(prev_h)
        z, r = torch.split(gates, gates.size(1) // 2, dim=1)
        z = torch.sigmoid(z)
        r = torch.sigmoid(r)
        candidate = torch.tanh(self.fx(inputs) + self.Uh(torch.mul(r, prev_h)))
        return torch.mul(1 - z, prev_h) + torch.mul(z, candidate)

    def forward(self, inputs, length, sememe_data):
        """Run the GRU over (time, batch, dim) inputs and return the hidden
        state at each sequence's true last step (sememe_data is ignored)."""
        max_time, batch_size, _ = inputs.size()
        hx = inputs[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_()
        outputs = []
        for step in range(max_time):
            hx = self.node_forward(inputs[step], hx)
            outputs.append(hx)
        return torch.stack(
            [outputs[length[i] - 1][i] for i in range(len(length))], 0)
class GRU_concat(nn.Module):
    """GRU encoder that concatenates the word embedding with the summed
    sememe embedding at every time step before the recurrent update."""

    def __init__(self, config):
        super(GRU_concat, self).__init__()
        self.enc_lstm_dim = config['enc_lstm_dim']
        self.sememe_dim = config['sememe_dim']
        self.sememesumlstm = SememeSumLstm(self.sememe_dim, self.enc_lstm_dim)
        self.sememesumGRU = SememeSumGRU(self.sememe_dim, self.enc_lstm_dim)
        self.sememe_dim = config['sememe_dim']
        self.sememe_size = config['sememe_size']
        self.emb_sememe = nn.Embedding(self.sememe_size, self.sememe_dim)
        self.in_dim = config['word_emb_dim']
        self.mem_dim = config['enc_lstm_dim']
        # Inputs are [word ; sememe] concatenations, hence 2 * in_dim; the
        # 2 * mem_dim output is later split into the z and r gates.
        self.ioux = nn.Linear(2 * self.in_dim, 2 * self.mem_dim)
        self.iouh = nn.Linear(self.mem_dim, 2 * self.mem_dim)
        self.ious = nn.Linear(self.mem_dim, 2 * self.mem_dim)
        self.fx = nn.Linear(2 * self.in_dim, self.mem_dim)
        self.Uh = nn.Linear(self.mem_dim, self.mem_dim)
        self.Uh_s = nn.Linear(self.mem_dim, self.mem_dim)
        self.reset_parameters()

    def reset_parameters(self):
        """Kaiming-initialise every linear layer; zero the biases."""
        for layer in (self.ioux, self.iouh, self.ious, self.fx, self.Uh,
                      self.Uh_s):
            init.kaiming_normal_(layer.weight)
            if layer.bias is not None:
                init.constant_(layer.bias, val=0)

    def node_forward(self, inputs, hx):
        """One GRU step over the concatenated input and previous hidden."""
        prev_h = hx
        gates = self.ioux(inputs) + self.iouh(prev_h)
        z, r = torch.split(gates, gates.size(1) // 2, dim=1)
        z = torch.sigmoid(z)
        r = torch.sigmoid(r)
        candidate = torch.tanh(self.fx(inputs) + self.Uh(torch.mul(r, prev_h)))
        return torch.mul(1 - z, prev_h) + torch.mul(z, candidate)

    def forward(self, word_emb, length, sememe_data):
        """Encode a (time, batch, dim) batch and return each sequence's
        hidden state at its true final step (per `length`)."""
        sememe_h = self.sememe_sum(sememe_data)
        inputs = torch.cat([word_emb, sememe_h], dim=2)
        max_time, batch_size, _ = inputs.size()
        hx = inputs[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_()
        outputs = []
        for step in range(max_time):
            hx = self.node_forward(inputs[step], hx)
            outputs.append(hx)
        return torch.stack(
            [outputs[length[i] - 1][i] for i in range(len(length))], 0)

    def sememe_sum(self, input_s):
        """Project each step's multi-hot sememe vector through the embedding
        table, giving a (time, batch, sememe_dim) tensor."""
        table = self.emb_sememe.weight
        return torch.stack(
            [torch.mm(step.float(), table) for step in input_s], dim=0)
class GRU_gate(nn.Module):
    """GRU encoder with an extra learned output gate that mixes summed
    sememe embeddings into the hidden state at every step."""

    def __init__(self, config):
        super(GRU_gate, self).__init__()
        self.enc_lstm_dim = config['enc_lstm_dim']
        self.sememe_dim = config['sememe_dim']
        self.sememesumlstm = SememeSumLstm(self.sememe_dim, self.enc_lstm_dim)
        self.sememesumGRU = SememeSumGRU(self.sememe_dim, self.enc_lstm_dim)
        self.sememe_dim = config['sememe_dim']
        self.sememe_size = config['sememe_size']
        self.emb_sememe = nn.Embedding(self.sememe_size, self.sememe_dim)
        self.in_dim = config['word_emb_dim']
        self.mem_dim = config['enc_lstm_dim']
        # 2 * mem_dim output is later split into the z and r gates.
        self.ioux = nn.Linear(self.in_dim, 2 * self.mem_dim)
        self.iouh = nn.Linear(self.mem_dim, 2 * self.mem_dim)
        self.ious = nn.Linear(self.in_dim, 2 * self.mem_dim)
        self.fx = nn.Linear(self.in_dim, self.mem_dim)
        self.fx_s = nn.Linear(self.in_dim, self.mem_dim)
        self.fh_s = nn.Linear(self.mem_dim, self.mem_dim)
        self.fs = nn.Linear(self.in_dim, self.mem_dim)
        self.W_c = nn.Linear(self.in_dim, self.mem_dim)
        self.Uh = nn.Linear(self.mem_dim, self.mem_dim)
        self.reset_parameters()

    def reset_parameters(self):
        """Kaiming-initialise every linear layer; zero the biases."""
        for layer in (self.ioux, self.iouh, self.ious, self.fx, self.fx_s,
                      self.fh_s, self.fs, self.Uh, self.W_c):
            init.kaiming_normal_(layer.weight)
            if layer.bias is not None:
                init.constant_(layer.bias, val=0)

    def node_forward(self, inputs, sememe_h, hx):
        """One GRU step plus a sememe output gate added to the new state."""
        prev_h = hx
        gates = self.ioux(inputs) + self.iouh(prev_h) + self.ious(sememe_h)
        z, r = torch.split(gates, gates.size(1) // 2, dim=1)
        z = torch.sigmoid(z)
        r = torch.sigmoid(r)
        # Gate deciding how much sememe knowledge flows into the state.
        sememe_gate = torch.sigmoid(
            self.fx_s(inputs) + self.fh_s(prev_h) + self.fs(sememe_h))
        candidate = torch.tanh(self.fx(inputs) + self.Uh(torch.mul(r, prev_h)))
        return (torch.mul(1 - z, prev_h)
                + torch.mul(z, candidate)
                + torch.mul(sememe_gate, torch.tanh(self.W_c(sememe_h))))

    def forward(self, inputs, length, sememe_data):
        """Encode (time, batch, dim) inputs with per-step sememe gating and
        return each sequence's hidden state at its true final step."""
        sememe_h = self.sememe_sum(sememe_data)
        max_time, batch_size, _ = inputs.size()
        hx = inputs[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_()
        outputs = []
        for step in range(max_time):
            hx = self.node_forward(inputs[step], sememe_h[step], hx)
            outputs.append(hx)
        return torch.stack(
            [outputs[length[i] - 1][i] for i in range(len(length))], 0)

    def sememe_sum(self, input_s):
        """Project each step's multi-hot sememe vector through the embedding
        table, giving a (time, batch, sememe_dim) tensor."""
        table = self.emb_sememe.weight
        return torch.stack(
            [torch.mm(step.float(), table) for step in input_s], dim=0)
class GRU_cell_baseline(nn.Module):
    """GRU that injects an externally encoded (definition-vector) hidden
    sequence into both the gates and the candidate state."""

    def __init__(self, config):
        super(GRU_cell_baseline, self).__init__()
        self.enc_lstm_dim = config['enc_lstm_dim']
        # Definition vectors arrive with a fixed width of 512.
        self.sememesumlstm = SememeSumLstm(512, self.enc_lstm_dim)
        self.sememesumGRU = SememeSumGRU(512, self.enc_lstm_dim)
        self.in_dim = config['word_emb_dim']
        self.mem_dim = config['enc_lstm_dim']
        # 2 * mem_dim output is later split into the z and r gates.
        self.ioux = nn.Linear(self.in_dim, 2 * self.mem_dim)
        self.iouh = nn.Linear(self.mem_dim, 2 * self.mem_dim)
        self.ious = nn.Linear(self.mem_dim, 2 * self.mem_dim)
        self.fx = nn.Linear(self.in_dim, self.mem_dim)
        self.Uh = nn.Linear(self.mem_dim, self.mem_dim)
        self.reset_parameters()

    def reset_parameters(self):
        """Kaiming-initialise every linear layer; zero the biases."""
        for layer in (self.ioux, self.iouh, self.ious, self.fx, self.Uh):
            init.kaiming_normal_(layer.weight)
            if layer.bias is not None:
                init.constant_(layer.bias, val=0)

    def node_forward(self, inputs, sememe_h, hx):
        """One GRU step; the reset gate also modulates the sememe state."""
        prev_h = hx
        gates = self.ioux(inputs) + self.iouh(prev_h) + self.ious(sememe_h)
        z, r = torch.split(gates, gates.size(1) // 2, dim=1)
        z = torch.sigmoid(z)
        r = torch.sigmoid(r)
        candidate = torch.tanh(
            self.fx(inputs)
            + self.Uh(torch.mul(r, prev_h) + torch.mul(r, sememe_h)))
        return torch.mul(1 - z, prev_h) + torch.mul(z, candidate)

    def forward(self, inputs, length, def_vecs, device):
        """Encode (time, batch, dim) inputs against GRU-encoded definition
        vectors; `device` is accepted for interface compatibility only."""
        sememe_h = self.sememe_sum(def_vecs)
        max_time, batch_size, _ = inputs.size()
        hx = inputs[0][0].detach().new(batch_size, sememe_h.size()[2]).fill_(0.).requires_grad_()
        outputs = []
        for step in range(max_time):
            hx = self.node_forward(inputs[step], sememe_h[step], hx)
            outputs.append(hx)
        return torch.stack(
            [outputs[length[i] - 1][i] for i in range(len(length))], 0)

    def sememe_sum(self, input_s):
        """Cast per-step definition vectors to float and encode them with
        the sememe-sum GRU."""
        stacked = torch.stack([step.float() for step in input_s], dim=0)
        return self.sememesumGRU(stacked)
class GRU_cell(nn.Module):
    """GRU whose gates and candidate state are conditioned on a hidden
    sequence produced by GRU-encoding the summed sememe embeddings."""

    def __init__(self, config):
        super(GRU_cell, self).__init__()
        self.enc_lstm_dim = config['enc_lstm_dim']
        self.sememe_dim = config['sememe_dim']
        self.sememesumlstm = SememeSumLstm(self.sememe_dim, self.enc_lstm_dim)
        self.sememesumGRU = SememeSumGRU(self.sememe_dim, self.enc_lstm_dim)
        self.sememe_dim = config['sememe_dim']
        self.sememe_size = config['sememe_size']
        self.emb_sememe = nn.Embedding(self.sememe_size, self.sememe_dim)
        self.in_dim = config['word_emb_dim']
        self.mem_dim = config['enc_lstm_dim']
        # 2 * mem_dim output is later split into the z and r gates.
        self.ioux = nn.Linear(self.in_dim, 2 * self.mem_dim)
        self.iouh = nn.Linear(self.mem_dim, 2 * self.mem_dim)
        self.ious = nn.Linear(self.mem_dim, 2 * self.mem_dim)
        self.fx = nn.Linear(self.in_dim, self.mem_dim)
        self.Uh = nn.Linear(self.mem_dim, self.mem_dim)
        self.reset_parameters()

    def reset_parameters(self):
        """Kaiming-initialise every linear layer; zero the biases."""
        for layer in (self.ioux, self.iouh, self.ious, self.fx, self.Uh):
            init.kaiming_normal_(layer.weight)
            if layer.bias is not None:
                init.constant_(layer.bias, val=0)

    def node_forward(self, inputs, sememe_h, hx):
        """One GRU step; the reset gate also modulates the sememe state."""
        prev_h = hx
        gates = self.ioux(inputs) + self.iouh(prev_h) + self.ious(sememe_h)
        z, r = torch.split(gates, gates.size(1) // 2, dim=1)
        z = torch.sigmoid(z)
        r = torch.sigmoid(r)
        candidate = torch.tanh(
            self.fx(inputs)
            + self.Uh(torch.mul(r, prev_h) + torch.mul(r, sememe_h)))
        return torch.mul(1 - z, prev_h) + torch.mul(z, candidate)

    def forward(self, inputs, length, sememe_data):
        """Encode (time, batch, dim) inputs against the sememe hidden
        sequence; return each sequence's hidden state at its last step."""
        sememe_h = self.sememe_sum(sememe_data)
        max_time, batch_size, _ = inputs.size()
        hx = inputs[0][0].detach().new(batch_size, sememe_h.size()[2]).fill_(0.).requires_grad_()
        outputs = []
        for step in range(max_time):
            hx = self.node_forward(inputs[step], sememe_h[step], hx)
            outputs.append(hx)
        return torch.stack(
            [outputs[length[i] - 1][i] for i in range(len(length))], 0)

    def sememe_sum(self, input_s):
        """Embed the multi-hot sememe vectors, then encode the resulting
        sequence with the sememe-sum GRU."""
        table = self.emb_sememe.weight
        projected = torch.stack(
            [torch.mm(step.float(), table) for step in input_s], dim=0)
        return self.sememesumGRU(projected)
class GRU_extra_void(nn.Module):
    def __init__(self, config):
        """Build a GRU encoder plus attention-style pooling layers.

        Args:
            config: dict with keys 'enc_lstm_dim', 'sememe_dim',
                'sememe_size' and 'word_emb_dim'.
        """
        super(GRU_extra_void, self).__init__()
        self.enc_lstm_dim = config['enc_lstm_dim']
        self.sememe_dim = config['sememe_dim']
        self.sememesumlstm = SememeSumLstm(self.sememe_dim, self.enc_lstm_dim)
        self.sememesumGRU = SememeSumGRU(self.sememe_dim, self.enc_lstm_dim)
        self.sememe_dim = config['sememe_dim']
        self.sememe_size = config['sememe_size']
        self.emb_sememe = nn.Embedding(self.sememe_size, self.sememe_dim)
        self.in_dim = config['word_emb_dim']
        self.mem_dim = config['enc_lstm_dim']
        # The widened output packs multiple gate matrices into one linear
        # layer; they are separated later with torch.split.  (The original
        # comment said "times 3"; here the factor is 2 * mem_dim.)
        self.ioux = nn.Linear(self.in_dim, 2 * self.mem_dim)
        self.iouh = nn.Linear(self.mem_dim, 2 * self.mem_dim)
        self.ious = nn.Linear(self.mem_dim, 2 * self.mem_dim)
        # ious handles the c and h passed from the sememe side; both are
        # mem_dim-dimensional.
        self.fx = nn.Linear(self.in_dim, self.mem_dim)
        self.fx_s = nn.Linear(self.in_dim, self.mem_dim)
        self.fs = nn.Linear(self.mem_dim, self.mem_dim)
        self.fh = nn.Linear(self.mem_dim, self.mem_dim)
        # fs handles the c and h passed from the sememe side.
        self.Uh = nn.Linear(self.mem_dim, self.mem_dim)
        self.Uh_s = nn.Linear(self.mem_dim, self.mem_dim)
        self.W_s = nn.Linear(config['sememe_size'], self.mem_dim)
        self.W = nn.Linear(self.mem_dim, self.mem_dim)
        # NOTE(review): nn.Embedding(2*mem_dim, 1) appears to serve as an
        # attention query vector -- presumably only its weight is read;
        # confirm against the forward() implementation.
        self.query = nn.Embedding(2*self.mem_dim, 1)
        self.W_p = nn.Linear(self.mem_dim, self.mem_dim)
        self.W_x = nn.Linear(self.mem_dim, self.mem_dim)
        self.reset_parameters()
def reset_parameters(self):
layers = [self.ioux, self.iouh, self.ious, self.fx, self.fx_s, self.fs, self.fh, self.W, self.Uh, self.Uh_s]
for layer in layers:
init.kaiming_normal_(layer.weight)
if layer.bias is not None:
init.constant_(layer.bias, val=0)
def node_forward(self, inputs, hx):
child_h = hx
iou = self.ioux(inputs) + self.iouh(child_h)
z, r = torch.split(iou, iou.size(1) // 2, dim=1)
z, r = torch.sigmoid(z), torch.sigmoid(r)
h_telta = self.fx(inputs) + self.Uh(torch.mul(r, child_h))
h_telta = torch.tanh(h_telta)
h = torch.mul((1-z), child_h) + torch.mul(z, h_telta)
return h
def forward(self, inputs, length, sememe_data):
# hx: (child_c, child_h)
sememe_h = sememe_data.float().cuda()
max_time, batch_size, _ = inputs.size()
output = []
hx = inputs[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_()
for time in range(max_time):
next_hx = self.node_forward(inputs[time], hx)
output.append(next_hx)
hx = next_hx
new_output = []
new_output_2 = []
for i in range(len(length)):
hidden_old = torch.stack(output[0:length[i]], dim = 0)[:, i, :]
new_output_2.append(torch.index_select(output[length[i]-1], 0, torch.tensor(i, device = 'cuda')))
hidden = self.W(hidden_old)
emb_s_sum = sememe_h[0:length[i], i, :]
| |
"""
Copyright (c) 2021, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import argparse
import glob
import logging
import os
import random
import sys
import timeit
from functools import partial
from os.path import join
from collections import OrderedDict
import numpy as np
from numpy.lib.shape_base import expand_dims
import torch
from torch.utils import data
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
AutoConfig,
AutoTokenizer,
get_linear_schedule_with_warmup,
)
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
from components.config import set_seed, to_list, register_args, validate_args, load_untrained_model, get_model_class
from components.dataset_utils import ListDataset
from components.disamb_dataset import (
read_disamb_instances_from_entity_candidates,
extract_disamb_features_from_examples,
disamb_collate_fn,
coverage_evaluation
)
from components.utils import mkdir_p, dump_json
logger = logging.getLogger(__name__)
def train(args, train_dataset, model, tokenizer):
    """Train the model.

    Standard transformers fine-tuning loop: gradient accumulation, linear
    warmup/decay schedule, optional resume from a ``checkpoint-<step>``
    directory, periodic TensorBoard logging, optional in-training
    evaluation, and checkpointing of model/optimizer/scheduler state.

    Returns:
        tuple: (global_step, average training loss per optimizer step)
    """
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()
        mkdir_p(args.output_dir)
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_collate_fn = partial(disamb_collate_fn, tokenizer=tokenizer)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, collate_fn=train_collate_fn)
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    # Prepare optimizer and schedule (linear warmup and decay); bias and
    # LayerNorm weights are conventionally excluded from weight decay.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=int(max(args.warmup_steps, t_total * args.warmup_ratio)),
        num_training_steps=t_total
    )
    # Check if saved optimizer or scheduler states exist
    if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
        os.path.join(args.model_name_or_path, "scheduler.pt")
    ):
        # Load in optimizer and scheduler states
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
    # multi-gpu training
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    # Train!
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(
        " Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size
        * args.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Warmup steps = %d", int(max(args.warmup_steps, t_total * args.warmup_ratio)))
    logger.info(" Total optimization steps = %d", t_total)
    global_step = 1
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    # Check if continuing training from a checkpoint
    if os.path.exists(args.model_name_or_path):
        try:
            # set global_step to gobal_step of last saved checkpoint from model path
            checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0]
            global_step = int(checkpoint_suffix)
            epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
            steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
            logger.info(" Continuing training from checkpoint, will skip to saved global_step")
            logger.info(" Continuing training from epoch %d", epochs_trained)
            logger.info(" Continuing training from global step %d", global_step)
            logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
        except ValueError:
            logger.info(" Starting fine-tuning.")
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(
        epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
    )
    # Added here for reproductibility
    set_seed(args)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            # Skip past any already trained steps if resuming training
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {
                "input_ids": batch[0],
                "token_type_ids": batch[1],
                "attention_mask": batch[2],
                "sample_mask": batch[3],
                "labels": batch[4],
            }
            if args.model_type in ["roberta", "distilbert", "camembert", "bart"]:
                del inputs["token_type_ids"]
            outputs = model(**inputs)
            # model outputs are always tuple in transformers (see doc)
            loss = outputs[0]
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel (not distributed) training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            loss.backward()
            tr_loss += loss.item()
            if (step + 1) % args.gradient_accumulation_steps == 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                # Log information
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    logs = {}
                    logs['epoch'] = _ + (step + 1) / len(epoch_iterator)
                    logs['learning_rate'] = scheduler.get_last_lr()[0]
                    logs['loss'] = (tr_loss - logging_loss) / args.logging_steps
                    logs['step'] = global_step
                    # Fix: use get_last_lr() here too -- get_lr() is
                    # deprecated outside scheduler.step() and could disagree
                    # with the value recorded in the logs dict above.
                    tb_writer.add_scalar("lr", scheduler.get_last_lr()[0], global_step)
                    tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
                    logger.info("Training logs: {}".format(logs))
                    logging_loss = tr_loss
                # Log metrics
                if args.local_rank in [-1, 0] and args.eval_steps > 0 and global_step % args.eval_steps == 0:
                    # Only evaluate when single GPU otherwise metrics may not average well
                    if args.local_rank == -1 and args.evaluate_during_training:
                        results = evaluate(args, model, tokenizer)
                        for key, value in results.items():
                            tb_writer.add_scalar("eval_{}".format(key), value, global_step)
                        logger.info("Eval results: {}".format(dict(results)))
                # Save model checkpoint
                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
                    # Take care of distributed/parallel training
                    model_to_save = model.module if hasattr(model, "module") else model
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)
                    torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                    torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                    logger.info("Saving optimizer and scheduler states to %s", output_dir)
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break
    if args.local_rank in [-1, 0]:
        tb_writer.close()
    return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, output_prediction=False):
    """Evaluate the model on the prediction split.

    Computes instance-level accuracy of the argmax prediction and the
    candidate-coverage metric; optionally dumps a ``{pid: predicted index}``
    mapping to ``predictions.json`` in the output directory.

    Returns:
        dict with keys 'num problem', 'acc' and 'cov'.
    """
    # load examples
    dataset, examples = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True)
    if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(args.output_dir)
    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(dataset)
    eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, collate_fn=partial(disamb_collate_fn, tokenizer=tokenizer))
    # multi-gpu evaluate
    if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
        model = torch.nn.DataParallel(model)
    # Eval!
    logger.info("***** Running evaluation *****")
    logger.info(" Num examples = %d", len(dataset))
    logger.info(" Batch size = %d", args.eval_batch_size)
    start_time = timeit.default_timer()
    all_pred_indexes = []
    all_labels = []
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        model.eval()
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            inputs = {
                "input_ids": batch[0],
                "token_type_ids": batch[1],
                "attention_mask": batch[2],
                "sample_mask": batch[3],
                "labels": batch[4],
            }
            if args.model_type in ["xlm", "roberta", "distilbert", "camembert", "bart"]:
                del inputs["token_type_ids"]
            logits = model(**inputs)[1]
            pred_indexes = torch.argmax(logits, 1).detach().cpu()
            all_pred_indexes.append(pred_indexes)
            all_labels.append(batch[4].cpu())
    all_pred_indexes = torch.cat(all_pred_indexes).numpy()
    all_labels = torch.cat(all_labels).numpy()
    acc = np.sum(all_pred_indexes == all_labels) / len(all_pred_indexes)
    evalTime = timeit.default_timer() - start_time
    logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset))
    coverage = coverage_evaluation(examples, dataset, all_pred_indexes)
    results = {'num problem': len(all_pred_indexes), 'acc': acc, 'cov': coverage}
    # Build the {pid: prediction} mapping only when it will be written; the
    # original computed an identical, unused `saving` dict on every call and
    # then rebuilt it inside this branch.
    if output_prediction:
        predictions = OrderedDict(
            (feat.pid, pred) for feat, pred in zip(dataset, all_pred_indexes.tolist()))
        dump_json(predictions, join(args.output_dir, 'predictions.json'))
    return results
def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False):
    """Load disambiguation features, creating and caching them on first use.

    Args:
        args: parsed CLI namespace (uses data_dir, train_file/predict_file,
            model_type, max_seq_length, overwrite_cache, local_rank,
            do_predict).
        tokenizer: tokenizer passed through to feature extraction.
        evaluate: when True, read args.predict_file instead of args.train_file.
        output_examples: when True, also return the raw examples.

    Returns:
        ListDataset of features, or (ListDataset, examples) when
        output_examples is True.
    """
    if args.local_rank not in [-1, 0] and not evaluate:
        # Make sure only the first process in distributed training process the dataset, and the others will use the cache
        torch.distributed.barrier()
    # Load data features from cache or dataset file
    input_dir = args.data_dir if args.data_dir else "."
    split_file = args.predict_file if evaluate else args.train_file
    # Cache key is derived from the "<dataset>_<split>_..." file-name pattern.
    dataset_id = os.path.basename(split_file).split('_')[0]
    split_id = os.path.basename(split_file).split('_')[1]
    # split_file = '_'.(join(os.path.basename(split_file).split('_')[:2])
    cached_features_file = os.path.join('feature_cache',"disamb_{}_{}_{}_{}".format(dataset_id, split_id,args.model_type,args.max_seq_length))
    # Init features and dataset from cache if it exists
    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        # cache exists
        logger.info("Loading features from cached file %s", cached_features_file)
        data = torch.load(cached_features_file)
        examples = data['examples']
        features = data['features']
    else:
        # cache not exists, create it
        logger.info("Creating features from dataset file at %s", input_dir)
        candidate_file = args.predict_file if evaluate else args.train_file
        # TODO: hard coded for now
        # Examples are cached separately from features so a different
        # model_type/max_seq_length can reuse the parsed examples.
        example_cache = join('feature_cache', f'{dataset_id}_{split_id}_disamb_examples.bin')
        if os.path.exists(example_cache) and not args.overwrite_cache:
            examples = torch.load(example_cache)
        else:
            orig_split = split_id
            # NOTE(review): dataset path is hard-coded to grailqa v1.0 (see
            # TODO above) -- confirm before reusing with other datasets.
            dataset_file = join('outputs', f'grailqa_v1.0_{orig_split}.json')
            examples = read_disamb_instances_from_entity_candidates(dataset_file, candidate_file)
            torch.save(examples, example_cache)
        features = extract_disamb_features_from_examples(args, tokenizer, examples, do_predict=args.do_predict)
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save({'examples': examples, 'features': features}, cached_features_file)
    if args.local_rank == 0 and not evaluate:
        # Make sure only the first process in distributed training process the dataset, and the others will use the cache
        torch.distributed.barrier()
    if output_examples:
        return ListDataset(features), examples
    else:
        return ListDataset(features)
def main():
# parse args
parser = argparse.ArgumentParser()
register_args(parser)
args = parser.parse_args()
# check output dir
if (os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir):
raise ValueError(
"Output directory ({}) | |
into its current state"""
self.forward = None
"""Scenelet forward direction in world space at mid_frame +-2"""
@classmethod
def from_mat(cls, angular_edges, radial_edges, bins, categories):
h = RadialHistogram(shape=(len(angular_edges)-1, len(radial_edges)-1),
r_max=radial_edges[-1],
angular_offset=angular_edges[0])
for id_category, category in enumerate(categories):
label = category[0][0]
h.volume[label] = bins[id_category, :, :]
assert np.allclose(h.angular_edges, np.squeeze(angular_edges.T)), \
"No:\n%s\n%s" % (h.angular_edges, angular_edges)
assert np.allclose(h.radial_edges, np.squeeze(radial_edges.T)), \
"No:\n%s\n%s" % (h.radial_edges, radial_edges)
return h
def set_bin(self, label, x0, x1, value, polys=None):
"""
:param label:
:param x0: bin_angular
:param x1: bin_radial
:param value:
:param polys:
:return:
"""
try:
self.volume[label][x0, x1] = value
except KeyError:
self.volume[label] = \
np.zeros(shape=self.shape, dtype='f4')
self.volume[label][x0, x1] = value
if polys is not None:
try:
self.polys_debug[label].extend(polys)
except KeyError:
self.polys_debug[label] = polys
def get_pos_2d(self, x0, x1):
"""
Converts bin ids to 2d positions
:param x0: bin_angular
:param x1: bin_radial
"""
angle = np.mean(self.angular_edges[x0:x0+2])
radius = np.mean(self.radial_edges[x1:x1+2])
return pol2cart(radius, angle)
    def plot(self, show=False, polys_show=None):
        """Debug-plot the histogram: one subplot per category label,
        drawing the arc-shaped bin polygons and their stored weights.

        :param show: if True, blocks on plt.show() at the end.
        :param polys_show: {label: [(poly, weight), ...]}
        :return:
        """
        # Square grid of axes large enough for one axis per label.
        nrows = int(np.ceil(np.sqrt(len(self.volume))))
        fig, axes = plt.subplots(nrows, nrows, sharey=True, sharex=True)
        axes = axes.ravel()
        # NOTE(review): loop variable "slice" shadows the builtin of the
        # same name; kept as-is here (comments-only change).
        for ax_id, (label, slice) in enumerate(self.volume.items()):
            ax = axes[ax_id] if len(self.volume) > 1 else axes
            # Bins with positive weight get a stronger (alpha=0.3) patch.
            hits = np.argwhere(slice > 0.)
            for elem in hits:
                # angle = np.mean(self.angular_edges[elem[0]:elem[0]+2])
                # radius = np.mean(self.radial_edges[elem[1]:elem[1]+2])
                # pos = pol2cart(radius, angle)
                # pos = self.get_pos_2d(elem[0], elem[1])
                # ax.text(pos[0], pos[1], "%.2f, %.2f" % (np.rad2deg(angle), radius))
                poly_arc = self.arc_polygons[(elem[0], elem[1])]
                patch = PolygonPatch(poly_arc, facecolor='b',
                                     edgecolor='r', alpha=0.3, zorder=2)
                ax.add_patch(patch)
            # Bins already annotated from the debug polygon store.
            seen = []
            if label in self.polys_debug:
                for bin_angular, bin_radial, poly, perc in self.polys_debug[label]:
                    pos = self.get_pos_2d(bin_angular, bin_radial)
                    ax.text(pos[0]-0.05, pos[1], "%.1f%%" % (perc * 100.))
                    ax.add_patch(
                        PolygonPatch(poly, facecolor='r',
                                     edgecolor='g', alpha=0.8))
                    seen.append((bin_angular, bin_radial))
            ax.set_title(label)
            if polys_show is not None and label in polys_show:
                # Caller-provided polygons, annotated with their weight.
                for poly, weight in polys_show[label]:
                    ax.add_patch(
                        PolygonPatch(poly, facecolor='r',
                                     edgecolor='g', alpha=0.8))
                    pos = poly.centroid
                    print(dir(pos))
                    ax.text(pos.x-0.05, pos.y, "%f" % weight)
                    print("added poly %s at %f, %f" % (label, pos.x, pos.y))
                # Faintly draw every arc bin not covered above, annotated
                # with its stored weight.
                for (bin_angular, bin_radial), poly_arc in self.arc_polygons.items():
                    if (bin_angular, bin_radial) not in seen:
                        ax.add_patch(PolygonPatch(poly_arc, facecolor='b',
                                                  edgecolor='r', alpha=0.1))
                        pos = self.get_pos_2d(bin_angular, bin_radial)
                        ax.text(pos[0]-0.05, pos[1],
                                "%.1f%%" %
                                (self.volume[label][bin_angular, bin_radial] * 100.))
                    else:
                        print("hm %s?" % repr((bin_angular, bin_radial)))
                        # ax.add_patch(patch)
            # Keep the full histogram extent visible and undistorted.
            ax.set_xlim(-self.radial_edges[-1], self.radial_edges[-1])
            ax.set_ylim(-self.radial_edges[-1], self.radial_edges[-1])
            ax.set_aspect('equal')
        if show:
            plt.show()
def get_weight(self, label, poly):
"""Returns the weight of the object based on area overlap
with this weighted histogram
charness = \frac{1.0}{\sum w_{area}}
\sum \left(w_{bin}
\frac{A_{intersection}}{min(A_{arc}, A_{object})}
\right)
"""
if label not in self.volume:
logging.info("Don't have %s charness" % label)
return 0.
# w_sum = 0.
poly_area = poly.area
sum_area = 0.
charness = 0.
for (bin_angular, bin_radial), poly_arc in self.arc_polygons.items():
poly_arc_area = poly_arc.area
sum_area += poly_arc_area
intersection = None
try:
intersection = poly.intersection(poly_arc)
except TopologicalError:
logging.error("Topology error, skipping")
continue
if intersection is not None:
# weight = intersection.area / min(poly_area, poly_arc_area)
weight = intersection.area
# logging.info(list(self.volume.keys()))
try:
charness += \
weight \
* self.volume[str(label)][bin_angular, bin_radial]
# w_sum += weight
except KeyError:
logging.info("Don't have %s in charness" % label)
# return charness / w_sum if w_sum > 0. else charness
return charness / min(poly_area, sum_area)
def to_mdict(self, name):
"""Creates a dictionary ready to be saved to .mat files"""
return {
"name": name.split("::"),
"sections": self.sections,
"layers": self.layers,
"r_max": self.r_max,
"angular_edges": self.angular_edges,
"radial_edges": self.radial_edges,
"hists": dict((str(k), v) for k, v in self.volume.items()),
"poses": self.poses,
"descriptor": self.descriptor,
"transforms": self.transforms,
"forward": self.forward
}
def characteristic_scenelets(py_scenes,
                             min_coverage=0.01, min_object_coverage=0.5,
                             sections=4, layers=2, r_max=1.5,
                             ignore=frozenset((u'floor', u'wall'))):
    """Build a per-scenelet radial object histogram and save all of them
    to "hists.mat", then show the last processed scenelet in 3D.

    :param py_scenes: {scene_name: {scenelet_name: Scenelet}} nested dict.
    :param min_coverage: Percentage of histogram bin area covered by an object
    :param min_object_coverage: Percentage of object area covered by
     histogram bin
    :param sections: angular sections in histogram
    :param layers: radial layers in histogram
    :param r_max: max histogram radius
    :param ignore: object categories to ignore (frozenset default avoids
     the shared-mutable-default-argument pitfall)
    :return: None; writes "hists.mat" as a side effect.
    """
    np.set_printoptions(suppress=True)
    # All category labels present in any scenelet (minus ignored ones).
    unique_labels = set(str(obj.label)
                        for scene in py_scenes.values()
                        for scenelet in scene.values()
                        for obj in scenelet.objects.values()
                        if obj.label not in ignore)
    print("unique labels: %s" % unique_labels)
    # output collections
    hists = []
    matlab_dicts = []
    # prototype histogram, deep-copied once per scenelet
    h_orig = Histogram(sections, layers, r_max)
    for name_scene, name_scenelet, scenelet in get_scenelets(py_scenes):
        # copy prototype histogram to output and reference it
        hists.append(copy.deepcopy(h_orig))
        h = hists[-1]
        # ensure all bins appear in the histogram by setting bin 0,0 to 0.
        for ulabel in unique_labels:
            h.set_bin(str(ulabel), 0, 0, 0.)
        skel = scenelet.skeleton
        skel_frames = skel.get_frames()
        # pose at the middle frame of the scenelet
        frame_id = skel_frames[len(skel_frames) // 2]
        pose = skel.get_pose(frame_id)
        # copy skeleton, descriptor, transforms and forward direction
        h.poses = skel._poses
        # get_angles() also returns frame ids, hence [0]
        h.descriptor = skel.get_angles()[0]
        h.transforms = scenelet.aux_info[u'preproc'][u'transforms']
        h.forward = scenelet.aux_info[u'forward']
        # gather merged top-view polygons per label
        polys = dict()
        for obj in (obj for obj in scenelet.objects.values()
                    if obj.label not in ignore):
            # collect object part polygons
            # (renamed from "locals", which shadowed the builtin)
            part_polys = []
            for part in obj.parts.values():
                part_polys.append(geom.Polygon(
                    part.obb.corners_3d_lower(
                        up_axis=(0., -1., 0.))[:, [0, 2]]
                ))
            # if has at least one part
            if len(part_polys):
                # add already stored polygon under this category, if exists
                try:
                    part_polys.insert(0, polys[obj.label])
                except KeyError:
                    pass
                # try taking the union of the parts of the object
                try:
                    # add to the per-category polygon dictionary
                    polys[obj.label] = cascaded_union(part_polys)
                except ValueError:
                    # debug cascaded_union failure visually
                    plt.figure()
                    ax = plt.gca()
                    for pl in part_polys:
                        patch = PolygonPatch(
                            pl, facecolor='b',
                            edgecolor='r', alpha=0.5, zorder=2)
                        ax.add_patch(patch)
                    plt.show()
        # For each segment in the histogram
        for (bin_angular, bin_radial), arc_poly in h.arc_polygons.items():
            # estimate area threshold TODO: scale by increasing bin_radial
            thresh_area_arc_poly = arc_poly.area * min_coverage
            # for each category assembled above
            for label, poly in polys.items():
                percentage_intersection = 0.
                # BUGFIX: both flags must be reset on every iteration.
                # Previously each branch below set only one of them, so the
                # first iteration raised NameError and later iterations
                # silently reused stale values from the previous label/bin.
                is_covering = False
                is_intersecting = False
                try:
                    intersection = poly.intersection(arc_poly)
                except TopologicalError:
                    print("topology error, skipping")
                    continue
                if intersection.is_empty:
                    # No overlap area; one shape may still contain the other.
                    is_covering = \
                        poly.contains(arc_poly) \
                        or poly.covers(arc_poly) \
                        or arc_poly.within(poly)
                else:
                    area_intersection = intersection.area
                    if area_intersection > thresh_area_arc_poly:
                        is_intersecting = True
                        percentage_intersection = \
                            area_intersection / arc_poly.area
                    else:
                        # Small bins: accept if most of the object is inside.
                        area_poly = poly.area
                        if area_intersection / area_poly > min_object_coverage:
                            is_intersecting = True
                if is_intersecting or is_covering:
                    h.set_bin(
                        label, bin_angular, bin_radial,
                        percentage_intersection,
                        polys=[(bin_angular, bin_radial, poly,
                                percentage_intersection)])
                else:
                    h.set_bin(label, bin_angular, bin_radial, 0.)
        # copy to MATLAB output
        matlab_dicts.append(h.to_mdict("%s::%s" % (name_scene, name_scenelet)))
    # save MATLAB output
    scipy.io.savemat("hists.mat", {'histograms': matlab_dicts})
    print("Saved to %s" % os.path.abspath("hists.mat"))
    # Show last scene in 3D (uses `scenelet` and `pose` left over from
    # the loop above, i.e. the last processed scenelet).
    with Visualizer() as vis:
        vis.add_coords()
        for oid, obj in scenelet.objects.items():
            if obj.label not in ignore:
                for part in obj.parts.values():
                    vis.add_mesh(MeshOBJ.from_obb(part.obb),
                                 "%02d_%s_%s" % (oid, obj.label, part.label))
        for jid in range(pose.shape[1]):
            vis.add_sphere(pose[:, jid], 0.05, (0.1, 0.9, 0.9),
                           "joint_%d" % jid)
        camera = vis._ren.GetActiveCamera()
        camera.SetViewUp(0., -1., 0.)
        vis._ren.ResetCamera()
        vis.show()
def plot_coords(ax, ob):
    """Scatter the coordinate sequence of a geometry onto *ax*.

    :param ax: matplotlib axes to draw on.
    :param ob: geometry exposing an ``xy`` pair of coordinate sequences.
    """
    xs, ys = ob.xy
    ax.plot(xs, ys, 'o', color='#999999', zorder=1)
if __name__ == '__main__':
parser = argparse.ArgumentParser("Radial histogram generation")
parser.add_argument(
"-d", dest="path_scenelets",
default="/media/Data1/ResearchData/stealth/data-pigraphs/" \
"stealth_scenelets_subsample010_framestep015_gap-8.real")
args = parser.parse_args()
path_scenelets = args.path_scenelets
# read scenelets
_path_scenes_pickle = os.path.join(path_scenelets, 'scenes.pickle')
if os.path.exists(_path_scenes_pickle):
print("Reading scenelets from pickle: %s, delete this file, "
"if you want to reparse the scenelets" % _path_scenes_pickle)
py_scenes = pickle.load(open(_path_scenes_pickle, 'rb'))
else:
print("Listing \"%s\":\n" % path_scenelets)
py_scenes = {}
for parent, dirs, files in os.walk(path_scenelets):
scene_name = None
for f in [f for f in files
if f.endswith('.json') and f.startswith('skel')]:
if not scene_name:
scene_name = os.path.basename(os.path.split(parent)[-1])
py_scenes[scene_name] = {}
j_path = os.path.join(parent, f)
# print("j_path: %s" % j_path)
scenelet = Scenelet.load(j_path)
if len(scenelet.objects):
py_scenes[scene_name][os.path.splitext(f)[0]] = scenelet
pickle.dump(py_scenes, open(_path_scenes_pickle, 'wb'))
print("Dumped py_scenes to | |
<gh_stars>1-10
# =======================================================================
#
# Copyright (C) 2018, Hisilicon Technologies Co., Ltd. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1 Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2 Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3 Neither the names of the copyright holders nor the names of the
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# =======================================================================
#
"""video analysis server module"""
import os
import re
import shutil
import json
from json.decoder import JSONDecodeError
import logging
from logging.config import fileConfig
from google.protobuf.message import DecodeError
import video_analysis.src.video_analysis_message_pb2 as pb2
from video_analysis.src.config_parser import ConfigParser
import common.presenter_message_pb2 as presenter_message_pb2
from common.channel_manager import ChannelManager
from common.presenter_socket_server import PresenterSocketServer
from common.app_manager import AppManager
# File names written inside each frame/object directory.
IMAGE_FILE = "image.jpg"
JSON_FILE = "inference.json"
CHANNEL_NAME_FILE = "channel_name.txt"
# App type this server accepts during registration.
SERVER_TYPE = "video_analysis"
# Check remaining disk space once every this many stored frames.
# NOTE(review): name looks like a typo for CHECK_INTERVAL; kept as-is
# because it is referenced elsewhere in this module.
CHECK_INTERCAL = 100
# Maximum frame sub-directories per "stack_N" directory.
MAX_SUB_DIRECTORY_NUM = 30000
# Spacing of frame ids used in the stack-index arithmetic.
# NOTE(review): assumed to be the agent's frame sampling gap -- confirm.
FRAME_GAP = 5
class VideoAnalysisServer(PresenterSocketServer):
'''Video Analysis Server'''
def __init__(self, config):
server_address = (config.presenter_server_ip,
int(config.presenter_server_port))
self.storage_dir = config.storage_dir
self.max_app_num = int(config.max_app_num)
self.reserved_space = int(config.reserved_space)
self.app_manager = AppManager()
self.frame_num = 0
super(VideoAnalysisServer, self).__init__(server_address)
def _clean_connect(self, sock_fileno, epoll, conns, msgs):
"""
Description: close socket, and clean local variables
Args:
sock_fileno: a socket fileno, return value of socket.fileno()
epoll: a set of select.epoll.
conns: all socket connections registered in epoll
msgs: msg read from a socket
"""
logging.info("clean fd:%s, conns:%s", sock_fileno, conns)
self.app_manager.unregister_app_by_fd(sock_fileno)
epoll.unregister(sock_fileno)
conns[sock_fileno].close()
del conns[sock_fileno]
del msgs[sock_fileno]
def _process_msg(self, conn, msg_name, msg_data):
"""
Description: Total entrance to process protobuf msg
Args:
conn: a socket connection
msg_name: name of a msg.
msg_data: msg body, serialized by protobuf
Returns:
False:somme error occured
True:succeed
"""
# process open channel request
if msg_name == pb2._REGISTERAPP.full_name:
ret = self._process_register_app(conn, msg_data)
elif msg_name == pb2._IMAGESET.full_name:
ret = self._process_image_set(conn, msg_data)
elif msg_name == pb2._CARINFERENCERESULT.full_name:
ret = self._process_car_inference_result(conn, msg_data)
elif msg_name == pb2._HUMANINFERENCERESULT.full_name:
ret = self._process_human_inference_result(conn, msg_data)
elif msg_name == pb2._FACEINFERENCERESULT.full_name:
ret = self._process_face_inference_result(conn, msg_data)
elif msg_name == presenter_message_pb2._HEARTBEATMESSAGE.full_name:
ret = self._process_heartbeat(conn)
# process image request, receive an image data from presenter agent
else:
logging.error("Not recognized msg type %s", msg_name)
ret = False
return ret
def _process_heartbeat(self, conn):
'''
Description: set heartbeat
Input:
conn: a socket connection
Returns:
True: set heartbeat ok.
'''
if self.app_manager.get_app_id_by_socket(conn.fileno()):
self.app_manager.set_heartbeat(conn.fileno())
return True
def _parse_protobuf(self, protobuf, msg_data):
"""
Description: parse protobuf
Input:
protobuf: a struct defined by protobuf
msg_data: msg body, serialized by protobuf
Returns: True or False
"""
try:
protobuf.ParseFromString(msg_data)
return True
except DecodeError as exp:
logging.error(exp)
return False
def _process_register_app(self, conn, msg_data):
'''
Description: process register_app message
Input:
conn: a socket connection
msg_data: message data.
Returns: True or False
'''
request = pb2.RegisterApp()
response = pb2.CommonResponse()
msg_name = pb2._COMMONRESPONSE.full_name
if not self._parse_protobuf(request, msg_data):
self._response_error_unknown(conn)
return False
app_id = request.id
app_type = request.type
# check app id if exist
app_dir = os.path.join(self.storage_dir, app_id)
if os.path.isdir(app_dir):
logging.error("App %s is already exist.", app_id)
response.ret = pb2.kErrorAppRegisterExist
response.message = "App {} is already exist.".format(app_id)
self.send_message(conn, response, msg_name)
elif self.app_manager.get_app_num() >= self.max_app_num:
logging.error("App number reach the upper limit")
response.ret = pb2.kErrorAppRegisterLimit
response.message = "App number reach the upper limit"
self.send_message(conn, response, msg_name)
elif app_type != SERVER_TYPE:
logging.error("App type %s error", app_type)
response.ret = pb2.kErrorAppRegisterType
response.message = "App type {} error".format(app_type)
self.send_message(conn, response, msg_name)
elif self._is_app_id_invalid(app_id):
logging.error("App id %s is too long", app_id)
response.ret = pb2.kErrorOther
response.message = "App id: {} is too long".format(app_id)
self.send_message(conn, response, msg_name)
elif self._remain_space() < self.reserved_space:
logging.error("Insufficient storage space on Presenter Server.")
response.ret = pb2.kErrorAppRegisterNoStorage
response.message = "Insufficient storage space on Presenter Server"
self.send_message(conn, response, msg_name)
else:
self.app_manager.register_app(app_id, conn)
# app_dir = os.path.join(self.storage_dir, app_id)
# if not os.path.isdir(app_dir):
# os.makedirs(app_dir)
response.ret = pb2.kErrorNone
response.message = "Register app {} succeed".format(app_id)
self.send_message(conn, response, msg_name)
return True
return False
def _process_image_set(self, conn, msg_data):
'''
Description: process image_set message
Input:
conn: a socket connection
msg_data: message data.
Returns: True or False
'''
request = pb2.ImageSet()
response = pb2.CommonResponse()
msg_name = msg_name = pb2._COMMONRESPONSE.full_name
if not self._parse_protobuf(request, msg_data):
self._response_error_unknown(conn)
return False
app_id = request.frame_index.app_id
channel_id = request.frame_index.channel_id
channel_name = request.frame_index.channel_name
frame_id = request.frame_index.frame_id
frame_image = request.frame_image
if not self.app_manager.is_app_exist(app_id):
logging.error("app_id: %s not exist", app_id)
response.ret = pb2.kErrorAppLost
response.message = "app_id: %s not exist"%(app_id)
self.send_message(conn, response, msg_name)
return False
frame_num = self.app_manager.get_frame_num(app_id, channel_id)
if frame_num % CHECK_INTERCAL == 0:
if self._remain_space() <= self.reserved_space:
logging.error("Insufficient storage space on Server.")
response.ret = pb2.kErrorStorageLimit
response.message = "Insufficient storage space on Server."
self.send_message(conn, response, msg_name)
return False
stack_index = int(frame_id) // (MAX_SUB_DIRECTORY_NUM * FRAME_GAP)
stack_directory = "stack_{}/".format(stack_index)
frame = stack_directory + frame_id
frame_dir = os.path.join(self.storage_dir, app_id, channel_id, frame)
image_file = os.path.join(frame_dir, IMAGE_FILE)
if not os.path.exists(image_file):
if not self._save_image(frame_dir, frame_image):
self._response_error_unknown(conn)
logging.error("save_image: %s error.", frame_dir)
return False
self.app_manager.increase_frame_num(app_id, channel_id)
app_dir = os.path.join(self.storage_dir, app_id)
self._save_channel_name(app_dir, channel_id, channel_name)
for i in request.object:
object_id = i.id
object_confidence = i.confidence
object_image = i.image
object_dir = os.path.join(frame_dir, object_id)
inference_dict = {"confidence" : object_confidence}
if not self._save_image(object_dir, object_image) or \
not self._save_inference_result(object_dir, inference_dict):
self._response_error_unknown(conn)
logging.error("save image: %s error.", object_dir)
return False
self.app_manager.set_heartbeat(conn.fileno())
response.ret = pb2.kErrorNone
response.message = "image set process succeed"
self.send_message(conn, response, msg_name)
return True
def _process_car_inference_result(self, conn, msg_data):
'''
Description: process car_inference_result message
Input:
conn: a socket connection
msg_data: message data.
Returns: True or False
'''
request = pb2.CarInferenceResult()
response = pb2.CommonResponse()
msg_name = msg_name = pb2._COMMONRESPONSE.full_name
inference_dict = {}
if not self._parse_protobuf(request, msg_data):
self._response_error_unknown(conn)
return False
app_id = request.frame_index.app_id
channel_id = request.frame_index.channel_id
frame_id = request.frame_index.frame_id
object_id = request.object_id
if not self.app_manager.is_app_exist(app_id):
logging.error("app_id: %s not exist", app_id)
response.ret = pb2.kErrorAppLost
response.message = "app_id: %s not exist"%(app_id)
self.send_message(conn, response, msg_name)
return False
channel_dir = os.path.join(self.storage_dir, app_id, channel_id)
stack_list = os.listdir(channel_dir)
stack_list.sort()
current_stack = int(frame_id) // (MAX_SUB_DIRECTORY_NUM * FRAME_GAP)
stack_directory = "stack_{}/".format(current_stack)
object_dir = os.path.join(channel_dir, stack_directory, frame_id, object_id)
if request.type == pb2.kCarColor:
inference_dict["color_confidence"] = request.confidence
inference_dict["color"] = request.value
elif request.type == pb2.kCarBrand:
inference_dict["brand_confidence"] = request.confidence
inference_dict["brand"] = request.value
elif request.type == pb2.kCarPlate:
inference_dict["plate_confidence"] = request.confidence
inference_dict["plate"] = request.value
else:
logging.error("unknown type %d", request.type)
self._response_error_unknown(conn)
return False
if not self._save_inference_result(object_dir, inference_dict):
self._response_error_unknown(conn)
return False
self.app_manager.set_heartbeat(conn.fileno())
response.ret = pb2.kErrorNone
response.message = "car inference process succeed"
self.send_message(conn, response, msg_name)
return True
def _process_human_inference_result(self, conn, msg_data):
'''
Description: process human_inference_result message
Input:
conn: a socket connection
msg_data: message data.
Returns: True or False
'''
request = pb2.HumanInferenceResult()
response = pb2.CommonResponse()
msg_name = msg_name = pb2._COMMONRESPONSE.full_name
inference_dict = {}
if not self._parse_protobuf(request, msg_data):
self._response_error_unknown(conn)
return False
app_id = request.frame_index.app_id
channel_id = request.frame_index.channel_id
frame_id = request.frame_index.frame_id
object_id = request.object_id
if not self.app_manager.is_app_exist(app_id):
logging.error("app_id: %s not exist", app_id)
response.ret = pb2.kErrorAppLost
response.message = "app_id: %s not exist"%(app_id)
self.send_message(conn, response, msg_name)
return False
channel_dir = os.path.join(self.storage_dir, app_id, channel_id)
stack_list = os.listdir(channel_dir)
stack_list.sort()
current_stack = int(frame_id) // (MAX_SUB_DIRECTORY_NUM * FRAME_GAP)
stack_directory = "stack_{}/".format(current_stack)
object_dir = os.path.join(channel_dir, stack_directory, frame_id, object_id)
inference_dict["property"] = {}
for item in request.human_property:
inference_dict["property"][item.key] = item.value
if not self._save_inference_result(object_dir, inference_dict):
self._response_error_unknown(conn)
return False
self.app_manager.set_heartbeat(conn.fileno())
response.ret = pb2.kErrorNone
response.message = "human inference process succeed"
self.send_message(conn, response, msg_name)
return True
def _process_face_inference_result(self, conn, msg_data):
'''
Description: process face_inference_result message
Input:
conn: a socket connection
msg_data: message data.
Returns: True or False
'''
request = pb2.FaceInferenceResult()
response = pb2.CommonResponse()
msg_name = msg_name = pb2._COMMONRESPONSE.full_name
inference_dict = {}
if not self._parse_protobuf(request, msg_data):
self._response_error_unknown(conn)
return False
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.