| text (stringlengths 65–6.05M) | lang (stringclasses, 8 values) | type (stringclasses, 2 values) | id (stringlengths 64) |
|---|---|---|---|
# Sample module in the public domain. Feel free to use this as a template
# for your modules (and you can remove this header and take complete credit
# and liability)
#
# Contact: Brian Carrier [carrier <at> sleuthkit [dot] org]
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# Simple data source-level ingest module for Autopsy.
# Used as part of Python tutorials from Basis Technology - July 2015
# http://www.basistech.com/python-autopsy-module-tutorial-1-the-file-ingest-module/
#
# Looks for *.log files, flags them as interesting file hits, and extracts
# date/time information from their names or contents.
import os
import re
import jarray
import inspect
from java.lang import System
from java.util.logging import Level
from java.io import File
from org.sleuthkit.datamodel import SleuthkitCase
from org.sleuthkit.datamodel import AbstractFile
from org.sleuthkit.datamodel import ReadContentInputStream
from org.sleuthkit.datamodel import BlackboardArtifact
from org.sleuthkit.datamodel import BlackboardAttribute
from org.sleuthkit.datamodel import TskData
from org.sleuthkit.autopsy.ingest import IngestModule
from org.sleuthkit.autopsy.ingest.IngestModule import IngestModuleException
from org.sleuthkit.autopsy.ingest import DataSourceIngestModule
from org.sleuthkit.autopsy.ingest import FileIngestModule
from org.sleuthkit.autopsy.ingest import IngestModuleFactoryAdapter
from org.sleuthkit.autopsy.ingest import IngestMessage
from org.sleuthkit.autopsy.ingest import IngestServices
from org.sleuthkit.autopsy.ingest import ModuleDataEvent
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.datamodel import ContentUtils
from org.sleuthkit.autopsy.casemodule.services import Services
from org.sleuthkit.autopsy.casemodule.services import FileManager
# This will work in 4.0.1 and beyond
# from org.sleuthkit.autopsy.casemodule.services import Blackboard
# Factory that defines the name and details of the module and allows Autopsy
# to create instances of the modules that will do the analysis.
class FindLogFilesIngestModuleFactory(IngestModuleFactoryAdapter):
moduleName = "Log File Finder"
def getModuleDisplayName(self):
return self.moduleName
def getModuleDescription(self):
return "All Program's Log File Finder - LHS"
def getModuleVersionNumber(self):
return "1.0"
# Return true if module wants to get called for each file
#def isFileIngestModuleFactory(self):
# return True
# can return null if isFileIngestModuleFactory returns false
#def createFileIngestModule(self, ingestOptions):
# return FindLogFilesIngestModule()
def isDataSourceIngestModuleFactory(self):
return True
def createDataSourceIngestModule(self, ingestOptions):
return FindLogFilesIngestModule()
# Data source-level ingest module. One gets created per data source ingest job.
class FindLogFilesIngestModule(DataSourceIngestModule):
_logger = Logger.getLogger(FindLogFilesIngestModuleFactory.moduleName)
def log(self, level, msg):
self._logger.logp(level, self.__class__.__name__, inspect.stack()[1][3], msg)
# Where any setup and configuration is done
# 'context' is an instance of org.sleuthkit.autopsy.ingest.IngestJobContext.
# See: http://sleuthkit.org/autopsy/docs/api-docs/3.1/classorg_1_1sleuthkit_1_1autopsy_1_1ingest_1_1_ingest_job_context.html
# TODO: Add any setup code that you need here.
def startUp(self, context):
self.filesFound = 0
self.context = context
# Throw an IngestModule.IngestModuleException exception if there was a problem setting up
# raise IngestModuleException("Oh No!")
pass
# Where the analysis is done. The whole data source is passed in here.
# The 'dataSource' object being passed in is of type org.sleuthkit.datamodel.Content.
def process(self, dataSource, progressBar):
# we don't know how much work there is yet
progressBar.switchToIndeterminate()
# This will work in 4.0.1 and beyond
# Use blackboard class to index blackboard artifacts for keyword search
# blackboard = Case.getCurrentCase().getServices().getBlackboard()
# Find files named *.log, regardless of parent path
fileManager = Case.getCurrentCase().getServices().getFileManager()
files = fileManager.findFiles(dataSource, "%.log")
numFiles = len(files)
progressBar.switchToDeterminate(numFiles)
fileCount = 0
for file in files:
# Check if the user pressed cancel while we were busy
if self.context.isJobCancelled():
return IngestModule.ProcessResult.OK
self.log(Level.INFO, "Processing file: " + file.getName())
fileCount += 1
art = file.newArtifact(BlackboardArtifact.ARTIFACT_TYPE.TSK_INTERESTING_FILE_HIT)
att = BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_SET_NAME.getTypeID(),
FindLogFilesIngestModuleFactory.moduleName, "Log Files")
art.addAttribute(att)
############# This is finding datetime information ###################
# Step 1: Save file temporary and open it
tmpPath = os.path.join(Case.getCurrentCase().getTempDirectory(), str(file.getName()))
ContentUtils.writeToFile(file, File(tmpPath))
fdata = open(tmpPath, "rb")
FLFpath = os.path.join(Case.getCurrentCase().getTempDirectory(),'..','FindLogFilesResult')
if not os.path.exists(FLFpath):
os.mkdir(FLFpath)
result = open(os.path.join(FLFpath,'result.txt'), 'ab')
# filename check
date = re.findall('([\d]{4}.?[\d]{2}.?[\d]{2})', file.getName())
# Case 1: date is in filename
if(date != []):
dnt = re.findall('([\d]{4}.?[\d]{2}.?[\d]{2}[T|_|-][\d]*)', file.getName())
# Case 1-1: time is also in filename
if(dnt != []):
result.write(file.getName()+' >> '+dnt[0]+'\r\n')
# Case 1-2: time is not in filename -- find time in file data
else:
line = fdata.readline()
timefind = []
while line != "":
#result.write(line)
timefind = re.findall('([\d]{2}[:][\d]{2}[:][\d]{2})',line)
if timefind != []:
result.write(file.getName()+' >> '+date[0]+' '+timefind[0]+'\r\n')
break
line = fdata.readline()
# Case 2: date is not in filename -- find datetime in file data
else:
line = fdata.readline()
dtfind = []
while line != "":
dtfind = re.findall('([\d]{4}.?[\d]{2}.?[\d]{2}[T|_|-][\d]*)',line)
if dtfind != []:
result.write(file.getName()+' >> '+dtfind[0]+'\r\n')
break
line = fdata.readline()
fdata.close()
result.close()
# This will work in 4.0.1 and beyond
#try:
# # index the artifact for keyword search
# blackboard.indexArtifact(art)
#except Blackboard.BlackboardException as e:
# self.log(Level.SEVERE, "Error indexing artifact " + art.getDisplayName())
# Fire an event to notify the UI and others that there is a new artifact
IngestServices.getInstance().fireModuleDataEvent(
ModuleDataEvent(FindLogFilesIngestModuleFactory.moduleName,
BlackboardArtifact.ARTIFACT_TYPE.TSK_INTERESTING_FILE_HIT, None))
# After all files are processed, post a message to the ingest messages inbox.
message = IngestMessage.createMessage(IngestMessage.MessageType.DATA,
"Find Log Files", "Found %d files" % fileCount)
IngestServices.getInstance().postMessage(message)
return IngestModule.ProcessResult.OK
# Where any shutdown code is run and resources are freed.
# TODO: Add any shutdown code that you need here.
def shutDown(self):
pass
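# Illustrative sketch, not part of the module: rough behaviour of the filename regexes
# used in process() above, on hypothetical log file names (run in a plain Python shell):
#
#   import re
#   re.findall('([\d]{4}.?[\d]{2}.?[\d]{2})', 'app-2015-07-21.log')
#   # -> ['2015-07-21']
#   re.findall('([\d]{4}.?[\d]{2}.?[\d]{2}[T|_|-][\d]*)', 'app_20150721_1330.log')
#   # -> ['20150721_1330']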
| Python | CL | bd8ef12c5c4434beffaa3b9393f53d4630a688c1d70d4ea2dbd9fd174ab22380 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Policy engine for openstack_auth"""
import logging
import os.path
from django.conf import settings
from oslo_config import cfg
from oslo_policy import opts as policy_opts
from oslo_policy import policy
import yaml
from openstack_auth import user as auth_user
from openstack_auth import utils as auth_utils
LOG = logging.getLogger(__name__)
_ENFORCER = None
_BASE_PATH = settings.POLICY_FILES_PATH
def _get_policy_conf(policy_file, policy_dirs=None):
conf = cfg.ConfigOpts()
# Passing [] is required. Otherwise oslo.config looks up sys.argv.
conf([])
policy_opts.set_defaults(conf)
conf.set_default('policy_file', policy_file, 'oslo_policy')
# Policy Enforcer has been updated to take in a policy directory
# as a config option. However, the default value is set to
# ['policy.d'], which causes the code to break. Set the default
# value to an empty list for now.
if policy_dirs is None:
policy_dirs = []
conf.set_default('policy_dirs', policy_dirs, 'oslo_policy')
return conf
def _get_policy_file_with_full_path(service):
policy_files = settings.POLICY_FILES
policy_file = os.path.join(_BASE_PATH, policy_files[service])
policy_dirs = settings.POLICY_DIRS.get(service, [])
policy_dirs = [os.path.join(_BASE_PATH, policy_dir)
for policy_dir in policy_dirs]
return policy_file, policy_dirs
def _convert_to_ruledefault(p):
deprecated = p.get('deprecated_rule')
if deprecated:
deprecated_rule = policy.DeprecatedRule(deprecated['name'],
deprecated['check_str'])
else:
deprecated_rule = None
return policy.RuleDefault(
p['name'], p['check_str'],
description=p['description'],
scope_types=p['scope_types'],
deprecated_rule=deprecated_rule,
deprecated_for_removal=p.get('deprecated_for_removal', False),
deprecated_reason=p.get('deprecated_reason'),
deprecated_since=p.get('deprecated_since'),
)
def _load_default_rules(service, enforcer):
policy_files = settings.DEFAULT_POLICY_FILES
try:
policy_file = os.path.join(_BASE_PATH, policy_files[service])
except KeyError:
LOG.error('Default policy file for %s is not defined. '
'Check DEFAULT_POLICY_FILES setting.', service)
return
try:
with open(policy_file) as f:
policies = yaml.safe_load(f)
except IOError as e:
LOG.error('Failed to open the policy file for %(service)s %(path)s: '
'%(reason)s',
{'service': service, 'path': policy_file, 'reason': e})
return
except yaml.YAMLError as e:
LOG.error('Failed to load the default policies for %(service)s: '
'%(reason)s', {'service': service, 'reason': e})
return
defaults = [_convert_to_ruledefault(p) for p in policies]
enforcer.register_defaults(defaults)
def _get_enforcer():
global _ENFORCER
if not _ENFORCER:
_ENFORCER = {}
policy_files = settings.POLICY_FILES
for service in policy_files.keys():
policy_file, policy_dirs = _get_policy_file_with_full_path(service)
conf = _get_policy_conf(policy_file, policy_dirs)
enforcer = policy.Enforcer(conf)
enforcer.suppress_default_change_warnings = True
_load_default_rules(service, enforcer)
try:
enforcer.load_rules()
except IOError:
# Just in case we get a permission-denied error, which is not
# handled by oslo.policy yet. It is handled here as if we
# don't have any policy file: allow the action from the Horizon
# side.
LOG.warning("Cannot load a policy file '%s' for service '%s' "
"due to IOError. One possible reason is "
"permission denied.", policy_file, service)
except ValueError:
LOG.warning("Cannot load a policy file '%s' for service '%s' "
"due to ValueError. The file might be wrongly "
"formatted.", policy_file, service)
# Ensure enforcer.rules is populated.
if enforcer.rules:
LOG.debug("adding enforcer for service: %s", service)
_ENFORCER[service] = enforcer
else:
locations = policy_file
if policy_dirs:
locations += ' and files under %s' % policy_dirs
LOG.warning("No policy rules for service '%s' in %s",
service, locations)
return _ENFORCER
def reset():
global _ENFORCER
_ENFORCER = None
def check(actions, request, target=None):
"""Check user permission.
Check if the user has permission to the action according
to policy setting.
:param actions: list of scope and action to do policy checks on,
the composition of which is (scope, action). Multiple actions
are treated as a logical AND.
* scope: service type managing the policy for action
* action: string representing the action to be checked
this should be colon separated for clarity.
i.e.
| compute:create_instance
| compute:attach_volume
| volume:attach_volume
for a policy action that requires a single action, actions
should look like
| "(("compute", "compute:create_instance"),)"
for a multiple action check, actions should look like
| "(("identity", "identity:list_users"),
| ("identity", "identity:list_roles"))"
:param request: django http request object. If not specified, credentials
must be passed.
:param target: dictionary representing the object of the action
for object creation this should be a dictionary
representing the location of the object e.g.
{'project_id': object.project_id}
:returns: boolean if the user has permission or not for the actions.
"""
if target is None:
target = {}
user = auth_utils.get_user(request)
# Several service policy engines default to a project id check for
# ownership. Since the user is already scoped to a project, if a
# different project id has not been specified use the currently scoped
# project's id.
#
# The reason is the operator can edit the local copies of the service
# policy file. If a rule is removed, then the default rule is used. We
# don't want to block all actions because the operator did not fully
# understand the implication of editing the policy file. Additionally,
# the service APIs will correct us if we are too permissive.
if target.get('project_id') is None:
target['project_id'] = user.project_id
if target.get('tenant_id') is None:
target['tenant_id'] = target['project_id']
# same for user_id
if target.get('user_id') is None:
target['user_id'] = user.id
domain_id_keys = [
'domain_id',
'project.domain_id',
'user.domain_id',
'group.domain_id'
]
# populates domain id keys with user's current domain id
for key in domain_id_keys:
if target.get(key) is None:
target[key] = user.user_domain_id
credentials = _user_to_credentials(user)
domain_credentials = _domain_to_credentials(request, user)
# if there is a domain token use the domain_id instead of the user's domain
if domain_credentials:
credentials['domain_id'] = domain_credentials.get('domain_id')
enforcer = _get_enforcer()
for action in actions:
scope, action = action[0], action[1]
if scope in enforcer:
# this is for handling the v3 policy file and will only be
# needed when a domain scoped token is present
if scope == 'identity' and domain_credentials:
# use domain credentials
if not _check_credentials(enforcer[scope],
action,
target,
domain_credentials):
return False
# use project credentials
if not _check_credentials(enforcer[scope],
action, target, credentials):
return False
# if no policy for scope, allow action, underlying API will
# ultimately block the action if not permitted, treat as though
# allowed
return True
def _check_credentials(enforcer_scope, action, target, credentials):
is_valid = True
if not enforcer_scope.enforce(action, target, credentials):
# to match service implementations, if a rule is not found,
# use the default rule for that service policy
#
# waiting to make the check because the first call to
# enforce loads the rules
if action not in enforcer_scope.rules:
if not enforcer_scope.enforce('default', target, credentials):
if 'default' in enforcer_scope.rules:
is_valid = False
else:
is_valid = False
return is_valid
def _user_to_credentials(user):
if not hasattr(user, "_credentials"):
roles = [role['name'] for role in user.roles]
user._credentials = {'user_id': user.id,
'username': user.username,
'project_id': user.project_id,
'tenant_id': user.project_id,
'project_name': user.project_name,
'domain_id': user.user_domain_id,
'is_admin': user.is_superuser,
'roles': roles}
return user._credentials
def _domain_to_credentials(request, user):
if not hasattr(user, "_domain_credentials"):
try:
domain_auth_ref = request.session.get('domain_token')
# no domain role or not running on V3
if not domain_auth_ref:
return None
domain_user = auth_user.create_user_from_token(
request, auth_user.Token(domain_auth_ref),
domain_auth_ref.service_catalog.url_for(interface=None))
user._domain_credentials = _user_to_credentials(domain_user)
# uses the domain_id associated with the domain_user
user._domain_credentials['domain_id'] = domain_user.domain_id
except Exception:
LOG.warning("Failed to create user from domain scoped token.")
return None
return user._domain_credentials
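# Illustrative usage sketch (hypothetical caller, not part of this module), using the
# action tuple format documented in check() above:
#
#   from openstack_auth import policy
#   allowed = policy.check((("compute", "compute:create_instance"),), request)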
| Python | CL | 7b44339e542e855c867d5a3a053cb7e87d94972bd58f3f1fa112669872360f3d |
"""Run this script to start the digital brain."""
from app.ask import ask
import app.io as io
from app.language import get_language
from app.match import Matcher
from app.memories.persistent import PersistentMemory
from app.tell import tell
from app.types.command import Command
from app.types.validation import Validation
from app.validate import validate_command
import os
import json
_CONFIG_PATH = 'configs/shell.json'
def _main():
io.welcome()
config = _parse_config()
thresholds = config['threholds']
memory_path = os.path.abspath(config['memory_path'])
memory = PersistentMemory(memory_path)
nlp = get_language()
matcher = Matcher(nlp)
io.help()
while True:
user_input, valid, validation_message = io.prompt()
if not valid:
io.reply(validation_message)
else:
command_validation_result = validate_command(user_input)
if command_validation_result == Validation.UNKNOWN_COMMAND:
io.reply('I am sorry, but I do not understand that.')
else:
if user_input == Command.BYE:
io.exit()
break
elif user_input == Command.HELP:
io.help()
elif user_input == Command.ASK:
_ask(thresholds['question'], matcher, memory.facts)
elif user_input == Command.TELL:
_tell(thresholds['fact'], matcher, memory)
def _parse_config():
with open(_CONFIG_PATH, 'r') as config_file:
config = json.load(config_file)
return config
def _ask(threshold, matcher, facts):
"""
Interactive option to ask a question.
:type threshold: float
:type matcher: app.match.Matcher
:type facts: list of app.types.fact.Fact
"""
io.reply('What do you want to ask me?')
while True:
message = io.prompt_without_parse('ask')
response = ask(threshold, matcher, facts, message)
type = response['type']
if type == 'invalid':
io.reply(response['validation_mesage'])
elif type == 'no_match':
# TODO: how to give detailed information on why no match was found?
io.reply('Sorry. My digital brain is not yet fully evolved. I did not understand it.')
io.reply('Could you rephrase the question, so I might be able to understand it?')
return
elif type == 'select_match':
_select_match(response['matches'])
else:
io.reply(response['answer'])
return
def _select_match(matches):
"""
Present all possible matches to user and prompt user to pick one of them.
:type matches: list of app.types.match.Match
:rtype: app.types.match.Match
"""
# TODO: add a user-friendly representation of each relation
raise NotImplementedError()
def _tell(threshold, matcher, memory):
io.reply('What do you want to tell me?')
while True:
message = io.prompt_without_parse('tell')
response = tell(threshold, matcher, memory, message)
type = response['type']
if type == 'invalid':
io.reply(response['validation_mesage'])
elif type == 'no_match':
# TODO: how to give detailed information on why no match was found?
io.reply('Sorry. My digital brain is not yet fully evolved. I did not understand it.')
io.reply('Could you rephrase the question, so I might be able to understand it?')
return
else:
io.reply('Got it! I will remember that.')
return
if __name__ == '__main__':
_main()
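# Illustrative sketch of the shape configs/shell.json is expected to have, inferred from
# the keys read in _main() ('threholds' is spelled as in the code); the values shown are
# assumptions:
#
#   {
#       "threholds": {"question": 0.8, "fact": 0.8},
#       "memory_path": "data/memory.json"
#   }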
| Python | CL | e195db08285178137acdd2d64cc32c6ebd8dc865079ee5fd4db5eb70fd490917 |
import numpy as np
import torch
import json
class ImageDatabase(torch.utils.data.Dataset):
"""
Dataset for IMDB used in Pythia
The general format that we have standardized is as follows:
{
metadata: {
'version': x
},
data: [
{
'id': DATASET_SET_ID,
'set_folder': <directory>,
'feature_path': <file_path>,
'info': {
// Extra information
'questions_tokens': [],
'answer_tokens': []
}
}
]
}
"""
def __init__(self, imdb_path):
super().__init__()
self.metadata = {}
self._load_imdb(imdb_path)
def _load_imdb(self, imdb_path):
if imdb_path.endswith(".npy"):
self._load_npy(imdb_path)
elif imdb_path.endswith(".jsonl"):
self._load_jsonl(imdb_path)
elif "visdial" in imdb_path or "visual_dialog" in imdb_path:
self._load_visual_dialog(imdb_path)
else:
raise ValueError("Unknown file format for imdb")
def _load_jsonl(self, imdb_path):
with open(imdb_path, "r") as f:
db = f.readlines()
for idx, line in enumerate(db):
db[idx] = json.loads(line.strip("\n"))
self.data = db
self.start_idx = 0
def _load_npy(self, imdb_path):
self.db = np.load(imdb_path, allow_pickle=True)
self.start_idx = 0
if type(self.db) == dict:
self.metadata = self.db.get("metadata", {})
self.data = self.db.get("data", [])
else:
# TODO: Deprecate support for this
self.metadata = {"version": 1}
self.data = self.db
# Handle old imdb support
if "image_id" not in self.data[0]:
self.start_idx = 1
if len(self.data) == 0:
self.data = self.db
def _load_visual_dialog(self, imdb_path):
from pythia.datasets.dialog.visual_dialog.database import VisualDialogDatabase
self.data = VisualDialogDatabase(imdb_path)
self.metadata = self.data.metadata
self.start_idx = 0
def __len__(self):
return len(self.data) - self.start_idx
def __getitem__(self, idx):
data = self.data[idx + self.start_idx]
# Hacks for older IMDBs
if "answers" not in data:
if "all_answers" in data and "valid_answers" not in data:
data["answers"] = data["all_answers"]
if "valid_answers" in data:
data["answers"] = data["valid_answers"]
# TODO: Later clean up VizWIz IMDB from copy tokens
if "answers" in data and data["answers"][-1] == "<copy>":
data["answers"] = data["answers"][:-1]
return data
def get_version(self):
return self.metadata.get("version", None)
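# Illustrative usage sketch (the imdb path is an assumption, not part of the class):
#
#   db = ImageDatabase("data/imdb/imdb_train.npy")
#   print(len(db), db.get_version())
#   sample = db[0]  # dict-like entry; may contain keys such as 'feature_path' and 'answers'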
| Python | CL | 167ed5a734fba6960c86bf950496a92ed610542fd118ef842571667ca102f73b |
"""
For now, there is a single _read() and a single _write() method, tied to the
file system. In the future, these will be code cells in a context, and it
will be possible to register custom _read() and _write() cells, e.g. for
storage in a database.
Same for _exists.
_init() is invoked at startup:
If authority is "file" or "file-strict":
This may invoke _read(), but only if the file exists
(if cell is non-empty and is different, a warning is printed)
("file-strict": the file must exist)
If not, this may invoke _write(), but only if the cell is non-empty
If authority is "cell":
This may invoke _write(), but only if the cell is non-empty
(if the file exists and is different, a warning is printed)
If not, this may invoke _read(), but only if the file exists
Periodically, conditional_read() and conditional_write() are invoked,
that check if a read/write is necessary, and if so, invoke _read()/_write()
NOTE: resolve_register returns immediately if there has been an exception raised
"""
from .protocol import cson2json, json_encode
from weakref import WeakValueDictionary, WeakKeyDictionary, WeakSet, ref
from threading import Thread, RLock, Event
from collections import deque, OrderedDict
import sys, os
import time
import traceback
import copy
from contextlib import contextmanager
import json
NoStash = 1
def is_dummy_mount(mount):
if mount is None:
return True
assert isinstance(mount, dict), mount
if list(mount.keys()) == ["extension"]:
return True
return False
class MountItem:
last_exc = None
parent = None
_destroyed = False
def __init__(self, parent, cell, path, mode, authority, persistent, *,
dummy=False, **kwargs
):
if parent is not None:
self.parent = ref(parent)
self.path = path
self.cell = ref(cell)
self.dummy = dummy
assert mode in ("r", "w", "rw"), mode #read from file, write to file, or both
self.mode = mode
assert persistent in (True, False, None)
assert authority in ("cell", "file", "file-strict"), authority
if authority == "file-strict":
assert persistent
elif authority in ("file", "file-strict"):
assert "r" in self.mode, (authority, mode)
self.authority = authority
self.kwargs = kwargs
self.last_checksum = None
self.last_time = None
self.last_mtime = None
self.persistent = persistent
def init(self):
if self._destroyed:
return
assert self.parent is not None
cell = self.cell()
if cell is None:
return
exists = self._exists()
cell_empty = (cell.status() != "OK")
if self.authority in ("file", "file-strict"):
if exists:
with self.lock:
filevalue = self._read()
update_file = True
file_checksum = None
if not cell_empty:
file_checksum = cell._checksum(filevalue, buffer=True)
if file_checksum == cell.text_checksum():
update_file = False
else:
print("Warning: File path '%s' has a different value, overwriting cell" % self.path) #TODO: log warning
self._after_read(file_checksum)
if update_file:
self.set(filevalue, checksum=file_checksum)
elif self.authority == "file-strict":
raise Exception("File path '%s' does not exist, but authority is 'file-strict'" % self.path)
else:
if "w" in self.mode and not cell_empty:
value = cell.serialize_buffer()
checksum = cell.text_checksum()
with self.lock:
self._write(value)
self._after_write(checksum)
else: #self.authority == "cell"
must_read = ("r" in self.mode)
if not must_read and cell._master is not None and \
cell._master[1] in ("form", "storage"):
must_read = True
if not cell_empty:
value = cell.serialize_buffer()
checksum = cell.text_checksum()
if exists and must_read:
with self.lock:
filevalue = self._read()
file_checksum = cell._checksum(filevalue, buffer=True)
if file_checksum != checksum:
if "w" in self.mode:
print("Warning: File path '%s' has a different value, overwriting file" % self.path) #TODO: log warning
else:
print("Warning: File path '%s' has a different value, no overwriting enabled" % self.path) #TODO: log warning
self._after_read(file_checksum)
if "w" in self.mode:
with self.lock:
self._write(value)
self._after_write(checksum)
else:
if exists and must_read:
with self.lock:
filevalue = self._read()
file_checksum = cell._checksum(filevalue, buffer=True)
self.set(filevalue, checksum=file_checksum)
self._after_read(file_checksum)
def set(self, filevalue, checksum):
from .cell import JsonCell
if self._destroyed:
return
cell = self.cell()
if cell is None:
return
#Special mount mode for JSON: whatever is read will be passed through cson2json
if filevalue is not None and isinstance(cell, JsonCell) and "w" in self.mode:
d = cson2json(filevalue)
filevalue2 = json_encode(d, sort_keys=True, indent=2)
if filevalue2 != filevalue:
filevalue = filevalue2
self._write(filevalue)
if cell._mount_setter is not None:
cell._mount_setter(filevalue, checksum)
cell._get_manager().cell_send_update(cell, False, None)
else:
cell.from_buffer(filevalue, checksum=checksum)
@property
def lock(self):
assert self.parent is not None
return self.parent().lock
def _read(self):
#print("read", self.cell())
binary = self.kwargs["binary"]
encoding = self.kwargs.get("encoding")
filemode = "rb" if binary else "r"
with open(self.path.replace("/", os.sep), filemode, encoding=encoding) as f:
return f.read()
def _write(self, filevalue, with_none=False):
assert "w" in self.mode
binary = self.kwargs["binary"]
encoding = self.kwargs.get("encoding")
filemode = "wb" if binary else "w"
filepath = self.path.replace("/", os.sep)
if filevalue is None:
if not with_none:
if os.path.exists(filepath):
os.unlink(filepath)
return
filevalue = b"" if binary else ""
with open(filepath, filemode, encoding=encoding) as f:
f.write(filevalue)
def _exists(self):
return os.path.exists(self.path.replace("/", os.sep))
def _after_write(self, checksum):
self.last_checksum = checksum
self.last_time = time.time()
try:
stat = os.stat(self.path)
self.last_mtime = stat.st_mtime
except Exception:
pass
def conditional_write(self, with_none=False):
if self._destroyed:
return
if not "w" in self.mode:
return
cell = self.cell()
if cell is None:
return
status = cell.status()
if status != "OK":
if not with_none or status != "UNDEFINED":
return
checksum = cell.text_checksum()
if checksum is None or self.last_checksum != checksum:
value = cell.serialize_buffer()
if value is not None:
assert cell._checksum(value, buffer=True) == checksum, cell._format_path()
with self.lock:
self._write(value, with_none=with_none)
self._after_write(checksum)
def _after_read(self, checksum, *, mtime=None):
self.last_checksum = checksum
if mtime is None:
stat = os.stat(self.path)
mtime = stat.st_mtime
self.last_mtime = mtime
def conditional_read(self):
if self._destroyed:
return
cell = self.cell()
if cell is None:
return
if not self._exists():
return
with self.lock:
stat = os.stat(self.path)
mtime = stat.st_mtime
file_checksum = None
if self.last_mtime is None or mtime > self.last_mtime:
filevalue = self._read()
file_checksum = cell._checksum(filevalue, buffer=True)
self._after_read(file_checksum, mtime=mtime)
cell_checksum = None
if cell.value is not None:
cell_checksum = cell.text_checksum()
if file_checksum is not None and file_checksum != cell_checksum:
if "r" in self.mode:
self.set(filevalue, checksum=file_checksum)
else:
print("Warning: write-only file %s (%s) has changed on disk, overruling" % (self.path, self.cell()))
value = cell.serialize_buffer()
assert cell._checksum(value, buffer=True) == cell_checksum, cell._format_path()
with self.lock:
self._write(value)
self._after_write(cell_checksum)
def destroy(self):
if self._destroyed:
return
self._destroyed = True
if self.dummy:
return
if self.persistent == False and os.path.exists(self.path):
#print("remove", self.path)
os.unlink(self.path)
def __del__(self):
if self.dummy:
return
if self._destroyed:
return
self._destroyed = True
print("undestroyed mount path %s" % self.path)
#self.destroy()
class LinkItem:
_destroyed = False
linked_path = None
def __init__(self, link, path, persistent):
self.link = ref(link)
self.path = path
self.persistent = persistent
def init(self):
from .context import Context
if self._destroyed:
return
linked = self.get_linked()
is_dir = (isinstance(linked, Context))
if is_dummy_mount(linked._mount):
return
linked_path = linked._mount["path"]
os.symlink(linked_path, self.path, is_dir)
self.linked_path = linked_path
def get_linked(self):
if self._destroyed:
return
link = self.link()
if link is None:
return
linked = link.get_linked()
return linked
def destroy(self):
if self._destroyed:
return
self._destroyed = True
if self.persistent == False:
filepath = self.path
unbroken_link = os.path.islink(filepath)
broken_link = (os.path.lexists(filepath) and not os.path.exists(filepath))
if unbroken_link or broken_link:
os.unlink(filepath)
def __del__(self):
if self._destroyed:
return
self._destroyed = True
print("undestroyed link path %s" % self.path)
class MountManagerStash:
"""Stashes away a part of the mounts that are all under a single context
They can later be destroyed or restored, depending on what happens to the context
NOTE: While the stash is active, there are ._mount objects (in cells and contexts)
and MountItems that point to the same path, but with different cells and contexts
Therefore, for the duration of the stash, it is imperative that all those are
kept alive and not garbage-collected, until the stash is undone.
This means that stashing must be done in a Python context (= with statement)
"""
def __init__(self, parent, context):
self._active = False
self.root = context._root()
self.parent = parent
self.context = context
self.mounts = WeakKeyDictionary()
self.contexts = WeakSet()
self.context_as_parent = WeakKeyDictionary()
self.paths = set()
def activate(self):
assert not self._active
self._active = True
parent, context = self.parent, self.context
for ctx in list(parent.contexts):
assert not is_dummy_mount(ctx._mount), ctx
if ctx._root() is self.root and ctx._part_of2(context):
self.contexts.add(ctx)
parent.contexts.remove(ctx)
path = ctx._mount["path"]
parent.paths[self.root].remove(path)
self.paths.add(path)
for cell, mountitem in list(parent.mounts.items()):
assert not is_dummy_mount(cell._mount), cell
ctx = cell._context()
assert ctx is not None, cell
if ctx._root() is self.root and ctx._part_of2(context):
self.mounts[cell] = mountitem
parent.mounts.pop(cell)
path = cell._mount["path"]
parent.paths[self.root].remove(path)
self.paths.add(path)
def _build_new_paths(self):
"""paths added by the parent since stash activation"""
new_paths = {}
parent, context = self.parent, self.context
for ctx in list(parent.contexts):
path = ctx._mount["path"]
if ctx._root() is self.root and ctx._part_of2(context):
new_paths[path] = ctx
for cell, mountitem in list(parent.mounts.items()):
assert not is_dummy_mount(cell._mount), cell
ctx = cell._context()
if ctx._root() is self.root and ctx._part_of2(context):
path = cell._mount["path"]
new_paths[path] = mountitem
return new_paths
def undo(self):
from .context import Context
assert self._active
new_paths = self._build_new_paths()
parent, context = self.parent, self.context
for ctx in sorted(self.contexts, key=lambda l: -len(l.path)):
assert not is_dummy_mount(ctx._mount), ctx
path = ctx._mount["path"]
if path in new_paths:
new_context = new_paths[path]
object.__setattr__(new_context, "_mount", None) #since we are not in macro mode
new_paths.pop(path)
parent.contexts.add(ctx)
parent.paths[self.root].add(path)
for cell, mountitem in self.mounts.items():
assert not is_dummy_mount(cell._mount), cell
path = cell._mount["path"]
if path in new_paths:
new_mountitem = new_paths[path]
new_mountitem._destroyed = True
if isinstance(mountitem, LinkItem):
new_link = new_mountitem.link()
object.__setattr__(new_link, "_mount", None) #since we are not in macro mode
else:
new_cell = new_mountitem.cell()
object.__setattr__(new_cell, "_mount", None) #since we are not in macro mode
new_paths.pop(path)
parent.mounts[cell] = mountitem
parent.paths[self.root].add(path)
context_to_unmount = []
for path, obj in new_paths.items():
if isinstance(obj, Context):
context_to_unmount.append(obj)
elif isinstance(obj, LinkItem):
parent.unmount(obj.link())
else:
parent.unmount(obj.cell())
for context in sorted(context_to_unmount, key=lambda l: -len(l.path)):
parent.unmount_context(context)
def join(self):
from .context import Context
from .cell import Cell
assert self._active
new_paths = self._build_new_paths()
parent, context = self.parent, self.context
old_mountitems = {}
for old_cell, old_mountitem in list(self.mounts.items()):
assert not is_dummy_mount(old_cell._mount), old_cell
path = old_cell._mount["path"]
object.__setattr__(old_cell, "_mount", None) #since we are not in macro mode
if path in new_paths:
if isinstance(old_mountitem, MountItem):
old_mountitem._destroyed = True
old_mountitems[path] = old_mountitem
else:
old_mountitem.destroy()
old_paths = set()
for old_ctx in sorted(self.contexts, key=lambda l: -len(l.path)):
assert not is_dummy_mount(old_ctx._mount), old_ctx
path = old_ctx._mount["path"]
if path in new_paths:
old_paths.add(path)
new_context = new_paths[path]
object.__setattr__(old_ctx, "_mount", None) #since we are not in macro mode
for path in sorted(new_paths.keys(), key=lambda p:len(p)):
obj = new_paths[path]
if isinstance(obj, Context):
new_context = obj
if path not in old_paths:
assert new_context in self.context_as_parent, context
parent._check_context(new_context, self.context_as_parent[new_context])
for path in sorted(new_paths.keys(), key=lambda p:len(p)):
obj = new_paths[path]
if isinstance(obj, MountItem):
new_mountitem = obj
#print("new_path", obj, hex(id(obj)), path in old_mountitems)
if path in old_mountitems:
old_mountitem = old_mountitems[path]
rewrite = False
cell = new_mountitem.cell()
if cell._val is not None:
value = cell.serialize_buffer()
checksum = cell.text_checksum()
if "w" in old_mountitem.mode:
if type(old_mountitem.cell()) != type(cell):
rewrite = True
else:
if checksum != old_mountitem.last_checksum:
rewrite = True
if rewrite:
with new_mountitem.lock:
new_mountitem._write(value)
new_mountitem._after_write(checksum)
else:
new_mountitem.last_mtime = old_mountitem.last_mtime
new_mountitem.last_checksum = old_mountitem.last_checksum
else:
new_mountitem.init()
elif isinstance(obj, LinkItem):
new_linkitem = obj
identical = False
if path in old_mountitems:
old_linkitem = old_mountitems[path]
linked = new_linkitem.get_linked()
if linked._mount["path"] == old_linkitem.linked_path:
old = old_linkitem.get_linked()
if isinstance(old, Context) and isinstance(linked, Context):
identical = True
elif isinstance(old, Cell) and isinstance(linked, Cell):
identical = True
if identical:
old_linkitem._destroyed = True
else:
old_linkitem.destroy()
if not identical:
new_linkitem.init()
class MountManager:
_running = False
_last_run = None
_stop = False
_mounting = False
def __init__(self, latency):
self.latency = latency
self.mounts = WeakKeyDictionary()
self.contexts = WeakSet()
self.lock = RLock()
self.cell_updates = deque()
self._tick = Event()
self.stash = None
self.paths = WeakKeyDictionary()
@property
def reorganizing(self):
return self.stash is not None
@contextmanager
def reorganize(self, context):
if context is None:
self.stash = NoStash
yield
self.stash = None
return
if self.stash is not None:
assert context._part_of2(self.stash.context)
yield
return
with self.lock:
self.stash = MountManagerStash(self, context)
try:
self.stash.activate()
yield
#print("reorganize success")
self.stash.join()
except Exception as e:
#print("reorganize failure")
self.stash.undo()
raise e
finally:
self.stash = None
def add_mount(self, cell, path, mode, authority, persistent, **kwargs):
root = cell._root()
if root not in self.paths:
paths = set()
self.paths[root] = paths
else:
paths = self.paths[root]
assert path not in paths, path
#print("add mount", path, cell)
paths.add(path)
self.mounts[cell] = MountItem(self, cell, path, mode, authority, persistent, **kwargs)
if self.stash is None or self.stash is NoStash:
try:
self._mounting = True
self.mounts[cell].init()
finally:
self._mounting = False
def add_link(self, link, path, persistent):
paths = self.paths[link._root()]
assert path not in paths, path
#print("add link", path, link)
paths.add(path)
self.mounts[link] = LinkItem(link, path, persistent)
if self.stash is None or self.stash is NoStash:
self.mounts[link].init()
def unmount(self, cell_or_link, from_del=False):
#print("UNMOUNT", cell_or_link, cell_or_link._mount)
assert not is_dummy_mount(cell_or_link._mount), cell_or_link
root = cell_or_link._root()
if from_del and (cell_or_link not in self.mounts or root not in self.paths):
return
paths = self.paths[root]
path = cell_or_link._mount["path"]
assert path in paths
paths.remove(path)
assert cell_or_link in self.mounts, (cell_or_link, path) #... but path is in paths
mountitem = self.mounts.pop(cell_or_link)
mountitem.destroy()
def unmount_context(self, context, from_del=False):
#print("unmount context", context)
self.contexts.discard(context) # may or may not exist, especially at __del__ time
mount = context._mount
"""context._mount is authoritative!
If context is destroyed while an unmount is undesired,
(because of stash replacement)
context._mount MUST have been set to None!
"""
if context._root() is context:
self.paths.pop(context, None)
if mount is None:
return
assert not is_dummy_mount(mount), context
try:
paths = self.paths[context._root()]
except KeyError:
return
try:
paths.remove(mount["path"])
except KeyError:
pass
if mount["persistent"] == False:
dirpath = mount["path"].replace("/", os.sep)
try:
#print("rmdir", dirpath)
os.rmdir(dirpath)
except:
print("Error: cannot remove directory %s" % dirpath)
def add_context(self, context, path, as_parent):
#print("add context", path, context, as_parent, context._mount["persistent"])
paths = self.paths[context._root()]
if not as_parent:
assert path not in paths, path
paths.add(path)
self.contexts.add(context)
else:
if path in paths:
assert context in self.contexts, (path, context)
if self.stash is None or self.stash is NoStash:
self._check_context(context, as_parent)
else:
self.stash.context_as_parent[context] = as_parent
def _check_context(self, context, as_parent):
mount = context._mount
assert not is_dummy_mount(mount), context
dirpath = mount["path"].replace("/", os.sep)
persistent, authority = mount["persistent"], mount["authority"]
if os.path.exists(dirpath):
if authority == "cell" and not as_parent:
print("Warning: Directory path '%s' already exists" % dirpath) #TODO: log warning
else:
if authority == "file-strict":
raise Exception("Directory path '%s' does not exist, but authority is 'file-strict'" % dirpath)
os.mkdir(dirpath)
def add_cell_update(self, cell):
#print("add_cell_update", cell, self.reorganizing, self.mounting)
if self.reorganizing or self._mounting:
return
assert cell in self.mounts, (cell, hex(id(cell)))
self.cell_updates.append(cell)
def _run(self):
for cell, mount_item in list(self.mounts.items()):
if isinstance(cell, Link):
continue
if cell in self.cell_updates:
continue
try:
mount_item.conditional_read()
except Exception:
exc = traceback.format_exc()
if exc != mount_item.last_exc:
print(exc)
mount_item.last_exc = exc
while 1:
try:
cell = self.cell_updates.popleft()
except IndexError:
break
mount_item = self.mounts.get(cell)
if mount_item is None: #cell was deleted
continue
try:
mount_item.conditional_write(with_none=True)
except Exception:
exc = traceback.format_exc()
if exc != mount_item.last_exc:
print(exc)
mount_item.last_exc = exc
self._tick.set()
def run(self):
try:
self._running = True
while not self._stop:
t = time.time()
self._run()
while time.time() - t < self.latency:
if not self._tick.is_set():
break
time.sleep(0.01)
finally:
self._running = False
def start(self):
self._stop = False
t = self.thread = Thread(target=self.run)
t.setDaemon(True)
t.start()
def stop(self, wait=False, waiting_loop_period=0.01):
self._stop = True
if wait:
while self._running:
time.sleep(waiting_loop_period)
def tick(self):
"""Waits until one iteration of the run() loop has finished"""
if self._running:
self._tick.clear()
self._tick.wait()
def destroy(self):
for path in list(self.mounts.keys()):
self.unmount(path)
for context in sorted(self.contexts,key=lambda l:-len(l.path)):
self.unmount_context(context)
def resolve_register(reg):
from .context import Context
from .cell import Cell
from . import Worker
from .structured_cell import Inchannel, Outchannel
contexts = set([r for r in reg if isinstance(r, Context)])
cells = set([r for r in reg if isinstance(r, Cell)])
links = set([r for r in reg if isinstance(r, Link)])
mounts = mountmanager.mounts.copy()
if sys.exc_info()[0] is not None:
return #No mounting if there is an exception
def find_mount(c, as_parent=False, child=None):
if as_parent:
assert child is not None
if c in mounts:
result = mounts[c]
elif not is_dummy_mount(c._mount):
result = c._mount.copy()
if result["path"] is None:
parent = c._context
assert parent is not None, c
parent = parent()
parent_result = find_mount(parent, as_parent=True,child=c)
if parent_result is None:
raise Exception("No path provided for mount of %s, but no ancestor context is mounted" % c)
result["path"] = parent_result["path"]
result["autopath"] = True
elif isinstance(c, (Inchannel, Outchannel)):
result = None
elif isinstance(c, Context) and c._toplevel:
result = None
else:
parent = c._context
assert parent is not None, c
parent = parent()
result = None
cc = c
if isinstance(c, Link):
cc = c.get_linked()
if isinstance(cc, (Context, Cell)):
result = find_mount(parent, as_parent=True,child=c)
if not as_parent:
mounts[c] = result
if as_parent and result is not None:
result = copy.deepcopy(result)
if result["persistent"] is None:
result["persistent"] = False
result["autopath"] = True
result["path"] += "/" + child.name
if isinstance(child, Link):
child = child.get_linked()
extension = None
if child._mount is not None:
extension = child._mount.get("extension")
if extension is not None:
extension = "." + extension
else:
extension = get_extension(child)
result["path"] += extension
return result
for r in reg:
root = r._root()
if root not in mountmanager.paths:
mountmanager.paths[root] = set()
if isinstance(r, Worker):
continue
find_mount(r)
done_contexts = set()
contexts_to_mount = {}
def mount_context_delayed(context, as_parent=False):
if not context in mounts or mounts[context] is None:
return
if context in done_contexts:
if not as_parent:
contexts_to_mount[context][1] = False
return
parent = context._context
if parent is not None:
parent = parent()
mount_context_delayed(parent, as_parent=True)
object.__setattr__(context, "_mount", mounts[context]) #not in macro mode
contexts_to_mount[context] = [mounts[context]["path"], as_parent]
done_contexts.add(context)
for context in contexts:
mount_context_delayed(context)
def propagate_persistency(c, persistent=False):
m = c._mount
if is_dummy_mount(m):
return
if persistent:
m["persistent"] = True
elif m["persistent"] == True and m["autopath"]:
persistent = True
if isinstance(c, Context):
if c._toplevel:
return
parent = c._context
assert parent is not None, c
parent = parent()
propagate_persistency(parent, persistent)
for r in reg:
if isinstance(r, Worker):
continue
if not is_dummy_mount(r._mount):
propagate_persistency(r)
mount_cells = []
for cell in cells:
if cell in mounts and not is_dummy_mount(mounts[cell]):
mount = mounts[cell]
path = mount["path"]
if cell._mount_kwargs is None:
print("Warning: Unable to mount file path '%s': cannot mount this type of cell (%s)" % (path, type(cell).__name__))
continue
mount.update(cell._mount_kwargs)
if cell._master and (cell._mount_setter is None or cell._master[1] in ("form", "storage")):
if mount.get("mode") == "r":
continue
else:
mount["mode"] = "w"
object.__setattr__(cell, "_mount", mount) #not in macro mode
mount_cells.append(cell)
mount_links = []
for link in links:
if link in mounts and not is_dummy_mount(mounts[link]):
mount = mounts[link]
path = mount["path"]
object.__setattr__(link, "_mount", mount) #not in macro mode
mount_links.append(link)
for context, v in contexts_to_mount.items():
path, as_parent = v
mountmanager.add_context(context, path, as_parent=as_parent)
for cell in mount_cells:
mountmanager.add_mount(cell, **cell._mount)
for link in mount_links:
mount = link._mount
mountmanager.add_link(link, mount["path"], mount["persistent"])
mountmanager = MountManager(0.2) #TODO: latency in config cell
mountmanager.start()
def get_extension(c):
from .cell import extensions
for k,v in extensions.items():
if type(c) == k:
return v
for k,v in extensions.items():
if isinstance(c, k):
return v
return ""
from .link import Link
"""
*****
TODO: filehash option (cell stores hash of the file, necessary for slash-0)
*****
"""
| Python | CL | 99d7860d6f7df96b7cdfd792a7694cbabe2021eeb5acc3befd4a4ad9febe986c |
"""Test cases for rule factory."""
import string
import random
from urllib.parse import urljoin
from looseserver.common.rule import RuleFactory
from looseserver.server.application import configure_application, DEFAULT_CONFIGURATION_ENDPOINT
def test_rule_factory(server_response_factory, server_rule_prototype):
"""Check that custom rule factory is used when specified.
1. Create rule factory.
2. Register a new rule type.
3. Configure application with the created rule factory.
4. Make a POST-request to create a method rule for PUT requests.
5. Check that response is successful and contains rule ID.
"""
rule_factory = RuleFactory()
application = configure_application(
rule_factory=rule_factory,
response_factory=server_response_factory,
)
rule_type = "".join(random.choice(string.ascii_uppercase) for _ in range(10))
rule = server_rule_prototype.create_new(rule_type=rule_type)
rule_factory.register_rule(
rule_type=rule_type,
parser=lambda *args, **kwargs: rule,
serializer=lambda *args, **kwargs: {},
)
serialized_rule = rule_factory.serialize_rule(rule=rule)
http_response = application.test_client().post(
urljoin(DEFAULT_CONFIGURATION_ENDPOINT, "rules"),
json=serialized_rule,
)
assert http_response.status_code == 200, "Can't create a rule"
assert http_response.json["data"]["rule_id"] is not None, "Response does not contain rule ID"
| Python | CL | cee18ec89332c1c2e47881d8cc4654259ccd619612d2277780153c254b25e524 |
from __future__ import print_function
import matplotlib; matplotlib.use("agg")
import functools
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import ROOT
import sys
sys.path.append('/global/homes/w/wbhimji/cori-envs/nersc-rootpy/lib/python2.7/site-packages/')
import root_numpy as rnp
# # Loading the input into numpy
#
# Using root_numpy to make structured arrays.
#filename = '/Users/sfarrell/Atlas/xaod/mc15_13TeV.361023.Pythia8EvtGen_A14NNPDF23LO_jetjet_JZ3W.merge.DAOD_EXOT3.e3668_s2576_s2132_r7728_r7676_p2613/DAOD_EXOT3.08204445._000002.pool.root.1'
filename = '/global/projecta/projectdirs/atlas/atlaslocalgroupdisk/rucio/mc15_13TeV/76/71/DAOD_EXOT3.08629754._000001.pool.root.1'
bg_files = [line.rstrip() for line in open('/global/project/projectdirs/das/wbhimji/RPVSusyJetLearn/atlas_dl/config/mc15_13TeV.361004.Pythia8EvtGen_A14NNPDF23LO_jetjet_JZ4.merge.DAOD_EXOT3.e3569_s2576_s2132_r7772_r7676_p2688-FileList.txt')]
sig_files = [line.rstrip() for line in open('/global/project/projectdirs/das/wbhimji/RPVSusyJetLearn/atlas_dl/config/mc15_13TeV.403568.MadGraphPythia8EvtGen_A14NNPDF23LO_GG_RPV10_1400_850.merge.DAOD_EXOT3.e5079_a766_a821_r7676_p2669-FileList.txt')]
# Branch names to read in and rename for convenience
branchMap = {
'CaloCalTopoClustersAuxDyn.calEta' : 'ClusEta',
'CaloCalTopoClustersAuxDyn.calPhi' : 'ClusPhi',
'CaloCalTopoClustersAuxDyn.calE' : 'ClusE',
'AntiKt10LCTopoTrimmedPtFrac5SmallR20JetsAux.pt' : 'FatJetPt',
'AntiKt10LCTopoTrimmedPtFrac5SmallR20JetsAux.eta' : 'FatJetEta',
'AntiKt10LCTopoTrimmedPtFrac5SmallR20JetsAux.phi' : 'FatJetPhi',
'AntiKt10LCTopoTrimmedPtFrac5SmallR20JetsAux.m' : 'FatJetM',
}
entries = rnp.root2array(bg_files, treename='CollectionTree',
branches=branchMap.keys(),warn_missing_tree=True,
start=0, stop=500000)
entries.dtype.names = branchMap.values()
print('Entries:', entries.size)
entries.dtype
# Since the data is structured, we can index by key name and do some fancy stuff.
# Multiple ways to dump variables for a specific event.
# I'm actually surprised these both work.
print(entries[0]['FatJetPt'])
print(entries['FatJetPt'][0])
bgdf = pd.DataFrame.from_records(entries)
# Perform object selections on one event
event = entries[3]
event['FatJetPt'] > 300000
# Select fatjets with pt > 200 GeV for all events in one go
f = np.vectorize(lambda jetPts: jetPts > 200000, otypes=[np.ndarray])
selectedJets = f(entries['FatJetPt'])
print(selectedJets)
# Select events with at least 2 selected jets
countSelected = np.vectorize(sum)
numJets = countSelected(selectedJets)
selectedEvents = numJets >= 2
print(numJets)
print(selectedEvents)
# ## Physics selections and variables
# Enough playing around. Let's test out the actual physics selections. The code has been put into the physics_selections module in the containing directory of this notebook.
sys.path.append('/project/projectdirs/das/wbhimji/RPVSusyJetLearn/atlas_dl_submitter/atlas_dl/scripts/')
from physics_selections import (select_fatjets, is_baseline_event,
sum_fatjet_mass, is_signal_region_event)
vec_select_fatjets = np.vectorize(select_fatjets, otypes=[np.ndarray])
vec_select_baseline_events = np.vectorize(is_baseline_event)
selectedFatJets = vec_select_fatjets(entries['FatJetPt'], entries['FatJetEta'])
baselineEvents = vec_select_baseline_events(entries['FatJetPt'], selectedFatJets)
print('Baseline selected events: %d / %d' % (np.sum(baselineEvents), entries.size))
# Calculate the summed jet mass for all events
summedMasses = np.vectorize(sum_fatjet_mass)(entries['FatJetM'], selectedFatJets)
print(summedMasses[baselineEvents])
vec_select_sr_events = np.vectorize(is_signal_region_event)
signalEvents = vec_select_sr_events(summedMasses, entries['FatJetPt'], entries['FatJetEta'],
selectedFatJets, baselineEvents)
signalEntries = entries[signalEvents]
numSignalEvents = np.sum(signalEvents)
print('Signal events: %d / %d' % (numSignalEvents, entries.size))
# # Drawing signal region events
def get_hist2d(event):
"""Convert event into the calo-cluster image"""
return np.histogram2d(event['ClusEta'], event['ClusPhi'],
bins=(50, 50), weights=event['ClusE'],
range=[[-2.5, 2.5], [-3.15, 3.15]])[0]
def plot_calo_image(h2d):
"""Plot a calo-image on the current axes"""
plt.imshow(np.log10(h2d).T, #extent=[-2.,2.,-3.14, 3.14],
extent=[-2.5, 2.5, -3.15, 3.15],
interpolation='none', aspect='auto', origin='lower')
plt.colorbar(label='Cluster energy [Log(MeV)]')
plt.xlabel('eta')
plt.ylabel('phi')
def plot_jets(jetEtas, jetPhis, jetRadius=1):
"""Plot jet circles on the current axes"""
for eta, phi in zip(jetEtas, jetPhis):
circle = plt.Circle((eta, phi), radius=jetRadius, facecolor='none')
plt.gcf().gca().add_artist(circle)
# Pick out a sample of signal region events.
# The indexing is now starting to get very confusing.
numSample = 4
sampleIdxs = np.random.choice(np.arange(numSignalEvents), numSample, replace=False)
sampleEntries = signalEntries[sampleIdxs]
sampleFatJets = selectedFatJets[signalEvents][sampleIdxs] # are we lost yet?
assert(sampleEntries.size == sampleFatJets.size)
# Get the quantities to plot
hists = [get_hist2d(ev) for ev in sampleEntries]
jetEtas = [etas[jets] for (etas, jets) in zip(sampleEntries['FatJetEta'], sampleFatJets)]
jetPhis = [phis[jets] for (phis, jets) in zip(sampleEntries['FatJetPhi'], sampleFatJets)]
# Draw the calo images and draw the selected fat jets as circles
plt.figure(figsize=(12, 10))
plt.subplot(221)
plot_calo_image(hists[0])
plot_jets(jetEtas[0], jetPhis[0])
plt.subplot(222)
plot_calo_image(hists[1])
plot_jets(jetEtas[1], jetPhis[1])
plt.subplot(223)
plot_calo_image(hists[2])
plot_jets(jetEtas[2], jetPhis[2])
plt.subplot(224)
plot_calo_image(hists[3])
plot_jets(jetEtas[3], jetPhis[3])
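# The non-interactive 'agg' backend selected at the top does not display figures, so an
# explicit save is needed to keep the panel grid (the output filename is an assumption):
# plt.savefig('signal_region_events.png', bbox_inches='tight')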
| Python | CL | 668a58e7656eee33e6817092f655296e709766dfb012a417e0672be9d29afb62 |
'''
GUI module graphic generator
Created on: 11-20-2019
Last edited: 11-20-2019
@Copyright - Ken Trinh
@All rights reserved
'''
import tkinter as tk
import abc
class PlotterGUI(metaclass=abc.ABCMeta):
#Constructor
def __init__(self):
pass
#String Method
def __str__(self):
return "Generic GUI generator"
#Methods that need to be implemented
#Additional details applied when necessary
#Create Title
#Description need to be implemented in details
@abc.abstractmethod
def createTitle(self):
pass
#Create Button
#Description need to be implemented in details
@abc.abstractmethod
def createButton(self):
pass
#Reset everything
#Description need to be implemented in details
@abc.abstractmethod
def reset(self):
pass
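# Illustrative sketch (hypothetical subclass, not part of this module): a concrete GUI
# must implement all three abstract methods before it can be instantiated.
#
#   class BarPlotterGUI(PlotterGUI):
#       def createTitle(self):
#           tk.Label(text="Bar Plotter").pack()
#       def createButton(self):
#           tk.Button(text="Plot").pack()
#       def reset(self):
#           pass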
| Python | CL | 1e9d5866b542299f154a18e26eba6065e019d9a72632849ff0fdb49b7f768996 |
# coding: utf-8
import random
import time
import hashlib
from inspect import isclass
from git import Repo as GitRepo
from sqlalchemy.inspection import inspect as sqlalchemyinspect
from sqlalchemy.ext.declarative import declarative_base
from pykl.tiny.grapheneinfo import (
_is_graphql,
_is_graphql_cls,
_is_graphql_mutation
)
from pykl.tiny.codegen.utils import (
name_from_repr,
camel_to_underline,
underline_to_camel,
)
from base_type import *
from cmd import db, app
Base = db.Model
class MigrateVersion(Base):
u"""table migrate_version"""
__tablename__ = 'migrate_version'
repository_id = Column(String(191), primary_key=True, doc=u"""field repository_id""", info=CustomField | SortableField)
repository_path = Column(Text, doc=u"""field repository_path""", info=CustomField | SortableField)
version = Column(Integer, doc=u"""field version""", info=CustomField | SortableField)
class Actor(Base):
u"""table kit_actor"""
__tablename__ = 'kit_actor'
actor_id = Column(Integer, primary_key=True, doc=u"""corresponding actor_id""", info=SortableField | InitializeField)
actor_name = Column(String(64), doc=u"""field actor_name""", info=CustomField | SortableField)
actor_email = Column(String(64), doc=u"""field actor_email""", info=CustomField | SortableField)
class Blob(Base):
u"""table kit_blob"""
__tablename__ = 'kit_blob'
blob_id = Column(Integer, primary_key=True, doc=u"""corresponds to blob_id""", info=SortableField | InitializeField)
blob_path = Column(String(64), doc=u"""field blob_path""", info=CustomField | SortableField)
blob_hash = Column(String(40), doc=u"""field blob_hash""", info=CustomField | SortableField)
blob_mode = Column(Integer, doc=u"""field blob_mode""", info=CustomField | SortableField)
blob_size = Column(Integer, doc=u"""field blob_size""", info=CustomField | SortableField)
class Tree(Base):
u"""table kit_tree"""
__tablename__ = 'kit_tree'
tree_id = Column(Integer, primary_key=True, doc=u"""corresponds to tree_id""", info=SortableField | InitializeField)
tree_path = Column(String(64), doc=u"""field tree_path""", info=CustomField | SortableField)
tree_hash = Column(String(40), doc=u"""field tree_hash""", info=CustomField | SortableField)
tree_mode = Column(Integer, doc=u"""field tree_mode""", info=CustomField | SortableField)
tree_size = Column(Integer, doc=u"""field tree_size""", info=CustomField | SortableField)
@classmethod
def info(cls):
class Tree(SQLAlchemyObjectType):
class Meta:
model = cls
trees = List(lambda :cls, description=u'trees')
def resolve_trees(self, args, context, info):
return [_Tree(tree) for tree in self._tree.trees]
blobs = List(lambda :Blob, description=u'blobs')
def resolve_blobs(self, args, context, info):
return [_Blob(blob) for blob in self._tree.blobs]
blobfile = Field(lambda :Blob, description=u'corresponding blob',
path=g.Argument(g.String, default_value="", description=u'input your file name')
)
def resolve_blobfile(self, args, context, info):
path = args.get('path', '')
return search_blobfile(self._tree, path)
treedir = Field(lambda :Tree, description=u'corresponding tree',
path=g.Argument(g.String, default_value="", description=u'input your file name')
)
def resolve_treedir(self, args, context, info):
path = args.get('path', '')
return search_treedir(self._tree, path)
return Tree
class Commit(Base):
u"""table kit_commit"""
__tablename__ = 'kit_commit'
commit_id = Column(Integer, primary_key=True, doc=u"""corresponds to commit_id""", info=SortableField | InitializeField)
commit_hash = Column(String(40), doc=u"""field commit_hash""", info=CustomField | SortableField)
commit_message = Column(String(191), doc=u"""field commit_message""", info=CustomField | SortableField)
committed_date = Column(Integer, doc=u"""field committed_date""", info=CustomField | SortableField)
@classmethod
def info(cls):
class Commit(SQLAlchemyObjectType):
class Meta:
model = cls
author = Field(lambda :Actor, description=u'corresponding author')
def resolve_author(self, args, context, info):
author = self._commit.author
return _Actor(author)
committer = Field(lambda :Actor, description=u'corresponding committer')
def resolve_committer(self, args, context, info):
committer = self._commit.committer
return _Actor(committer)
parents = List(lambda :cls, description=u'parent commits')
def resolve_parents(self, args, context, info):
return [_Commit(commit) for commit in self._commit.parents]
tree = Field(lambda :Tree, description=u'corresponding tree')
def resolve_tree(self, args, context, info):
tree = self._commit.tree
return _Tree(tree)
blobfile = Field(lambda :Blob, description=u'corresponding blob',
path=g.Argument(g.String, default_value="", description=u'input your file name')
)
def resolve_blobfile(self, args, context, info):
path = args.get('path', '')
return search_blobfile(self._commit.tree, path)
treedir = Field(lambda :Tree, description=u'corresponding tree',
path=g.Argument(g.String, default_value="", description=u'input your file name')
)
def resolve_treedir(self, args, context, info):
path = args.get('path', '')
return search_treedir(self._commit.tree, path)
return Commit
class Ref(Base):
u"""table kit_ref"""
__tablename__ = 'kit_ref'
ref_id = Column(Integer, primary_key=True, doc=u"""corresponds to ref_id""", info=SortableField | InitializeField)
ref_path = Column(String(191), doc=u"""field ref_path""", info=CustomField | SortableField)
ref_name = Column(String(191), doc=u"""field ref_name""", info=CustomField | SortableField)
@classmethod
def info(cls):
class Ref(SQLAlchemyObjectType):
class Meta:
model = cls
commit = Field(lambda :Commit, description=u'corresponding commit')
def resolve_commit(self, args, context, info):
commit = self._ref.commit
return _Commit(commit)
blobfile = Field(lambda :Blob, description=u'corresponding blob',
path=g.Argument(g.String, default_value="", description=u'input your file name')
)
def resolve_blobfile(self, args, context, info):
path = args.get('path', '')
return search_blobfile(self._ref.commit.tree, path)
treedir = Field(lambda :Tree, description=u'corresponding tree',
path=g.Argument(g.String, default_value="", description=u'input your file name')
)
def resolve_treedir(self, args, context, info):
path = args.get('path', '')
return search_treedir(self._ref.commit.tree, path)
commits = List(lambda :Commit, description=u'walk back through preceding commits',
max_count=g.Argument(g.Int, description=u'input max_count')
)
def resolve_commits(self, args, context, info):
max_count = args.get('max_count', 10)
if max_count <= 0:
return []
return [_Commit(commit) for commit in self._ref.repo.iter_commits(self._ref.name, max_count=max_count)]
return Ref
class Repo(Base):
u"""table kit_repo"""
__tablename__ = 'kit_repo'
repo_id = Column(Integer, primary_key=True, doc=u"""corresponds to repo_id""", info=SortableField | InitializeField)
repo_path = Column(String(191), doc=u"""field repo_path""", info=CustomField | SortableField)
@classmethod
def info(cls):
class Repo(SQLAlchemyObjectType):
class Meta:
model = cls
head = Field(lambda :Ref, description=u'look up a ref by name',
name=g.Argument(g.String, default_value="master", description=u'input your name')
)
def resolve_head(self, args, context, info):
name = args.get('name', '')
if not name:
return None
ref = self._repo.heads[name]
return _Ref(ref)
heads = List(lambda :Ref, description=u'refs')
def resolve_heads(self, args, context, info):
return [_Ref(ref) for ref in self._repo.heads]
master = Field(lambda :Ref, description=u'the master ref')
def resolve_master(self, args, context, info):
ref = self._repo.heads.master
return _Ref(ref)
tag = Field(lambda :Ref, description=u'look up a tag',
name=g.Argument(g.String, description=u'input your tag')
)
def resolve_tag(self, args, context, info):
name = args.get('name', '')
if not name:
return None
ref = self._repo.tags[name]
return _Ref(ref)
tags = List(lambda :Ref, description=u'tags')
def resolve_tags(self, args, context, info):
return [_Ref(ref) for ref in self._repo.tags]
return Repo
def search_blobfile(_tree, path):
if not path:
return None
def _resolve_blobfile(blobs, trees):
for blob in blobs:
if path == blob.path:
return _Blob(blob)
for tree in trees:
ret = _resolve_blobfile(tree.blobs, tree.trees) if path.startswith(tree.path) else None
if ret:
return ret
return _resolve_blobfile(_tree.blobs, _tree.trees)
def search_treedir(_tree, path):
if not path:
return None
def _resolve_treedir(trees):
for tree in trees:
if path == tree.path:
return _Tree(tree)
for tree in trees:
ret = _resolve_treedir(tree.trees) if path.startswith(tree.path) else None
if ret:
return ret
return _resolve_treedir(_tree.trees)
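# Illustrative behaviour (a sketch, not original documentation): given a
# GitPython tree containing "src/main.py", search_blobfile(tree, "src/main.py")
# recurses into sub-trees whose path prefixes the target and returns the
# wrapped Blob; search_treedir(tree, "src") likewise returns the wrapped Tree.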
def _Actor(actor, actor_id=0):
obj = Actor(actor_id=actor_id, actor_name=actor.name, actor_email=actor.email)
obj._actor = actor
return obj
def _Blob(blob, blob_id=0):
obj = Blob(blob_id=blob_id, blob_path=blob.path, blob_hash=blob.hexsha, blob_mode=blob.mode, blob_size=blob.size)
obj._blob = blob
return obj
def _Tree(tree, tree_id=0):
obj = Tree(tree_id=tree_id, tree_path=tree.path, tree_hash=tree.hexsha, tree_mode=tree.mode, tree_size=tree.size)
obj._tree = tree
return obj
def _Commit(commit, commit_id=0):
obj = Commit(commit_id=commit_id, commit_hash=commit.hexsha, commit_message=commit.message, committed_date=commit.committed_date)
obj._commit = commit
return obj
def _Ref(ref, ref_id=0):
obj = Ref(ref_id=ref_id, ref_name=ref.name, ref_path=ref.path)
obj._ref = ref
return obj
def _Repo(repo, repo_id=0):
obj = Repo(repo_id=repo_id, repo_path=repo.working_dir)
obj._repo = repo
return obj
##############################################################
################### Root Query ######################
##############################################################
class Query(g.ObjectType):
hello = g.String(name=g.Argument(g.String, default_value="world", description=u'input your name'))
deprecatedField = Field(g.String, deprecation_reason = 'This field is deprecated!')
fieldWithException = g.String()
migrateVersion = Field(MigrateVersion, description=u'migrate_version')
repo = Field(Repo, description=u'load repo by path',
repo_path=g.Argument(g.String, description=u'input repo path'),
)
def resolve_repo(self, args, context, info):
repo_path = args.get('repo_path', '')
repo = GitRepo(repo_path)
return _Repo(repo)
curRepo = Field(Repo, description=u'this repo')
def resolve_curRepo(self, args, context, info):
repo = app.config.get('REPO')
return _Repo(repo)
def resolve_hello(self, args, context, info):
return 'Hello, %s!' % (args.get('name', ''), )
def resolve_deprecatedField(self, args, context, info):
return 'You can request deprecated field, but it is not displayed in auto-generated documentation by default.'
def resolve_fieldWithException(self, args, context, info):
raise ValueError('Exception message thrown in field resolver')
def resolve_migrateVersion(self, args, context, info):
return MigrateVersion.query.first()
##############################################################
################### Mutations ######################
##############################################################
def build_input(dao, bit_mask):
return {k: BuildArgument(v) for k, v in mask_field(dao, bit_mask).items()}
class CreateMigrateVersion(g.Mutation):
Input = type('Input', (), build_input(MigrateVersion, InitializeField))
ok = g.Boolean()
msg = g.String()
migrateVersion = Field(MigrateVersion)
@staticmethod
def mutate(root, args, context, info):
return CreateMigrateVersion(ok=True, msg='suc', migrateVersion=MigrateVersion.query.first())
class UpdateMigrateVersion(g.Mutation):
Input = type('Input', (), build_input(MigrateVersion, EditableField))
ok = g.Boolean()
msg = g.String()
migrateVersion = Field(MigrateVersion)
@staticmethod
def mutate(root, args, context, info):
return UpdateMigrateVersion(ok=True, msg='suc', migrateVersion=MigrateVersion.query.first())
##############################################################
################### Root Mutations ######################
##############################################################
Mutations = type('Mutations', (g.ObjectType, ), {camel_to_underline(name_from_repr(v)):v.Field() for _, v in globals().items() if _is_graphql_mutation(v)})
tables = [tbl if BuildType(tbl) else tbl for _, tbl in globals().items() if isclass(tbl) and issubclass(tbl, Base) and tbl != Base]
schema = g.Schema(query=Query, mutation=Mutations, types=[BuildType(tbl) for tbl in tables] + [cls for _, cls in globals().items() if _is_graphql_cls(cls)], auto_camelcase = False)
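# --- Usage sketch (illustrative, not part of the original module) ---
# Assumes `g` (pulled in via base_type) is the graphene module and that the
# Flask app/db wired up in `cmd` are configured; uses graphene's pre-2.0
# resolver signature, matching the resolvers above.
if __name__ == "__main__":
    result = schema.execute('{ hello }')
    if result.errors:
        print(result.errors)
    else:
        print(result.data)  # e.g. {'hello': 'Hello, world!'}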
|
Python
|
CL
|
22fb2adfec713f198880f419af613a91742872984b8d7386f81ff824301fc6a2
|
class Formula:
def __init__(self, formula: str = "", str_logic: str = "", disjunctions=None):
self.disjunctions = []
if formula != "":
formula = self.remove_comments(formula)
self.num_variables, self.num_clauses, formula = self.variables_and_clauses(formula)
self.str_formula = self.convert_DIMACS_to_str(formula)
self.convert_to_logic()
elif str_logic != "":
self.str_formula = str_logic
self.convert_to_logic()
else:
self.disjunctions = disjunctions if disjunctions is not None else []
self.variables = self.get_variables()
@staticmethod
def remove_comments(formula: str) -> str:
final = []
for line in formula.split("\n"):
if line.split(" ")[0] != "c":
final.append(line)
return "\n".join(final)
@staticmethod
def variables_and_clauses(formula: str):
variables_temp = 0
clauses_temp = 0
dim = formula.split("\n")
for line in dim:
characters = line.split(" ")
if characters[0] == "p" and characters[1] == "cnf" and len(characters) == 4:
variables_temp = int(characters[2])
clauses_temp = int(characters[3])
dim.remove(line)
break
if variables_temp != 0 and clauses_temp != 0:
return variables_temp, clauses_temp, "\n".join(dim)
else:
print("Error reading clauses and/or variable in DIMACS input file")
exit()
@staticmethod
def convert_DIMACS_to_str(formula: str) -> str:
"""
This function converts a DIMACS set of rules in a string where the variables are divided by ORs and
the clauses are divided by ANDs.
E.g. -111 -112 0
-113 -115 0
||
V
(-111OR-112)AND(-113OR-115)
"""
converted_logic = ""
for line in formula.split("\n"):
converted_line = ""
split_line = line.split(" ")
for var in split_line:
if var == "0":
break
converted_line = converted_line + ("OR" if converted_line != "" else "") + var
converted_line = ("(" + converted_line + ")" if converted_line != "" else "")
if converted_line != "":
converted_logic = converted_logic + ("AND" if converted_logic != "" else "") + converted_line
return converted_logic
def convert_to_logic(self):
for disjunction in self.string_to_conjunctions(self.str_formula):
self.disjunctions.append(Disjunction(disjunction))
@staticmethod
def string_to_conjunctions(elements: str) -> list:
return elements.split("AND")
def to_string(self) -> str:
formula = ""
for disjunction in self.disjunctions:
formula = formula + ("AND" if formula != "" else "") + "(" + disjunction.to_string() + ")"
return formula
def get_disjunctions(self) -> list:
return self.disjunctions
def get_variables(self) -> list:
temp_vars = set()
for disj in self.disjunctions:
for lit in disj.get_literals():
temp_vars.add(lit.get_name())
return list(temp_vars)
def compute_formula(self, values: dict) -> bool:
# print("values: " + str(len(values)) + ", num_vars: " + str(len(self.variables)))
if len(values) == len(self.variables):
if all(disjunction.compute_disjunction(values) is True for disjunction in self.disjunctions):
return True
return False
def return_unit_clauses_variable(self, guessed_variables: dict) -> dict:
unit_clauses = {}
for disj in self.disjunctions:
missing = [lit for lit in disj.literals if lit.get_name() not in guessed_variables]
if len(missing) == 1:
temp_disj = [lit for lit in disj.literals if lit.get_name() in guessed_variables]
# every already-assigned literal in the clause evaluates to False, so the
# single missing literal is forced to whatever value satisfies it
if all(lit.get_value(guessed_variables[lit.get_name()]) is False
for lit in temp_disj):
unit_clauses[missing[0].get_name()] = missing[0].get_value(True)
else:
unit_clauses[missing[0].get_name()] = None
return unit_clauses
class Disjunction:
def __init__(self, elements: str = ""):
self.literals = []
if elements != "":
self.convert_to_logic(elements)
def convert_to_logic(self, elements: str):
for literal in Disjunction.string_to_disjunctions(elements.translate(str.maketrans("", "", "()"))):
self.literals.append(Literal(literal))
@staticmethod
def string_to_disjunctions(elements: str) -> list:
return elements.split("OR")
def to_string(self) -> str:
disjunction = ""
for literal in self.literals:
disjunction = disjunction + ("OR" if disjunction != "" else "") + literal.to_string()
return disjunction
def get_literals(self) -> list:
return self.literals
def compute_disjunction(self, values: dict) -> bool:
if all(lit.get_name() in values for lit in self.literals):
if any(literal.get_value(values[literal.get_name()]) is True for literal in self.literals):
return True
else:
print("false disjunction")
print(self.to_string())
return False
else:
print("Variable(s) not in disjunction " + self.to_string() + " != " +str(values.keys()))
return False
class Literal:
def __init__(self, literal: str = ""):
if literal != "":
lit = literal.split("-")
self.positive = True if len(lit) == 1 else False
self.name = lit[-1]
def to_string(self) -> str:
return ("" if self.positive is True else "-") + self.name
def get_name(self):
return self.name
def get_value(self, boolean: bool = True) -> bool:
if self.positive:
return boolean
else:
return not boolean
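# --- Usage sketch (illustrative, not part of the original module) ---
if __name__ == "__main__":
    dimacs = "c tiny example\np cnf 2 2\n1 -2 0\n2 0\n"
    f = Formula(formula=dimacs)
    print(f.to_string())                                # (1OR-2)AND(2)
    print(f.compute_formula({"1": True, "2": True}))    # True
    print(f.return_unit_clauses_variable({"1": True}))  # {'2': True}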
|
Python
|
CL
|
f9148d29ea66973dde92448d8d926f8470ed123e3c44340c065a5720ac83b025
|
class FSMError(Exception):
"""An exception class for StateMachine.
"""
def __init__(self, msg):
self.msg = msg
class StateMachine(object):
""" A class to represent a Finite State Machine.
"""
def __init__(self):
self.handlers = {}
self.arrival_handlers = {}
self.endStates = []
self.state = None
self.adapter = None
self.pump_return_value = None
def add_adapter(self, func):
""" Adds an adapter function that takes the cargo
passed to each state and does something with it.
This is a function that takes a single argument (the
same cargo given to pump()) and returns what the
transition functions want to receive.
"""
self.adapter = func
def add_state(self, name, arrival_handler, handler, end_state=False):
name = name.upper()
self.arrival_handlers[name] = arrival_handler
self.handlers[name] = handler
if end_state:
self.endStates.append(name)
def set_start(self, name):
self.state = name.upper()
def pump(self, cargo):
"""Feed a value to the state machine. This is handled by the
handler method associated with the current state. Potentially,
this moves us to a new state which is returned by the handler.
If a value is to be returned by pump(), then it can be set
in the property pump_return_value. This will be accessed
before the arrival_handler for the new state but returned
after the arrival_handler runs.
"""
if self.state in self.endStates:
return
try:
handler = self.handlers[self.state]
except KeyError:
raise FSMError('No transition for state ' + str(self.state))
if not self.endStates:
raise FSMError('Must have at least one end_state.')
if self.adapter is not None:
cargo = self.adapter(cargo)
new_state = handler(cargo)
return_value, self.pump_return_value = self.pump_return_value, None
self.state = new_state
try:
arrival_handler = self.arrival_handlers[self.state]
except KeyError:
raise FSMError('No arrival handler for state ' + str(self.state))
if arrival_handler is not None:
arrival_handler()
return return_value
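# --- Usage sketch (illustrative, not part of the original module) ---
# A two-state machine that tracks whether the last character fed to
# pump() belonged to a word; the state names and handlers are assumptions.
if __name__ == "__main__":
    def classify(ch):
        return "WORD" if ch.isalnum() else "GAP"
    fsm = StateMachine()
    fsm.add_state("WORD", None, classify)
    fsm.add_state("GAP", None, classify)
    fsm.add_state("DONE", None, None, end_state=True)
    fsm.set_start("GAP")
    for ch in "hi there":
        fsm.pump(ch)
    print(fsm.state)  # WORD ('e' is the final character)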
|
Python
|
CL
|
c4c20190f64b18360e5d3aeaffd8f7830622895a4e8d396cec980099dab12af9
|
#!/usr/bin/env python
# -*- Mode: Python; tab-width: 4; indent-tabs-mode: nil; coding: utf-8; -*-
# vim:set ft=python ts=4 sw=4 sts=4 autoindent:
"""Per-project configuration functionality for Brat Rapid Annotation Tool
(brat)
Author: Pontus Stenetorp <pontus is s u-tokyo ac jp>
Author: Sampo Pyysalo <smp is s u-tokyo ac jp>
Author: Illes Solt <solt tmit bme hu>
Version: 2011-08-15
"""
import re
import sys
import urllib.parse # TODO reduce scope
import urllib.robotparser # TODO reduce scope
from bratsubset.annotation import open_textfile
from bratsubset.message import Messager
ENTITY_CATEGORY, EVENT_CATEGORY, RELATION_CATEGORY, UNKNOWN_CATEGORY = range(4)
class InvalidProjectConfigException(Exception):
pass
# names of files in which various configs are found
__access_control_filename = 'acl.conf'
__annotation_config_filename = 'annotation.conf'
__visual_config_filename = 'visual.conf'
__tools_config_filename = 'tools.conf'
__kb_shortcut_filename = 'kb_shortcuts.conf'
# annotation config section name constants
ENTITY_SECTION = "entities"
RELATION_SECTION = "relations"
EVENT_SECTION = "events"
ATTRIBUTE_SECTION = "attributes"
# aliases for config section names
SECTION_ALIAS = {
"spans": ENTITY_SECTION,
}
__expected_annotation_sections = (
ENTITY_SECTION,
RELATION_SECTION,
EVENT_SECTION,
ATTRIBUTE_SECTION)
__optional_annotation_sections = []
# visual config section name constants
OPTIONS_SECTION = "options"
LABEL_SECTION = "labels"
DRAWING_SECTION = "drawing"
__expected_visual_sections = (OPTIONS_SECTION, LABEL_SECTION, DRAWING_SECTION)
__optional_visual_sections = [OPTIONS_SECTION]
# tools config section name constants
SEARCH_SECTION = "search"
ANNOTATORS_SECTION = "annotators"
DISAMBIGUATORS_SECTION = "disambiguators"
NORMALIZATION_SECTION = "normalization"
__expected_tools_sections = (
OPTIONS_SECTION,
SEARCH_SECTION,
ANNOTATORS_SECTION,
DISAMBIGUATORS_SECTION,
NORMALIZATION_SECTION)
__optional_tools_sections = (
OPTIONS_SECTION,
SEARCH_SECTION,
ANNOTATORS_SECTION,
DISAMBIGUATORS_SECTION,
NORMALIZATION_SECTION)
# special relation types for marking which spans can overlap
# ENTITY_NESTING_TYPE used up to version 1.3, now deprecated
ENTITY_NESTING_TYPE = "ENTITY-NESTING"
# TEXTBOUND_OVERLAP_TYPE used from version 1.3 onward
TEXTBOUND_OVERLAP_TYPE = "<OVERLAP>"
SPECIAL_RELATION_TYPES = set([ENTITY_NESTING_TYPE,
TEXTBOUND_OVERLAP_TYPE])
OVERLAP_TYPE_ARG = '<OVL-TYPE>'
# visual config default value names
VISUAL_SPAN_DEFAULT = "SPAN_DEFAULT"
VISUAL_ARC_DEFAULT = "ARC_DEFAULT"
VISUAL_ATTR_DEFAULT = "ATTRIBUTE_DEFAULT"
# visual config attribute name lists
SPAN_DRAWING_ATTRIBUTES = ['fgColor', 'bgColor', 'borderColor']
ARC_DRAWING_ATTRIBUTES = ['color', 'dashArray', 'arrowHead', 'labelArrow']
ATTR_DRAWING_ATTRIBUTES = [
'glyphColor',
'box',
'dashArray',
'glyph',
'position']
# fallback defaults if config files not found
__default_configuration = """
[entities]
Protein
[relations]
Equiv Arg1:Protein, Arg2:Protein, <REL-TYPE>:symmetric-transitive
[events]
Protein_binding|GO:0005515 Theme+:Protein
Gene_expression|GO:0010467 Theme:Protein
[attributes]
Negation Arg:<EVENT>
Speculation Arg:<EVENT>
"""
__default_visual = """
[labels]
Protein | Protein | Pro | P
Protein_binding | Protein binding | Binding | Bind
Gene_expression | Gene expression | Expression | Exp
Theme | Theme | Th
[drawing]
Protein bgColor:#7fa2ff
SPAN_DEFAULT fgColor:black, bgColor:lightgreen, borderColor:black
ARC_DEFAULT color:black
ATTRIBUTE_DEFAULT glyph:*
"""
__default_tools = """
[search]
google <URL>:http://www.google.com/search?q=%s
"""
__default_kb_shortcuts = """
P Protein
"""
__default_access_control = """
User-agent: *
Allow: /
Disallow: /hidden/
User-agent: guest
Disallow: /confidential/
"""
# Reserved strings with special meanings in configuration.
reserved_config_name = [
"ANY",
"ENTITY",
"RELATION",
"EVENT",
"NONE",
"EMPTY",
"REL-TYPE",
"URL",
"URLBASE",
"GLYPH-POS",
"DEFAULT",
"NORM",
"OVERLAP",
"OVL-TYPE",
"INHERIT"]
# TODO: "GLYPH-POS" is no longer used, warn if encountered and
# recommend to use "position" instead.
reserved_config_string = ["<%s>" % n for n in reserved_config_name]
# Magic string to use to represent a separator in a config
SEPARATOR_STR = "SEPARATOR"
def normalize_to_storage_form(t):
"""Given a label, returns a form of the term that can be used for disk
storage.
For example, space can be replaced with underscores to allow use
with space-separated formats.
"""
if t not in normalize_to_storage_form.__cache:
# conservative implementation: replace any space with
# underscore, replace unicode accented characters with
# non-accented equivalents, remove others, and finally replace
# all characters not in [a-zA-Z0-9_-] with underscores.
import unicodedata
n = t.replace(" ", "_")
if isinstance(n, str):
n = unicodedata.normalize('NFKD', n).encode('ascii', 'ignore').decode('ascii')
n = re.sub(r'[^a-zA-Z0-9_-]', '_', n)
normalize_to_storage_form.__cache[t] = n
return normalize_to_storage_form.__cache[t]
normalize_to_storage_form.__cache = {}
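# Illustrative examples (not from the original source) of the mapping:
#   normalize_to_storage_form("Protein binding")  ->  "Protein_binding"
#   normalize_to_storage_form("café bar")         ->  "cafe_bar"
#   normalize_to_storage_form("Arg1: value?")     ->  "Arg1__value_"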
class TypeHierarchyNode:
"""Represents a node in a simple (possibly flat) hierarchy.
Each node is associated with a set of terms, one of which (the
storage_form) matches the way in which the type denoted by the
node is referenced to in data stored on disk and in client-server
communications. This term is guaranteed to be in "storage form" as
defined by normalize_to_storage_form().
Each node may be associated with one or more "arguments", which
are (multivalued) key:value pairs. These determine various characteristics
of the node, but their interpretation depends on the hierarchy the
node occupies: for example, for events the arguments correspond to
event arguments.
"""
def __init__(self, terms, args=[]):
self.terms, self.args = terms, args
if len(terms) == 0 or len([t for t in terms if t == ""]) != 0:
Messager.debug("Empty term in configuration", duration=-1)
raise InvalidProjectConfigException
# unused if any of the terms marked with "!"
self.unused = False
for i in range(len(self.terms)):
if self.terms[i][0] == "!":
self.terms[i] = self.terms[i][1:]
self.unused = True
self.children = []
# The first of the listed terms is used as the primary term for
# storage (excepting for "special" config-only types). Due to
# format restrictions, this form must not have e.g. space or
# various special characters.
if self.terms[0] not in SPECIAL_RELATION_TYPES:
self.__primary_term = normalize_to_storage_form(self.terms[0])
else:
self.__primary_term = self.terms[0]
# TODO: this might not be the ideal place to put this warning
if self.__primary_term != self.terms[0]:
Messager.warning(
"Note: in configuration, term '%s' is not appropriate for storage (should match '^[a-zA-Z0-9_-]*$'), using '%s' instead. (Revise configuration file to get rid of this message. Terms other than the first are not subject to this restriction.)" %
(self.terms[0], self.__primary_term), -1)
self.terms[0] = self.__primary_term
# TODO: cleaner and more localized parsing
self.arguments = {}
self.special_arguments = {}
self.arg_list = []
self.arg_min_count = {}
self.arg_max_count = {}
self.keys_by_type = {}
for a in self.args:
a = a.strip()
m = re.match(r'^(\S*?):(\S*)$', a)
if not m:
Messager.warning(
"Project configuration: Failed to parse argument '%s' (args: %s)" %
(a, args), 5)
raise InvalidProjectConfigException
key, atypes = m.groups()
# special case (sorry): if the key is a reserved config
# string (e.g. "<REL-TYPE>" or "<URL>"), parse differently
# and store separately
if key in reserved_config_string:
if key in self.special_arguments:
Messager.warning(
"Project configuration: error parsing: %s argument '%s' appears multiple times." %
key, 5)
raise InvalidProjectConfigException
# special case in special case: relation type specifications
# are split by hyphens, nothing else is.
# (really sorry about this.)
if key == "<REL-TYPE>":
self.special_arguments[key] = atypes.split("-")
else:
self.special_arguments[key] = [atypes]
# NOTE: skip the rest of processing -- don't add in normal args
continue
# Parse "repetition" modifiers. These are regex-like:
# - Arg : mandatory argument, exactly one
# - Arg? : optional argument, at most one
# - Arg* : optional argument, any number
# - Arg+ : mandatory argument, one or more
# - Arg{N} : mandatory, exactly N
# - Arg{N-M} : mandatory, between N and M
m = re.match(r'^(\S+?)(\{\S+\}|\?|\*|\+|)$', key)
if not m:
Messager.warning(
"Project configuration: error parsing argument '%s'." %
key, 5)
raise InvalidProjectConfigException
key, rep = m.groups()
if rep == '':
# exactly one
minimum_count = 1
maximum_count = 1
elif rep == '?':
# zero or one
minimum_count = 0
maximum_count = 1
elif rep == '*':
# any number
minimum_count = 0
maximum_count = sys.maxsize
elif rep == '+':
# one or more
minimum_count = 1
maximum_count = sys.maxsize
else:
# exact number or range constraint
assert '{' in rep and '}' in rep, "INTERNAL ERROR"
m = re.match(r'\{(\d+)(?:-(\d+))?\}$', rep)
if not m:
Messager.warning(
"Project configuration: error parsing range '%s' in argument '%s' (syntax is '{MIN-MAX}')." %
(rep, key + rep), 5)
raise InvalidProjectConfigException
n1, n2 = m.groups()
n1 = int(n1)
if n2 is None:
# exact number
if n1 == 0:
Messager.warning(
"Project configuration: cannot have exactly 0 repetitions of argument '%s'." %
(key + rep), 5)
raise InvalidProjectConfigException
minimum_count = n1
maximum_count = n1
else:
# range
n2 = int(n2)
if n1 > n2:
Messager.warning(
"Project configuration: invalid range %d-%d for argument '%s'." %
(n1, n2, key + rep), 5)
raise InvalidProjectConfigException
minimum_count = n1
maximum_count = n2
# format / config sanity: an argument whose label ends
# with a digit label cannot be repeated, as this would
# introduce ambiguity into parsing. (For example, the
# second "Theme" is "Theme2", and the second "Arg1" would
# be "Arg12".)
if maximum_count > 1 and key[-1].isdigit():
Messager.warning(
"Project configuration: error parsing: arguments ending with a digit cannot be repeated: '%s'" %
(key + rep), 5)
raise InvalidProjectConfigException
if key in self.arguments:
Messager.warning(
"Project configuration: error parsing: %s argument '%s' appears multiple times." %
key, 5)
raise InvalidProjectConfigException
assert (key not in self.arg_min_count and
key not in self.arg_max_count), "INTERNAL ERROR"
self.arg_min_count[key] = minimum_count
self.arg_max_count[key] = maximum_count
self.arg_list.append(key)
for atype in atypes.split("|"):
if atype.strip() == "":
Messager.warning(
"Project configuration: error parsing: empty type for argument '%s'." %
a, 5)
raise InvalidProjectConfigException
# Check disabled; need to support arbitrary UTF values
# for visual.conf. TODO: add this check for other configs.
# TODO: consider checking for similar for appropriate confs.
# if atype not in reserved_config_string and normalize_to_storage_form(atype) != atype:
# Messager.warning("Project configuration: '%s' is not a valid argument (should match '^[a-zA-Z0-9_-]*$')" % atype, 5)
# raise InvalidProjectConfigException
if key not in self.arguments:
self.arguments[key] = []
self.arguments[key].append(atype)
if atype not in self.keys_by_type:
self.keys_by_type[atype] = []
self.keys_by_type[atype].append(key)
def argument_minimum_count(self, arg):
"""Returns the minimum number of times the given argument is required
to appear for this type."""
return self.arg_min_count.get(arg, 0)
def argument_maximum_count(self, arg):
"""Returns the maximum number of times the given argument is allowed to
appear for this type."""
return self.arg_max_count.get(arg, 0)
def mandatory_arguments(self):
"""Returns the arguments that must appear at least once for this
type."""
return [a for a in self.arg_list if self.arg_min_count[a] > 0]
def multiple_allowed_arguments(self):
"""Returns the arguments that may appear multiple times for this
type."""
return [a for a in self.arg_list if self.arg_max_count[a] > 1]
def storage_form(self):
"""Returns the form of the term used for storage serverside."""
return self.__primary_term
def normalizations(self):
"""Returns the normalizations applicable to this node, if any."""
return self.special_arguments.get('<NORM>', [])
def __require_tab_separator(section):
"""Given a section name, returns True iff in that section of the project
config only tab separators should be permitted.
This exception was initially introduced to allow slightly different
syntax for the [labels] section than for the others.
"""
return section == "labels"
def __read_term_hierarchy(input, section=None):
root_nodes = []
last_node_at_depth = {}
last_args_at_depth = {}
macros = {}
for l in input:
# skip empties and lines starting with '#'
if l.strip() == '' or re.match(r'^\s*#', l):
continue
# interpret lines of only hyphens as separators
# for display
if re.match(r'^\s*-+\s*$', l):
# TODO: proper placeholder and placing
root_nodes.append(SEPARATOR_STR)
continue
# interpret lines of the format <STR1>=STR2 as "macro"
# definitions, defining <STR1> as a placeholder that should be
# replaced with STR2 wherever it occurs.
m = re.match(r'^<([a-zA-Z_-]+)>=\s*(.*?)\s*$', l)
if m:
name, value = m.groups()
if name in reserved_config_name:
Messager.error(
"Cannot redefine <%s> in configuration, it is a reserved name." %
name)
# TODO: proper exception
assert False
else:
macros["<%s>" % name] = value
continue
# macro expansion
for n in macros:
l = l.replace(n, macros[n])
# check for undefined macros
for m in re.finditer(r'(<.*?>)', l):
s = m.group(1)
assert s in reserved_config_string, "Error: undefined macro %s in configuration. (Note that macros are section-specific.)" % s
# choose strict tab-only separator or looser any-space
# separator matching depending on section
if __require_tab_separator(section):
m = re.match(r'^(\s*)([^\t]+)(?:\t(.*))?$', l)
else:
m = re.match(r'^(\s*)(\S+)(?:\s+(.*))?$', l)
assert m, "Error parsing line: '%s'" % l
indent, terms, args = m.groups()
terms = [t.strip() for t in terms.split("|") if t.strip() != ""]
if args is None or args.strip() == "":
args = []
else:
args = [a.strip() for a in args.split(",") if a.strip() != ""]
# older configs allowed space in term strings, splitting those
# from arguments by space. Trying to parse one of these in the
# new way will result in a crash from space in arguments.
# The following is a workaround for the transition.
if len([x for x in args if re.search(r'\s', x)]) and '\t' in l:
# re-parse in the old way (dups from above)
m = re.match(r'^(\s*)([^\t]+)(?:\t(.*))?$', l)
assert m, "Error parsing line: '%s'" % l
indent, terms, args = m.groups()
terms = [t.strip() for t in terms.split("|") if t.strip() != ""]
if args is None or args.strip() == "":
args = []
else:
args = [a.strip() for a in args.split(",") if a.strip() != ""]
# issue a warning
Messager.warning(
"Space in term name(s) (%s) on line \"%s\" in config. This feature is deprecated and support will be removed in future versions. Please revise your configuration." %
(",".join(
[
'"%s"' %
x for x in terms if " " in x]),
l),
20)
# depth in the ontology corresponds to the number of
# spaces in the initial indent.
depth = len(indent)
# expand <INHERIT> into parent arguments
expanded_args = []
for a in args:
if a != '<INHERIT>':
expanded_args.append(a)
else:
assert depth - 1 in last_args_at_depth, \
"Error no parent for '%s'" % l
expanded_args.extend(last_args_at_depth[depth - 1])
# TODO: remove, debugging
# if expanded_args != args:
# Messager.info('expand: %s --> %s' % (str(args), str(expanded_args)))
args = expanded_args
n = TypeHierarchyNode(terms, args)
if depth == 0:
# root level, no children assignments
root_nodes.append(n)
else:
# assign as child of last node at the depth of the parent
assert depth - 1 in last_node_at_depth, \
"Error: no parent for '%s'" % l
last_node_at_depth[depth - 1].children.append(n)
last_node_at_depth[depth] = n
last_args_at_depth[depth] = args
return root_nodes
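# Illustrative input (a sketch, not original documentation): the two lines
#     Protein
#      Receptor <INHERIT>
# yield a root node "Protein" whose child "Receptor" inherits the parent's
# argument list; depth is given by the number of leading spaces.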
def __read_or_default(filename, default):
try:
f = open_textfile(filename, 'r')
r = f.read()
f.close()
return r
except BaseException:
# TODO: specific exception handling and reporting
return default
def __parse_kb_shortcuts(shortcutstr, default, source):
try:
shortcuts = {}
for l in shortcutstr.split("\n"):
l = l.strip()
if l == "" or l[:1] == "#":
continue
key, type = re.split(r'[ \t]+', l)
if key in shortcuts:
Messager.warning(
"Project configuration: keyboard shortcut for '%s' defined multiple times. Ignoring all but first ('%s')" %
(key, shortcuts[key]))
else:
shortcuts[key] = type
except BaseException:
# TODO: specific exception handling
Messager.warning(
"Project configuration: error parsing keyboard shortcuts from %s. Configuration may be wrong." %
source, 5)
shortcuts = default
return shortcuts
def __parse_access_control(acstr, source):
try:
parser = urllib.robotparser.RobotFileParser()
parser.parse(acstr.split("\n"))
except BaseException:
# TODO: specific exception handling
Messager.warning(
"Project configuration: error parsing access control rules from %s. Configuration may be wrong." %
source, 5)
parser = None
return parser
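# Illustrative use of the returned oracle (a sketch, not original code):
#   ac = __parse_access_control(__default_access_control, "[default]")
#   ac.can_fetch("guest", "/confidential/report")  # -> False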
def get_config_path(directory):
return __read_first_in_directory_tree(
directory, __annotation_config_filename)[1]
def __read_first_in_directory_tree(directory, filename):
# config must be under project root
from pathlib import Path
source = str(Path(directory) / filename)
result = __read_or_default(source, None)
return result, source
def __parse_configs(configstr, source, expected_sections, optional_sections):
# top-level config structure is a set of term hierarchies
# separated by lines consisting of "[SECTION]" where SECTION is
# e.g. "entities", "relations", etc.
# start by splitting config file lines by section, also storing
# the label (default name or alias) used for each section.
section = "general"
section_lines = {section: []}
section_labels = {}
for ln, l in enumerate(configstr.split("\n")):
m = re.match(r'^\s*\[(.*)\]\s*$', l)
if m:
section = m.group(1)
# map and store section name/alias (e.g. "spans" -> "entities")
section_name = SECTION_ALIAS.get(section, section)
section_labels[section_name] = section
section = section_name
if section not in expected_sections:
Messager.warning(
"Project configuration: unexpected section [%s] in %s. Ignoring contents." %
(section, source), 5)
if section not in section_lines:
section_lines[section] = []
else:
section_lines[section].append(l)
# attempt to parse lines in each section as a term hierarchy
configs = {}
for s, sl in list(section_lines.items()):
try:
configs[s] = __read_term_hierarchy(sl, s)
except Exception as e:
Messager.warning(
"Project configuration: error parsing section [%s] in %s: %s" %
(s, source, str(e)), 5)
raise
# verify that expected sections are present; replace with empty if not.
for s in expected_sections:
if s not in configs:
if s not in optional_sections:
Messager.warning(
"Project configuration: missing section [%s] in %s. Configuration may be wrong." %
(s, source), 5)
configs[s] = []
return (configs, section_labels)
def get_configs(
directory,
filename,
defaultstr,
minconf,
sections,
optional_sections):
if (directory, filename) not in get_configs.__cache:
configstr, source = __read_first_in_directory_tree(directory, filename)
if configstr is None:
# didn't get one; try default dir and fall back to the default
configstr = __read_or_default(filename, defaultstr)
if configstr == defaultstr:
Messager.info(
"Project configuration: no configuration file (%s) found, using default." %
filename, 5)
source = "[default]"
else:
source = filename
# try to parse what was found, fall back to minimal config
try:
configs, section_labels = __parse_configs(
configstr, source, sections, optional_sections)
except BaseException:
Messager.warning(
"Project configuration: Falling back to minimal default. Configuration is likely wrong.",
5)
configs = minconf
section_labels = dict([(a, a) for a in sections])
# very, very special case processing: if we have a type
# "Equiv" defined in a "relations" section that doesn't
# specify a "<REL-TYPE>", automatically fill "symmetric" and
# "transitive". This is to support older configurations that
# rely on the type "Equiv" to identify the relation as an
# equivalence.
if 'relations' in configs:
for r in configs['relations']:
if r == SEPARATOR_STR:
continue
if (r.storage_form() == "Equiv" and
"<REL-TYPE>" not in r.special_arguments):
# this was way too much noise; will only add in after
# at least most configs are revised.
# Messager.warning('Note: "Equiv" defined in config without "<REL-TYPE>"; assuming symmetric and transitive. Consider revising config to add "<REL-TYPE>:symmetric-transitive" to definition.')
r.special_arguments["<REL-TYPE>"] = ["symmetric",
"transitive"]
get_configs.__cache[(directory, filename)] = (configs, section_labels)
return get_configs.__cache[(directory, filename)]
get_configs.__cache = {}
def __get_access_control(directory, filename, default_rules):
acstr, source = __read_first_in_directory_tree(directory, filename)
if acstr is None:
acstr = default_rules  # TODO: read or default instead of default
if acstr == default_rules:
source = "[default rules]"
else:
source = filename
ac_oracle = __parse_access_control(acstr, source)
return ac_oracle
def __get_kb_shortcuts(directory, filename, default_shortcuts, min_shortcuts):
shortcutstr, source = __read_first_in_directory_tree(directory, filename)
if shortcutstr is None:
shortcutstr = __read_or_default(filename, default_shortcuts)
if shortcutstr == default_shortcuts:
source = "[default kb_shortcuts]"
else:
source = filename
kb_shortcuts = __parse_kb_shortcuts(shortcutstr, min_shortcuts, source)
return kb_shortcuts
# final fallback for configuration; a minimal known-good config
__minimal_configuration = {
ENTITY_SECTION: [
TypeHierarchyNode(
["Protein"])], RELATION_SECTION: [
TypeHierarchyNode(
["Equiv"], [
"Arg1:Protein", "Arg2:Protein", "<REL-TYPE>:symmetric-transitive"])], EVENT_SECTION: [
TypeHierarchyNode(
["Event"], ["Theme:Protein"])], ATTRIBUTE_SECTION: [
TypeHierarchyNode(
["Negation"], ["Arg:<EVENT>"])], }
def get_annotation_configs(directory):
return get_configs(directory,
__annotation_config_filename,
__default_configuration,
__minimal_configuration,
__expected_annotation_sections,
__optional_annotation_sections)
# final fallback for visual configuration; minimal known-good config
__minimal_visual = {
LABEL_SECTION: [TypeHierarchyNode(["Protein", "Pro", "P"]),
TypeHierarchyNode(["Equiv", "Eq"]),
TypeHierarchyNode(["Event", "Ev"])],
DRAWING_SECTION: [TypeHierarchyNode([VISUAL_SPAN_DEFAULT], ["fgColor:black", "bgColor:white"]),
TypeHierarchyNode([VISUAL_ARC_DEFAULT], ["color:black"]),
TypeHierarchyNode([VISUAL_ATTR_DEFAULT], ["glyph:*"])],
}
def get_visual_configs(directory):
return get_configs(directory,
__visual_config_filename,
__default_visual,
__minimal_visual,
__expected_visual_sections,
__optional_visual_sections)
# final fallback for tools configuration; minimal known-good config
__minimal_tools = {
OPTIONS_SECTION: [],
SEARCH_SECTION: [
TypeHierarchyNode(
["google"],
["<URL>:http://www.google.com/search?q=%s"])],
ANNOTATORS_SECTION: [],
DISAMBIGUATORS_SECTION: [],
NORMALIZATION_SECTION: [],
}
def get_tools_configs(directory):
return get_configs(directory,
__tools_config_filename,
__default_tools,
__minimal_tools,
__expected_tools_sections,
__optional_tools_sections)
def get_entity_type_hierarchy(directory):
return get_annotation_configs(directory)[0][ENTITY_SECTION]
def get_relation_type_hierarchy(directory):
return get_annotation_configs(directory)[0][RELATION_SECTION]
def get_event_type_hierarchy(directory):
return get_annotation_configs(directory)[0][EVENT_SECTION]
def get_attribute_type_hierarchy(directory):
return get_annotation_configs(directory)[0][ATTRIBUTE_SECTION]
def get_annotation_config_section_labels(directory):
return get_annotation_configs(directory)[1]
# TODO: too much caching?
def get_labels(directory):
cache = get_labels.__cache
if directory not in cache:
l = {}
for t in get_visual_configs(directory)[0][LABEL_SECTION]:
if t.storage_form() in l:
Messager.warning(
"In configuration, labels for '%s' defined more than once. Only using the last set." %
t.storage_form(), -1)
# first is the storage form, the rest are labels.
l[t.storage_form()] = t.terms[1:]
cache[directory] = l
return cache[directory]
get_labels.__cache = {}
# TODO: too much caching?
def get_drawing_types(directory):
cache = get_drawing_types.__cache
if directory not in cache:
l = set()
for n in get_drawing_config(directory):
l.add(n.storage_form())
cache[directory] = list(l)
return cache[directory]
get_drawing_types.__cache = {}
def get_option_config(directory):
return get_tools_configs(directory)[0][OPTIONS_SECTION]
def get_drawing_config(directory):
return get_visual_configs(directory)[0][DRAWING_SECTION]
def get_visual_option_config(directory):
return get_visual_configs(directory)[0][OPTIONS_SECTION]
def get_visual_config_section_labels(directory):
return get_visual_configs(directory)[1]
def get_search_config(directory):
return get_tools_configs(directory)[0][SEARCH_SECTION]
def get_annotator_config(directory):
return get_tools_configs(directory)[0][ANNOTATORS_SECTION]
def get_disambiguator_config(directory):
return get_tools_configs(directory)[0][DISAMBIGUATORS_SECTION]
def get_normalization_config(directory):
return get_tools_configs(directory)[0][NORMALIZATION_SECTION]
def get_tools_config_section_labels(directory):
return get_tools_configs(directory)[1]
def get_access_control(directory):
cache = get_access_control.__cache
if directory not in cache:
a = __get_access_control(directory,
__access_control_filename,
__default_access_control)
cache[directory] = a
return cache[directory]
get_access_control.__cache = {}
def get_kb_shortcuts(directory):
cache = get_kb_shortcuts.__cache
if directory not in cache:
a = __get_kb_shortcuts(directory,
__kb_shortcut_filename,
__default_kb_shortcuts,
{"P": "Positive_regulation"})
cache[directory] = a
return cache[directory]
get_kb_shortcuts.__cache = {}
def __collect_type_list(node, collected):
if node == SEPARATOR_STR:
return collected
collected.append(node)
for c in node.children:
__collect_type_list(c, collected)
return collected
def __type_hierarchy_to_list(hierarchy):
root_nodes = hierarchy
types = []
for n in root_nodes:
__collect_type_list(n, types)
return types
# TODO: it's not clear it makes sense for all of these methods to have
# their own caches; this seems a bit like a case of premature
# optimization to me. Consider simplifying.
def get_entity_type_list(directory):
cache = get_entity_type_list.__cache
if directory not in cache:
cache[directory] = __type_hierarchy_to_list(
get_entity_type_hierarchy(directory))
return cache[directory]
get_entity_type_list.__cache = {}
def get_event_type_list(directory):
cache = get_event_type_list.__cache
if directory not in cache:
cache[directory] = __type_hierarchy_to_list(
get_event_type_hierarchy(directory))
return cache[directory]
get_event_type_list.__cache = {}
def get_relation_type_list(directory):
cache = get_relation_type_list.__cache
if directory not in cache:
cache[directory] = __type_hierarchy_to_list(
get_relation_type_hierarchy(directory))
return cache[directory]
get_relation_type_list.__cache = {}
def get_attribute_type_list(directory):
cache = get_attribute_type_list.__cache
if directory not in cache:
cache[directory] = __type_hierarchy_to_list(
get_attribute_type_hierarchy(directory))
return cache[directory]
get_attribute_type_list.__cache = {}
def get_search_config_list(directory):
cache = get_search_config_list.__cache
if directory not in cache:
cache[directory] = __type_hierarchy_to_list(
get_search_config(directory))
return cache[directory]
get_search_config_list.__cache = {}
def get_annotator_config_list(directory):
cache = get_annotator_config_list.__cache
if directory not in cache:
cache[directory] = __type_hierarchy_to_list(
get_annotator_config(directory))
return cache[directory]
get_annotator_config_list.__cache = {}
def get_disambiguator_config_list(directory):
cache = get_disambiguator_config_list.__cache
if directory not in cache:
cache[directory] = __type_hierarchy_to_list(
get_disambiguator_config(directory))
return cache[directory]
get_disambiguator_config_list.__cache = {}
def get_normalization_config_list(directory):
cache = get_normalization_config_list.__cache
if directory not in cache:
cache[directory] = __type_hierarchy_to_list(
get_normalization_config(directory))
return cache[directory]
get_normalization_config_list.__cache = {}
def get_node_by_storage_form(directory, term):
cache = get_node_by_storage_form.__cache
if directory not in cache:
d = {}
for e in get_entity_type_list(
directory) + get_event_type_list(directory):
t = e.storage_form()
if t in d:
Messager.warning(
"Project configuration: term %s appears multiple times, only using last. Configuration may be wrong." %
t, 5)
d[t] = e
cache[directory] = d
return cache[directory].get(term, None)
get_node_by_storage_form.__cache = {}
def _get_option_by_storage_form(directory, term, config, cache):
if directory not in cache:
d = {}
for n in config:
t = n.storage_form()
if t in d:
Messager.warning(
"Project configuration: %s appears multiple times, only using last. Configuration may be wrong." %
t, 5)
d[t] = {}
for a in n.arguments:
if len(n.arguments[a]) != 1:
Messager.warning(
"Project configuration: %s key %s has multiple values, only using first. Configuration may be wrong." %
(t, a), 5)
d[t][a] = n.arguments[a][0]
cache[directory] = d
return cache[directory].get(term, None)
def get_option_config_by_storage_form(directory, term):
cache = get_option_config_by_storage_form.__cache
config = get_option_config(directory)
return _get_option_by_storage_form(directory, term, config, cache)
get_option_config_by_storage_form.__cache = {}
def get_visual_option_config_by_storage_form(directory, term):
cache = get_visual_option_config_by_storage_form.__cache
config = get_visual_option_config(directory)
return _get_option_by_storage_form(directory, term, config, cache)
get_visual_option_config_by_storage_form.__cache = {}
# access for settings for specific options in tools.conf
# TODO: avoid fixed string values here, define vars earlier
def options_get_validation(directory):
v = get_option_config_by_storage_form(directory, 'Validation')
return 'none' if v is None else v.get('validate', 'none')
def options_get_tokenization(directory):
v = get_option_config_by_storage_form(directory, 'Tokens')
return 'whitespace' if v is None else v.get('tokenizer', 'whitespace')
def options_get_ssplitter(directory):
v = get_option_config_by_storage_form(directory, 'Sentences')
return 'regex' if v is None else v.get('splitter', 'regex')
def options_get_annlogfile(directory):
v = get_option_config_by_storage_form(directory, 'Annotation-log')
return '<NONE>' if v is None else v.get('logfile', '<NONE>')
# access for settings for specific options in visual.conf
def visual_options_get_arc_bundle(directory):
v = get_visual_option_config_by_storage_form(directory, 'Arcs')
return 'none' if v is None else v.get('bundle', 'none')
def visual_options_get_text_direction(directory):
v = get_visual_option_config_by_storage_form(directory, 'Text')
return 'ltr' if v is None else v.get('direction', 'ltr')
def get_drawing_config_by_storage_form(directory, term):
cache = get_drawing_config_by_storage_form.__cache
if directory not in cache:
d = {}
for n in get_drawing_config(directory):
t = n.storage_form()
if t in d:
Messager.warning(
"Project configuration: term %s appears multiple times, only using last. Configuration may be wrong." %
t, 5)
d[t] = {}
for a in n.arguments:
# attribute drawing can be specified with multiple
# values (multi-valued attributes), other parts of
# drawing config should have single values only.
if len(n.arguments[a]) != 1:
if a in ATTR_DRAWING_ATTRIBUTES:
# use multi-valued directly
d[t][a] = n.arguments[a]
else:
# warn and pass
Messager.warning(
"Project configuration: expected single value for %s argument %s, got '%s'. Configuration may be wrong." %
(t, a, "|".join(
n.arguments[a])))
else:
d[t][a] = n.arguments[a][0]
# TODO: hack to get around inability to have commas in values;
# fix original issue instead
for t in d:
for k in d[t]:
# sorry about this
if not isinstance(d[t][k], list):
d[t][k] = d[t][k].replace("-", ",")
else:
for i in range(len(d[t][k])):
d[t][k][i] = d[t][k][i].replace("-", ",")
default_keys = [VISUAL_SPAN_DEFAULT,
VISUAL_ARC_DEFAULT,
VISUAL_ATTR_DEFAULT]
for default_dict in [d.get(dk, {}) for dk in default_keys]:
for k in default_dict:
for t in d:
d[t][k] = d[t].get(k, default_dict[k])
# Kind of a special case: recognize <NONE> as "deleting" an
# attribute (prevents default propagation) and <EMPTY> as
# specifying that a value should be the empty string
# (can't be written as such directly).
for t in d:
todelete = [k for k in d[t] if d[t][k] == '<NONE>']
for k in todelete:
del d[t][k]
for k in d[t]:
if d[t][k] == '<EMPTY>':
d[t][k] = ''
cache[directory] = d
return cache[directory].get(term, None)
get_drawing_config_by_storage_form.__cache = {}
def __directory_relations_by_arg_num(
directory, num, atype, include_special=False):
assert num >= 0 and num < 2, "INTERNAL ERROR"
rels = []
entity_types = set([t.storage_form()
for t in get_entity_type_list(directory)])
event_types = set([t.storage_form()
for t in get_event_type_list(directory)])
for r in get_relation_type_list(directory):
# "Special" nesting relations ignored unless specifically
# requested
if r.storage_form() in SPECIAL_RELATION_TYPES and not include_special:
continue
if len(r.arg_list) != 2:
# Don't complain about argument constraints for unused relations
if not r.unused:
Messager.warning(
"Relation type %s has %d arguments in configuration (%s; expected 2). Please fix configuration." %
(r.storage_form(), len(
r.arg_list), ",".join(
r.arg_list)))
else:
types = r.arguments[r.arg_list[num]]
for type_ in types:
# TODO: there has to be a better way
if (type_ == atype or
type_ == "<ANY>" or
atype == "<ANY>" or
(type_ in entity_types and atype == "<ENTITY>") or
(type_ in event_types and atype == "<EVENT>") or
(atype in entity_types and type_ == "<ENTITY>") or
(atype in event_types and type_ == "<EVENT>")):
rels.append(r)
# TODO: why not break here?
return rels
def get_relations_by_arg1(directory, atype, include_special=False):
cache = get_relations_by_arg1.__cache
cache[directory] = cache.get(directory, {})
if (atype, include_special) not in cache[directory]:
cache[directory][(atype, include_special)] = __directory_relations_by_arg_num(
directory, 0, atype, include_special)
return cache[directory][(atype, include_special)]
get_relations_by_arg1.__cache = {}
def get_relations_by_arg2(directory, atype, include_special=False):
cache = get_relations_by_arg2.__cache
cache[directory] = cache.get(directory, {})
if (atype, include_special) not in cache[directory]:
cache[directory][(atype, include_special)] = __directory_relations_by_arg_num(
directory, 1, atype, include_special)
return cache[directory][(atype, include_special)]
get_relations_by_arg2.__cache = {}
def get_relations_by_storage_form(directory, rtype, include_special=False):
cache = get_relations_by_storage_form.__cache
cache[directory] = cache.get(directory, {})
if include_special not in cache[directory]:
cache[directory][include_special] = {}
for r in get_relation_type_list(directory):
if (r.storage_form() in SPECIAL_RELATION_TYPES and
not include_special):
continue
if r.unused:
continue
if r.storage_form() not in cache[directory][include_special]:
cache[directory][include_special][r.storage_form()] = []
cache[directory][include_special][r.storage_form()].append(r)
return cache[directory][include_special].get(rtype, [])
get_relations_by_storage_form.__cache = {}
def get_labels_by_storage_form(directory, term):
cache = get_labels_by_storage_form.__cache
if directory not in cache:
cache[directory] = {}
for l, labels in list(get_labels(directory).items()):
# recognize <EMPTY> as specifying that a label should
# be the empty string
labels = [lab if lab != '<EMPTY>' else ' ' for lab in labels]
cache[directory][l] = labels
return cache[directory].get(term, None)
get_labels_by_storage_form.__cache = {}
# fallback for missing or partial config: these are highly likely to
# be entity (as opposed to an event or relation) types.
# TODO: remove this workaround once the configs stabilize.
very_likely_physical_entity_types = [
'Protein',
'Entity',
'Organism',
'Chemical',
'Two-component-system',
'Regulon-operon',
# for more PTM annotation
'Protein_family_or_group',
'DNA_domain_or_region',
'Protein_domain_or_region',
'Amino_acid_monomer',
'Carbohydrate',
# for AZ corpus
'Cell_type',
'Drug_or_compound',
'Gene_or_gene_product',
'Tissue',
# 'Not_sure',
# 'Other',
'Other_pharmaceutical_agent',
]
# helper; doesn't really belong here
# TODO: shouldn't we have an utils.py or something for stuff like this?
def unique_preserve_order(iterable):
seen = set()
uniqued = []
for i in iterable:
if i not in seen:
seen.add(i)
uniqued.append(i)
return uniqued
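# E.g. (illustrative): unique_preserve_order([3, 1, 3, 2, 1]) -> [3, 1, 2]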
class ProjectConfiguration(object):
def __init__(self, directory):
# debugging (note: latter test for windows paths)
if directory[:1] != "/" and not re.search(r'^[a-zA-Z]:\\', directory):
Messager.debug(
"Project config received relative directory ('%s'), configuration may not be found." %
directory, duration=-1)
self.directory = directory
def mandatory_arguments(self, atype):
"""Returns the mandatory argument types that must be present for an
annotation of the given type."""
node = get_node_by_storage_form(self.directory, atype)
if node is None:
Messager.warning(
"Project configuration: unknown event type %s. Configuration may be wrong." %
atype)
return []
return node.mandatory_arguments()
def multiple_allowed_arguments(self, atype):
"""Returns the argument types that are allowed to be filled more than
once for an annotation of the given type."""
node = get_node_by_storage_form(self.directory, atype)
if node is None:
Messager.warning(
"Project configuration: unknown event type %s. Configuration may be wrong." %
atype)
return []
return node.multiple_allowed_arguments()
def argument_maximum_count(self, atype, arg):
"""Returns the maximum number of times that the given argument is
allowed to be filled for an annotation of the given type."""
node = get_node_by_storage_form(self.directory, atype)
if node is None:
Messager.warning(
"Project configuration: unknown event type %s. Configuration may be wrong." %
atype)
return 0
return node.argument_maximum_count(arg)
def argument_minimum_count(self, atype, arg):
"""Returns the minimum number of times that the given argument is
allowed to be filled for an annotation of the given type."""
node = get_node_by_storage_form(self.directory, atype)
if node is None:
Messager.warning(
"Project configuration: unknown event type %s. Configuration may be wrong." %
atype)
return 0
return node.argument_minimum_count(arg)
def arc_types_from(self, from_ann):
return self.arc_types_from_to(from_ann)
def relation_types_from(self, from_ann, include_special=False):
"""Returns the possible relation types that can have an annotation of
the given type as their arg1."""
return [r.storage_form() for r in get_relations_by_arg1(
self.directory, from_ann, include_special)]
def relation_types_to(self, to_ann, include_special=False):
"""Returns the possible relation types that can have an annotation of
the given type as their arg2."""
return [
r.storage_form() for r in get_relations_by_arg2(
self.directory,
to_ann,
include_special)]
def relation_types_from_to(self, from_ann, to_ann, include_special=False):
"""Returns the possible relation types that can have the given arg1 and
arg2."""
types = []
t1r = get_relations_by_arg1(self.directory, from_ann, include_special)
t2r = get_relations_by_arg2(self.directory, to_ann, include_special)
for r in t1r:
if r in t2r:
types.append(r.storage_form())
return types
def overlap_types(self, inner, outer):
"""Returns the set of annotation overlap types that have been
configured for the given pair of annotations."""
# TODO: this is O(NM) for relation counts N and M and goes
# past much of the implemented caching. Might become a
# bottleneck for annotations with large type systems.
t1r = get_relations_by_arg1(self.directory, inner, True)
t2r = get_relations_by_arg2(self.directory, outer, True)
types = []
for r in (s for s in t1r if s.storage_form()
in SPECIAL_RELATION_TYPES):
if r in t2r:
types.append(r)
# new-style overlap configuration ("<OVERLAP>") takes precedence
# over old-style configuration ("ENTITY-NESTING").
ovl_types = set()
ovl = [r for r in types if r.storage_form() == TEXTBOUND_OVERLAP_TYPE]
nst = [r for r in types if r.storage_form() == ENTITY_NESTING_TYPE]
if ovl:
if nst:
                Messager.warning(
                    'Warning: both %s and %s defined for (%s,%s) in config. '
                    'Ignoring latter.' % (TEXTBOUND_OVERLAP_TYPE,
                                          ENTITY_NESTING_TYPE, inner, outer))
for r in ovl:
if OVERLAP_TYPE_ARG not in r.special_arguments:
Messager.warning('Warning: missing ' + OVERLAP_TYPE_ARG +
' for ' + TEXTBOUND_OVERLAP_TYPE +
', ignoring specification.')
continue
for val in r.special_arguments[OVERLAP_TYPE_ARG]:
ovl_types |= set(val.split('|'))
elif nst:
# translate into new-style configuration
ovl_types = set(['contain'])
else:
ovl_types = set()
undefined_types = [t for t in ovl_types if
t not in ('contain', 'equal', 'cross', '<ANY>')]
if undefined_types:
Messager.warning('Undefined ' + OVERLAP_TYPE_ARG + ' value(s) ' +
str(undefined_types) + ' for ' +
'(' + inner + ',' + outer + ') in config. ')
return ovl_types
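    # Illustration for overlap_types(): if the configuration defines an
    # "<OVERLAP>" relation between inner and outer whose OVERLAP_TYPE_ARG
    # value is e.g. 'contain|equal', this returns set(['contain', 'equal']);
    # an old-style "ENTITY-NESTING" entry alone yields set(['contain']).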
def span_can_contain(self, inner, outer):
"""Returns True if the configuration allows the span of an annotation
of type inner to (properly) contain an annotation of type outer, False
otherwise."""
ovl_types = self.overlap_types(inner, outer)
if 'contain' in ovl_types or '<ANY>' in ovl_types:
return True
ovl_types = self.overlap_types(outer, inner)
if '<ANY>' in ovl_types:
return True
return False
def spans_can_be_equal(self, t1, t2):
"""Returns True if the configuration allows the spans of annotations of
type t1 and t2 to be equal, False otherwise."""
ovl_types = self.overlap_types(t1, t2)
if 'equal' in ovl_types or '<ANY>' in ovl_types:
return True
ovl_types = self.overlap_types(t2, t1)
if 'equal' in ovl_types or '<ANY>' in ovl_types:
return True
return False
def spans_can_cross(self, t1, t2):
"""Returns True if the configuration allows the spans of annotations of
type t1 and t2 to cross, False otherwise."""
ovl_types = self.overlap_types(t1, t2)
if 'cross' in ovl_types or '<ANY>' in ovl_types:
return True
ovl_types = self.overlap_types(t2, t1)
if 'cross' in ovl_types or '<ANY>' in ovl_types:
return True
return False
def all_connections(self, include_special=False):
"""Returns a dict of dicts of lists, outer dict keyed by entity/event
type, inner dicts by role/relation type, and lists containing
entity/event types, representing all possible connections between
annotations.
This function is provided to optimize access to the entire
annotation configuration for passing it to the client and should
never be used to check for individual connections. The caller
must not change the contents of the returned collection.
"""
# TODO: are these uniques really necessary?
entity_types = unique_preserve_order(self.get_entity_types())
event_types = unique_preserve_order(self.get_event_types())
all_types = unique_preserve_order(entity_types + event_types)
connections = {}
# TODO: it might be possible to avoid copies like
# entity_types[:] and all_types[:] here. Consider the
# possibility.
for t1 in all_types:
assert t1 not in connections, "INTERNAL ERROR"
connections[t1] = {}
processed_as_relation = {}
# relations
rels = get_relations_by_arg1(self.directory, t1, include_special)
for r in rels:
a = r.storage_form()
conns = connections[t1].get(a, [])
# magic number "1" is for 2nd argument
args = r.arguments[r.arg_list[1]]
if "<ANY>" in args:
connections[t1][a] = all_types[:]
else:
for t2 in args:
if t2 == "<ENTITY>":
conns.extend(entity_types)
elif t2 == "<EVENT>":
conns.extend(event_types)
else:
conns.append(t2)
connections[t1][a] = unique_preserve_order(conns)
processed_as_relation[a] = True
# event arguments
n1 = get_node_by_storage_form(self.directory, t1)
for a, args in list(n1.arguments.items()):
if a in processed_as_relation:
Messager.warning(
"Project configuration: %s appears both as role and relation. Configuration may be wrong." %
a)
# won't try to resolve
continue
assert a not in connections[t1], "INTERNAL ERROR"
# TODO: dedup w/above
if "<ANY>" in args:
connections[t1][a] = all_types[:]
else:
conns = []
for t2 in args:
if t2 == "<EVENT>":
conns.extend(event_types)
elif t2 == "<ENTITY>":
conns.extend(entity_types)
else:
conns.append(t2)
connections[t1][a] = unique_preserve_order(conns)
return connections
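    # Illustrative shape of the value returned by all_connections()
    # (hypothetical type and role names):
    #   {'Binding': {'Theme': ['Protein', 'Gene'], ...},
    #    'Protein': {'Equiv': ['Protein'], ...},
    #    ...}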
def arc_types_from_to(
self,
from_ann,
to_ann="<ANY>",
include_special=False):
"""Returns the possible arc types that can connect an annotation of
type from_ann to an annotation of type to_ann.
If to_ann has the value \"<ANY>\", returns all possible arc
types.
"""
from_node = get_node_by_storage_form(self.directory, from_ann)
if from_node is None:
Messager.warning(
"Project configuration: unknown textbound/event type %s. Configuration may be wrong." %
from_ann)
return []
if to_ann == "<ANY>":
relations_from = get_relations_by_arg1(
self.directory, from_ann, include_special)
# TODO: consider using from_node.arg_list instead of .arguments for
# order
return unique_preserve_order(
[role for role in from_node.arguments] + [r.storage_form() for r in relations_from])
# specific hits
types = from_node.keys_by_type.get(to_ann, [])
if "<ANY>" in from_node.keys_by_type:
types += from_node.keys_by_type["<ANY>"]
# generic arguments
if self.is_event_type(to_ann) and '<EVENT>' in from_node.keys_by_type:
types += from_node.keys_by_type['<EVENT>']
if self.is_physical_entity_type(
to_ann) and '<ENTITY>' in from_node.keys_by_type:
types += from_node.keys_by_type['<ENTITY>']
# relations
types.extend(self.relation_types_from_to(from_ann, to_ann))
return unique_preserve_order(types)
def attributes_for(self, ann_type):
"""Returs a list of the possible attribute types for an annotation of
the given type."""
attrs = []
for attr in get_attribute_type_list(self.directory):
if attr == SEPARATOR_STR:
continue
if 'Arg' not in attr.arguments:
Messager.warning(
"Project configuration: config error: attribute '%s' lacks 'Arg:' specification." %
attr.storage_form())
continue
types = attr.arguments['Arg']
if ((ann_type in types) or ('<ANY>' in types) or
(self.is_event_type(ann_type) and '<EVENT>' in types) or
(self.is_physical_entity_type(ann_type) and '<ENTITY>' in types)
or
(self.is_relation_type(ann_type) and '<RELATION>' in types)):
attrs.append(attr.storage_form())
return attrs
def get_labels(self):
return get_labels(self.directory)
def get_kb_shortcuts(self):
return get_kb_shortcuts(self.directory)
def get_access_control(self):
return get_access_control(self.directory)
def get_attribute_types(self):
return [t.storage_form()
for t in get_attribute_type_list(self.directory)]
def get_event_types(self):
return [t.storage_form() for t in get_event_type_list(self.directory)]
def get_relation_types(self):
return [t.storage_form()
for t in get_relation_type_list(self.directory)]
def get_equiv_types(self):
# equivalence relations are those relations that are symmetric
# and transitive, i.e. that have "symmetric" and "transitive"
# in their "<REL-TYPE>" special argument values.
return [t.storage_form() for t in get_relation_type_list(self.directory)
if "<REL-TYPE>" in t.special_arguments and
"symmetric" in t.special_arguments["<REL-TYPE>"] and
"transitive" in t.special_arguments["<REL-TYPE>"]]
def get_relations_by_type(self, _type):
return get_relations_by_storage_form(self.directory, _type)
def get_labels_by_type(self, _type):
return get_labels_by_storage_form(self.directory, _type)
def get_drawing_types(self):
return get_drawing_types(self.directory)
def get_drawing_config_by_type(self, _type):
return get_drawing_config_by_storage_form(self.directory, _type)
def get_search_config(self):
search_config = []
for r in get_search_config_list(self.directory):
if '<URL>' not in r.special_arguments:
Messager.warning(
'Project configuration: config error: missing <URL> specification for %s search.' %
r.storage_form())
else:
search_config.append(
(r.storage_form(), r.special_arguments['<URL>'][0]))
return search_config
def _get_tool_config(self, tool_list):
tool_config = []
for r in tool_list:
if '<URL>' not in r.special_arguments:
Messager.warning(
'Project configuration: config error: missing <URL> specification for %s.' %
r.storage_form())
continue
if 'tool' not in r.arguments:
Messager.warning(
'Project configuration: config error: missing tool name ("tool") for %s.' %
r.storage_form())
continue
if 'model' not in r.arguments:
Messager.warning(
'Project configuration: config error: missing model name ("model") for %s.' %
r.storage_form())
continue
tool_config.append((r.storage_form(),
r.arguments['tool'][0],
r.arguments['model'][0],
r.special_arguments['<URL>'][0]))
return tool_config
def get_disambiguator_config(self):
tool_list = get_disambiguator_config_list(self.directory)
return self._get_tool_config(tool_list)
def get_annotator_config(self):
# TODO: "annotator" is a very confusing term for a web service
# that does automatic annotation in the context of a tool
# where most annotators are expected to be human. Rethink.
tool_list = get_annotator_config_list(self.directory)
return self._get_tool_config(tool_list)
def get_normalization_config(self):
norm_list = get_normalization_config_list(self.directory)
norm_config = []
for n in norm_list:
if 'DB' not in n.arguments:
# optional, server looks in default location if None
n.arguments['DB'] = [None]
if '<URL>' not in n.special_arguments:
Messager.warning(
'Project configuration: config error: missing <URL> specification for %s.' %
n.storage_form())
continue
if '<URLBASE>' not in n.special_arguments:
# now optional, client skips link generation if None
n.special_arguments['<URLBASE>'] = [None]
norm_config.append((n.storage_form(),
n.special_arguments['<URL>'][0],
n.special_arguments['<URLBASE>'][0],
n.arguments['DB'][0]))
return norm_config
def get_entity_types(self):
return [t.storage_form() for t in get_entity_type_list(self.directory)]
def get_entity_type_hierarchy(self):
return get_entity_type_hierarchy(self.directory)
def get_relation_type_hierarchy(self):
return get_relation_type_hierarchy(self.directory)
def get_event_type_hierarchy(self):
return get_event_type_hierarchy(self.directory)
def get_attribute_type_hierarchy(self):
return get_attribute_type_hierarchy(self.directory)
def _get_filtered_attribute_type_hierarchy(self, types):
from copy import deepcopy
        # TODO: This doesn't properly implement recursive traversal
# and filtering, instead only checking the topmost nodes.
filtered = []
for t in self.get_attribute_type_hierarchy():
if t.storage_form() in types:
filtered.append(deepcopy(t))
return filtered
def attributes_for_types(self, types):
"""Returns list containing the attribute types that are applicable to
at least one of the given annotation types."""
# list to preserve order, dict for lookup
attribute_list = []
seen = {}
for t in types:
for a in self.attributes_for(t):
if a not in seen:
attribute_list.append(a)
seen[a] = True
return attribute_list
def get_entity_attribute_type_hierarchy(self):
"""Returns the attribute type hierarchy filtered to include only
attributes that apply to at least one entity."""
attr_types = self.attributes_for_types(self.get_entity_types())
return self._get_filtered_attribute_type_hierarchy(attr_types)
def get_relation_attribute_type_hierarchy(self):
"""Returns the attribute type hierarchy filtered to include only
attributes that apply to at least one relation."""
attr_types = self.attributes_for_types(self.get_relation_types())
return self._get_filtered_attribute_type_hierarchy(attr_types)
def get_event_attribute_type_hierarchy(self):
"""Returns the attribute type hierarchy filtered to include only
attributes that apply to at least one event."""
attr_types = self.attributes_for_types(self.get_event_types())
return self._get_filtered_attribute_type_hierarchy(attr_types)
def preferred_display_form(self, t):
"""Given a storage form label, returns the preferred display form as
defined by the label configuration (labels.conf)"""
labels = get_labels_by_storage_form(self.directory, t)
if labels is None or len(labels) < 1:
return t
else:
return labels[0]
def is_physical_entity_type(self, t):
if t in self.get_entity_types() or t in self.get_event_types():
return t in self.get_entity_types()
# TODO: remove this temporary hack
if t in very_likely_physical_entity_types:
return True
return t in self.get_entity_types()
def is_event_type(self, t):
return t in self.get_event_types()
def is_relation_type(self, t):
return t in self.get_relation_types()
def is_equiv_type(self, t):
return t in self.get_equiv_types()
def is_configured_type(self, t):
return (t in self.get_entity_types() or
t in self.get_event_types() or
t in self.get_relation_types())
def type_category(self, t):
"""Returns the category of the given type t.
The categories can be compared for equivalence but offer no
other interface.
"""
if self.is_physical_entity_type(t):
return ENTITY_CATEGORY
elif self.is_event_type(t):
return EVENT_CATEGORY
elif self.is_relation_type(t):
return RELATION_CATEGORY
else:
# TODO: others
return UNKNOWN_CATEGORY
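# Illustrative sketch (hypothetical directory and type names): typical
# read-only use of ProjectConfiguration.
#
#   config = ProjectConfiguration('/data/my-collection')
#   if config.is_event_type('Phosphorylation'):
#       required = config.mandatory_arguments('Phosphorylation')
#       label = config.preferred_display_form('Phosphorylation')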
|
Python
|
CL
|
d7233df58b8b111b1b8ef14e2b2fb6b5d127371716390dc90a32144f4d938c81
|
# coding: utf-8
# Distributed under the terms of the MIT License.
""" This file defines some useful scraper functionality,
like custom errors and a scraper function wrapper.
"""
import glob
import os
import gzip
import traceback as tb
from matador.orm.spectral import VibrationalDOS, VibrationalDispersion
from matador.orm.spectral import ElectronicDOS, ElectronicDispersion
from matador.crystal import Crystal
MODEL_REGISTRY = {
"phonon_dos2dict": VibrationalDOS,
"phonon2dict": VibrationalDispersion,
"optados2dict": ElectronicDOS,
"bands2dict": ElectronicDispersion,
"castep2dict": Crystal,
"res2dict": Crystal,
"cif2dict": Crystal,
"magres2dict": Crystal,
"pwout2dict": Crystal,
}
def get_flines_extension_agnostic(fname, ext):
"""Try to open and read the filename provided, if it doesn't exist
then try adding the given file extension to it.
Parameters:
fname (str): the filename with or without extension.
ext (list of str or str): the extension or list of file extensions to try,
or None. Should not contain ".".
Raises:
FileNotFoundError: if the file was not found in either form.
Returns:
(list of str, str): the contents of the file and the filename.
"""
if isinstance(ext, str):
ext = [ext]
if ext is not None and not os.path.isfile(fname):
for exts in ext:
if not fname.endswith(exts):
_fname = f"{fname}.{exts}"
if os.path.isfile(_fname):
fname = _fname
break
try:
if fname.endswith(".gz"):
with gzip.open(fname, "r") as f:
flines = [line.decode("utf-8") for line in f.readlines()]
else:
try:
with open(fname, "r", encoding="utf-8") as f:
flines = f.readlines()
except Exception:
with open(fname, "r", encoding="latin1") as f:
flines = f.readlines()
except FileNotFoundError as exc:
if ext is not None:
            raise FileNotFoundError(f"Neither {fname} nor {fname}.{ext} could be found.")
raise exc
return flines, fname
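# Illustrative usage (hypothetical filename): read "structure.res" whether the
# caller passes "structure" or "structure.res"; gzipped files are handled when
# the supplied name itself ends in ".gz".
#
#   flines, fname = get_flines_extension_agnostic("structure", "res")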
def scraper_function(function):
"""Wrapper for scraper functions to handle exceptions and
    template the scraper functions to work for multiple files
at once.
"""
from functools import wraps
@wraps(function)
def wrapped_scraper_function(*args, verbosity=1, fail_fast=False, **kwargs):
"""Wrap and return the scraper function, handling the
multiplicity of file names.
"""
if kwargs.get("no_wrap"):
return function(*args, **kwargs)
result = None
seed = args[0]
if isinstance(seed, str):
if "*" in seed and not kwargs.get("noglob"):
seed = sorted(glob.glob(seed))
else:
seed = [seed]
failures = []
cursor = []
if not seed:
print("Nothing to scrape.")
return
for _seed in seed:
# we can get away with this as each
# scraper function only has one arg
try:
result, success = function(_seed, verbosity=verbosity, **kwargs)
# UnicodeDecodeErrors require 5 arguments, so handle these separately
except (FileNotFoundError, UnicodeError) as oops:
raise oops
except Exception as oops:
success = False
result = type(oops)("{}: {}\n".format(_seed, oops))
if verbosity >= 1:
msg = "{}: {} {}".format(_seed, type(oops), oops)
print(msg)
if verbosity >= 2:
tb.print_exc()
if fail_fast:
raise oops
if len(seed) == 1:
if success and kwargs.get("as_model"):
orm = _as_model(result, function)
if orm is not None:
result = orm
if not success and verbosity >= 1:
print("Failed to scrape file {}".format(seed))
return result, success
if not success:
failures += [_seed]
else:
if kwargs.get("as_model"):
orm = _as_model(result, function, debug=kwargs.get("debug"))
cursor.append(orm)
if not kwargs.get("as_model") or orm is None:
cursor.append(result)
if verbosity >= 1:
print(
"Successfully scraped {} out of {} files.".format(
len(cursor), len(cursor) + len(failures)
)
)
return cursor, failures
return wrapped_scraper_function
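# Minimal sketch of a scraper that follows the wrapper's contract (hypothetical,
# not part of matador): the undecorated function takes a single seed filename
# and returns a (result, success) tuple; the decorator then adds globbing,
# per-file error handling and optional ORM conversion.
#
# @scraper_function
# def txt2dict(seed, **kwargs):
#     with open(seed) as f:
#         return {"source": seed, "lines": f.read().splitlines()}, True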
def _as_model(doc, function, debug=True):
"""Convert the document to the appropriate orm model."""
model = MODEL_REGISTRY.get(function.__name__)
orm = None
if model is not None:
try:
orm = model(doc)
except Exception as exc:
if debug:
tb.print_exc()
print("Unable to convert scraped dict to model {}".format(model.__name__))
raise exc
else:
print(
"`as_model` keyword not supported for {}, not converting".format(
function.__name__
)
)
return orm
def f90_float_parse(val):
"""Wrapper to float that handles Fortran's horrible behaviour for
    three-digit float exponents, e.g. 1e-100 -> 1.0000000-100 rather than
1.000000E-100. Also handles "+" signs in front of numbers.
Parameters:
val (str): the string to cast to a float.
"""
try:
return float(val)
except ValueError as exc:
# if the E is being displayed, then something else has gone wrong
if "E" in val:
raise exc
# if there's a minus sign after the first char, but no E...
if len(val) > 1 and "-" in val[1:]:
val = val[0] + val[1:].replace("-", "E-")
if val.startswith("+"):
val = val[1:]
return float(val)
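# Illustrative examples:
#   f90_float_parse("1.0000000-100") -> 1e-100
#   f90_float_parse("+0.5")          -> 0.5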
class DFTError(Exception):
"""Quick DFT exception class for unconverged or
non-useful calculations.
"""
class ComputationError(Exception):
"""Raised when the calculation fails to do the DFT.
    Distinct from DFTError, which covers issues of numerics
    or chemistry; this is raised for technical issues,
e.g. CASTEP crashes.
"""
|
Python
|
CL
|
0acee167e53749dfe7a8cf9d5b8d28519fce08076fe452efe2ddeead69fda601
|
import os
import logging
from datetime import datetime
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
import vaex
import vaex.ml
import vaex.ml.lightgbm
# Configure logging
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
log = logging.getLogger(__name__)
# Stream the data from GCS
log.info('Reading in the data...')
df = vaex.open('gs://vaex-data/human-activity-recognition/phones_accelerometer.hdf5')
# Pre-fetch only the relevant columns
log.info('Fetching relevant columns...')
columns_to_use = ['Arrival_Time', 'Creation_Time', 'x', 'y', 'z', 'gt']
df.nop(columns_to_use)
# Train and test split
log.info('Splitting the data into train, validation and test sets...')
df_train, df_val, df_test = df.split_random(into=[0.8, 0.1, 0.1], random_state=42)
# Feature engineering
log.info('Feature engineering...')
# Drop missing values from unlabeled activities
log.info('Drop missing values from unlabeled activities...')
df_train = df_train.dropna(column_names=['gt'])
log.info('Convert to spherical polar coordinates...')
df_train['r'] = ((df_train.x**2 + df_train.y**2 + df_train.z**2)**0.5).jit_numba()
df_train['theta'] = np.arccos(df_train.z / df_train.r).jit_numba()
df_train['phi'] = np.arctan2(df_train.y, df_train.x).jit_numba()
log.info('PCA transformation...')
df_train = df_train.ml.pca(n_components=3, features=['x', 'y', 'z'])
log.info('Create certain feature interactions...')
df_train['PCA_00'] = df_train.PCA_0**2
df_train['PCA_11'] = df_train.PCA_1**2
df_train['PCA_22'] = df_train.PCA_2**2
df_train['PCA_01'] = df_train.PCA_0 * df_train.PCA_1
df_train['PCA_02'] = df_train.PCA_0 * df_train.PCA_2
df_train['PCA_12'] = df_train.PCA_1 * df_train.PCA_2
log.info('Calculate some summary statistics per class...')
df_summary = df_train.groupby('gt').agg({'PCA_0_mean': vaex.agg.mean('PCA_0'),
'PCA_0_std': vaex.agg.std('PCA_0'),
'PCA_1_mean': vaex.agg.mean('PCA_1'),
'PCA_1_std': vaex.agg.std('PCA_1'),
'PCA_2_mean': vaex.agg.mean('PCA_2'),
'PCA_2_std': vaex.agg.std('PCA_2')
}).to_pandas_df().set_index('gt')
log.info('Define features based on the summary statistics per target class...')
for class_name in df_train.gt.unique():
feature_name = f'PCA_012_err_{class_name}'
df_train[feature_name] = ((np.abs(df_train.PCA_0 - df_summary.loc[class_name, 'PCA_0_mean']) / df_summary.loc[class_name, 'PCA_0_std']) +
(np.abs(df_train.PCA_1 - df_summary.loc[class_name, 'PCA_1_mean']) / df_summary.loc[class_name, 'PCA_1_std']) +
(np.abs(df_train.PCA_2 - df_summary.loc[class_name, 'PCA_2_mean']) / df_summary.loc[class_name, 'PCA_2_std'])).jit_numba()
log.info('Create features based on KMeans clustering...')
n_clusters = df_train.gt.nunique()
logging.info('Creating kmeans clustering features using the PCA components ...')
df_train = df_train.ml.kmeans(features=['PCA_0', 'PCA_1', 'PCA_2'],
n_clusters=n_clusters,
max_iter=1000,
n_init=5,
prediction_label='kmeans_pca')
logging.info('Creating kmeans clustering features using the interacting PCA components ...')
df_train = df_train.ml.kmeans(features=['PCA_01', 'PCA_02', 'PCA_12'],
n_clusters=n_clusters,
max_iter=1000,
n_init=5,
prediction_label='kmeans_pca_inter')
logging.info('Creating kmeans clustering features using the power PCA components ...')
df_train = df_train.ml.kmeans(features=['PCA_00', 'PCA_11', 'PCA_22'],
n_clusters=n_clusters,
max_iter=1000,
n_init=5,
prediction_label='kmeans_pca_power')
log.info('Create time feature...')
df_train['time_delta'] = df_train.Arrival_Time - df_train.Creation_Time
df_train = df_train.ml.max_abs_scaler(features=['time_delta'], prefix='scaled_')
log.info('Gather all the features that will be used for training the model...')
features = df_train.get_column_names(regex='x|y|z|r|theta|phi|PCA_|scaled_|kmeans_')
log.info('Encoding the target variable...')
target_encoder = df_train.ml.label_encoder(features=['gt'], prefix='enc_', transform=False)
df_train = target_encoder.transform(df_train)
target_mapper_inv = {key: value for value, key in target_encoder.labels_['gt'].items()}
# Apply the feature transformations to the validation set
# so it can be used while fitting the estimator
log.info('Applying the feature transformations on the validation set...')
df_val.state_set(df_train.state_get())
# Training the Estimator
log.info('Instantiating and configuring a LightGBM model...')
# Train a lightgbm model
params = {
'learning_rate': 0.5, # learning rate
'max_depth': 7, # max depth of the tree
'colsample_bytree': 0.8, # subsample ratio of columns when constructing each tree
'subsample': 0.8, # subsample ratio of the training instance
'reg_lambda': 3, # L2 regularisation
'reg_alpha': 1.5, # L1 regularisation
'min_child_weight': 1, # minimum sum of instance weight (hessian) needed in a child
'objective': 'softmax', # learning task objective
'num_class': 6, # number of target classes (if classification)
'random_state': 42, # fixes the seed, for reproducibility
'metric': 'multi_error' # the error metric
}
# Instantiate the booster model
booster = vaex.ml.lightgbm.LightGBMModel(features=features,
target='enc_gt',
prediction_name='pred',
num_boost_round=1000,
params=params)
history = {} # Dict in which to record the training history
# Start the training process
log.info('Training the LightGBM model...')
booster.fit(df=df_train,
valid_sets=[df_train, df_val],
valid_names=['train', 'val'],
early_stopping_rounds=15,
evals_result=history,
verbose_eval=True)
log.info('Obtain predictions for the training set...')
df_train = booster.transform(df_train)
log.info('Get the names of the predicted classes...')
df_train['pred_name'] = df_train.pred.apply(lambda x: target_mapper_inv[np.argmax(x)])
# Model evaluation
log.info('Evaluating the trained model...')
# Apply the full pipeline to the validation and test samples
df_test.state_set(df_train.state_get())
df_val.state_set(df_train.state_get())
# Get the scores
val_acc = accuracy_score(df_val.pred.values.argmax(axis=1), df_val.enc_gt.values)
test_acc = accuracy_score(df_test.pred.values.argmax(axis=1), df_test.enc_gt.values)
val_f1 = f1_score(df_val.pred.values.argmax(axis=1), df_val.enc_gt.values, average='micro')
test_f1 = f1_score(df_test.pred.values.argmax(axis=1), df_test.enc_gt.values, average='micro')
log.info('Evaluating the model performance...')
log.info(f'Validation accuracy: {val_acc:.3f}')
log.info(f'Validation f1-score: {val_f1:.3f}')
log.info(f'Test accuracy: {test_acc:.3f}')
log.info(f'Test f1-score: {test_f1:.3f}')
# Save the model to a GCP bucket - vaex can do this directly!
log.info('Saving the Vaex state file to a GCP bucket...')
bucket_name = 'gs://vaex-data'
folder_name = datetime.now().strftime('models/har_phones_accelerometer_%Y-%m-%dT%H:%M:%S')
model_name = 'state.json'
gcs_model_path = os.path.join(bucket_name, folder_name, model_name)
# Save only the columns that are needed in production
df_train[features + ['pred', 'pred_name']].state_write(gcs_model_path)
log.info(f'The model has been trained and is available in {bucket_name}.')
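# Illustrative follow-up (hypothetical consumer code): in production the saved
# state can be applied to new data, reproducing the feature pipeline and the
# model predictions in one step.
#
#   df_prod = vaex.open('gs://vaex-data/human-activity-recognition/new_data.hdf5')
#   df_prod.state_load(gcs_model_path)
#   predictions = df_prod.pred_name.values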
# THE END
|
Python
|
CL
|
9d46b8b0ecc5e18d6b524994f06ed0ff9cd395b2e5e49beba42ab4070baa3dae
|
"""
### **Execution Environment**
Working examples of all flavors of image specification.
An image can be:
* an existing image from Dockerhub or another image registry
* a python image with python requirements
* a dockerfile for full flexibility
Code can be added to an image:
* copy local code
* clone from git
* use COPY or ADD in a dockerfile
Live debug can be enabled with path_map:
* clone from git with a path_map
* use dockerfile with a path_map
[Companion tutorial here.](
https://medium.com/conducto/execution-environment-5a66ff0a10bc)
[Code for this pipeline here.](
https://github.com/conducto/demo/blob/main/cicd/execution_env.py)
"""
import conducto as co
pretty_table_script = """
from prettytable import PrettyTable
table = PrettyTable(field_names=["test", "result"])
table.add_row(["auth", "OK"])
table.add_row(["app", "FAILED"])
print(table)
"""
def existing_image() -> co.Exec:
"""Specify any existing image from Dockerhub or another image registry."""
image = co.Image("node:lts-alpine")
return co.Exec("npm help", image=image, doc=co.util.magic_doc())
def python_image_with_reqs_py() -> co.Exec:
"""Specify a python image and list requirements with `reqs_py`."""
image = co.Image("python:3.8-alpine", reqs_py=["PTable"])
return co.Exec(
f"python -c '{pretty_table_script}'", image=image, doc=co.util.magic_doc()
)
def dockerfile() -> co.Exec:
"""Specify a dockerfile for full flexibility in defining your image."""
image = co.Image(dockerfile="./docker/Dockerfile.simple")
return co.Exec(
f"python -c '{pretty_table_script}'", image=image, doc=co.util.magic_doc()
)
def copy_local_code() -> co.Exec:
"""Copy local code into your image with `copy_dir`."""
image = co.Image("python:3.8-alpine", copy_dir="./code")
return co.Exec("python test.py", image=image, doc=co.util.magic_doc())
def clone_from_git() -> co.Exec:
"""
Clone a git branch into your image with `copy_url` and `copy_branch`.
Your image or dockerfile must have git installed for this to work.
"""
git_url = "https://github.com/conducto/demo.git"
image = co.Image(
dockerfile="./docker/Dockerfile.git", copy_url=git_url, copy_branch="main",
)
return co.Exec("python cicd/code/test.py", image=image, doc=co.util.magic_doc())
def dockerfile_with_copy() -> co.Exec:
"""
You can COPY or ADD files directly in your dockerfile.
"""
image = co.Image(dockerfile="./docker/Dockerfile.copy", context=".")
return co.Exec("python /root/code/test.py", image=image, doc=co.util.magic_doc())
def clone_from_git_with_path_map() -> co.Exec:
"""
Enable livedebug by specifying `path_map`, mapping a local directory
to a directory in your checkout. A relative path in the key is relative
to the location of this script. A relative path in the value is relative
to the root directory of the git repo.
"""
git_url = "https://github.com/conducto/demo.git"
path_map = {".": "cicd"}
image = co.Image(
dockerfile="./docker/Dockerfile.git",
copy_url=git_url,
copy_branch="main",
path_map=path_map,
)
return co.Exec("python cicd/code/test.py", image=image, doc=co.util.magic_doc())
def dockerfile_with_path_map() -> co.Exec:
"""
Enable livedebug by specifying `path_map`, mapping a local directory
to a directory in the container. A relative path in the key is relative
to the location of this script. The value must be an absolute path.
"""
path_map = {"./code": "/root/code"}
image = co.Image(
dockerfile="./docker/Dockerfile.copy", context=".", path_map=path_map
)
return co.Exec("python /root/code/test.py", image=image, doc=co.util.magic_doc())
def examples() -> co.Parallel:
ex = co.Parallel(doc=__doc__)
ex["existing_image"] = existing_image()
ex["python_image_with_reqs_py"] = python_image_with_reqs_py()
ex["dockerfile"] = dockerfile()
ex["copy_local_code"] = copy_local_code()
ex["clone_from_git"] = clone_from_git()
ex["dockerfile_with_copy"] = dockerfile_with_copy()
ex["clone_from_git_with_path_map"] = clone_from_git_with_path_map()
ex["dockerfile_with_path_map"] = dockerfile_with_path_map()
return ex
if __name__ == "__main__":
print(__doc__)
co.Image.share_directory("CONDUCTO_DEMO", "..")
co.main(default=examples)
|
Python
|
CL
|
8896cd0c69cce17386bced02272d4c196a0fe54a387d4b4fc80d5eb8813ae9be
|
#!/usr/bin/env python
import sys, math, os, glob
import re as regex
import argparse
import toml
import numpy as np
import h5py
import asteval
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plot
from matplotlib.backends.backend_pdf import PdfPages
from pbpl.units import *
from .core import setup_plot
def get_parser():
parser = argparse.ArgumentParser(
description='Make field animation from HDF5 dump')
parser.add_argument(
'conf', metavar='CONF',
help='Configuration file (e.g., field-anim.toml)')
return parser
def get_args():
parser = get_parser()
args = parser.parse_args()
    args.conf = toml.load(args.conf)
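    # Assumption: the rest of this script reads flattened attributes such as
    # args.input_path and args.xaxis_value, so flatten the TOML sections
    # (e.g. [input], [xaxis]) onto the args namespace here.
    for section, values in args.conf.items():
        if isinstance(values, dict):
            for key, value in values.items():
                setattr(args, '{}_{}'.format(section, key), value)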
return args
def plot_frame(output, args, aeval):
fig = plot.figure(figsize=(244.0/72, 140.0/72))
ax = fig.add_subplot(1, 1, 1)
ax.plot(
aeval(args.xaxis_value), aeval(args.yaxis_value),
marker='o', ls='', markersize=3, markeredgewidth=0,
color='#0083b8', alpha=0.5)
ax.set_xlabel(args.xaxis_title, labelpad=0.0)
ax.set_ylabel(args.yaxis_title, labelpad=0.0)
ax.set_xlim(args.xaxis_min, args.xaxis_max)
ax.set_ylim(args.yaxis_min, args.yaxis_max)
ax.xaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator())
ax.yaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator())
output.savefig(fig, transparent=True)
def get_sim_steps(path):
A = glob.glob(path + '_[0-9]*.h5')
m = regex.compile('.*_([0-9]+).h5$')
result = [int(y.groups()[0]) for y in [m.match(x) for x in A] ]
result.sort()
return result
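# Illustrative example (hypothetical dump names): for files "dump_0000.h5",
# "dump_0100.h5" and "dump_0050.h5", get_sim_steps('dump') returns [0, 50, 100].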
def main():
args = get_args()
available_steps = get_sim_steps(args.input_path)
if args.input_i1 == -1:
args.input_i1 = available_steps[-1]
steps = range(args.input_i0, args.input_i1, args.input_istep)
setup_plot()
# create safe interpreter for evaluation of scale expressions
aeval = asteval.Interpreter()
import pbpl.units
for x in pbpl.units.__all__:
aeval.symtable[x] = pbpl.units.__dict__[x]
output = PdfPages(args.output_filename)
for i in steps:
f = h5py.File('{}_{:04}.h5'.format(args.input_path, i), 'r')
particles = f['particles'].value.T
N = args.input_num_particles
if N == -1:
N = particles.shape[1]
particles = particles[:, 0:N]
# reference particle
m0 = f['mass'].value * (GeV/c_light**2)
p0 = f['pz'].value * (GeV/c_light)
# beta0 = p0 / np.sqrt((m0 * c_light)**2 + p0**2)
# gamma0 = 1 / np.sqrt(1 - beta0**2)
gamma0 = np.sqrt(1 + (p0/(m0 * c_light))**2)
beta0 = np.sqrt(1 - (1/gamma0**2))
x = particles[0] * meter
px = particles[1] * p0
y = particles[2] * meter
py = particles[3] * p0
cdt = particles[4] * meter
deltap = particles[5] * p0
zeta = -beta0 * cdt
p = p0 + deltap
pz = np.sqrt(p**2 - px**2 - py**2)
dpz = pz - p0
gamma = np.sqrt(1 + (p/(m0 * c_light))**2)
beta = np.sqrt(1 - (1/gamma**2))
vz = (p0 + deltap) / (gamma * m0)
v0 = beta0 * c_light
print(px/p0)
print(py/p0)
print(dpz/p0)
# sys.exit()
aeval.symtable['m0'] = m0
aeval.symtable['p0'] = p0
aeval.symtable['gamma0'] = gamma0
aeval.symtable['beta0'] = beta0
aeval.symtable['x'] = x
aeval.symtable['px'] = px
aeval.symtable['y'] = y
aeval.symtable['py'] = py
aeval.symtable['zeta'] = zeta
aeval.symtable['deltap'] = deltap
aeval.symtable['p'] = p
aeval.symtable['pz'] = pz
aeval.symtable['dpz'] = dpz
aeval.symtable['vz'] = vz
aeval.symtable['v0'] = v0
plot_frame(output, args, aeval)
f.close()
output.close()
if __name__ == '__main__':
sys.exit(main())
|
Python
|
CL
|
f132781467585c9ba7c2ca2571dc014a23f3109fbaa6f102af7b7138c220b238
|
#! /usr/bin/env python3.3
"""Common functions for all P4GF server implementations."""
from collections import namedtuple
from contextlib import contextmanager, ExitStack
import functools
import logging
import os
import random
import re
import signal
import sys
import time
import p4gf_atomic_lock
from P4 import Map, P4Exception
import p4gf_branch
import p4gf_config
import p4gf_const
import p4gf_context
import p4gf_copy_p2g
import p4gf_create_p4
import p4gf_mem_gc
import p4gf_git
import p4gf_git_repo_lock
from p4gf_git_swarm import GSReviewCollection
import p4gf_group
import p4gf_init_host
import p4gf_imports
import p4gf_init
from p4gf_init_repo import InitRepo, InitRepoMissingView, InitRepoReadOnly
import p4gf_p4key as P4Key
from p4gf_l10n import _, NTR
import p4gf_lock
import p4gf_log
import p4gf_p4msg
import p4gf_p4msgid
import p4gf_p4spec
from p4gf_prl_file import PRLFile
import p4gf_proc
from p4gf_profiler import Timer, with_timer
import p4gf_protect
from p4gf_repolist import RepoList
import p4gf_translate
import p4gf_util
from p4gf_util import CommandError
import p4gf_version_3
import p4gf_read_permission
import p4gf_lock_status
LOG = logging.getLogger(__name__)
CMD_GIT_UPLOAD_PACK = "git-upload-pack" # aka fetch/pull/clone
CMD_GIT_RECEIVE_PACK = "git-receive-pack" # aka push
CMD_LFS_OBJECTS = "objects" # aka Git LFS pre-push metadata
COMMAND_TO_PERM = {
CMD_GIT_UPLOAD_PACK: p4gf_group.PERM_PULL,
CMD_GIT_RECEIVE_PACK: p4gf_group.PERM_PUSH,
# All LFS requests must be treated as "pull" until we can determine
# exactly where in the depot (repo and branch) the files are going.
CMD_LFS_OBJECTS: p4gf_group.PERM_PULL
}
class ServerCommonException(Exception):
"""base class for exceptions that don't require logging."""
pass
class BadRequestException(ServerCommonException):
"""bad args or similar."""
pass
class PerforceConnectionFailed(ServerCommonException):
"""trouble with p4 connection."""
pass
class SpecialCommandException(ServerCommonException):
"""requested repo is actually a special command."""
pass
class RepoNotFoundException(ServerCommonException):
"""requested repo does not exist."""
pass
class RepoInitFailedException(ServerCommonException):
"""problem initializing a repo."""
pass
class MissingSubmoduleImportUrlException(ServerCommonException):
"""repo has Stream imports but no configured ssh-url."""
pass
class ReadOnlyInstanceException(ServerCommonException):
"""Git Fusion instance is configured to reject pushes."""
pass
class TerminatingException(ServerCommonException):
"""A terminating signal was received during pull/push processing."""
pass
class ExceptionAuditLogger:
"""Print errors to standard channels, then propagate."""
def __init__(self):
"""Initialize the logger."""
pass
def __enter__(self):
"""On enter do nothing."""
return self
def __exit__(self, exc_type, exc_value, _traceback):
"""On exit, log exceptions in some cases."""
# Skip calls to exit().
if exc_type == SystemExit:
return False
# Skip logging of certain known exception types
if isinstance(exc_value, ServerCommonException):
return False
if exc_value:
msg = "{}".format(exc_value)
# Improve the readability of the error message for the client (GF-2311).
msg = msg.replace('\\t', '\t').replace('\\n', '\n')
sys.stderr.write(msg + '\n')
if hasattr(exc_value, "usage") and exc_value.usage:
print(exc_value.usage)
return False # False = do not squelch. Propagate
def check_lfs_enabled_maps_top_level(ctx):
"""If LFS is enabled for this repo, views must map top level in all branches. """
if ctx.is_lfs_enabled and not ctx.check_branches_map_top_level():
raise RuntimeError(
_('Perforce: Improperly configured branch views.'
'\n LFS is enabled for this repo, but at least one branch does'
              '\n  not map the top level directory for .gitattributes.'))
class Server(object):
"""base class for Git Fusion servers."""
def __init__(self):
"""Init the Server object."""
self.p4 = None
self.user = None
self.foruser = None
self.repo_name_git = None
self.command = None
self.git_caller = None
self.skip_perms_check = False
self.poll_only = False
self.repo_name = None
self.repo_perm = None
self.git_dir = None
self._repo_config = None
self._should_remove_atomic_lock = False
def before(self):
"""override to do setup before process."""
pass
def after(self):
"""override to do cleanup after process."""
pass
def push_received(self):
"""Return True if the push payload is being received.
Some protocols, such as HTTP, receive multiple requests during a
push operation, only one of which is the actual push payload. We
need to know when that is the case, and the server implementations
must override this method to provide that information.
"""
# pylint:disable=no-self-use
return False
def record_access(self):
"""Record the access of the repository in the audit log."""
pass
def record_error(self, msg):
"""Record the given error message to the audit log.
:param str msg: the error message to record.
"""
pass
@property
def repo_config(self):
"""Fetch or create the repo configuration file, if not already loaded."""
if self._repo_config is None:
self._repo_config = p4gf_config.RepoConfig.from_depot_file(
self.repo_name, self.p4, create_if_missing=True)
return self._repo_config
@with_timer('Setup')
def _setup(self):
"""do setup; no lock required."""
LOG.debug(p4gf_log.memory_usage())
p4gf_util.has_server_id_or_exit(log=LOG)
p4gf_util.reset_git_enviro()
#
# Initialize the external process launcher early, before allocating
# lots of memory, and just after all other conditions have been
# checked.
#
# Do this _after_ changing to the git working tree, as it seems to
# be rather difficult to correct this later using the 'cwd'
# argument to the subprocess.Popen() constructor, at least on
# Linux systems.
#
# Also do this before creating any P4 connections, as that seems to
        # affect whether temporary clients are automatically deleted or not.
#
p4gf_proc.init()
self.p4 = p4gf_create_p4.create_p4_temp_client()
if not self.p4:
raise PerforceConnectionFailed()
LOG.debug("connected to P4: %s", self.p4)
p4gf_group.invalidate_groups_i()
p4gf_branch.init_case_handling(self.p4)
self._get_repo_name_and_foruser()
self._check_readiness()
self._check_lock_perm()
self._check_protects()
self.check_special_command()
self._check_valid_command()
self._check_gf_depot_permissions()
self.check_user_exists()
self._check_perms()
self._init_system()
self.check_lfs_enabled()
self._write_motd()
def _init_system(self):
"""Initialize the Git Fusion system."""
# Create Git Fusion server depot, user, config. NOPs if already created.
p4gf_init.init(self.p4)
@with_timer('Server process')
def process(self):
"""process the request."""
exit_code = 1
with ExitStack() as stack:
stack.enter_context(ExceptionAuditLogger())
stack.enter_context(p4gf_create_p4.Closer())
stack.enter_context(run_before_after(self))
stack.enter_context(gc_debug())
stack.enter_context(log_start_end(self))
self._setup()
ctx = p4gf_context.create_context(self.repo_name)
ctx.p4gf = self.p4
ctx.foruser = self.foruser
stack.enter_context(ctx)
ctx.log_context()
self._init_host(ctx)
# N.B. this temporarily takes both exclusive locks, if needed
# N.B. for the LFS server, this function does nothing
repo_created = self._init_repo(ctx)
check_lfs_enabled_maps_top_level(ctx)
# Change into the git working directory. Not all git commands
# react well to the --work-tree option.
self.git_dir = ctx.repo_dirs.GIT_DIR
os.chdir(ctx.repo_dirs.GIT_WORK_TREE)
stack.enter_context(raise_on_sigterm())
try:
exit_code = self.process_request(ctx, repo_created)
except TerminatingException:
# The design of the "raise on sigterm" handler is to raise
# an exception when SIGTERM is received, which should cause
# all of the "process" functions to clean up and remove any
# locks. We then quietly exit to avoid causing grief with
# the T4 tests running in ElectricCommander.
LOG.warning('terminating signal received, exiting quietly')
return exit_code
def process_request(self, ctx, repo_created):
"""Handle the incoming request, now that everything is set up.
:param ctx: Git Fusion context.
:param bool repo_created: True if repo was just created now.
:return: exit status code (usually zero).
"""
try:
rollback_prl(ctx, blocking=False)
except p4gf_git_repo_lock.LockBusy:
# This will happen often, do not fail fast, just log and move on.
LOG.info("repo %s busy at this time, no rollback attempted", self.repo_name)
if CMD_GIT_UPLOAD_PACK in self.command or self.poll_only:
# This is a pull request.
update_only_on_poll = self.repo_config.getboolean(
p4gf_config.SECTION_PERFORCE_TO_GIT,
p4gf_config.KEY_UPDATE_ONLY_ON_POLL)
if p4gf_const.READ_ONLY:
# Retrieve whatever is available in the object cache.
exit_code = self._process_readonly(ctx)
elif update_only_on_poll and not self.poll_only:
# The antithesis of polling: no update at all, serve
# whatever is available in the repo right now.
exit_code = self._call_git(ctx)
else:
# Normal fetch or a poll.
exit_code = self._process_upload(ctx, repo_created)
else:
# This is a push request.
if p4gf_const.READ_ONLY:
raise ReadOnlyInstanceException(_('Push to read-only instance prohibited'))
exit_code = self._process_receive(ctx, repo_created)
return exit_code
def _process_upload(self, ctx, _repo_created):
"""Service the git-upload-pack (fetch) request.
:param ctx: Git Fusion context.
:param repo_created: True if repo was created in this request.
:return: exit code of the git command, or OK if poll_only is True.
"""
log = LOG.getChild('upload')
# Acquire the git read lock _before_ getting the p4key lock.
with p4gf_git_repo_lock.read_lock(self.repo_name) as waited_on_writer:
# Attempt to acquire the p4key lock to gain exclusive access to
# the Git Fusion repository so that we may perform the Perforce
# to Git translation. Failing that, we will skip the p4-to-git
# phase and simply let git-upload-pack return the currently
# available data to the client.
log.debug('read commencing for {}'.format(self.repo_name))
if not waited_on_writer:
repo_lock = p4gf_lock.RepoLock(self.p4, self.repo_name, blocking=False)
try:
repo_lock.acquire()
ctx.repo_lock = repo_lock
lock_acquired = True
except p4gf_lock.LockBusy:
lock_acquired = False
try:
if lock_acquired:
# Set the lock(s) to "blocking" so that the release
# will succeed despite any momentary contention for
# access to the owners key.
repo_lock.blocking = True
repo_lock.lock.blocking = True
log.debug('p2g commencing for {}'.format(self.repo_name))
ctx.checkpoint("server_common:acquire_lock")
# Upgrade to a writer lock when copying to git; this is
# safe as we already have the p4key lock and at worst
# would have to wait for any newly arrived readers to
# finish their requests. Those other readers would fail
# to acquire the p4key lock and thus would not take
# this path through the code, hence no deadlock. Since
# we have the read lock and the p4key lock, any other
# writer will be put on hold until we are done.
# If not polling, do not block on getting the write lock.
write_lock = p4gf_git_repo_lock.write_lock(
self.repo_name, upgrade=True, blocking=self.poll_only)
try:
with ExitStack() as stack:
stack.enter_context(Timer('with Lock'))
stack.enter_context(write_lock)
# We now have the exclusive write lock and the p4key
# lock and can perform the p4-to-git translation.
self._copy_p2g(ctx)
ctx.update_changes_since_last_seen()
except p4gf_git_repo_lock.LockBusy:
# Oh well, this fetch will possibly be missing the latest changes.
# But at least we responded quickly, without waiting on anything.
LOG.debug("skipping p4-to-git due to contention on {}".format(
self.repo_name))
finally:
if lock_acquired:
# Give up the p4key lock _before_ invoking git to respond to client.
ctx.checkpoint("server_common:releasing_lock")
# Disconnect while we have the lock so temp clients are removed.
ctx.disconnect()
repo_lock.release()
if self.poll_only:
# Record the event of accessing the repo for the audit log.
self.record_access()
code = os.EX_OK
else:
log.debug('delegating to git for {}'.format(self.repo_name))
code = self._call_git(ctx)
return code
def _process_receive(self, ctx, _repo_created):
"""Service the git-receive-pack (push) request.
:param ctx: Git Fusion context.
:param repo_created: True if repo was created in this request.
:return: exit code of the git command.
"""
def cleanup(repo_lock, msg):
"""Clean up after error."""
# Do not wait to release the lock, just do it immediately. We
# cannot make any assumptions about what git-receive-pack and
# our hooks are doing right now, we can only exit as quickly
# and cleanly as possible.
repo_lock.ignore_pending_acquire = True
# When something bad happens, remove the atomic lock.
self._maybe_remove_atomic_lock(ctx)
# Record the failure in the push status key.
ctx.record_push_failed_p4key(msg)
with ExitStack() as stack:
repo_lock = stack.enter_context(self._acquire_both_write_locks())
self._should_remove_atomic_lock = True
ctx.repo_lock = repo_lock
# We now have the exclusive write lock and the p4key lock and
# can perform the p4-to-git and git-to-p4 translations.
stack.enter_context(Timer('with Lock'))
ctx.checkpoint("server_common:acquire_lock")
try:
self._increment_push_counter(ctx)
# In the push case, engage the atomic view lock.
# Do this BEFORE copying Perforce to Git, to avoid a race
# condition with new Perforce changelists coming in after we
# finish the copy.
p4gf_atomic_lock.lock_update_repo_reviews(ctx, action=p4gf_atomic_lock.ADD)
self._copy_p2g(ctx)
# Ensure the atomic lock is removed if the p4key lock is
# released without successfully transferring it to the
# background process (hence the two callbacks).
release_cb = functools.partial(self._maybe_remove_atomic_lock, ctx)
repo_lock.set_lock_release_cb(release_cb)
repo_lock.set_transfer_complete_cb(self._lock_transfer_cb)
# The git-to-p4 translation will happen in post-receive.
_set_pre_receive_flag(ctx)
stack.enter_context(Timer('call git'))
ec = self._call_git(ctx)
if ec:
msg = _('git-receive-pack returned {error_code}').format(error_code=ec)
LOG.error('{} for {}'.format(msg, self.repo_name))
cleanup(repo_lock, msg)
sys.stderr.write(msg + "\n")
elif _detect_pre_receive_flag(ctx):
# Treat the "nop" as a success, but record in the log exactly
# what happened for easier debugging when a push seems to fail.
LOG.info('push {} had nothing to do (pre-receive not invoked)'.format(
ctx.push_id))
ctx.record_push_success_p4key()
ctx.update_changes_since_last_seen()
ctx.checkpoint("server_common:releasing_lock")
return ec
except: # pylint:disable=bare-except
LOG.exception('receive failed')
cleanup(repo_lock, sys.exc_info()[1])
raise
def _process_readonly(self, ctx):
"""Service the git-upload-pack (fetch) request on a read-only instance.
:param ctx: Git Fusion context.
:return: exit code of the git command.
"""
with p4gf_git_repo_lock.read_lock(self.repo_name):
# Upgrade to a writer lock when writing to the git repo.
with p4gf_git_repo_lock.write_lock(self.repo_name, upgrade=True):
# Ensure our repository is up to date with the mirror.
try:
stream_imports = p4gf_imports.StreamImports(ctx)
if stream_imports.missing_submodule_import_url():
raise MissingSubmoduleImportUrlException
p4gf_copy_p2g.copy_p2g_ctx(ctx)
stream_imports.process()
except p4gf_imports.ReadOnlyException as e:
raise ReadOnlyInstanceException(str(e))
except InitRepoReadOnly as e:
raise ReadOnlyInstanceException(str(e))
p4gf_git.set_bare(True)
code = self._call_git(ctx)
return code
@contextmanager
def _acquire_both_write_locks(self):
"""Acquire both the write lock and p4key lock without causing deadlock.
In particular, acquire the locks in the established order (file
lock first, then p4key lock), releasing the file lock if the p4key
lock is busy, and waiting briefly before trying again. This allows
the background push process to operate without holding the file
lock, and yet have an incoming push wait on that background push to
complete. Without this, the incoming push would temporarily block
the background push, get both the file and p4key locks, and then
fail to acquire the atomic lock, and reject the push (which is a
bad user experience).
"""
try:
repo_lock = p4gf_lock.RepoLock(self.p4, self.repo_name, blocking=False)
while True:
# Acquire the write lock first, then try to get the p4key
# lock. If that lock is busy, release the write lock and
# sleep for a brief time before trying again.
p4gf_git_repo_lock.acquire_write_lock(self.repo_name)
try:
repo_lock.acquire()
# Set the lock(s) to "blocking" so that the release
# will succeed despite any momentary contention for
# access to the owners key.
repo_lock.blocking = True
repo_lock.lock.blocking = True
break
except p4gf_lock.LockBusy:
p4gf_git_repo_lock.remove_write_lock(self.repo_name)
                    # sleep randomly between 1/4 and 3/4 of a second
time.sleep(random.randrange(1, 4) / 4.0)
yield repo_lock
finally:
repo_lock.release()
p4gf_git_repo_lock.remove_write_lock(self.repo_name)
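    # Typical use (as in _process_receive above): the caller enters this
    # context manager through an ExitStack, e.g.
    #   repo_lock = stack.enter_context(self._acquire_both_write_locks())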
def _get_repo_name_and_foruser(self):
"""Extract foruser from url if present; get translated repo name."""
foruser_patt = re.compile('@foruser=([^ @]+)')
m = foruser_patt.search(self.repo_name_git)
if m:
self.foruser = m.group(1)
repo_name_git = self.repo_name_git[:m.start(0)] + self.repo_name_git[m.end(0):]
LOG.debug('foruser: %s', self.foruser)
else:
repo_name_git = self.repo_name_git
# translate '/' ':' ' ' .. etc .. for internal repo_name
# and figure out if repo is 'repo' or 'repo.git'
self.repo_name = p4gf_translate.TranslateReponame.url_to_repo(repo_name_git, self.p4)
p4gf_log.configure_for_repo(self.repo_name)
LOG.debug("public repo_name: %s internal repo_name: %s",
repo_name_git, self.repo_name)
def _check_readiness(self):
"""Check that P4GF is ready for accepting connections from clients."""
prevent_session = P4Key.get_all(self.p4, p4gf_const.P4GF_P4KEY_PREVENT_NEW_SESSIONS + '*')
trigger_version = P4Key.get(self.p4, p4gf_const.P4GF_P4KEY_TRIGGER_VERSION)
# Check if the "prevent further access" p4key has been set, and raise an
# error if the p4key is anything other than zero.
if prevent_session:
every_instance = prevent_session.get(p4gf_const.P4GF_P4KEY_PREVENT_NEW_SESSIONS, '0')
key_name = p4gf_const.P4GF_P4KEY_PREVENT_NEW_SESSIONS + '-' + p4gf_util.get_server_id()
our_instance = prevent_session.get(key_name, '0')
if every_instance != '0' or our_instance != '0':
raise RuntimeError(_('Git Fusion is shutting down. Please contact your admin.'))
# Check that GF submit trigger is installed and has a compatible version.
trigger_version_p4key = trigger_version.split(":")[0].strip() if trigger_version else '0'
if trigger_version_p4key != p4gf_const.P4GF_TRIGGER_VERSION:
LOG.error("Incompatible trigger version: {0} should be {1} but got {2}".format(
p4gf_const.P4GF_P4KEY_TRIGGER_VERSION,
p4gf_const.P4GF_TRIGGER_VERSION, trigger_version_p4key))
if trigger_version_p4key == '0':
raise RuntimeError(_('Git Fusion submit triggers are not installed.'
' Please contact your admin.'))
else:
raise RuntimeError(_('Git Fusion submit triggers need updating.'
' Please contact your admin.'))
def _raise_p4gf_perm(self):
"""User-visible permission failure."""
p4gf_util.raise_gfuser_insufficient_perm(p4port=self.p4.port)
def _check_lock_perm(self):
"""Permission check: see if git-fusion-user has adequate permissions to use locks."""
# try deleting a p4 key to see if we can. If so, we have enough rights
# to be able to use locks. P4 checks perms before checking if the key
# actually exists, so it doesn't need to be a key that actually exists.
try:
P4Key.delete(self.p4, p4gf_const.P4GF_P4KEY_PERM_CHECK)
except P4Exception:
# expect a protect error if we don't have access to use keys
if p4gf_p4msg.contains_protect_error(self.p4):
self._raise_p4gf_perm()
return
if p4gf_p4msg.find_msgid(self.p4, p4gf_p4msgid.MsgDm_NoSuchKey):
return
raise
def _check_protects(self):
"""Check that the protects table does not deny the Git Fusion user.
It must either be empty or grant the Git Fusion user sufficient privileges.
        Raise a permission error if this is not the case.
"""
if not p4gf_version_3.p4d_supports_protects(self.p4):
self._raise_p4gf_perm()
def check_lfs_enabled(self):
"""Validate repo configuration if processing an LFS request.
If we're processing a Git LFS request, but the current repo is not
configured to allow Git LFS requests, reject.
Cannot check until after you load the repo config.
"""
pass
def _check_gf_depot_permissions(self):
"""Verify P4GF_USER has admin access to //P4GF_DEPOT/...
        Fetch the admin permissions and raise a permission exception
        if any of the test files are not mapped.
        The set of tested files is those required for the given repo_name.
"""
# init admin filter
gf_client_map = Map()
gf_client_map.insert("//{0}/...".format(p4gf_const.P4GF_DEPOT), "//client/...")
gf_admin_filter = p4gf_protect.UserToProtect(self.p4).user_to_protect(
p4gf_const.P4GF_USER).map_for_perm(p4gf_protect.ADMIN)
gf_admin_filter = Map.join(gf_admin_filter, gf_client_map)
# Exhaustive list of RW paths required by GF for a given repo
files_to_test = ["//{0}/branch_info/foo",
"//{0}/branches/{1}/foo",
"//{0}/objects/repos/{1}/foo",
"//{0}/objects/trees/foo",
"//{0}/p4gf_config",
"//{0}/repos/{1}/p4gf_config",
"//{0}/repos/{1}/p4gf_config2",
"//{0}/users/p4gf_usermap"
]
for f in files_to_test:
f = f.format(p4gf_const.P4GF_DEPOT, # pylint: disable=too-many-format-args
self.repo_name)
if not gf_admin_filter.includes(f):
LOG.error("check_gf_depot_permissions FAILED test {0}".format(f))
self._raise_p4gf_perm()
def check_special_command(self):
"""If repo is actually a special command, run it and raise SpecialCommandException."""
if not self.repo_name.startswith('@'):
return
SpecialCommandHandler.create(self).run()
raise SpecialCommandException()
def _check_valid_command(self):
"""Verify requested command is valid."""
if self.command not in COMMAND_TO_PERM:
LOG.debug("command %s not in %s", self.command, COMMAND_TO_PERM)
raise BadRequestException(_("Unrecognized service\n"))
def _check_user_exists(self, user):
"""Check that the user actually exists."""
if not p4gf_p4spec.spec_exists(self.p4, 'user', user):
form = _('User {user} does not exist in Perforce. Please contact your admin.')
msg = form.format(user=user)
self.record_error(msg)
raise RuntimeError(msg)
if not p4gf_p4spec.spec_values_match(self.p4, 'user', user, {'Type': 'standard'}):
form = _("Perforce: User '{user}' has invalid 'Type'."
" User Type must be 'standard'. Please contact your admin.")
msg = form.format(user=user)
self.record_error(msg)
raise RuntimeError(msg)
def check_user_exists(self):
"""Check that the user(s) actually exist."""
self._check_user_exists(self.user)
if self.foruser:
self._check_user_exists(self.foruser)
def _raise_perms_error(self):
"""Raise an exception for insufficient permissions."""
msg = _("User '{user}' not authorized for '{command}' on '{repo}'.").format(
user=self.repo_perm.p4user_name, command=self.command, repo=self.repo_name)
        # If user permissions prevent the pull, provide a verbose message.
if self.repo_perm.user_read_permission_checked and self.repo_perm.error_msg:
msg += self.repo_perm.error_msg
self.record_error(msg)
raise CommandError(msg)
def _check_perms(self):
"""Check that user has permission to run the command.
If not, raise an exception.
We use the translated internal repo name here for perm authorization
"""
if self.skip_perms_check:
return
required_perm = COMMAND_TO_PERM[self.command]
# first, check foruser if set. This will leave self.repo_perm set
# to RepoPerm for foruser, so any error can be reported
if self.foruser:
LOG.debug("_check_perms for {}".format(self.foruser))
if not self.check_permissions(required_perm, user=self.foruser):
self._raise_perms_error()
# if that worked, check for authenticated user, resetting self.repo_perm
if not self.check_permissions(required_perm):
self._raise_perms_error()
def check_permissions(self, required_perm, repo_name=None, user=None):
"""Check that user has permission to run the command.
We use the translated internal repo name here for perm authorization.
:type required_perm: str
:param required_perm: either p4gf_group.PERM_PULL or p4gf_group.PERM_PUSH
:type repo_name: str
:param repo_name: name of repository, or None to use default.
:rtype: bool
:return: True if access permitted, False otherwise.
"""
user = user or self.user
if repo_name is None:
repo_name = self.repo_name
self.repo_perm = p4gf_group.RepoPerm.for_user_and_repo(
self.p4, user, repo_name, required_perm)
LOG.debug2('check_permissions() require %s perm %s', required_perm, self.repo_perm)
if self.repo_perm.can(required_perm): # check group permissions
if required_perm == p4gf_group.PERM_PULL:
# if group grants permissions - then check for user read perms
if p4gf_read_permission.user_has_read_permissions(
self.p4, self.repo_perm, required_perm):
LOG.debug2('check_permissions() read accept perm %s', self.repo_perm)
return True
else: # PERM_PUSH
LOG.debug2('check_permissions() push accept perm %s', self.repo_perm)
return True
if p4gf_read_permission.can_create_depot_repo(self.p4, repo_name):
LOG.debug2('check_permissions() create accept perm %s', self.repo_perm)
return True
LOG.debug2('check_permissions() reject perm %s', self.repo_perm)
return False
def _write_motd(self):
"""If there is a .git-fusion/motd.txt file, return it on stderr."""
motd = self._read_motd()
if motd:
sys.stderr.write(motd)
@staticmethod
def _read_motd():
"""If there is a message of the day file, return its contents.
If not, return None.
"""
p4gf_dir = p4gf_const.P4GF_HOME
motd_file_path = p4gf_const.P4GF_MOTD_FILE.format(P4GF_DIR=p4gf_dir)
if not os.path.exists(motd_file_path):
return None
with open(motd_file_path, 'r') as f:
content = f.read()
return content
def _init_repo(self, ctx):
"""Create Git Fusion per-repo client view mapping and config.
:return: True if repo created, False otherwise.
"""
LOG.debug("ensuring repo {} is initialized".format(self.repo_name))
repo_initer = InitRepo(self.p4, None).set_repo_name(self.repo_name)
if repo_initer.is_init_needed():
# Set the context first, and then the config, using the init
# repo setters so that they are coordinated.
repo_initer.context = ctx
# Lazily fetch/create the repo configuration.
repo_initer.set_repo_config(self.repo_config)
with ExitStack() as stack:
# Temporarily ignore SIGTERM while we take out several locks.
stack.enter_context(ignore_sigterm())
LOG.debug("_init_repo() initializing repo {}".format(self.repo_name))
# Hold all the locks while initializing this repository and
# its configuration, as well as possibly modifying the
# group membership.
stack.enter_context(p4gf_git_repo_lock.write_lock(self.repo_name))
# If in read-only mode, do not block trying to acquire the
# p4key lock and exit immediately with an error message.
blocking = not p4gf_const.READ_ONLY
repo_lock = p4gf_lock.RepoLock(self.p4, self.repo_name, blocking=blocking)
try:
stack.enter_context(repo_lock)
except p4gf_lock.LockBusy:
raise ReadOnlyInstanceException(_("Repo currently busy, try again later."))
stack.enter_context(Timer('with Lock'))
try:
repo_initer.repo_lock = repo_lock
# Do not permit initialization from scratch during
# pull/push when in read-only instance mode.
repo_initer.set_fail_when_read_only(True)
repo_created = repo_initer.init_repo()
except InitRepoMissingView as e:
LOG.debug('InitRepoMissingView')
raise RepoNotFoundException(str(e))
except RuntimeError as e:
LOG.exception('repo initialization failed')
raise RepoInitFailedException(str(e))
# If authorization came from default, not explicit group
# membership, copy that authorization to a group now. Could
# not do this until after p4gf_init_repo() has a chance to
# create not-yet-existing groups.
if self.repo_perm:
self.repo_perm.write_if(self.p4)
return repo_created
if self.repo_perm and self.repo_perm.needs_write():
# Not convinced we need the lock for modifying the group,
# but since this will be rare, it is an acceptable cost.
with p4gf_lock.RepoLock(self.p4, self.repo_name):
self.repo_perm.write_if(self.p4)
return False
def _init_host(self, ctx):
"""Initialize the Git Fusion host for this repo, if needed."""
if p4gf_init_host.is_init_needed(ctx.repo_dirs):
with p4gf_git_repo_lock.write_lock(self.repo_name):
p4gf_init_host.init_host(ctx.repo_dirs, ctx)
@with_timer('copy to Git')
def _copy_p2g(self, ctx):
"""Copy any recent changes from Perforce to Git."""
try:
# Since we fetch the configuration before we acquire the
# exclusive lock, there is a chance that another pull/push has
# modified/created that configuration.
            # Now that we have the exclusive lock, which prevents submits to
            # config/config2, refresh if a new file revision of config or
            # config2 was submitted after we originally loaded but before we
            # acquired the lock.
self.repo_config.refresh_if(self.p4, create_if_missing=True)
ctx.repo_config = self.repo_config
stream_imports = p4gf_imports.StreamImports(ctx)
if stream_imports.missing_submodule_import_url():
raise MissingSubmoduleImportUrlException
p4gf_copy_p2g.copy_p2g_ctx(ctx)
stream_imports.process()
            # Now is also an appropriate time to clear out any stale Git
            # Swarm reviews. We're pre-pull, pre-push: a time when we've
            # got exclusive write access to the Git repo.
GSReviewCollection.delete_refs_for_closed_reviews(ctx)
p4gf_git.set_bare(True)
except:
# Dump failure to log, BEFORE cleanup, just in case
# cleanup ALSO fails and throws its own error (which
# happens if we're out of memory).
LOG.exception('copy p2g failed')
p4gf_lock_status.print_lock_status(ctx.p4gf, p4gf_util.get_server_id(), LOG.error)
raise
def _increment_push_counter(self, ctx):
"""Increment the push counter value now that the push is underway."""
# Increment if we are getting the actual push payload.
if self.push_received():
id_key_name = P4Key.calc_repo_push_id_p4key_name(self.repo_name)
push_id = P4Key.increment(ctx.p4gf, id_key_name)
msg = _("Push {push_id} started").format(push_id=push_id)
ctx.record_push_status_p4key(msg)
def _lock_transfer_cb(self):
"""Lock ownership was transferred successfully."""
# Lock was successfully transferred to the background process, do
# _not_ release the atomic lock in this "foreground" process.
LOG.debug2('canceling repo lock removal')
self._should_remove_atomic_lock = False
def _maybe_remove_atomic_lock(self, ctx):
"""Remove the atomic lock for the repo."""
if self._should_remove_atomic_lock:
LOG.debug('removing atomic-lock for %s', self.repo_name)
# In the event of a preflight error, it is highly likely the
# connection has been dropped, so temporarily reconnect.
with ExitStack() as stack:
if not ctx.p4gf.connected():
p4gf_create_p4.p4_connect(ctx.p4gf)
stack.callback(p4gf_create_p4.p4_disconnect, ctx.p4gf)
p4gf_atomic_lock.lock_update_repo_reviews(ctx, action=p4gf_atomic_lock.REMOVE)
def _call_git(self, ctx):
"""Delegate to the appropriate Git command defined in git_caller.
Call git (e.g. git-upload-pack, git-receive-pack) while keeping reviews updated.
Returns the exit code of the Git command.
"""
# Record the event of transferring information (i.e. audit log).
self.record_access()
retval = None
# Detach git repo's HEAD before calling original git, otherwise we
# won't be able to push the current branch (if any).
if not p4gf_git.is_bare_git_repo():
p4gf_git.checkout_detached_head()
# Flush stderr before returning control to Git. Otherwise Git's own
# output might interrupt ours.
sys.stderr.flush()
# Keep the idle P4 connection open during call to git since we have
# to keep the temporary client (and likewise the lock) alive.
LOG.debug('_call_git() delegating to git for {}'.format(ctx.config.repo_name))
# Ignore the SIGTERM signal during this particularly delicate stage
# in the process. Allowing this signal to get through to Git could
# possibly result in the pre-receive hook running but not the post-
# receive hook, which may leave the git references ahead of the
# last translated commit. This is very difficult to detect and
# prevent after the fact. For instance, HTTP push seemingly always
# ends up with Git references that are ahead of Perforce changes.
# The same happens with a push that introduces a change rejected by
# a trigger.
with ignore_sigterm():
retval = self.git_caller(ctx) # pylint:disable=not-callable
LOG.debug('_call_git() returning {}'.format(retval))
return retval
# functions and log messages for tag or branch rollback in _rollback_prt().
RollbackHow = namedtuple("RollbackHow",
[ "log_nop"
, "log_force", "func_force"
, "log_delete", "func_delete"
])
_ROLLBACK_BRANCH = RollbackHow(
"git branch no change required"
, "git branch -f {ref:<20} {old_sha1}", p4gf_git.force_branch_ref
, "git branch -D {ref:<20}", p4gf_git.delete_branch_ref )
_ROLLBACK_TAG = RollbackHow(
"git tag no change required"
, "git tag -f {ref:<20} {old_sha1}", p4gf_git.force_tag_ref
, "git tag -d {ref:<20}", p4gf_git.delete_tag_ref )
def _rollback_prt(prt, how):
"""Roll one tag or branch ref back, with logging."""
curr_sha1 = p4gf_util.git_rev_list_1(prt.ref)
if curr_sha1 is None:
curr_sha1 = p4gf_const.NULL_COMMIT_SHA1
prefix = "rollback: prt={prt:<60} curr={curr_sha1} "\
.format(prt=prt, curr_sha1=p4gf_util.abbrev(curr_sha1))
# Surely we have a "strip /refs/xxx/ prefix" utility function somewhere?
short_ref = prt.ref
for p in ["refs/heads/", "refs/tags/"]:
if short_ref.startswith(p):
short_ref = short_ref[len(p):]
break
if curr_sha1 == prt.old_sha1:
LOG.warning(prefix + how.log_nop)
elif prt.old_sha1 == p4gf_const.NULL_COMMIT_SHA1:
LOG.warning(prefix + how.log_delete.format(ref=short_ref))
how.func_delete(short_ref)
else:
LOG.warning(prefix + how.log_force.format(ref=short_ref, old_sha1=prt.old_sha1))
how.func_force(short_ref, prt.old_sha1)
def rollback_prl(ctx, blocking):
"""If a prl file exists, then a previous push has failed. Git might have
refs that point to history not yet copied to Perforce. Roll those
refs back to where they were before the previous, failed, push.
NOP if no prl file: post-receive took over and (should have) finished
the translation to Perforce.
:param blocking: if True, will block until performing rollback,
if false, raise LockBusy.
Returns with Git repo checked out to master-ish branch (if any),
since rollback_prl() has to detach head to change any branch refs.
"""
prlfile = PRLFile(ctx.config.repo_name)
prl = prlfile.read()
if not prl:
return
with p4gf_git_repo_lock.write_lock(ctx.config.repo_name, upgrade=False, blocking=blocking):
# When we can get the lock, then log as an error, otherwise it
# means we had lock contention, which is not an error.
LOG.warning("rollback: checking Git refs after previous push failed: {}"
.format(ctx.config.repo_name))
# Re-fetch prl AFTER we hold the lock. Can't trust pre-lock data
# that some other process might have changed out from under us.
prl = prlfile.read()
if not prl:
return
if not p4gf_git.is_bare_git_repo():
p4gf_git.checkout_detached_head()
for prt in prl.heads():
_rollback_prt(prt, _ROLLBACK_BRANCH)
for prt in prl.tags():
_rollback_prt(prt, _ROLLBACK_TAG)
prlfile.delete()
if not p4gf_git.is_bare_git_repo():
ctx.checkout_master_ish()
@contextmanager
def run_before_after(server):
"""Wrap something with calls to before() and after()."""
server.before()
try:
yield
finally:
server.after()
@contextmanager
def log_start_end(server):
"""Log the start and end of the processing of a request."""
LOG.info('process-start of {} for {} on {}'.format(
server.command, server.user, server.repo_name_git))
try:
yield
finally:
LOG.info('process-end of {} for {} on {}'.format(
server.command, server.user, server.repo_name_git))
@contextmanager
def gc_debug():
"""Yield to caller with garbage collection debugging, if enabled."""
p4gf_mem_gc.init_gc()
try:
yield
finally:
p4gf_mem_gc.process_garbage(NTR('at end of server process'))
@contextmanager
def raise_on_sigterm():
"""Raise an exception if the SIGTERM signal is received."""
def raise_exception(_signum, _frame):
"""Raise an exception."""
        LOG.error("SIGTERM received. Raising exception.")
# Avoid receiving another SIGTERM again while we are trying to exit
# cleanly already, so ignore any further SIGTERM signals.
signal.signal(signal.SIGTERM, signal.SIG_IGN)
raise TerminatingException('SIGTERM received')
LOG.debug("raise_on_sigterm() will raise exception on SIGTERM")
term_handler = signal.signal(signal.SIGTERM, raise_exception)
try:
yield
finally:
if term_handler:
signal.signal(signal.SIGTERM, term_handler)
else:
signal.signal(signal.SIGTERM, signal.SIG_DFL)
@contextmanager
def ignore_sigterm():
"""Temporarily ignore the SIGTERM signal."""
if 'REMOTE_ADDR' in os.environ:
LOG.debug("ignore_sigterm() not ignoring SIGTERM because http")
# For HTTP, secretly do nothing as we really need to exit when
        # Apache sends us a SIGTERM, lest it employ SIGKILL after we fail
# to politely shut down in a timely manner.
yield
else:
LOG.debug("ignore_sigterm() ignoring SIGTERM signal")
term_handler = signal.signal(signal.SIGTERM, signal.SIG_IGN)
try:
yield
finally:
if term_handler:
signal.signal(signal.SIGTERM, term_handler)
else:
signal.signal(signal.SIGTERM, signal.SIG_DFL)
class StatusReporter:
    """Reports current repo status occasionally."""
def __init__(self, p4, repo_name, wait_push_id, active_push_id):
"""Construct a new instance with the given wait time."""
self.last_time = time.time()
self.p4 = p4
self.repo_name = repo_name
self.wait_push_id = wait_push_id
self.active_push_id = active_push_id
self.last_status = None
def maybe_report(self):
"""If it has been a while, report the current status."""
new_time = time.time()
# check for updated status every 5 seconds
if new_time - self.last_time > 5:
self.last_time = new_time
msg = _status_for_push(self.p4, self.repo_name, self.wait_push_id, self.active_push_id)
if msg != self.last_status:
self.last_status = msg
sys.stderr.write("{}\n".format(msg))
sys.stderr.flush()
class SpecialCommandHandler:
"""Base class for handlers of special commands."""
def __init__(self, server):
"""Init the handler."""
self.server = server
self.repo_name = server.repo_name
self.p4 = server.p4
self.user = server.user
self.foruser = server.foruser
@staticmethod
def create(server):
"""Factory method for SpecialCommandHandler.
:param server: instance of Server.
:return: instance of SpecialCommandHandler.
"""
special_names = [
(p4gf_const.P4GF_UNREPO_HELP, SpecialCommandHelp),
(p4gf_const.P4GF_UNREPO_INFO, SpecialCommandInfo),
(p4gf_const.P4GF_UNREPO_LIST, SpecialCommandList),
(p4gf_const.P4GF_UNREPO_FEATURES, SpecialCommandFeatures),
(NTR("@mirror_wait"), SpecialCommandMirrorWait) # deprecated special
]
command = server.repo_name
for (s, f) in special_names:
if s == command:
return f(server)
special_patterns = [
(p4gf_const.P4GF_UNREPO_WAIT, SpecialCommandWaitRepo),
(p4gf_const.P4GF_UNREPO_PROGRESS, SpecialCommandProgressRepo),
(p4gf_const.P4GF_UNREPO_FEATURES, SpecialCommandFeatureRepo),
(p4gf_const.P4GF_UNREPO_STATUS, SpecialCommandStatusRepo),
(p4gf_const.P4GF_UNREPO_CONFIG, SpecialCommandConfigRepo)
]
for (s, f) in special_patterns:
if re.compile("^" + s + "@").match(command):
repo_name = command[len(s)+1:]
try:
repo_name, wait_id, active_id = _parse_push_identifiers(server.p4, repo_name)
except RuntimeError as e:
# use the normal process to report the error
return SpecialCommandError(server, s, str(e))
return f(server, repo_name, wait_id, active_id)
# Did not match any known command, fall back to listing supported commands.
return SpecialCommandHandler(server)
def run(self):
"""Print the list of special commands."""
# pylint:disable=no-self-use
special_cmds = " ".join(p4gf_const.P4GF_UNREPO)
sys.stderr.write(
_("Git Fusion: unrecognized special command.\n"
"Valid commands are: {commands}\n")
.format(commands=special_cmds))
class SpecialCommandError(SpecialCommandHandler):
"""Reports an error in parsing the special command."""
def __init__(self, server, command, error_msg):
"""Init the handler."""
SpecialCommandHandler.__init__(self, server)
self.command = command
self.error_msg = error_msg
def run(self):
"""Print the error message and write an error to the log."""
LOG.warning("{} unable to proceed: {}".format(self.command, self.error_msg))
sys.stderr.write(self.error_msg + "\n")
class SpecialCommandHelp(SpecialCommandHandler):
"""Processes the '@help' special command."""
def run(self):
"""Dump the contents of the help.txt file, if it exists."""
# pylint:disable=no-self-use
help_text = p4gf_util.read_bin_file('help.txt')
if help_text is False:
sys.stderr.write(_("file 'help.txt' not found\n"))
else:
sys.stderr.write(help_text)
class SpecialCommandInfo(SpecialCommandHandler):
"""Processes the '@info' special command."""
def run(self):
"""Print version information to stderr."""
sys.stderr.write(p4gf_version_3.as_string_extended(p4=self.p4, include_checksum=True))
class SpecialCommandList(SpecialCommandHandler):
"""Processes the '@list' special command."""
def run(self):
"""Print a list of known repositories to stderr."""
def _merge_perm(p1, p2):
"""Return the lower permission of p1 and p2."""
if p1 == NTR('pull') or p2 == NTR('pull'):
return NTR('pull')
return NTR('push')
def _merge_lists(rl1, rl2):
"""merge two repo lists.
Result contains only repos that are in both lists.
perm for each repo in result is the lower of that from the two lists.
"""
result = []
for r1 in rl1:
for r2 in rl2:
if r1[0] != r2[0]:
continue
result.append([
r1[0],
_merge_perm(r1[1], r2[1]),
r1[2],
r1[3]
])
result.sort(key=lambda tup: tup[0])
return result
def _format_repo(r):
"""format info for a single repo."""
# pylint can't see the nested {width}
# pylint:disable=unused-format-string-argument
return ("{name:<{width}} {perm} {charset:<10} {desc}"
.format(width=width,
name=p4gf_translate.TranslateReponame.repo_to_git(r[0]),
perm=r[1],
charset=r[2],
desc=r[3]))
try:
self.server.check_user_exists()
repos = RepoList.list_for_user(self.p4, self.user).repos
if self.foruser:
repos2 = RepoList.list_for_user(self.p4, self.foruser).repos
repos = _merge_lists(repos, repos2)
if len(repos):
width = max(len(r[0]) for r in repos)
sys.stderr.write("\n".join([_format_repo(r) for r in repos]) + "\n")
else:
sys.stderr.write(_('no repositories found\n'))
except RuntimeError:
LOG.exception('repo list retrieval failed')
sys.stderr.write(_('no repositories found\n'))
class SpecialCommandFeatures(SpecialCommandHandler):
"""Processes the '@features' special command."""
def run(self):
"""Print a list of all available features."""
# pylint:disable=no-self-use
sys.stderr.write(_('Available features:\n'))
for k in p4gf_config.configurable_features():
sys.stderr.write("{} : {}\n".format(k, p4gf_config.FEATURE_KEYS[k]))
class SpecialCommandHandlerRepo(SpecialCommandHandler):
"""Base class for special commands targeting a single repo.
Some (but not all) of these commands also take wait_id and active_id.
"""
def __init__(self, server, repo_name, wait_id, active_id):
"""Init the handler."""
SpecialCommandHandler.__init__(self, server)
self.repo_name = repo_name
self.wait_push_id = wait_id
self.active_push_id = active_id
class SpecialCommandProgressRepo(SpecialCommandHandlerRepo):
"""Processes the '@progress@repo' special command."""
def run(self):
"""Wait for a repo lock and report changes to the repo status."""
if self.server.check_permissions(p4gf_group.PERM_PULL, self.repo_name):
_wait_for_push(self.p4, self.repo_name, self.wait_push_id, self.active_push_id,
progress=True)
else:
LOG.warning("@progress unable to report status for {} to {}".format(
self.repo_name, self.user))
sys.stderr.write(_('Status not available due to permissions\n'))
class SpecialCommandWaitRepo(SpecialCommandHandlerRepo):
"""Processes the '@wait@repo' special command."""
def run(self):
"""Wait for repo lock to be released."""
if self.server.check_permissions(p4gf_group.PERM_PULL, self.repo_name):
_wait_for_push(self.p4, self.repo_name, self.wait_push_id, self.active_push_id)
else:
LOG.warning("@wait unable to report status for {} to {}".format(
self.repo_name, self.user))
sys.stderr.write(_('Status not available due to permissions\n'))
class SpecialCommandFeatureRepo(SpecialCommandHandlerRepo):
"""Processes the '@features@repo' special command."""
def run(self):
"""Report which features are enabled for a repo."""
config = p4gf_config.RepoConfig.from_depot_file(self.repo_name, self.p4)
sys.stderr.write(_("Enabled features for repo '{repo_name}':\n")
.format(repo_name=self.repo_name))
for k in p4gf_config.configurable_features():
sys.stderr.write("{} : {}\n".format(
k, config.is_feature_enabled(k)))
class SpecialCommandStatusRepo(SpecialCommandHandlerRepo):
"""Processes the '@status@repo' special command."""
def run(self):
"""Show the status of a push operation."""
if self.server.check_permissions(p4gf_group.PERM_PULL, self.repo_name):
msg = _status_for_push(self.p4, self.repo_name, self.wait_push_id, self.active_push_id)
sys.stderr.write("{}\n".format(msg))
else:
LOG.warning("@status denied for {} on {}".format(self.user, self.repo_name))
sys.stderr.write(_('Permission denied for @status\n'))
class SpecialCommandConfigRepo(SpecialCommandHandlerRepo):
"""Dump the configuration of the named repository."""
def run(self):
"""Dump the repository configuration."""
if self.server.check_permissions(p4gf_group.PERM_PULL, self.repo_name):
try:
config = p4gf_config.RepoConfig.from_depot_file(self.repo_name, self.p4)
config_content = p4gf_config.to_text('', config.repo_config)
sys.stderr.write(config_content)
except p4gf_config.ConfigLoadError as err:
sys.stderr.write(_('Unable to load configuration: {}\n').format(err))
else:
LOG.warning("@config denied for {} on {}".format(self.user, self.repo_name))
sys.stderr.write(_('Permission denied for @config\n'))
class SpecialCommandMirrorWait(SpecialCommandHandler):
"""Processes the '@mirror_wait' special command."""
def run(self):
"""Warn user that @mirror_wait is no longer implemented."""
# pylint:disable=no-self-use
sys.stderr.write(_('@mirror_wait is no longer implemented, try @wait instead\n'))
def _status_for_push(p4, repo_name, wait_push_id, active_push_id):
"""Return the status for the requested push, or something reasonable.
:param p4: instance of P4API.
    :param repo_name: name of the repository for which to retrieve status.
:param wait_push_id: push identifier of interest.
:param active_push_id: identifier of the active push.
"""
# check for the push-specific status key, if any
n_key_name = P4Key.calc_repo_status_p4key_name(repo_name, wait_push_id)
result = P4Key.get(p4, n_key_name)
if result and result != '0':
return result
# otherwise, retrieve the current status, if appropriate
if wait_push_id == active_push_id:
key_name = P4Key.calc_repo_status_p4key_name(repo_name)
result = P4Key.get(p4, key_name)
if result and result != '0':
return result
# and if that didn't work, just return some generic message
if wait_push_id != '0':
# try to keep this text the same as in record_push_success_p4key()
return _('Push {push_id} completed successfully').format(
push_id=wait_push_id)
return _('No status available')
def _parse_push_identifiers(p4, repo_name):
"""Return a tuple of adjusted repository name, requested push id, and active push id.
:param p4: instance of P4API.
    :param repo_name: name of the repository for which to retrieve status; may contain
the requested push identifier (e.g. repo_name@push_id).
    Returns a 3-tuple of repository name, requested push identifier, and active push id.
If no push identifier was requested, the active push identifier is substituted.
The returned repository name will also have been translated.
"""
if '@' in repo_name:
(repo_name, wait_push_id) = repo_name.rsplit('@', 1)
try:
if int(wait_push_id) < 1:
raise ValueError()
except ValueError:
raise RuntimeError(_("Push identifier must be a positive integer."))
else:
wait_push_id = None
repo_name = p4gf_translate.TranslateReponame.url_to_repo(repo_name, p4)
key_name = P4Key.calc_repo_push_id_p4key_name(repo_name)
active_push_id = P4Key.get(p4, key_name)
if wait_push_id is None:
wait_push_id = active_push_id
elif int(wait_push_id) > int(active_push_id):
raise RuntimeError(_("Push {push_id} unrecognized\n")
.format(push_id=wait_push_id))
return repo_name, wait_push_id, active_push_id
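# Illustrative example (not part of the original source; it assumes the constant
# P4GF_UNREPO_STATUS is the literal "@status"): a request for "@status@myrepo@3"
# matches the "^@status@" pattern in SpecialCommandHandler.create(), which strips the
# prefix and passes "myrepo@3" to _parse_push_identifiers(). That returns the
# translated repo name, the requested push id "3", and the currently active push id,
# raising RuntimeError if the push id is not a positive integer or exceeds the
# active push id.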
def _wait_for_push(p4, repo_name, wait_push_id, active_push_id, progress=False):
"""Wait for the completion of a push on a particular repository.
:param p4: P4API instance.
:param str repo_name: name of repository.
:param int wait_push_id: push identifier on which to wait.
:param int active_push_id: identifier of active push.
:param bool progress: True to display progress of push periodically.
"""
if wait_push_id == active_push_id:
LOG.debug("checking for active lock on {}".format(repo_name))
# quick check to see if the lock is held or not
lock_key_name = p4gf_const.P4GF_P4KEY_LOCK_VIEW_OWNERS.format(repo_name=repo_name)
lock_value = P4Key.get(p4, lock_key_name)
if lock_value and lock_value != '0':
LOG.debug("waiting for lock on {}".format(repo_name))
# wait for either the lock status or the active push id to change
            sys.stderr.write(_("Waiting for push {push_id}...\n").format(push_id=wait_push_id))
sys.stderr.flush()
key_name = P4Key.calc_repo_push_id_p4key_name(repo_name)
label = "@wait on {}".format(repo_name)
wait_reporter = p4gf_log.LongWaitReporter(label, LOG)
status_reporter = StatusReporter(p4, repo_name, wait_push_id, active_push_id)
while lock_value and lock_value != '0' and wait_push_id == active_push_id:
time.sleep(1)
lock_value = P4Key.get(p4, lock_key_name)
active_push_id = P4Key.get(p4, key_name)
wait_reporter.been_waiting()
if progress:
status_reporter.maybe_report()
LOG.debug("active push changed or lock released for {}".format(
repo_name))
# retrieve the status while we're here
msg = _status_for_push(p4, repo_name, wait_push_id, active_push_id)
sys.stderr.write("{}\n".format(msg))
def _set_pre_receive_flag(ctx):
"""Write a file that pre-receive hook should delete when it runs."""
fname = os.path.join(ctx.repo_dirs.repo_container, p4gf_const.P4GF_PRE_RECEIVE_FLAG)
with open(fname, 'w') as fobj:
fobj.write(str(os.getpid()))
def _detect_pre_receive_flag(ctx):
"""Check for the file that indicates whether pre-receive ran or not.
:return: True if file still exists, indicating pre-receive did _not_ run.
"""
fname = os.path.join(ctx.repo_dirs.repo_container, p4gf_const.P4GF_PRE_RECEIVE_FLAG)
return os.path.exists(fname)
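# Illustrative flow (an inference from the two helpers above, not stated in the
# original source): the flag file is written via _set_pre_receive_flag() before
# control is handed to git-receive-pack, and the repo's pre-receive hook is expected
# to delete it. If _detect_pre_receive_flag() finds the file still present afterwards,
# the pre-receive hook never ran for that push.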
|
Python
|
CL
|
d3a4c8ca8b1a9b2d28836d1991e1314e0d2e15e48b8d517ebbed6cc7d318cf0c
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 18 16:02:26 2021
@author: Syed Muhammmad Hamza
"""
from keras.layers import merge, Dropout, Dense, Lambda, Flatten, Activation
from keras.layers.convolutional import MaxPooling2D, Convolution2D, AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras import backend as K
"""
Implementation of Inception-Residual Network v1 [Inception Network v4 Paper](http://arxiv.org/pdf/1602.07261v1.pdf) in Keras.
"""
# Concatenation axis used by the merge() calls below: 1 for Theano image
# ordering (channels first), -1 for TensorFlow ordering (channels last).
if K.image_dim_ordering() == 'th':
    channel_axis = 1
else:
    channel_axis = -1
def inception_resnet_stem(input):
    # Input Shape is 299 x 299 x 3 (tf) or 3 x 299 x 299 (th)
    c = Convolution2D(32, 3, 3, activation='relu', subsample=(2, 2))(input)
c = Convolution2D(32, 3, 3, activation='relu', )(c)
c = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(c)
c1 = MaxPooling2D((3, 3), strides=(2, 2))(c)
c2 = Convolution2D(96, 3, 3, activation='relu', subsample=(2, 2))(c)
m = merge([c1, c2], mode='concat', concat_axis=channel_axis)
c1 = Convolution2D(64, 1, 1, activation='relu', border_mode='same')(m)
c1 = Convolution2D(96, 3, 3, activation='relu', )(c1)
c2 = Convolution2D(64, 1, 1, activation='relu', border_mode='same')(m)
c2 = Convolution2D(64, 7, 1, activation='relu', border_mode='same')(c2)
c2 = Convolution2D(64, 1, 7, activation='relu', border_mode='same')(c2)
c2 = Convolution2D(96, 3, 3, activation='relu', border_mode='valid')(c2)
m2 = merge([c1, c2], mode='concat', concat_axis=channel_axis)
p1 = MaxPooling2D((3, 3), strides=(2, 2), )(m2)
p2 = Convolution2D(192, 3, 3, activation='relu', subsample=(2, 2))(m2)
m3 = merge([p1, p2], mode='concat', concat_axis=channel_axis)
m3 = BatchNormalization(axis=1)(m3)
m3 = Activation('relu')(m3)
return m3
def inception_resnet_v2_A(input, scale_residual=True):
# Input is relu activation
init = input
ir1 = Convolution2D(32, 1, 1, activation='relu', border_mode='same')(input)
ir2 = Convolution2D(32, 1, 1, activation='relu', border_mode='same')(input)
ir2 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(ir2)
ir3 = Convolution2D(32, 1, 1, activation='relu', border_mode='same')(input)
ir3 = Convolution2D(48, 3, 3, activation='relu', border_mode='same')(ir3)
ir3 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(ir3)
ir_merge = merge([ir1, ir2, ir3], concat_axis=channel_axis, mode='concat')
ir_conv = Convolution2D(384, 1, 1, activation='linear', border_mode='same')(ir_merge)
if scale_residual: ir_conv = Lambda(lambda x: x * 0.1)(ir_conv)
out = merge([init, ir_conv], mode='sum')
out = BatchNormalization(axis=1)(out)
out = Activation("relu")(out)
return out
def inception_resnet_v2_B(input, scale_residual=True):
# Input is relu activation
init = input
ir1 = Convolution2D(192, 1, 1, activation='relu', border_mode='same')(input)
ir2 = Convolution2D(128, 1, 1, activation='relu', border_mode='same')(input)
ir2 = Convolution2D(160, 1, 7, activation='relu', border_mode='same')(ir2)
ir2 = Convolution2D(192, 7, 1, activation='relu', border_mode='same')(ir2)
ir_merge = merge([ir1, ir2], mode='concat', concat_axis=channel_axis)
ir_conv = Convolution2D(1152, 1, 1, activation='linear', border_mode='same')(ir_merge)
if scale_residual: ir_conv = Lambda(lambda x: x * 0.1)(ir_conv)
out = merge([init, ir_conv], mode='sum')
out = BatchNormalization(axis=1)(out)
out = Activation("relu")(out)
return out
def inception_resnet_v2_C(input, scale_residual=True):
# Input is relu activation
init = input
ir1 = Convolution2D(192, 1, 1, activation='relu', border_mode='same')(input)
ir2 = Convolution2D(192, 1, 1, activation='relu', border_mode='same')(input)
ir2 = Convolution2D(224, 1, 3, activation='relu', border_mode='same')(ir2)
ir2 = Convolution2D(256, 3, 1, activation='relu', border_mode='same')(ir2)
ir_merge = merge([ir1, ir2], mode='concat', concat_axis=channel_axis)
ir_conv = Convolution2D(2144, 1, 1, activation='linear', border_mode='same')(ir_merge)
if scale_residual: ir_conv = Lambda(lambda x: x * 0.1)(ir_conv)
out = merge([init, ir_conv], mode='sum')
out = BatchNormalization(axis=1)(out)
out = Activation("relu")(out)
return out
def reduction_A(input, k=192, l=224, m=256, n=384):
r1 = MaxPooling2D((3,3), strides=(2,2))(input)
r2 = Convolution2D(n, 3, 3, activation='relu', subsample=(2,2))(input)
r3 = Convolution2D(k, 1, 1, activation='relu', border_mode='same')(input)
r3 = Convolution2D(l, 3, 3, activation='relu', border_mode='same')(r3)
r3 = Convolution2D(m, 3, 3, activation='relu', subsample=(2,2))(r3)
m = merge([r1, r2, r3], mode='concat', concat_axis=channel_axis)
m = BatchNormalization(axis=1)(m)
m = Activation('relu')(m)
return m
def reduction_resnet_v2_B(input):
r1 = MaxPooling2D((3,3), strides=(2,2), border_mode='valid')(input)
r2 = Convolution2D(256, 1, 1, activation='relu', border_mode='same')(input)
r2 = Convolution2D(384, 3, 3, activation='relu', subsample=(2,2))(r2)
r3 = Convolution2D(256, 1, 1, activation='relu', border_mode='same')(input)
r3 = Convolution2D(288, 3, 3, activation='relu', subsample=(2, 2))(r3)
r4 = Convolution2D(256, 1, 1, activation='relu', border_mode='same')(input)
r4 = Convolution2D(288, 3, 3, activation='relu', border_mode='same')(r4)
r4 = Convolution2D(320, 3, 3, activation='relu', subsample=(2, 2))(r4)
m = merge([r1, r2, r3, r4], concat_axis=channel_axis, mode='concat')
m = BatchNormalization(axis=1)(m)
m = Activation('relu')(m)
return m
def create_inception_resnet_v2(input, nb_output=7, scale=True):
# Input Shape is 299 x 299 x 3 (tf) or 3 x 299 x 299 (th)
x = inception_resnet_stem(input)
# 5 x Inception Resnet A
x = inception_resnet_v2_A(x, scale_residual=scale)
x = inception_resnet_v2_A(x, scale_residual=scale)
x = inception_resnet_v2_A(x, scale_residual=scale)
x = inception_resnet_v2_A(x, scale_residual=scale)
x = inception_resnet_v2_A(x, scale_residual=scale)
# Reduction A
x = reduction_A(x, k=256, l=256, m=384, n=384)
# 10 x Inception Resnet B
x = inception_resnet_v2_B(x, scale_residual=scale)
x = inception_resnet_v2_B(x, scale_residual=scale)
x = inception_resnet_v2_B(x, scale_residual=scale)
x = inception_resnet_v2_B(x, scale_residual=scale)
x = inception_resnet_v2_B(x, scale_residual=scale)
x = inception_resnet_v2_B(x, scale_residual=scale)
x = inception_resnet_v2_B(x, scale_residual=scale)
x = inception_resnet_v2_B(x, scale_residual=scale)
x = inception_resnet_v2_B(x, scale_residual=scale)
x = inception_resnet_v2_B(x, scale_residual=scale)
# Reduction Resnet B
x = reduction_resnet_v2_B(x)
# 5 x Inception Resnet C
x = inception_resnet_v2_C(x, scale_residual=scale)
x = inception_resnet_v2_C(x, scale_residual=scale)
x = inception_resnet_v2_C(x, scale_residual=scale)
x = inception_resnet_v2_C(x, scale_residual=scale)
x = inception_resnet_v2_C(x, scale_residual=scale)
# Average Pooling
x = AveragePooling2D((8,8))(x)
# Dropout
x = Dropout(0.8)(x)
x = Flatten()(x)
# Output
x = Dense(output_dim=nb_output, activation='softmax')(x)
return x
if __name__ == "__main__":
from keras.layers import Input
from keras.models import Model
from keras.utils.visualize_util import plot
ip = Input(shape=(3, 299, 299))
inception_resnet_v2 = create_inception_resnet_v2(ip, scale=True)
model = Model(ip, inception_resnet_v2)
|
Python
|
CL
|
aceca5172043b90253a1f26a739beb1e0bcd9f7158c0bc2c763f929fe651fd4e
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tensorflow as tf
class ImageRecognizer:
def __init__(self, path, graph, labels, input_layer_name, output_layer_name, num_top_predictions):
self.path = path
self.graph = graph
self.labels = labels
self.input_layer_name = input_layer_name
self.output_layer_name = output_layer_name
self.num_top_predictions = num_top_predictions
def load_image(self, filename):
"""Read in the image_data to be classified."""
return tf.gfile.FastGFile(filename, 'rb').read()
def load_labels(self, filename):
"""Read in labels, one label per line."""
return [line.rstrip() for line in tf.gfile.GFile(filename)]
def load_graph(self, filename):
"""Unpersists graph from file as default graph."""
with tf.gfile.FastGFile(filename, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
def run_graph(self, image_data, labels, input_layer_name, output_layer_name,
num_top_predictions):
#print ("r_g()")
with tf.Session() as sess:
# Feed the image_data as input to the graph.
# predictions will contain a two-dimensional array, where one
# dimension represents the input image count, and the other has
# predictions per class
softmax_tensor = sess.graph.get_tensor_by_name(output_layer_name)
predictions, = sess.run(softmax_tensor, {input_layer_name: image_data})
# Sort to show labels in order of confidence
top_k = predictions.argsort()[-self.num_top_predictions:][::-1]
values = {}
#print ("run_graph() 2")
for node_id in top_k:
human_string = labels[node_id]
score = predictions[node_id].item()
print('%s (score = %.5f)' % (human_string, score))
values[human_string] = score
return values
def recognize(self, image):
image_data = self.load_image(image)
labels = self.load_labels(self.path + self.labels)
self.load_graph(self.path+ self.graph)
#print ("recognize()")
return self.run_graph(image_data, labels, self.input_layer_name, self.output_layer_name,
self.num_top_predictions)
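# Illustrative usage (a sketch; the directory, file, and tensor names below are
# assumptions, e.g. the defaults produced by TensorFlow's retrain.py, and are not
# part of this module):
#
#     recognizer = ImageRecognizer(path='model/',
#                                  graph='output_graph.pb',
#                                  labels='output_labels.txt',
#                                  input_layer_name='DecodeJpeg/contents:0',
#                                  output_layer_name='final_result:0',
#                                  num_top_predictions=5)
#     scores = recognizer.recognize('example.jpg')  # dict of label -> softmax score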
|
Python
|
CL
|
3e956ac922c734b85c6b09046de4f2141d5ecc7b0040e301fc8de18d48be1530
|
# -*- coding: utf-8 -*-
r"""Kinked WLC with twist and fixed ends
This module calculates statistics for a series of worm-like chains with twist (DNA linkers) connected
by kinks imposed by nucleosomes. Calculations include R^2, Kuhn length, propogator matrices,
full Green's function for the end-to-end distance of the polymer, and looping statistics. For a detailed
derivation of how these calculations are carried out, refer to Deepti's notes.
#TODO move anything matching "def plot" to the plotting module (or testing
module), adjust code to reference correct members of fluctuations module.
"""
import re
import inspect
import pickle
import numpy as np
import scipy
import pandas as pd
from pathlib import Path
from scipy import stats
from scipy import sparse
from scipy import special
from scipy.integrate import quad
from scipy.optimize import curve_fit
from scipy.interpolate import splint
from scipy.interpolate import splrep
from . import utils
from . import wignerD as wd
from . import geometry as ncg
from . import rotations as ncr
from . import data as ncd
from . import linkers as ncl
from .linkers import convert
from MultiPoint import propagator
from MultiPoint import WLCgreen
from multiprocessing import Pool
from filelock import Timeout, FileLock
import sys
###{{{
# """Constants"""
tau_d_nm = ncg.dna_params['tau_d']/ncg.dna_params['lpb']
"""naked DNA twist density in rad/nm"""
default_lt = 100 / ncg.dna_params['lpb']
"""twist persistence length of DNA in bp: 100 nm, or ~301 bp"""
default_lp = 50 / ncg.dna_params['lpb']
"""bend persistence length of DNA in bp: 50 nm, or ~150 bp"""
Klin = np.linspace(0, 10**5, 20000)
Klog = np.logspace(-3, 5, 10000)
Kvals = np.unique(np.concatenate((Klin, Klog)))
#convert to little k -- units of inverse bp (this results in kmax = 332)
kvals = Kvals / (2*default_lp)
"""Good values to use for integrating our Green's functions. If the lp of the
bare chain under consideration changes, these should change."""
###}}}
###{{{
# R^2 and Kuhn length calculations
def build_B_matrices_for_R2(link, alpha, beta, gamma, lt=default_lt, lp=default_lp, tau_d=ncg.dna_params['tau_d'], lmax=2):
    r"""Helper function to construct propagator B matrices for a single linker length link with kink
rotation given by alpha, beta, gamma. Returns the following three matrices:
.. math::
B^{(n)} = \lim_{k \to 0}\frac{d^n B}{dk^n}
for n = 0, 1, and 2. where B is defined as
.. math::
B^{l_f j_f}_{l_0 j_0} = \sqrt{\frac{8\pi^2}{2l_f+1}} {\mathcal{D}}^{j_f j_0}_{l_f}(-\gamma, -\beta, -\alpha) g^{j_0}_{l_f l_0}
B^{(n)}[I_f, I_0] = M[I_f, I_0] * g^{(n)}[I_f, I_0]
    The g matrix is used for the linker propagator, and the M matrix represents the rotation due to the kink.
All matrices are super-indexed by B[If, I0] where :math:`I(l, j) = l^2 + l + j`. :math:`I` can take on :math:`l^2+2l+1` possible values.
Notes
-----
Andy adds 1 to the above formula for :math:`I` since his script is in Matlab, which is
one-indexed.
Parameters
----------
link : float
linker length in bp
lt : float
twist persistence length in bp
lp : float
DNA persistence length in bp
tau_d : float
twist density of naked DNA in rad/bp
lmax : int
maximum eigenvalue l for which to compute wigner D' (default lmax = 2)
Returns
-------
[B0, B1, B2] : (3,) list
list of 3 matrices, each of dimension :math:`l_{max}^2 + 2l_{max} + 1`
"""
# corresponds to If, I0 indices in Andy's notes: for every l, (2l+1) possible values of j
ntot = lmax**2 + 2*lmax + 1
# so matrix elements look like g[If,I0] where I0 = l0**2 + l0 + j0 and If = lf**2 + lf + jf
    # Note that for linker propagators (g matrices), jf always equals j0 (only perturbs l)
    # and for kink propagators (M matrices), lf always equals l0 (only perturbs j)
# NOTE: for python, need to subtract 1 from Andy's formulas for indexing to work
# g0 represents the 0th derivative of g with respect to k in the limit as k goes to 0
g0 = np.zeros((ntot, ntot), 'complex')
g1 = np.zeros_like(g0)
g2 = np.zeros_like(g0)
M = np.zeros_like(g0)
mywd = wd.wigner_d_vals()
# define useful lambda functions of l and j used to compute eigenvalues, matrix elements
I = lambda l, j: l**2 + l + j # indexing
al = lambda l, j: np.sqrt((l-j)*(l+j)/(4*l**2 - 1)) # ladder coefficients alpha
lam = lambda l, j: (l*(l+1))/(2*lp) + 0.5*((1/lt)-(1/lp))*j**2 - 1j*tau_d*j # eigenvalue of H0
# build g and M matrices by looping over l0 and j0
for l0 in range(lmax+1):
for j0 in range(-l0, l0+1):
# for this particular tuple (l0, j0), compute the relevant index in the g matrix:
I0 = I(l0, j0)
# Compute relevant values of lambda_lj and alpha_lj to construct g0, g1, g2
laml0 = lam(l0, j0)
laml0p1 = lam(l0+1, j0)
laml0m1 = lam(l0-1, j0)
laml0p2 = lam(l0+2, j0)
laml0m2 = lam(l0-2, j0)
all0 = al(l0, j0)
all0p1 = al(l0+1, j0)
# NOTE: this will produce nans for (l0, j0) = (2, 2) and (2, -2), but
# this quantity isn't used for those values of l0, j0 so no problem
if (l0 != 2 or abs(j0) != 2):
all0m1 = al(l0-1, j0)
all0p2 = al(l0+2, j0)
### Construct g0 matrix###
# answer for g0 says l = l0 due to delta function, so g0 is diagonal
g0[I0, I0] = np.exp(-laml0*link)
#### Construct g1 matrix###
# first consider case where l = l0-1
l = l0 - 1
If = I(l, j0)
# check out of bounds for l, ensure l does not exceed j0 (because j = j0)
if (l >= 0) and (l <= lmax) and (l >= np.abs(j0)):
g1[If, I0] = (1j*all0/(laml0m1 - laml0))*(np.exp(-laml0*link) - np.exp(-laml0m1*link))
# next consider the case where l = l0+1
l = l0 + 1
If = I(l, j0)
if (l >= 0) and (l <= lmax) and (l >= np.abs(j0)):
g1[If, I0] = (1j*all0p1/(laml0p1 - laml0))*(np.exp(-laml0*link) - np.exp(-laml0p1*link))
#### Construct g2 matrix###
# Case 1: l = l0 + 2
l = l0 + 2
If = I(l, j0)
# only valid case is when (l0, j0) = (0, 0)
if (l >= 0) and (l <= lmax) and (l >= np.abs(j0)):
g2[If, I0] = -2*all0p1*all0p2*(np.exp(-laml0*link)/((laml0p1 - laml0)*(laml0p2 - laml0)) +
np.exp(-laml0p1*link)/((laml0 - laml0p1)*(laml0p2 - laml0p1)) +
np.exp(-laml0p2*link)/((laml0 - laml0p2)*(laml0p1 - laml0p2)))
# Case 2: l = l0 --- diagonal entries of g2 matrix
l = l0
If = I(l, j0)
# Case 2A: terms with l0 + 1
if (l >= 0) and (l <= lmax) and (l >= np.abs(j0)):
g2[If, I0] = -2*all0p1**2*(link*np.exp(-laml0*link)/(laml0p1 - laml0) +
(np.exp(-laml0p1*link) - np.exp(-laml0*link))/(laml0 - laml0p1)**2)
# Case 2B: terms with l0 - 1
if (l >= 0) and (l <= lmax) and (l >= np.abs(j0)+1):
g2[If, I0] += -2*all0**2*(link*np.exp(-laml0*link)/(laml0m1 - laml0) +
(np.exp(-laml0m1*link) - np.exp(-laml0*link))/(laml0 - laml0m1)**2)
# Case 3: l = l0 - 2
l = l0 - 2
If = I(l, j0)
# only valid case is when (l0, j0) = (2, 0)
if (l >= 0) and (l <= lmax) and (l >= np.abs(j0)):
g2[If, I0] = -2*all0*all0m1*(np.exp(-laml0*link)/((laml0m1 - laml0)*(laml0m2 - laml0)) +
np.exp(-laml0m1*link)/((laml0 - laml0m1)*(laml0m2 - laml0m1)) +
np.exp(-laml0m2*link)/((laml0 - laml0m2)*(laml0m1 - laml0m2)))
# Next build M matrix
for jf in range(-l0, l0+1):
If = I(l0, jf)
M[If, I0] = mywd.get(l0, jf, j0, -gamma, -beta, -alpha) / mywd.normalize(l0, jf, j0)
B0 = M@g0
B1 = M@g1
B2 = M@g2
return [B0, B1, B2]
def r2wlc(ldna, lp=default_lp):
"""Analytical formula for R^2 of WLC as a function of length of chain."""
return 2*(lp*ldna - lp**2 + lp**2*np.exp(-ldna/lp))
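# Quick sanity check on r2wlc (illustrative, not part of the original module): for
# ldna >> lp the exponential term vanishes and R^2 -> 2*lp*ldna, so the apparent
# Kuhn length r2wlc(ldna)/ldna approaches 2*lp, twice the bend persistence length:
#
#     assert np.isclose(r2wlc(1e6) / 1e6, 2 * default_lp, rtol=1e-3)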
def R2_kinked_WLC_no_translation(links, figname='fig', plotfig=False,
lt=default_lt, lp=default_lp, kd_unwrap=None, w_ins=ncg.default_w_in,
w_outs=ncg.default_w_out, tau_d=ncg.dna_params['tau_d'],
tau_n=ncg.dna_params['tau_n'], lmax=2, helix_params=ncg.helix_params_best,
unwraps=None, random_phi=False):
"""Calculate the mean squared end-to-end distance, or :math:`\langle{R^2}\rangle` of a kinked WLC with a given set of linkers and unwrapping amounts.
Parameters
----------
links : (L,) array-like
linker length in bp
figname: string
name of figure to be saved as pdf
plotfig: bool (default = False)
whether or not to plot R^2 vs. Rmax
w_ins : float or (L+1,) array_like
amount of DNA wrapped on entry side of central dyad base in bp
w_outs : float or (L+1,) array_like
amount of DNA wrapped on exit side of central dyad base in bp
tau_n : float
twist density of nucleosome-bound DNA in rad/bp
tau_d : float
twist density of naked DNA in rad/bp
lt : float
twist persistence length in bp
lp : float
DNA persistence length in bp
lmax : int
maximum eigenvalue l for which to compute wigner D' (default lmax = 2)
Returns
-------
r2 : (L,) array-like
mean square end-to-end distance of kinked chain as a function of chain length in nm^2
    ldna : (L,) array-like
        cumulative length of the kinked chain in nm, corresponding to each entry of r2
    kuhn : float
        Kuhn length as defined by :math:`\langle{R^2}\rangle / R_{max}` in long chain limit
    w_ins : (L+1,) array-like
        resolved entry-side wrapping amount for each nucleosome, in bp
    w_outs : (L+1,) array-like
        resolved exit-side wrapping amount for each nucleosome, in bp
    """
b = helix_params['b']
num_linkers = len(links)
num_nucleosomes = num_linkers + 1
# resolve kd_unwrap
if kd_unwrap is not None:
sites_unbound_left = scipy.stats.binom(7, kd_unwrap).rvs(num_nucleosomes)
sites_unbound_right = scipy.stats.binom(7, kd_unwrap).rvs(num_nucleosomes)
w_ins, w_outs = convert.resolve_wrapping_params(sites_unbound_left + sites_unbound_right,
w_ins, w_outs, num_nucleosomes, unwrap_is='sites')
else:
w_ins, w_outs = convert.resolve_wrapping_params(unwraps, w_ins, w_outs, num_nucleosomes)
# calculate unwrapping amounts based on w_ins and w_outs
mu_ins = (b - 1)/2 - w_ins
mu_outs = (b - 1)/2 - w_outs
# only need one g matrix per linker length, no need to recalculate each time
# perhaps we tabulate all g's and all M's and then mix and match to grow chain?
# for now, build dictionary of (link, wrapping) -> [B0, B1, B2]
bmats = {}
    # B0-2curr will keep track of the B matrices as they propagate along the chain
# initialize based on very first linker in chain
link = mu_outs[0] + links[0] + mu_ins[1]
wrap = w_outs[0] + w_ins[1]
key = (link, wrap)
R = ncg.OmegaE2E(wrap, tau_n=tau_n)
# recall that our OmegaE2E matrix is designed to be applied from the right
# so in order to add an arbitrary twist *before* the action of the
# nucleosome (as if from changing the linker length) then we should apply
# Rz to the left of R so that when the combined R is applied on the *right*
# then the extra Rz is applied "first".
# in this code, we use "(-gamma, -beta, -alpha)" from the left as a proxy
# from right multiplication in build_B_matrices_for_R2
if random_phi:
R = ncr.Rz(2*np.pi*np.random.rand()) @ R
alpha, beta, gamma = ncr.zyz_from_matrix(R)
bmats[key] = build_B_matrices_for_R2(link, alpha, beta, gamma, lt, lp, tau_d, lmax)
B0curr, B1curr, B2curr = bmats[key]
# calculate R^2 as a function of number of nucleosomes (r2[0] is 0 nucleosomes)
r2 = np.zeros((num_linkers,))
lengthDNA = np.zeros_like(r2)
r2[0] = 3*np.real(B2curr[0,0]/B0curr[0, 0])
lengthDNA[0] = link
# recursively calculate Nth propagator using B matrices
for i in range(1, num_linkers):
# add up the effective linker lengths including unwrapping
link = mu_outs[i] + links[i] + mu_ins[i+1]
# w_ins[i+1] because the ith linker is between i, and i+1 nucs
wrap = w_outs[i] + w_ins[i+1]
key = (link, wrap)
# update dictionary for this linker and wrapping amount, if necessary
if key not in bmats:
R = ncg.OmegaE2E(wrap, tau_n=tau_n)
if random_phi:
R = ncr.Rz(2*np.pi*np.random.rand()) @ R
alpha, beta, gamma = ncr.zyz_from_matrix(R)
bmats[key] = build_B_matrices_for_R2(link, alpha, beta, gamma, lt, lp, tau_d, lmax)
B0next, B1next, B2next = bmats[key]
        # propagate B0curr, B1curr, B2curr matrices by a linker
B0temp = B0next@B0curr
B1temp = B1next@B0curr + B0next@B1curr
B2temp = B2next@B0curr + 2*B1next@B1curr + B0next@B2curr
B0curr = B0temp
B1curr = B1temp
B2curr = B2temp
# Rz^2 is B2[0,0], so multiply by 3 to get R^2
# divide by B0[0,0] to ensure normalization is OK (shouldn't matter, since B0[0,0] is 1)
r2[i] = 3*np.real(B2curr[0,0]/B0curr[0, 0])
lengthDNA[i] = lengthDNA[i-1] + link
# take absolute value of R2, convert to nm^2
r2 = np.abs(r2) * (ncg.dna_params['lpb'])**2
lengthDNA = lengthDNA * ncg.dna_params['lpb']
# Find scaling of R2 with length of chain at larger length scales to calculate Kuhn length
try:
min_i = np.round(len(lengthDNA)*5/6).astype(int)
kuhn = stats.linregress(lengthDNA[min_i:], r2[min_i:])[0]
except:
kuhn = np.nan
# Plot r2 vs. length of chain (in bp)
if plotfig:
raise NotImplementedError('This module no longer plots.')
# plt.figure()
# plt.loglog(lengthDNA, np.abs(r2))
# plt.xlabel('Length of chain in basepairs')
# plt.ylabel(r'$<R^2>$')
# plt.savefig(figname)
return r2, lengthDNA, kuhn, w_ins, w_outs
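# Illustrative usage (a sketch, not from the original source): R^2, cumulative chain
# length, and apparent Kuhn length for a homogenous chain of 101 nucleosomes joined
# by 100 identical 36 bp linkers with no unwrapping:
#
#     links = np.tile(36, 100)
#     r2, ldna, kuhn, w_ins, w_outs = R2_kinked_WLC_no_translation(links, unwraps=0)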
def tabulate_kuhn_lengths():
"""Calculate and save Kuhn lengths for chains with fixed linker lengths 1-250 bp
and unwrapping amounts 0-146 bp. Saves output in 'kuhns_1to250links_0to146unwraps.npy' file.
Returns
-------
kuhns : (250, 147) array-like
Kuhn length in nm for a nucleosome chain with fixed linkers and unwrapping amount
"""
links, unwraps = np.mgrid[1:251, 0:147]
kuhns = np.zeros_like(links).astype(float)
# also save dictionary (link, wrap) -> [B0, B1, B2] since we have to compute all tuples for this code anyway
Bmats = {}
for i in range(links.shape[0]):
linkers = np.tile(links[i, 0], 10000)
for j in range(unwraps.shape[1]):
r2, ldna, kuhn, w_ins, w_outs = R2_kinked_WLC_no_translation(linkers, unwraps=unwraps[i, j])
kuhns[i, j] = kuhn
# append dictionary from previous calculation to the global dictionary
Bmats = {**Bmats, **bmats}
pickle.dump(kuhns, open('kuhns_1to250links_0to146unwraps.p', 'wb'))
print(f'Calculated, saved Kuhn length for link={links[i,0]}bp')
pickle.dump(Bmats, open('B0B1B2matrices_key_wrap.p', 'wb'))
# also save in .npy format just in case
np.save('kuhns_1to250links_0to146unwraps', kuhns)
return kuhns
def tabulate_kuhn_lengths_straight_linkers_genomic_distance():
"""Calculate Kuhn length in nm^2/bp for limiting case where all linkers
are straight (no kinks) but the length of the chain still includes the 146
bp bound to each nucleosome (plus the bare linkers). All this does is
rescale the answer we get for fully unwrapped linkers (which also
corresponds to no kinks) but reduces the chain length to fit this problem.
"""
links, unwraps = np.mgrid[1:251, 0:147]
kuhns = np.load('csvs/kuhns_1to250links_0to146unwraps.npy')
b = ncg.helix_params_best['b']
#extract kuhn length for straight linkers (fully unwrapped), this is just
#100 nm for all 250 linkers
kuhns_straight_linkers = kuhns[:, 146]
kuhns_corrected = np.zeros_like(kuhns)
for i, link in enumerate(links[:,0]):
for j, unwrap in enumerate(unwraps[0, :]):
Lunbound = link + unwrap
Lbound = b - unwrap - 1
scaling_factor = Lunbound / (Lunbound + Lbound)
#rescale 100nm based on unwrapping and linker length
kuhns_corrected[i, j] = kuhns_straight_linkers[i] * scaling_factor * ncg.dna_params['lpb']
#now in units of nm^2 / bp
return kuhns_corrected
def plot_kuhn_lengths_for_different_unwrapping():
    """Plots kuhn length in nm^2/bp as a function of linker length, with 2
    different unwrapping levels: 0 and 20 bp. Also plots the upper limit of the
    Kuhn length (straight linkers)."""
    # matplotlib is not imported at module level, so import it locally for this
    # plotting helper.
    import matplotlib.pyplot as plt
    kuhns_nm_squared_per_bp = tabulate_kuhn_lengths_in_nm2_per_bp()
kuhns_straight_linkers = (
tabulate_kuhn_lengths_straight_linkers_genomic_distance())
#first plot just the kuhn lengths for 0, 20 bp unwrapping
linkers = np.arange(1, 251)
kuhns_no_unwrapping = plt.plot(linkers, kuhns_nm_squared_per_bp[:, 0],
'-o', color='g')
kuhns_no_unwrapping_straight = plt.plot(linkers,
kuhns_straight_linkers[:,0], '--',
color='g')
kuhns_20bp_unwrapping = plt.plot(linkers, kuhns_nm_squared_per_bp[:, 20],
'-o', markersize=4, color='r')
kuhns_20bp_unwrapping_straight = plt.plot(linkers,
kuhns_straight_linkers[:,20], '--',
color='r')
plt.legend(['No unwrapping', 'No unwrapping straight linkers',
'20bp unwrapping', '20bp unwrapping straight linkers'])
plt.xlabel('Linker length (bp)')
plt.ylabel('Kuhn length (${nm}^2/bp$)')
plt.title('Kuhn length for different unwrapping amounts, linkers 1-250 bp')
def tabulate_kuhn_lengths_in_nm2_per_bp():
"""Calculate Kuhn lengths for chains with fixed linker lengths 1-250 bp
and unwrapping amounts 0-146 bp, where :math:`b = \langle{R^2}\rangle/R_{max}`
and :math:`R_{max}` is total chain length in genomic distance (bp), including
bp bound to nucleosome.
Returns
-------
kuhns : (250, 147) array-like
Kuhn length in nm^2/bp for each linker length and unwrapping amount
"""
links, unwraps = np.mgrid[1:251, 0:147]
kuhns = np.load('csvs/kuhns_1to250links_0to146unwraps.npy')
b = ncg.helix_params_best['b']
for i, link in enumerate(links[:, 0]):
for j, unwrap in enumerate(unwraps[0, :]):
Lunbound = link + unwrap
Lbound = b - unwrap - 1
scaling_factor = Lunbound / (Lunbound + Lbound)
kuhns[i, j] *= scaling_factor * ncg.dna_params['lpb']
### ALTERNATIVE: get rid of tabulate_kuhn_lengths_in_bp() function and just do unit conversion:
#genomic_bp_per_linker_nm = (Lunbound + Lbound) / (Lunbound * ncg.dna_params['lpb'])
# rescale kuhn length and convert Rmax back to bp
#kuhns[i, j] *= genomic_bp_per_linker_nm
# now in units of nm^2 / bp
return kuhns
def tabulate_kuhn_lengths_in_genomic_distance():
"""Calculate Kuhn lengths for chains with fixed linker lengths 1-250 bp
and unwrapping amounts 0-146bp, where :math:`b = (b in nm)^2 / (b in nm^2/bp)`.
    In other words, find the number of bp in a Kuhn length (including wrapped
base pairs)."""
links, unwraps = np.mgrid[1:251, 0:147]
kuhns = np.load('csvs/kuhns_1to250links_0to146unwraps.npy')
b = ncg.helix_params_best['b']
for i, link in enumerate(links[:, 0]):
for j, unwrap in enumerate(unwraps[0, :]):
Lunbound = link + unwrap
Lbound = b - unwrap - 1
genomic_bp_per_linker_nm = (Lunbound + Lbound) / (Lunbound * ncg.dna_params['lpb'])
# rescale kuhn length and convert Rmax back to bp
kuhns[i, j] *= genomic_bp_per_linker_nm
# now in units of bp
return kuhns
def tabulate_kuhn_lengths_along_screw_axis():
"""Rescale kuhn length by (linker length nm / rise per linker in nm) so that
length of polymer is measured along helical axis as opposed to along chain itself.
Currently it rescales kuhn length by the purely geometrical rise/bp for
that specific homogenous chain. Maybe in the future, we could find a better
    number for the "rise" that better accounts for the fluctuating chain's real
    effective "screw" axis."""
links, unwraps = np.mgrid[10:200, 0:147]
kuhns = np.load('csvs/kuhns_1to250links_0to146unwraps.npy')
link_ix, unwrap_ix, rise, angle, radius = ncg.tabulate_rise()
b = ncg.helix_params_best['b']
for i, link in enumerate(links[:, 0]):
for j, unwrap in enumerate(unwraps[0, :]):
Lunbound = link + unwrap
scaling_factor = (Lunbound * ncg.dna_params['lpb']) / rise[i, j]
kuhns[i+9, j] *= scaling_factor
return kuhns
def heterogenous_chains_kuhn_lengths(links, unwraps=0, numiter=10, **kwargs):
"""Calculate Kuhn length for a heterogenous chain that samples uniformly
from the linker lengths in 'links'. Performs calculation 'numiter' times,
returns kuhn lengths from each calculation. Also compares results
to harmonic average of corresponding homogenous chains. Use to test
harmonic averaging rule."""
#save a kuhn length for each iteration
kuhns = np.zeros(numiter)
r2 = np.zeros((numiter, 7500)) #grow chain to 7500 monomers, should be plenty
ldna = np.zeros_like(r2)
for i in range(numiter):
links = np.random.choice(links, 7500)
r2d, ldnad, kuhnsd, w_ins, w_outs = R2_kinked_WLC_no_translation(links, plotfig=False, unwraps=unwraps)
r2[i, :] = r2d
ldna[i, :] = ldnad
kuhns[i] = kuhnsd
print(f'Mean Kuhn length of {numiter} random chains: {np.mean(kuhns):.2f}nm')
#calculate harmonic mean of kuhn lengths of corresponding fixed linker chains
kuhns_1to250links_0to146unwraps = np.load('csvs/kuhns_1to250links_0to146unwraps.npy')
harmonic_avg = np.mean(1/kuhns_1to250links_0to146unwraps[links-1, unwraps])**(-1)
print(f'Harmonic mean kuhn length of fixed linker chains: {harmonic_avg:.2f}nm')
arithmetic_avg = np.mean(kuhns_1to250links_0to146unwraps[links-1, unwraps])
print(f'Mean kuhn length of fixed linker chains: {arithmetic_avg:.2f}nm')
return kuhns
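# Usage sketch (illustrative pool of linker lengths; assumes the tabulated Kuhn csv exists):
# kuhns = heterogenous_chains_kuhn_lengths(np.arange(35, 45), unwraps=0, numiter=5)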
def harmonic_avg_exponential_kuhn_lengths(kuhns, links, mu):
"""Calculate harmonic averaged kuhn length with exponential weights
determined by mean mu. Derivation of formula in Deepti's notes."""
sum = (1/kuhns)@np.exp(-mu*links)
prefactor = np.exp(mu)*(1 - np.exp(-mu))/(1 - np.exp(-mu*max(links)))
kuhn_avg_inverse = sum*prefactor
return (1/kuhn_avg_inverse)
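# Usage sketch for the exponential harmonic average (illustrative values; note the
# exponent is exp(-mu*link), so mu acts as a decay rate per bp of linker here):
# links = np.arange(1, 251)
# kuhns = np.load('csvs/kuhns_1to250links_0to146unwraps.npy')[:, 0]  # 0bp-unwrapping column
# b_avg = harmonic_avg_exponential_kuhn_lengths(kuhns, links, mu=1/35)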
def tabulate_r2_heterogenous_fluctuating_chains_by_variance(num_chains, chain_length, sigmas, mu=35, pool_size=None, **kwargs):
"""Tabulate R^2 for fluctuating heterogenous chains with increasing
variance. "Box" variance model. Pass unwrapping parameters through kwargs."""
n_sig = len(sigmas)
links = np.zeros((n_sig, num_chains, chain_length-1))
#For now, assume the same unwrapping amounts for all chains
#w_ins, w_outs = convert.resolve_wrapping_params(unwraps, w_ins, w_outs, chain_length)
for i in range(n_sig):
links[i,:,:] = ncl.fake_linkers_increasing_variance(mu, sigmas[i], size=(num_chains,chain_length-1), type='box')
rmax = np.zeros((n_sig, num_chains, chain_length))
r2 = rmax.copy()
variance = rmax.copy()
chain_id = rmax.copy()
kuhns = rmax.copy()
all_w_ins = rmax.copy()
all_w_outs = rmax.copy()
def given_ij(ij):
i, j = ij
#note r2, ldna returned by R2_kinked_WLC() is the same shape as links. add a 0 to the beginning
#to match bruno's code; Output is in nm
R2, Rmax, kuhn, w_ins, w_outs = R2_kinked_WLC_no_translation(links[i,j,:].flatten(), plotfig=False, **kwargs)
r2[i,j] = np.concatenate(([0], R2))
rmax[i,j] = np.concatenate(([0], Rmax))
variance[i] = sigmas[i]
kuhns[i,j] = kuhn
chain_id[i,j] = j
all_w_ins[i,j] = w_ins
all_w_outs[i,j] = w_outs
if pool_size is None:
for i in range(n_sig):
for j in range(num_chains):
given_ij((i,j))
    else:
        # NOTE: Pool workers run in separate processes; their writes to the arrays
        # above are not visible in the parent, and local closures may not pickle
        with Pool(processes=pool_size) as p:
            p.map(given_ij, [(i,j) for i in range(n_sig) for j in range(num_chains)])
df = pd.DataFrame(np.stack([
r2.flatten(), rmax.flatten(), variance.flatten(), chain_id.flatten(), kuhns.flatten(),
all_w_ins.flatten(), all_w_outs.flatten()], axis=1),
columns=['r2', 'rmax', 'variance', 'chain_id', 'kuhn', 'w_ins', 'w_outs'])
return df
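# Usage sketch (small numbers for illustration; unwrapping passed through kwargs):
# df = tabulate_r2_heterogenous_fluctuating_chains_by_variance(
#     num_chains=5, chain_length=100, sigmas=np.arange(0, 11), mu=35, unwraps=0)
# plot_heterogenous_chain_r2(df, mu=35)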
def tabulate_r2_heterogenous_fluctuating_chains_exponential(num_chains, chain_length, mu=35, pool_size=None, **kwargs):
    """Tabulate R^2 for fluctuating heterogenous chains with exponentially distributed
    linker lengths of mean mu. Pass unwrapping parameters through kwargs."""
    #For now, assume the same unwrapping amounts for all chains
    #w_ins, w_outs = convert.resolve_wrapping_params(unwraps, w_ins, w_outs, chain_length)
    links = ncl.independent_linker_lengths(mu, size=(num_chains,chain_length-1))
rmax = np.zeros((num_chains, chain_length))
r2 = rmax.copy()
chain_id = rmax.copy()
kuhns = rmax.copy()
all_w_ins = rmax.copy()
all_w_outs = rmax.copy()
def given_chain_i(i):
#note r2, ldna returned by R2_kinked_WLC() is the same shape as links. add a 0 to the beginning
#to match bruno's code; Output is in nm
R2, Rmax, kuhn, w_ins, w_outs = R2_kinked_WLC_no_translation(links[i,:].flatten(), plotfig=False, **kwargs)
r2[i] = np.concatenate(([0], R2))
rmax[i] = np.concatenate(([0], Rmax))
kuhns[i] = kuhn
chain_id[i] = i
all_w_ins[i] = w_ins
all_w_outs[i] = w_outs
if pool_size is None:
for i in range(num_chains):
given_chain_i(i)
else:
        raise NotImplementedError('Parallel execution is not supported: the worker closure mutates local arrays, which does not work across processes.')
# with Pool(processes=pool_size) as p:
# p.map(given_ij, [(i,j) for i in range(n_sig) for j in range(num_chains)])
df = pd.DataFrame(np.stack([
r2.flatten(), rmax.flatten(), chain_id.flatten(), kuhns.flatten(),
all_w_ins.flatten(), all_w_outs.flatten()], axis=1),
columns=['r2', 'rmax', 'chain_id', 'kuhn', 'w_ins', 'w_outs'])
df['mu'] = mu
return df
def tabulate_r2_heterogenous_fluctuating_chains_homogenous(num_chains, chain_length, mu=35, pool_size=None, **kwargs):
"""Tabulate R^2 for fluctuating heterogenous chains with increasing variance. Pass unwrapping parameters
through kwargs."""
links = np.zeros((num_chains, chain_length-1))
links = mu*np.ones((num_chains,chain_length-1))
rmax = np.zeros((num_chains, chain_length))
r2 = rmax.copy()
chain_id = rmax.copy()
kuhns = rmax.copy()
def given_chain_i(i):
#note r2, ldna returned by R2_kinked_WLC() is the same shape as links. add a 0 to the beginning
#to match bruno's code; Output is in nm
R2, Rmax, kuhn, w_ins, w_outs = R2_kinked_WLC_no_translation(links[i,:].flatten(), plotfig=False, **kwargs)
r2[i] = np.concatenate(([0], R2))
rmax[i] = np.concatenate(([0], Rmax))
kuhns[i] = kuhn
chain_id[i] = i
if pool_size is None:
for i in range(num_chains):
given_chain_i(i)
else:
        raise NotImplementedError('Parallel execution is not supported: the worker closure mutates local arrays, which does not work across processes.')
# with Pool(processes=pool_size) as p:
# p.map(given_ij, [(i,j) for i in range(n_sig) for j in range(num_chains)])
df = pd.DataFrame(np.stack([
r2.flatten(), rmax.flatten(), chain_id.flatten(), kuhns.flatten()
], axis=1), columns=['r2', 'rmax', 'chain_id', 'kuhn'])
df['mu'] = mu
return df
def plot_heterogenous_chain_r2(df, mu, ax=None, running_avg=True, **kwargs):
"""Read in tabulated R^2 of heterogenous chains
generated by ncg.tabulate_r2_heterogenous_chains_by_variance(), and plot
:math:`\langle{R^2}\rangle` vs. :math:`R_{max}`.
Columns = ['r2', 'rmax', 'variance', 'chain_id']
"""
if ax is None:
fig, ax = plt.subplots()
df2 = df.sort_values('rmax')
    window_size = 100 #window size for rolling average of rigid rod heterogenous chains
    def newdf(df):
        return df.rolling(window_size).mean()
if running_avg:
df2 = df2.groupby('variance').apply(newdf)
sns.lineplot(data=df2, x='rmax', y='r2', hue='variance', ci=None, ax=ax, **kwargs)
plt.yscale('log')
plt.xscale('log')
plt.xlabel(r'$R_{max} (nm)$')
plt.ylabel(r'$\langle{R^2}\rangle (nm^2)$')
    plt.title(rf'$\langle R^2 \rangle$ for heterogenous chain, $\mu={mu}$, $\sigma=0-10$bp')
def plot_r2_fluctuating_vs_geometry(dff, dfg, mu, ax=None, running_avg=True, **kwargs):
"""Plot :math:`\langle{R^2}\rangle` vs. :math:`R_{max}` for fluctuating
chain vs. geometrical chain.
Columns = ['r2', 'rmax', 'variance', 'chain_id']
"""
if ax is None:
fig, ax = plt.subplots()
#First plot geometrical case 'dfg'
#dfg['rmax'] = dfg['rmax']*ncg.dna_params['lpb'] #convert rmax from bp to nm
df2g = dfg.sort_values('rmax')
    window_size = 100 #window size for rolling average of rigid rod heterogenous chains
    def newdf(df):
        return df.rolling(window_size, min_periods=1).mean()
if running_avg:
df2g = df2g.groupby('variance').apply(newdf)
num_colors = len(np.unique(df2g.variance))
violets = sns.cubehelix_palette(num_colors)
#df2g.plot(x='rmax', y='r2', label='geometrical', colors=colors)
sns.lineplot(data=df2g, x='rmax', y='r2', hue='variance', legend='full', palette=violets, ci=None, ax=ax)
#Next plot fluctuating case 'dff' --- no running average
df2f = dff.sort_values('rmax')
num_colors = len(np.unique(df2f.variance))
greens = sns.cubehelix_palette(num_colors, start=2)
#df2f.plot(x='rmax', y='r2', label='fluctuations', colors=colors)
if running_avg:
df2f = df2f.groupby('variance').apply(newdf)
sns.lineplot(data=df2f, x='rmax', y='r2', hue='variance', legend='full', palette=greens, ci=None, ax=ax)
plt.yscale('log')
plt.xscale('log')
plt.xlabel(r'$R_{max} (nm)$')
plt.ylabel(r'$\langle{R^2}\rangle (nm^2)$')
#plt.legend()
def get_kuhn(df, thresh, rmax_col='rmax', r2_col='r2'):
    """Take a df with r2/rmax columns and a threshold (burn-in length, in units
    of the rmax column) beyond which to fit, then do a linear fit to extract the
    Kuhn length."""
if not np.any(df[rmax_col] > thresh):
ks = scipy.stats.mstats_basic.LinregressResult(np.nan, np.nan, np.nan,
np.nan, np.nan)
else:
ks = scipy.stats.linregress(df.loc[df[rmax_col] > thresh, rmax_col],
df.loc[df[rmax_col] > thresh, r2_col])
return ks
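# Sketch: extract a Kuhn length from tabulated r2/rmax data (the df and the 5000 nm
# threshold are illustrative; in the long-chain limit <R^2> ~ b*Rmax, so the slope is b):
# fit = get_kuhn(df, thresh=5000)
# b = fit.slope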
def get_kuhns_grouped(df, thresh, groups, rmax_col='rmax', r2_col='r2'):
ks = df.groupby(groups)[[rmax_col, r2_col]].apply(get_kuhn,
thresh=thresh, rmax_col=rmax_col, r2_col=r2_col)
ks = ks.apply(pd.Series)
ks.columns = ['slope', 'intercept', 'rvalue', 'pvalue', 'stderr']
ks['b'] = ks['slope']
return ks
def aggregate_existing_kuhns(glob='*.csv', thresh=5000):
"""Aggregates all Kuhn lengths that can be calculated from the
r2-tabulation script."""
kuhns = []
    r2_format_re = re.compile(r'r2-(fluct|geom)-(box|exp)-mu_([0-9]+)-sigma_([0-9]+)-kd_unwraps_([0-9]*\.?[0-9]*(?:e-[0-9]+)?)(-.*)?\.csv')
for path in Path('./csvs/r2').glob(glob):
match = r2_format_re.search(path.name)
if match is None:
print("File name cannot be parsed: " + str(path))
continue
        try:
            df = pd.read_csv(path)
        except Exception:
            print("Pandas was unable to parse file: " + str(path))
            continue
if len(df) == 0:
print("File was empty: " + str(path))
continue
sim_type, variance_type, mu, sigma, kd_unwrap, desc = match.groups()
df['mu'] = mu
df['sigma'] = sigma
df['kd_unwrap'] = kd_unwrap
ks = get_kuhns_grouped(df, thresh=thresh, groups=['mu', 'sigma', 'kd_unwrap'])
ks = ks.reset_index()
ks['sim_type'] = sim_type
ks['variance_type'] = variance_type
kuhns.append(ks)
all_ks = [ks.set_index(['variance_type', 'sim_type', 'mu', 'sigma', 'kd_unwrap']) for ks in kuhns]
all_ks = pd.concat(all_ks)
return all_ks
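# Example of a file name aggregate_existing_kuhns() expects (hypothetical):
# 'r2-fluct-box-mu_35-sigma_10-kd_unwraps_0.04-trial1.csv'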
def calculate_kuhn_length_from_fluctuating_r2(df, mu, chain_length, **kwargs):
"""Calculate :math:`b=\langle{R^2}\rangle/R_{max}` in the long chain
limit (roughly 5000 monomers down the chain) by averaging kuhn lengths
from each individual chain."""
df2 = df.sort_values('rmax')
kuhns = []
for var, vals in df2.groupby(['variance']):
#take the average kuhn length of each individual chain in this variance group
kuhns.append(np.mean(vals['kuhn']))
return np.array(kuhns)
def calculate_kuhn_length_from_r2(df, mu, chain_length, **kwargs):
"""Calculate :math:`b=\langle{R^2}\rangle/R_{max}` in the long chain
limit (roughly 5000 monomers down the chain). Sorts df, finds threshold corresponding
roughly to 5000th monomer and does linear fit beyond this threshold."""
df2 = df.sort_values('rmax')
kuhns = []
for var, vals in df2.groupby(['variance']):
sample_links = ncl.fake_linkers_increasing_variance(mu, var, size=(chain_length-1,), type='box')
sample_rmax = convert.Rmax_from_links_unwraps(sample_links, **kwargs)
#Assume long chain limit is 5000 monomers down a random chain sampled from this distribution.
min_rmax_for_kuhn = sample_rmax[5000] * ncg.dna_params['lpb']
rmax_long = vals.rmax[vals['rmax']>=min_rmax_for_kuhn]
r2_long = vals.r2[vals['rmax']>=min_rmax_for_kuhn]
kuhns.append(stats.linregress(rmax_long, r2_long)[0])
return np.array(kuhns)
def plot_kuhn_length_vs_variance(kuhnsf, kuhnsg, mu, sigmas=np.arange(0, 11), ax=None):
if ax is None:
fig, ax = plt.subplots()
ax.plot(sigmas, kuhnsg, '--o', label='T=0')
ax.plot(sigmas, kuhnsf, '-o', label='Fluctuations')
plt.xlabel(r'Variance in $\phi$')
plt.ylabel('Kuhn Length (nm)')
plt.title(f'Rigid Rod vs. Fluctuating Chain, mu={mu}bp')
plt.ylim([0, 200])
plt.legend()
def visualize_kuhn_lengths(links, unwraps, kuhns, mfig=None, **kwargs):
"""Plot 3D surface of Kuhn length as a function of constant linker length, unwrapping amount."""
if mfig is None:
mfig = mlab.figure()
mlab.surf(links, unwraps, kuhns)
mlab.axes()
mlab.xlabel('Link (bp)')
mlab.ylabel('Unwrap (bp)')
mlab.zlabel('Kuhn length (nm)')
###}}}
###{{{
# """Propogators and Greens Functions"""
def tabulate_M_kinks(unwraps=None, l0max=None, **kwargs):
"""Return a lookup table of M matrices for a given (alpha, beta, gamma).
One matrix for each possible level of unwrapping.
Returns
-------
Mdict : dictionary
key = unwrapping amount in bp, values = 441 by 441 matrix
Saves
-----
Mkink_matrices.csv : file from pd.DataFrame
indexed by unwrapping amount --- each matrix is 441 by 441
Mdict_from_unwraps.p : pickle dumped dictionary
to load, Mdict = pickle.load(open('Mdict_from_unwraps.p', 'rb'))
"""
#retrieve dictionary of 441 by 441 M kink matrices if it exists
mdicts_file = ncd.data_dir / Path('Mdict_from_unwraps.p')
if l0max is None and unwraps is None and mdicts_file.exists():
return pickle.load(open(mdicts_file, 'rb'))
if l0max is None:
l0max = 20
if unwraps is None:
unwraps = np.arange(0, 147)
tau_n = kwargs['tau_n'] if 'tau_n' in kwargs else ncg.dna_params['tau_n']
# super indexing
ntot = l0max**2 + 2*l0max + 1
I = lambda l, j: l**2 + l + j
# create dictionary of M's from unwrapping amount to super-indexed M matrix
Mdict = {}
mywd = wd.wigner_d_vals()
for i, u in enumerate(unwraps):
w_in, w_out = convert.resolve_unwrap(u)
R = ncg.OmegaE2E(w_in+w_out, tau_n=tau_n)
alpha, beta, gamma = ncr.zyz_from_matrix(R)
M = np.zeros((ntot, ntot), 'complex')
# Construct super-indexed M matrix for kink defined by this unwrapping amount
for l0 in range(l0max+1):
for j0 in range(-l0, l0+1):
I0 = I(l0, j0)
for jf in range(-l0, l0+1):
If = I(l0, jf)
M[If, I0] = mywd.get(l0, jf, j0, -gamma, -beta, -alpha) / mywd.normalize(l0, jf, j0)
# Mdict[u] = pd.DataFrame(M)
Mdict[u] = M
pickle.dump(Mdict, open(mdicts_file, 'wb'))
return Mdict
Mdict = tabulate_M_kinks()
"""Pickled files for Greens function calculations"""
###{{{
# """Linker propogator g calculations"""
def build_A_matrix(j, k, lmax, **kwargs):
"""Build sparse A matrix in the ode for g propogators: dg/dL = Ag.
Dimension of matrix is (lmax + 1) by (lmax + 1).
Helper function for gprop_k_given_link."""
# make sure A is at least 20 by 20 in size (otherwise matrix is trivial)
if (lmax - abs(j)) < 20:
raise ValueError('lmax must be at least abs(j) + 20')
lp = kwargs['lp'] if 'lp' in kwargs else default_lp
lt = kwargs['lt'] if 'lt' in kwargs else default_lt
tau_d = kwargs['tau_d'] if 'tau_d' in kwargs else ncg.dna_params['tau_d']
al = lambda l, j: np.sqrt((l-j)*(l+j)/(4*l**2 - 1)) # ladder coefficients alpha
lam = lambda l, j: (l*(l+1))/(2*lp) + 0.5*((1/lt)-(1/lp))*j**2 - 1j*tau_d*j # eigenvalue of H0
# construct diagonals of tridiagonal matrix
lowerdiag = [1j*k*al(l, j) for l in range(abs(j)+1, lmax+1)]
maindiag = [-lam(l, j) for l in range(abs(j), lmax+1)]
upperdiag = lowerdiag
# was getting sparse efficiency warnings from scipy unless I specified 'csc' format
A = sparse.diags([lowerdiag, maindiag, upperdiag], [-1, 0, 1], format='csc')
return A
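# Sanity sketch (assumption: default lp/lt/tau_d module constants are defined):
# for j=0 and lmax=25 the matrix couples l = 0..25, so A is 26x26 and tridiagonal
# A = build_A_matrix(j=0, k=0.1, lmax=25)
# assert A.shape == (26, 26)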
def gprop_k_given_link(k, link, unwrap, l0max=20, lmax=None, **kwargs):
"""Solve dg/dL = Ag, where A is a tridiagonal matrix.
Notes
-----
This function takes roughly 3s to run on a new (k, link, unwrap), and ~183ms to run on
a pre-computed tuple.
"""
    # set some arbitrarily large limit on lf -- so A is at most a 50x50 matrix, at least a 20x20 matrix
if lmax is None:
lmax = l0max + 30
#Creating this path object + checking if it's a file = 14.3 us
gfile = Path(f'csvs/gprops/{unwrap:.0f}unwraps/{link:.0f}link/gprop_k{k}_{link:.0f}bplinks.csv')
    try:
        g = parse_gprop_csv_file(k, link, unwrap, l0max)
        return g
    except Exception:
        if gfile.is_file():
            gfile.unlink()
        print(f"Failed to parse csv file for k = {k}, unwrap = {unwrap}, link = {link}")
        print("Recomputing...")
# otherwise calculate g propagator, save to csv
#check if all parent directories exist before writing to above file
try:
gfile.parent.mkdir(parents=True)
except:
if gfile.parent.is_dir() is False:
#if the parent directory does not exist and mkdir still failed, re-raise an exception
raise
#total linker length
Ll = link + unwrap
# construct super indexed matrix g(I0, If)
ntot = l0max**2 + 2*l0max + 1
I = lambda l, j: l**2 + l + j
g = np.zeros((ntot, ntot), 'complex')
#save list of Pandas DataFrames to be concatenated into csv
gsols = []
for j0 in range(-l0max, l0max+1):
# ODE: dg/dL = Ag
A = Ll*build_A_matrix(j0, k, lmax, **kwargs)
# initial condition: g(L=0): all 0's except when l = l0
# gsol contains the exponentiated matrix where rows are l and columns are l0
# I get a scipy sparse efficiency warning when performing the matrix exponentiation
# it recommends the 'lil' format, but when I tried that, I got different sparse efficiency
# warnings. Going to stick with 'csc' for now.
gsol = sparse.linalg.expm(A)
for l0 in range(abs(j0), l0max+1):
I0 = I(l0, j0)
for l in range(abs(j0), l0max+1):
If = I(l, j0)
g[If, I0] = gsol[l-abs(j0), l0-abs(j0)]
df = pd.DataFrame(gsol[0:(l0max+1-abs(j0)),
0:(l0max+1-abs(j0))].toarray())
df['k'] = k
df['link'] = link
df['j0'] = j0
gsols.append(df)
gdf = pd.concat(gsols, ignore_index=True, sort=False)
gdf.set_index(['k', 'link', 'j0'], inplace=True)
#add a file lock so that multiple processes cannot write to the same file at once
lock = FileLock(str(gfile) + '.lock')
with lock.acquire():
gdf.to_csv(gfile)
print(f'saved gprop for k={k}, link={link}bp, unwrap={unwrap}bp')
sys.stdout.flush()
return g
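# Usage sketch: compose one monomer's B propagator from a linker propagator and a
# kink rotation (illustrative values; the csv is computed or loaded as needed):
# g = gprop_k_given_link(k=0.1, link=36, unwrap=0)
# B = Mdict[0] @ g  # kinked propagator for one (linker + nucleosome) unit
# B00 = B[0, 0]     # the (0,0) element is what enters G(R;L)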
def save_gprop_csv_file(k, link, unwrap, l0max=20, lmax=None, **kwargs):
"""Saves solutions to dg/dL = Ag ODEs to a csv file, does not return anything.
Use this function for tabulating linker propagators when the
super-indexed matrix is not going to be used immediately."""
# set some arbitrarily large limit on lf -- so A is at most a 50x50 matrix, at least a 20x20 matrix
if lmax is None:
lmax = l0max + 30
#Path of file to be created
gfile = Path(f'csvs/gprops/{unwrap:.0f}unwraps/{link:.0f}link/gprop_k{k}_{link:.0f}bplinks.csv')
if (gfile.is_file() is False):
#check if all parent directories exist before writing/reading above file:
#missing parents of path are created as needed, FileExists error will be thrown if attempt
#to make a directory that already exists
try:
gfile.parent.mkdir(parents=True)
except:
# might fail because the directory was created by someone else
if gfile.parent.is_dir() is False:
#if the parent directory does not exist and mkdir still failed, re-raise an exception
raise
#calculate g propagator, save to csv
#total linker length
Ll = link + unwrap
#save list of Pandas DataFrames to be concatenated into csv
gsols = []
for j0 in range(-l0max, l0max+1):
# ODE: dg/dL = Ag
A = Ll*build_A_matrix(j0, k, lmax, **kwargs)
# initial condition: g(L=0): all 0's except when l = l0
# gsol contains the exponentiated matrix where rows are l and columns are l0
# I get a scipy sparse efficiency warning when performing the matrix exponentiation
# it recommends the 'lil' format, but when I tried that, I got different sparse efficiency
# warnings. Going to stick with 'csc' for now.
gsol = sparse.linalg.expm(A)
df = pd.DataFrame(gsol[0:(l0max+1-abs(j0)),
0:(l0max+1-abs(j0))].toarray())
df['k'] = k
df['link'] = link
df['j0'] = j0
gsols.append(df)
gdf = pd.concat(gsols, ignore_index=True, sort=False)
gdf.set_index(['k', 'link', 'j0'], inplace=True)
gdf.to_csv(gfile)
print(f'k={k}, link={link}bp, unwrap={unwrap}bp')
def parse_gprop_csv_file(k, link, unwrap, l0max=20):
"""Reads in csv file containing saved g propagators. Assumes the columns of the csv are
k, link, j0, 0-l0max+1. Parses complex matrix values and reorganizes data into
super-indexed g[If, I0] where :math:`I(l, j) = l^2 + l + j`. As always,
:math:`I` can take on :math:`l^2+2l+1` possible values."""
#### READ CSV: 15.3ms #####
inds = [i for i in range(l0max+1)]
#for 36bp linkers, file format was different. Columns are 'k', 'link', 'j0', 'index', [inds]
if (link == 36):
df = pd.read_csv(f'csvs/gprops/{unwrap:.0f}unwraps/{link:.0f}link/gprop_k{k}_{link:.0f}bplinks.csv',
header=0, names=['k', 'link', 'j0', 'ind']+inds)
#effectively remove the random index column in between 'j0' and the matrix values
df = df[['k', 'link', 'j0']+inds]
else :
df = pd.read_csv(f'csvs/gprops/{unwrap:.0f}unwraps/{link:.0f}link/gprop_k{k}_{link:.0f}bplinks.csv',
header=0, names=['k', 'link', 'j0']+inds)
#### CONVERT COMPLEX: 37.9 ms ######
    def complexify(x):
        try:
            # builtin complex() -- np.complex was just an alias and has been removed from numpy
            return complex(x)
        except ValueError as ve:
            print(f'Failed to convert {x} into a complex number.')
            sys.stdout.flush()
            raise ve
    for i in inds:
        # strip whitespace and parentheses, then convert 'i' to python's 'j' notation;
        # regex=False makes these literal (not regex) replacements
        df[i] = df[i].str.replace(' ', '', regex=False)
        df[i] = df[i].str.replace('(', '', regex=False)
        df[i] = df[i].str.replace(')', '', regex=False)
        df[i] = df[i].str.replace('i', 'j', regex=False).apply(complexify)
#### SUPERINDEX : 100 ms ####
# construct super indexed matrix g(I0, If)
ntot = l0max**2 + 2*l0max + 1
g = np.zeros((ntot, ntot), 'complex')
I = lambda l, j: l**2 + l + j
    # j0 comes directly from the group key (groups are sorted by j0 ascending)
    for (kval, linkval, j0), matrix in df.groupby(['k','link','j0']):
        for l0 in range(abs(j0), l0max+1):
            I0 = I(l0, j0)
            for l in range(abs(j0), l0max+1):
                If = I(l, j0)
                # +3 to skip 'k', 'link', 'j0' columns
                g[If, I0] = matrix.iat[l-abs(j0), l0-abs(j0)+3]
return g
###}}}
###{{{
# """Bare WLC Calculations"""
# various functions renormalized to match Quinn/Shifan's WLC code to check that
# ours works
def gprop_K_given_N(K, N, l0max=20, **kwargs):
    """Solve for the linker propagator without twist to compare to Quinn's code.
    Non-dimensionalize k and linker length to obtain G(K;N) where K=2lpk, N=L/(2lp).
    """
    # set twist persistence length to be super high (effectively no twist)
    lt = kwargs['lt'] if 'lt' in kwargs else 10000*default_lt
    lp = kwargs['lp'] if 'lp' in kwargs else default_lp
    tau_d = kwargs['tau_d'] if 'tau_d' in kwargs else ncg.dna_params['tau_d']
    link = (2*lp)*N
    k = K/(2*lp)
    # a bare WLC has no unwrapping, and gprop_k_given_link expects (k, link, unwrap, l0max)
    return gprop_k_given_link(k, link, 0, l0max, lp=lp, lt=lt, tau_d=tau_d)
def get_G(K, N, l, l0, j, l0max=20, lmax=None, **kwargs):
"""Return :math:`g_{l l_0}^j(K;N)`. Only need to solve one ODE for given value of j."""
if (abs(j) > l) or (abs(j) > l0):
raise ValueError('abs(j) must be less than or equal to both l and l0')
if lmax is None:
lmax = l0max + 30
    # set twist persistence length to be super high (effectively no twist)
lt = kwargs['lt'] if 'lt' in kwargs else 10000*default_lt
lp = kwargs['lp'] if 'lp' in kwargs else default_lp
tau_d = kwargs['tau_d'] if 'tau_d' in kwargs else ncg.dna_params['tau_d']
link = (2*lp)*N
k = K/(2*lp)
A = link*build_A_matrix(j, k, lmax, lp=lp, lt=lt, tau_d=tau_d)
gsol = sparse.linalg.expm(A)
return gsol[l-abs(j), l0-abs(j)]
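# Usage sketch (dimensionless units: K=2*lp*k, N=L/(2*lp); values chosen for illustration):
# G = get_G(K=1.0, N=1.0, l=0, l0=0, j=0)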
def plot_GKN(Ks, Ns, l, l0, j):
"""Plotting code for G(K;N) vs K for a given l, l0, j. Plots one curve for each N."""
fig, ax = plt.subplots()
Gs = np.zeros((Ns.size, Ks.size), 'complex')
for nn in range(Ns.size):
for kk in range(Ks.size):
Gs[nn, kk] = get_G(Ks[kk], Ns[nn], l, l0, j)
ax.loglog(Ks, np.abs(Gs[nn, :].real))
plt.xlabel('K')
plt.ylabel('G(K;N)')
plt.title(f'$l={l}, l_0={l0}, j={j}$')
plt.legend([f'N={N}' for N in Ns])
plt.ylim([10**-12, 2])
plt.show()
return Gs
def GRN_fourier_integrand(K, r, N):
"""Un-normalized fourier-inversion integrand for bare WLC."""
G0 = get_G(K, N, 0, 0, 0)
return K**2 * special.spherical_jn(0, N*r*K) * G0.real
def gprop_R_given_N_quad_integration(Kmin=10**-3, Kmax=10**5, l0max=20, **kwargs):
"""Return the un-normalized, real-space Green's function G(R;N) for bare WLC,
using scipy's quad integration and GRN_fourier_integrand(K, r, N)
Performs integration across 5 orders of magnitude for N and rvals from 0 to 1,
where r=R/L, N=L/(2lp)."""
rvals = np.linspace(0.0, 1.0, 100) # R/L
Nvals = np.array([0.1, 1.0, 10.0, 100., 1000.])
integral = np.zeros((rvals.size, Nvals.size))
errs = np.zeros_like(integral)
for i, r in enumerate(rvals):
for j, N in enumerate(Nvals):
sol, err = quad(GRN_fourier_integrand, Kmin, Kmax, args=(r, N))
integral[i, j] = sol
errs[i, j] = err
pickle.dump(integral, open(f'GRN_integral_K{Kmin}to{Kmax}_r0to1.p', 'wb'))
pickle.dump(errs, open(f'GRN_errors_K{Kmin}to{Kmax}_r0to1.p', 'wb'))
print(f'Computed integral for r={r}')
np.save(f'GRN_integral_K{Kmin}to{Kmax}_r0to1', integral)
np.save(f'GRN_errors_K{Kmin}to{Kmax}_r0to1', errs)
return integral, errs
###}}}
###{{{
# """Propogators for kinked WLC"""
def Bprop_k_given_L(k, links, filepath, w_ins=ncg.default_w_in, w_outs=ncg.default_w_out,
helix_params=ncg.helix_params_best, unwraps=None, **kwargs):
"""Calculate :math:`B_{00}^{00}(k;L)` for a chain of heterogenous linkers.
NOTE: this is the only function for heterogenous chains that allows for different
unwrapping amounts. All other functions in the pipeline (aka Fourier inversion,
looping, etc.) assume fixed unwrapping amount. These other functions need to be
modified to accept variable unwrapping.
Parameters
----------
k : float
k value for which B(k;L) should be calculated, in :math:`bp^{-1}`
links : (L,) array-like
bare linker lengths in bp
filepath : str or pathlib.Path object
full path to folder where output should be saved,
should include name to identify this particular heterogenous chain
e.g.: 'csvs/Bprops/0unwraps/heterogenous/{chain_identifier}'
w_ins : float or (L+1,) array_like
amount of DNA wrapped on entry side of central dyad base in bp
w_outs : float or (L+1,) array_like
amount of DNA wrapped on exit side of central dyad base in bp
Notes
-----
Adding a new propagator to a chain takes roughly 200ms.
Saves
-----
    'Bprop_k{k}_given_L_{len(links)}nucs.npy' : binary file
        :math:`B_{00}^{00}(k; L)`, one per monomer in chain
Saved in 'csvs/Bprops/0unwraps/heterogenous/chain_identifier'
"""
#save this singleton array in npy format since pool isn't letting me save at the end
#check if all parent directories exist before writing/reading file:
filepath = Path(filepath) #in case str is passed
Bfile = filepath/Path(f'Bprop_k{k}_given_L_{len(links)}nucs.npy')
#if file already exist, don't bother recalculating. Move on to next k.
try:
np.load(Bfile)
return
except:
# if there's some error loading the Bfile, delete it if it exists
if Bfile.is_file():
Bfile.unlink()
b = helix_params['b']
num_linkers = len(links)
num_nucleosomes = num_linkers + 1
w_ins, w_outs = convert.resolve_wrapping_params(unwraps, w_ins, w_outs, num_nucleosomes)
# calculate unwrapping amounts based on w_ins and w_outs
mu_ins = (b - 1)/2 - w_ins
mu_outs = (b - 1)/2 - w_outs
# save array of propogators of size (num_linkers,)
Bprops = np.zeros((num_linkers,)).astype('complex')
#save dictionary of B matrices in case there are a lot of repeat linkers
Bmats = {}
#first link in chain
unwrap = mu_outs[0] + mu_ins[1]
link = links[0]
try:
g = gprop_k_given_link(k, link, unwrap, **kwargs)
except Exception as e:
        print(f'ERROR: k={k}, link={link}, unwrap={unwrap}')
sys.stdout.flush()
raise e
M = Mdict[unwrap]
Bcurr = M@g
Bmats[(link, unwrap)] = Bcurr
Bprops[0] = Bcurr[0, 0]
for i in range(1, num_linkers):
unwrap = mu_outs[i] + mu_ins[i+1]
link = links[i]
key = (link, unwrap)
if key not in Bmats:
            # our M kink matrices are indexed by *unwrapping* amount, not wrapped amount
# calculate propagators
g = gprop_k_given_link(k, link, unwrap, **kwargs)
M = Mdict[unwrap]
Bnext = M@g
Bmats[key] = Bnext
Bnext = Bmats[key]
# advance chain by one linker
Bcurr = Bnext@Bcurr
Bprops[i] = Bcurr[0, 0]
try:
Bfile.parent.mkdir(parents=True)
except:
if Bfile.parent.is_dir() is False:
#if the parent directory does not exist and mkdir still failed, re-raise an exception
raise
#save file
np.save(Bfile, Bprops, allow_pickle=False)
print(f'wrote bprop file for k={k}, link={links[0]}')
sys.stdout.flush()
def combine_Bprop_files(filepath, links, unwraps, kvals=None, bareWLC=True, **kwargs):
"""Combine each of the files saved by Bprop_k_given_L() into a single matrix,
compute the Fourier inversion for the kinked WLC and corresponding bare WLC of
the same length and save them all in the directory specified by 'filepath'.
Parameters
----------
filepath : Path object or str
full path to directory where files from Bprop_k_given_L() were saved
e.g. 'csvs/Bprops/0unwraps/heterogenous/chain_identifier'
links : (L,) array-like
bare linker lengths in bp, one for each monomer in heterogenous chain
unwraps : int
unwrapping amount in bp. Assumes fixed unwrapping.
"""
num_linkers = len(links)
filepath = Path(filepath) #in case str is passed
if kvals is None:
Klin = np.linspace(0, 10**5, 20000)
Klog = np.logspace(-3, 5, 10000)
Kvals = np.unique(np.concatenate((Klin, Klog)))
#convert to little k -- units of inverse bp (this results in kmax = 332)
kvals = Kvals / (2*default_lp)
Bprops = np.zeros((len(kvals), num_linkers)).astype('complex')
for kk, k in enumerate(kvals):
Bfile = filepath/f'Bprop_k{k}_given_L_{len(links)}nucs.npy'
#try loading in Bprop file; if faulty, halt combine process
try:
Bprops[kk, :] = np.load(Bfile)
except:
# if there's some error loading the Bfile, delete it if it exists
if Bfile.is_file():
print(f'Problem reading file {Bfile}... deleting...')
Bfile.unlink()
print(f'Recomputing bprop for k={k}...')
Bprop_k_given_L(k, links, filepath)
try:
Bprops[kk, :] = np.load(Bfile)
except:
                print('Recomputed file also cannot be loaded. Halting combine process.')
raise
chain_identifier = filepath.name #parent directory of Bprop files = chain_identifier
np.save(filepath/Path(f'linker_lengths_{chain_identifier}_{num_linkers}nucs.npy'),
links, allow_pickle=False)
np.save(filepath/Path(f'B0_k_given_L_{chain_identifier}_{num_linkers}nucs_30000Ks.npy'), Bprops, allow_pickle=False)
if bareWLC:
qprop = bareWLC_gprop(kvals, links, unwraps, **kwargs)
qintegral = BRN_fourier_integrand_splines(kvals, links, unwraps, Bprop=qprop, **kwargs) #default: 1000 rvals
np.save(filepath/Path(f'bareWLC_greens_{chain_identifier}_{num_linkers}nucs.npy'), qintegral, allow_pickle=False)
integral = BRN_fourier_integrand_splines(kvals, links, unwraps, Bprop=Bprops, **kwargs) #default: 1000 rvals
np.save(filepath/Path(f'kinkedWLC_greens_{chain_identifier}_{num_linkers}nucs.npy'), integral, allow_pickle=False)
print(f'Saved G(R;L) for {chain_identifier}, {num_linkers} monomers!')
sys.stdout.flush()
def Bprop_k_given_L_Sarah_chain_Rlinks(k, unwrap=0, Nvals=None, **kwargs):
"""Grow a chain that has a finite set of heteoregenous linkers followed by constant linkers. The first part
will be calculated by manually multiplying propagators, and the second will use matrix exponentiation."""
#for Rlinks, I actually saved the matrices obtained from 7 links: 47, 21, 18, 15, 20, 17, 35
#so just need to caculate propagator for 35 link and exponentiate to get rest of chain
Bcurr = pickle.load(open(f'csvs/Bprops/0unwraps/Sarah/Bmatrices_Rlinks_7nucs/Bmatrix_k{k}_Sarah_Rlinks_7nucs_no_unwraps.p', 'rb'))
#for constant 35bp linkers
links = np.tile(35, 493) #so total chain is 500 nucs
num_linkers = len(links)
if Nvals is None:
Nvals = np.arange(1, num_linkers+1)
Bprops = np.zeros_like(Nvals).astype('complex')
#B propagator for 35 bp link, 0 unwrap
g = gprop_k_given_link(k, links[0], unwrap, **kwargs)
M = Mdict[unwrap]
B = M@g
for i, N in enumerate(Nvals):
#exponentiate propagator
BN = np.linalg.matrix_power(B, N)
#multiply by heterogenous propagator from first 7 nucs in chain
Bfinal = BN@Bcurr
Bprops[i] = Bfinal[0, 0]
    print(f'Rlinks, 450to500nucs, k={k}')
#this only includes linkers 8-50; properly append it to pre-saved Bprops for the first 7 nucs
return Bprops
def Bprop_k_given_link(k, link, unwrap, **kwargs):
"""Calculates B propagator for a single monomer with linker length 'link' and unwrapping
amount 'unwrap'.
Parameters
----------
k : float
k value for which to compute B(k; L) in :math:`bp^{-1}`
link : int
bare linker length in bp (not including unwrapping)
unwrap : int
unwrapping amount in bp
Returns
-------
B00 : complex
:math:`B_{00}^{00}(k;N=1)` component of propagator
"""
#total linker length is bare linker + unwrapping amount
g = gprop_k_given_link(k, link, unwrap, **kwargs)
M = Mdict[unwrap]
B = M@g
return B[0, 0]
@utils.cache
def Bprop_k_given_L_fixed_linkers_fixed_unwrap(k, N, link, unwrap, **kwargs):
"""Calculate propagator for Nth monomer in a chain with fixed linkers
and fixed unwrapping amounts.
Parameters
----------
k : float
k value for which to compute B(k; L) in :math:`bp^{-1}`
N : int
        number of nucleosomes down the chain (i.e. Nth propagator)
link : int
bare linker length in bp (not including unwrapping)
unwrap : float
unwrapping amount in bp
Returns
-------
B00 : complex
:math:`B_{00}^{00}(k;N)` component of Nth propagator
"""
g = gprop_k_given_link(k, link, unwrap, **kwargs)
M = Mdict[unwrap]
B = M@g
BN = np.linalg.matrix_power(B, N)
return BN[0, 0]
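# Usage sketch (illustrative values; g propagators are computed or loaded from csv):
# B00_10nucs = Bprop_k_given_L_fixed_linkers_fixed_unwrap(k=0.1, N=10, link=36, unwrap=0)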
def Bprop_k_fixed_linkers_fixed_unwrap(k, links, unwrap, Nvals=None, **kwargs):
"""Same as Bprop_k_given_L except assumes fixed linker length and constant unwrapping amount.
Under these assumptions, the B propogator for each monomer is identical, so growing the chain
simply requires a matrix exponentiation. TODO: change this code to use for loop; matrix
exponentiation is slow for large N.
Parameters
----------
k : float
k value for which to compute B(k; L) in :math:`bp^{-1}`
links : (L,) array-like
bare linker length in bp (not including unwrapping)
unwraps : float
unwrapping amount in bp
Nvals : array-like
number of linkers down the chain for which you want the propagator. Defaults to one propagator
per monomer of the chain.
Returns
-------
Bprops : (Nvals.size,), array-like
:math:`B_{00}^{00}(k;N)` for all N values
"""
num_linkers = len(links)
    if not np.array_equal(np.tile(links[0], num_linkers), links):
        raise ValueError('linker lengths must all be constant')
if Nvals is None:
Nvals = np.arange(1, num_linkers+1)
g = gprop_k_given_link(k, links[0], unwrap, **kwargs)
M = Mdict[unwrap]
B = M@g
Bprops = np.zeros_like(Nvals).astype('complex')
    #TODO: accumulate B^N iteratively; calling matrix_power from scratch each N is slow for large N
for i, N in enumerate(Nvals):
BN = np.linalg.matrix_power(B, N)
Bprops[i] = BN[0, 0]
    print(f'k={k}, link={links[0]}bp, unwrap={unwrap}bp')
return Bprops
def BRN_fourier_integrand_splines(kvals, links, unwrap, Nvals=None, rvals=None, Bprop=None):
"""Return normalized, real space Green's function G(R;Rmax) for kinked WLC using
spline integration.
Parameters
----------
kvals : (29999,) array-like
k values for which to compute G(k; N) in :math:`bp^{-1}`
links : (L,) array-like
bare linker lengths in bp (not including unwrapping), one per nuc in chain. Assumes length
of chain in len(links).
unwrap : float
unwrapping amount in bp. Assumes fixed unwrapping.
Nvals : (N,) array-like
number of linkers down the chain for which Bprop was calculated. Defaults to one
per monomer of the chain. (Nvals should correspond to columns of Bprop)
rvals : array-like
dimensionless chain length R/Rmax, where Rmax is cumulative length of linkers and R is the
length of the polymer
Bprop : (29999, N) array-like
:math:`B_{00}^{00}(k;N)` for each k (rows) and each chain length N (columns).
Defaults to propagator for chain with fixed linkers and fixed unwrapping and loads in corresponding
pickled file.
Returns
-------
greens : (rvals.size, Nvals.size), array-like
:math:`G(R;L)` for all specified chain lengths
"""
num_linkers = len(links)
    if Bprop is None:
        Bprop = pickle.load(open(f'csvs/Bprops/{unwrap}unwraps/{links[0]}link/B0_k_given_N_{links[0]}bplinkers_{unwrap}unwraps_{len(links)}nucs_{len(kvals)}Ks.p', 'rb'))
if Nvals is None:
Nvals = np.arange(1, num_linkers+1)
if (Nvals.size != Bprop.shape[1]):
raise ValueError('Nvals must correspond to columns of Bprop')
if rvals is None:
rvals = np.linspace(0.0, 1.0, 1000) #R/L
Rmax = convert.Rmax_from_links_unwraps(links, unwraps=unwrap)
inds = Nvals - 1
Rmax = Rmax[inds]
integral = np.zeros((rvals.size, Rmax.size))
for i, r in enumerate(rvals):
for j in range(len(Rmax)):
R = r*Rmax[j]
out = kvals**2 * special.spherical_jn(0, kvals*R) * Bprop[:, j].real
integral[i, j] = 1/(2*np.pi**2) * splint(min(kvals), max(kvals), splrep(kvals, out))
return integral
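# Usage sketch of the Fourier inversion (assumes Bprops was tabulated on kvals,
# e.g. by Bprop_k_fixed_linkers_fixed_unwrap, with one column per chain length):
# links = np.tile(36, 50)
# greens = BRN_fourier_integrand_splines(kvals, links, unwrap=0, Bprop=Bprops)
# greens[i, j] is G(R;L) evaluated at R = rvals[i]*Rmax[j]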
def plot_BKN(Ks, Bprop, links, unwrap, Nvals=None):
"""Plotting code for B(K;N) vs K where N is the number of nucleosomes in the chain.
Assumes Bprop's rows are K values and columns are N values."""
if Nvals is None:
Nvals = np.arange(1, len(links)+1)
fig, ax = plt.subplots()
ldna = convert.genomic_length_from_links_unwraps(links, unwraps=unwrap)
inds = Nvals - 1
Ls = ldna[inds]
for i in inds:
ax.loglog(Ks, Bprop[:, i].real)
plt.xlabel('K')
plt.ylabel('B(K;L)')
plt.title(f'$B_0^0(K;L)$ for {links[0]}bp linkers, {unwrap}bp unwrapping')
plt.legend([f'L={L:.0f}bp' for L in Ls])
plt.ylim([10**-12, 2])
return fig, ax
def plot_fourier_integrand(kvals, Bprop, R, N):
"""Plotting code for the actual function being integrated to obtain G(R;N)
Use this code and zoom in on ranges of k values to ensure K spacing is
accurant enough to capture periodicity in integrand."""
fig, ax = plt.subplots()
outs = kvals**2 * special.spherical_jn(0, kvals*R) * Bprop[:, N-1].real
ax.plot(kvals, outs, '-o')
def plot_greens(integral, links, unwrap, Nvals, rvals=None, rminN1=0.0, ax=None):
"""Plot G(R;Rmax) vs. dimensionless chain length r = R/Rmax, one curve
per Nval, where N is the chain length in number of nucleosomes.
Parameters
----------
integral : (rvals.size, Rmax.size) array-like
Green's function for chain with this set of linkers and unwrapping
links : (L,) array-like
bare linker length in bp (not including unwrapping)
    unwrap : float
unwrapping amount in bp. Assumes fixed unwrapping.
Nvals : array-like
number of linkers down the chain for which you want to plot G(R;Rmax).
rminN1 : float
minimum r value from which the N=1 curve should be plotted. Defaults to 0.0.
Due to numerical issues, there tends to be noise for r values < 0.7. To avoid plotting this noise,
set rminN1=0.7 (or whatever value seems fitting for your particular chain).
"""
if rvals is None:
rvals = np.linspace(0.0, 1.0, 1000)
if ax is None:
fig, ax = plt.subplots()
#rows are rvals, columns are N vals
#integral = BRN_fourier_integrad_splines(kvals, links, unwrap)
ldna = convert.genomic_length_from_links_unwraps(links, unwraps=unwrap)
inds = Nvals - 1
Ls = ldna[inds]
for i in inds:
rmin = 0.0
if (i==0): #for N=1 case, don't plot noise -- can manually pass in the right cutoff
rmin = rminN1
rsub = rvals[(rvals >= rmin)]
intsub = integral[(rvals >= rmin), i]
ax.semilogy(rsub, intsub, '-o', markersize=2, linewidth=1)
plt.xlabel('$R/R_{max}$')
plt.ylabel('G(R;L)')
plt.legend([f'L={L:.0f}bp' for L in Ls], frameon=True)
plt.title(f'{links[0]}bp linkers, {unwrap} unwraps')
return fig, ax
def plot_greens_kinkedWLC_bareWLC(integral, qintegral, links, unwrap, Nvals, rvals=None, rminN1=0.0, qrminN1=0.0):
"""Plot G(R;Rmax) for kinked WLC and bare WLC with same Rmax vs. dimensionless chain length r = R/Rmax,
one curve per Nval, where N is the chain length in number of nucleosomes.
Parameters
----------
integral : (rvals.size, Rmax.size) array-like
Green's function for kinked WLC with this set of linkers and unwrapping
qintegral : (rvals.size, Rmax.size) array-like
Green's function for bare WLC with this set of linkers and unwrapping
links : (L,) array-like
bare linker length in bp (not including unwrapping)
    unwrap : float
unwrapping amount in bp. Assumes fixed unwrapping.
Nvals : array-like
number of linkers down the chain for which you want to plot G(R;Rmax).
rminN1 : float
minimum r value from which the N=1 curve should be plotted for kinked WLC. Due to numerical
issues, there tends to be noise for r values < 0.7. To avoid plotting this noise,
set rminN1=0.7 (or whatever value seems fitting for your particular chain).
qrminN1 : float
minimum r value for which the N=1 curve should be plotted for bare WLC. e.g. qrminN1=0.7
Note: hard-coded rmin to be 0.4 for chains of length N=2 because there tends to be noise
for small r even for the N=2 case.
"""
if rvals is None:
rvals = np.linspace(0.0, 1.0, 1000)
fig, ax = plt.subplots()
ldna = ncg.genomic_length_from_links_unwraps(links, unwraps=unwrap)
inds = Nvals - 1
Ls = ldna[inds]
for ii, i in enumerate(inds):
color = np.random.rand(3)
rmin = 0.0
qrmin = 0.0
if (i==0): #for N=1 case, don't plot noise
rmin = rminN1
qrmin = qrminN1
if (i==1):
qrmin = 0.4
rsub = rvals[(rvals >= rmin)]
qrsub = rvals[(rvals >= qrmin)]
intsub = integral[(rvals >= rmin), i]
qintsub = qintegral[(rvals >= qrmin), i]
ax.semilogy(rsub, intsub, '-o', markersize=2, linewidth=1,
color=color, label=f'L={Ls[ii]:.0f}bp, kinked')
ax.semilogy(qrsub, qintsub, '--', color=color, label=f'L={Ls[ii]:.0f}bp, no kinks')
ax.legend(frameon=True)
plt.xlabel('$R/R_{max}$')
plt.ylabel('G(R;L)')
#plt.legend([f'L={L:.0f}bp' for L in Ls], frameon=True)
plt.title(f'{links[0]}bp linkers, {unwrap} unwraps')
return fig, ax
###}}}
###{{{
# looping stuff
def sarah_looping(N):
"""Looping probability of a bare WLC with a capture radius of a=0.1b
(1/10th Kuhn lengths), tabulated by Sarah. For values beyond the N that was
tabulated, Gaussian chain behavior is assumed.
To compare to the probabilities that we calculate in e.g. load_WLC_looping,
just put into our units,
>>> n, sarah_looping(n/b)/b**2
    The power-law fit used beyond the tabulated range was obtained via:
    In [1]: m, intercept, rvalue, pvalue, stderr = scipy.stats.linregress(
       ...:     np.log10(sarah_loops.n[sarah_loops.n > 10]),
       ...:     np.log10(sarah_loops.pLoop[sarah_loops.n > 10]))
    In [2]: m
    Out[2]: -1.47636997617001
    In [3]: intercept
    Out[3]: -2.981924639134709
"""
a = 0.1 # capture radius used by Sarah
n_max = ncd.sarah_looping.n.max()
N = np.atleast_1d(N)
    # implicitly N<0.1 ==> pLoop = 1; float dtype so the power-law fill below is not truncated to int
    out = np.ones_like(N, dtype=float)
int_i = (a < N) & (N < n_max)
out[int_i] = np.interp(N[int_i], ncd.sarah_looping.n, ncd.sarah_looping.pLoop)
# see doc for source of these
m = -1.47636997617001
intercept = -2.981924639134709
out[N >= n_max] = 10**(m * np.log10(N[N>=n_max]) + intercept)
return out
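# Usage sketch (N in Kuhn lengths; to compare with our bp-based probabilities,
# convert as described in the docstring above):
# ploop = sarah_looping(np.array([0.5, 5.0, 50.0]))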
def load_WLC_looping(ns=None):
"""Compute or load existing values for the bare WLC looping probability,
in units of base pairs. Saved in files that refer to non-dimensionalized
units N=L/2*lp (WARNING: not calculated in non-dimensionalized units, so
they cannot be used unless put back into their own units).
The default N = [0.005n]*50, [0.02n]*100, [0.2n]*50, [10n]*100.
There is some overlap, but for values of n where the function is
numerically stable, they match at the overlap points nicely.
TODO: fix issue where small n are numerically unstable."""
default_gprops_dir = Path('csvs/gprops/straight')
    gprops_re = re.compile(r'gprops_bare_([0-9]+\.?[0-9]*)n_([0-9]+)steps\.pkl')
gprops = []
all_ns = []
    # default k grid used throughout this module; bareWLC_gprop and
    # BRN_fourier_integrand_splines take kvals explicitly, and we assume any
    # pickled gprops below were tabulated on this same grid
    Klin = np.linspace(0, 10**5, 20000)
    Klog = np.logspace(-3, 5, 10000)
    kvals = np.unique(np.concatenate((Klin, Klog))) / (2*default_lp)
    if ns is not None:
        # operates in "bases" and assumes an lp of 50nm, uses "linker" lengths
        # instead of positions along polymer, so do appropriate conversions
        gprops = [bareWLC_gprop(kvals, np.diff(ns)*100/ncg.dna_params['lpb'], unwrap=0)]
        all_ns = [ns]
else:
for file in default_gprops_dir.glob('*.pkl'):
match = gprops_re.match(file.name)
if match is None:
continue
dn, nsteps = match.groups()
dn = float(dn)
nsteps = int(nsteps)
all_ns.append(np.cumsum(np.tile(dn, nsteps)))
gprops.append(pickle.load(open(file, 'rb')))
Ploops = []
for i, gprop in enumerate(gprops):
dns = np.insert(np.diff(all_ns[i]), 0, all_ns[i][0])
        Ploops.append(BRN_fourier_integrand_splines(kvals, dns, unwrap=0,
                      rvals=np.array([0]), Bprop=gprop).ravel())
n, Ploop = np.concatenate(all_ns), np.concatenate(Ploops)
i = np.argsort(n)
return 2*default_lp*n[i], Ploop[i]
def fit_persistance_length_to_gaussian_looping_prob(integral, links, unwrap, Nvals=None, Nmin=40):
"""Fit effective persistance length to log-log looping probability vs. chain length (Rmax).
Nmin is the minimum number of nucleosomes to begin powerlaw fitting to Gaussian chain.
"""
if Nvals is None:
Nvals = np.arange(1, len(links)+1)
ploops = integral[0, :]
Rmax = convert.Rmax_from_links_unwraps(links, unwraps=unwrap)
#extract out chain lengths that correspond to Nvals >= Nmin
inds = Nvals - 1
inds = inds[Nvals >= Nmin]
#Gaussian chain limit in log-log space
ploop_gaussian = np.log(ploops[Nvals >= Nmin])
Rmax_gaussian = np.log(Rmax[inds])
m, intercept, rvalue, pvalue, stderr = stats.linregress(Rmax_gaussian, ploop_gaussian)
print(f'Power law: N^{m}')
    #For a Gaussian chain, intercept = (3/2)*log(3/(4pi*lp)) -- see Deepti's notes --
    #so lp = 3/(4pi*exp(intercept/(3/2))); using |m| here assumes the fitted slope is close to -3/2
    lp = 3 / (4*np.pi*np.exp(intercept/np.abs(m)))
    lp = lp * ncg.dna_params['lpb']
return m, lp
def tabulate_bareWLC_propagators(Ks):
m = 0
props = []
for i, K in enumerate(Ks):
props.append(propagator.propagator(i, K, m))
return props
def bareWLC_gprop(kvals, links, unwrap, Nvals=None, props=None, **kwargs):
"""Calculate G(K;N) for a bare WLC with the same Rmax as a kinked WLC with
given linker lengths and unwrapping amount at chain lengths dictated by
Nvals (# of nucleosomes).
Parameters
----------
kvals : (29999,) array-like
k values for which to compute G(k; N) in :math:`bp^{-1}`
links : (L,) array-like
bare linker lengths in bp (not including unwrapping), one per nuc in chain. Assumes length
of chain in len(links).
unwrap : float
unwrapping amount in bp. Assumes fixed unwrapping.
Nvals : array-like
number of linkers down the chain for which you want the propagator. Defaults to one propagator
per monomer of the chain.
props : (29999,) array-like
list of objects of class 'MultiPoint.propagator', one for each k value.
Returns
-------
gprops : (kvals.size, Nvals.size), array-like
:math:`G_{00}^{0}(k;N)` for all N values
"""
num_linkers = len(links)
#here, Nvals refers to number of nucleosomes; need to convert to units of Rmax / 2lp
if Nvals is None:
Nvals = np.arange(1, num_linkers+1)
inds = Nvals - 1
    #in case we want to create WLCs with different persistence lengths
    lp = kwargs['lp'] if 'lp' in kwargs else default_lp
    Rmax = convert.Rmax_from_links_unwraps(links, unwraps=unwrap) #max WLC chain length in bp
    #select out chain lengths for which the propagator needs to be calculated
    Rmax = Rmax[inds]
    #here, Ns refers to number of WLC Kuhn lengths
    Ns = Rmax / (2*lp)
    #convert to big K units in Quinn's code
    Ks = (2*lp) * kvals
#Calculate bare WLC propagators, one for each value of K
if props is None:
props_file = Path('csvs/quinn_props_default_kvals.pkl')
if props_file.exists():
props = pickle.load(open(props_file, 'rb'))
else:
props = tabulate_bareWLC_propagators(Ks)
pickle.dump(props, open(props_file, 'wb'))
#only care about G000 for real space green's function
l0 = 0
l = 0
#same shape as my Bprops for comparison
gprops = np.zeros((Ks.size, Ns.size)).astype('complex')
for i in range(len(Ks)):
#only calculate l=0, m=0, j=0 case
#by default, nlam=10 (this is analogous to l0max, but we only want l0=0 anyway)
for j, N in enumerate(Ns):
gprops[i, j] = props[i].get_G(N, l0, l)
return gprops
def prob_R_given_L(integral, rvals, links, unwrap, Nvals=None):
"""Calculate probability that polymer is of length R (as opposed to R vector).
Integrate :math:`P(R;L) = 4\pi dR R^2 G(R;L)` from R=0 to R=Rmax and
check that the answer is 1 for all chain lengths.
Assumes integral is (rvals.size, Rmax.size)."""
if Nvals is None:
Nvals = np.arange(1, len(links)+1)
inds = Nvals - 1
#R = r * Rmax
Rmax = convert.Rmax_from_links_unwraps(links, unwraps=unwrap)
PRN = np.zeros_like(Nvals).astype('float')
for i in inds:
Rvals = rvals * Rmax[i]
y = (4*np.pi) * Rvals**2 * integral[:, i]
PRN[i] = splint(0.0, Rmax[i], splrep(Rvals, y))
print(f'P for {Nvals[i]} nucleosomes = {PRN[i]}')
return PRN #should be 1 for all N values
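# Usage sketch of the normalization check (greens from BRN_fourier_integrand_splines;
# rvals must match the grid used there):
# rvals = np.linspace(0.0, 1.0, 1000)
# PRN = prob_R_given_L(greens, rvals, np.tile(36, 50), unwrap=0)  # each entry should be ~1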
def prob_R_in_radius_a_given_L(a, integral, rvals, links, unwrap, Nvals=None):
"""Calculate probability that 2 ends of of polymer fall within some contact radius a.
Integrate :math:`P(R;L) = 4\pi dR R^2 G(R;L)` from R=0 to R=a, where a is in bp.
Note: In Sarah's paper, a = L / (2lp) = 0.1, so multiply by 0.1 * 2*lp = 30 bp
Assumes integral is (rvals.size, Rmax.size)."""
if Nvals is None:
Nvals = np.arange(1, len(links)+1)
inds = Nvals - 1
Rmax = convert.Rmax_from_links_unwraps(links, unwraps=unwrap)
PRN = np.zeros_like(Nvals).astype('float')
for i in inds:
Rvals = rvals * Rmax[i]
y = (4*np.pi) * Rvals**2 * integral[:, i]
PRN[i] = splint(0.0, a, splrep(Rvals, y))
#1 probability per chain length
return PRN
def BRN_fourier_integrand(k, R, N, link, unwrap):
"""Fourier integrand for kinked WLC green's function to be passed to quad."""
B0 = Bprop_k_given_L_fixed_linkers_fixed_unwrap(k, N, link, unwrap)
return k**2 * special.spherical_jn(0, k*R) * B0.real
def greens_R_given_L_fixed_linkers_fixed_unwraps(r, links, unwrap, kmin=0, kmax=300):
"""Return the unnormalized, real-space Green's function G(R; L) using scipy's
quad integration. NOTE: This is too slow, so don't ever run this code."""
num_linkers = len(links)
    if not np.array_equal(np.tile(links[0], num_linkers), links):
        raise ValueError('linker lengths must all be constant')
#total length of chain in genomic distance; len(Ldna) = number of linkers/nucleosomes
Ldna = ncg.genomic_length_from_links_unwraps(links, unwraps=unwrap)
integral = np.zeros_like(Ldna).astype('float')
#errs = np.zeros_like(integral)
#for i, r in enumerate(rvals):
for j, L in enumerate(Ldna):
N = j+1 #number of nucleosomes, aka Nth propagator
R = r*L
sol, err = quad(BRN_fourier_integrand, kmin, kmax, args=(R, N, links[0], unwrap))
print(f'Computed integral for r={r}, L={L}')
integral[j] = sol
#errs[j] = err
return integral
###}}}
###{{{
# TESTING FUNCTIONS
def test_lmax_convergence(K, N, l, l0, j, l0max=20, tol=10**-8, **kwargs):
"""Determine the maximum l value needed for G(K;N) to converge."""
    # start at lmax = l0max + 30 and increase by 10 until G(K;N) converges
lmax = l0max + 30
ans = get_G(K, N, l, l0, j, l0max, lmax, **kwargs)
prev = ans - 10
while(np.abs(ans - prev) > tol):
# keep increasing lmax by 10 until answers are within specified tolerance
lmax += 10
prev = ans
ans = get_G(K, N, l, l0, j, l0max, lmax, **kwargs)
print(f'G(K;N) for l={l}, l0={l0}, j={j} converged within tol={tol} at lmax={lmax}')
return lmax, ans
def save_greens_fixed_links_fixed_unwraps(links, unwrap=0, num_nucs=50, Kprops_bareWLC=None):
"""To process many chains with fixed linker lengths, load in the saved Bprops, do the Fourier
inversion, and save the output so we can quickly look at them later."""
Klin = np.linspace(0, 10**5, 20000)
Klog = np.logspace(-3, 5, 10000)
Kvals = np.unique(np.concatenate((Klin, Klog)))
#convert to little k -- units of inverse bp (this results in kmax = 332)
kvals = Kvals / (2*wlc.default_lp)
if Kprops_bareWLC is None:
Kprops_bareWLC = wlc.tabulate_bareWLC_propagators(Kvals)
print('Tabulated K propagators for bare WLC!')
for link in links:
linkers = np.tile(link, num_nucs)
#for each chain, do fourier inversion integral and save output as .npy file in Bprops directory
Bprop = np.load(f'csvs/Bprops/{unwrap}unwraps/{link}link/B0_k_given_N_{link}bplinkers_{unwrap}unwraps_50nucs_30000Ks.npy')
rvals = np.linspace(0.0, 1.0, 1000)
qprop = wlc.bareWLC_gprop(kvals, linkers, unwrap, props=Kprops_bareWLC) #default: 1000 rvals
integral = wlc.BRN_fourier_integrand_splines(kvals, linkers, unwrap, Bprop=Bprop, rvals=rvals) #default: 1000 rvals
qintegral = wlc.BRN_fourier_integrand_splines(kvals, linkers, unwrap, Bprop=qprop, rvals=rvals) #default: 1000 rvals
#integral takes ~10 min to run, so prob worth saving
np.save(f'csvs/Bprops/{unwrap}unwraps/{link}link/kinkedWLC_greens_{link}link_{unwrap}unwraps_{len(rvals)}rvals_50nucs.npy', integral, allow_pickle=False)
np.save(f'csvs/Bprops/{unwrap}unwraps/{link}link/bareWLC_greens_{link}link_{unwrap}unwraps_{len(rvals)}rvals_50nucs.npy', qintegral, allow_pickle=False)
print(f'Saved G(R;L) for {link}link, {unwrap} unwrap!')
def save_greens_hetero_links_plus_one(links, unwrap=0, Kprops_bareWLC=None):
"""Load in saved Bprop for a heterogenous chain with 2 random linker lengths, link and link+1,
and calculate/save Green's function."""
    #this function needs the same default K grid as save_greens_fixed_links_fixed_unwraps
    Klin = np.linspace(0, 10**5, 20000)
    Klog = np.logspace(-3, 5, 10000)
    Kvals = np.unique(np.concatenate((Klin, Klog)))
    kvals = Kvals / (2*wlc.default_lp)
    if Kprops_bareWLC is None:
        Kprops_bareWLC = wlc.tabulate_bareWLC_propagators(Kvals)
        print('Tabulated K propagators for bare WLC!')
link = min(links)
#for each chain, do fourier inversion integral and save output as .npy file in Bprops directory
Bprop = np.load(f'csvs/Bprops/{unwrap}unwraps/heterogenous/B0_k_given_N_links{link}or{link+1}_50nucs_30000Ks.npy')
rvals = np.linspace(0.0, 1.0, 1000)
qprop = wlc.bareWLC_gprop(kvals, links, unwrap, props=Kprops_bareWLC) #default: 1000 rvals
integral = wlc.BRN_fourier_integrand_splines(kvals, links, unwrap, Bprop=Bprop, rvals=rvals) #default: 1000 rvals
qintegral = wlc.BRN_fourier_integrand_splines(kvals, links, unwrap, Bprop=qprop, rvals=rvals) #default: 1000 rvals
#integral takes ~10 min to run, so prob worth saving
np.save(f'csvs/Bprops/{unwrap}unwraps/heterogenous/kinkedWLC_greens_links{link}or{link+1}_{len(rvals)}rvals_50nucs.npy', integral, allow_pickle=False)
np.save(f'csvs/Bprops/{unwrap}unwraps/heterogenous/bareWLC_greens_links{link}or{link+1}_{len(rvals)}rvals_50nucs.npy', qintegral, allow_pickle=False)
print(f'Saved G(R;L) for {link} or {link+1} link, {unwrap} unwrap!')
# def compare_Quinn_Deepti_props(Ks, Ns, l, l0, j):
# props = []
# for i,K in enumerate(Ks):
# props.append(propagator.propagator(i, K, 1))
# for N in Ns:
# lmax, my_ans = wlc.test_lmax_convergence(K, N, l, l0, j)
# quinn_ans = props[i].get_G(N, l0, l)
# print(f'G({K};{N}): Quinn -- {quinn_ans}, Deepti -- {my_ans}')
# return props
###}}}
# from multiprocessing import Pool
# %time
# if __name__ == '__main__':
# #this is the same-ish range that Quinn used
# Klin = np.linspace(0, 10**5, 20000)
# Klog = np.logspace(-3, 5, 10000)
# Kvals = np.unique(np.concatenate((Klin, Klog)))
# #convert to little k -- units of inverse bp (this results in kmax = 332)
# kvals = Kvals / (2*wlc.default_lp)
# with Pool(31) as pool:
# #returns a list of 30,000 441 by 441 matrices; don't need to pickle
# #since function already saves them in csv's
# gprops = pool.map(partial(wlc.gprop_k_given_link, link=26, unwrap=0), kvals)
# with Pool(31) as pool:
# gprops = pool.map(partial(wlc.gprop_k_given_link, link=30, unwrap=0), kvals)
# with Pool(31) as pool:
# gprops = pool.map(partial(wlc.gprop_k_given_link, link=245, unwrap=0), kvals)
# if __name__ == '__main__':
# #this is the same-ish range that Quinn used
# Kmax = 10**5
# kmax = Kmax / (2*wlc.default_lp)
# #suppose chain consists of 50, 36bp linkers -- this is about the length at which the R^2 plot levels out
# links = np.tile(36, 50)
# rvals = np.linspace(0.0, 1.0, 100) # R/L
# with Pool(32) as pool:
# #returns a list of np arrays, each array is B000(k;N) for each of the Nvals
# integral = pool.map(partial(wlc.greens_R_given_L_fixed_linkers_fixed_unwraps, links=links, unwrap=0, kmax=kmax), rvals)
# #integral should now be a matrix where rows are r values and columns are N (or L) values
# integral = np.array(integral)
# pickle.dump(integral, open(f'G_R_given_L_36bplinkers_0unwraps_50nucs.p', 'wb'))
#GROW CHAIN FROM 400 TO 500 nucs
# %%time
# from multiprocessing import Pool
# if __name__ == '__main__':
# #this is the same-ish range that Quinn used
# Klin = np.linspace(0, 10**5, 20000)
# Klog = np.logspace(-3, 5, 10000)
# Kvals = np.unique(np.concatenate((Klin, Klog)))
# #convert to little k -- units of inverse bp (this results in kmax = 332)
# kvals = Kvals / (2*wlc.default_lp)
# Nvals = np.arange(400, 501, 2)
# #chains of 500 nucleosomes; constant 50bp, 42bp, and 36bp linkers below
# links = np.tile(50, 500)
# with Pool(31) as pool:
# #returns a list of np arrays, each array is B000(k;N) for each of the Nvals
# bprops50_400to500nucs = pool.map(partial(wlc.Bprop_k_fixed_linkers_fixed_unwrap, links=links, unwrap=0, Nvals=Nvals), kvals)
# #bprops should now be a matrix where rows are k values and columns are N values
# bprops50_400to500nucs = np.array(bprops50_400to500nucs)
# np.save(f'csvs/Bprops/0unwraps/50link/B0_k_given_N_50bplinkers_0unwraps_400to500nucs_30000Ks.npy', bprops50_400to500nucs, allow_pickle=False)
# links = np.tile(42, 500)
# with Pool(31) as pool:
# #returns a list of np arrays, each array is B000(k;N) for each of the Nvals
# bprops42_400to500nucs = pool.map(partial(wlc.Bprop_k_fixed_linkers_fixed_unwrap, links=links, unwrap=0, Nvals=Nvals), kvals)
# #bprops should now be a matrix where rows are k values and columns are N values
# bprops42_400to500nucs = np.array(bprops42_400to500nucs)
# np.save(f'csvs/Bprops/0unwraps/42link/B0_k_given_N_42bplinkers_0unwraps_400to500nucs_30000Ks.npy', bprops42_400to500nucs, allow_pickle=False)
# links = np.tile(36, 500)
# with Pool(31) as pool:
# #returns a list of np arrays, each array is B000(k;N) for each of the Nvals
# bprops36_400to500nucs = pool.map(partial(wlc.Bprop_k_fixed_linkers_fixed_unwrap, links=links, unwrap=0, Nvals=Nvals), kvals)
# #bprops should now be a matrix where rows are k values and columns are N values
# bprops36_400to500nucs = np.array(bprops36_400to500nucs)
# np.save(f'csvs/Bprops/0unwraps/36link/B0_k_given_N_36bplinkers_0unwraps_400to500nucs_30000Ks.npy', bprops36_400to500nucs, allow_pickle=False)
#HOMOGENOUS CHAINS
# %%time
# from multiprocessing import Pool
# if __name__ == '__main__':
# #this is the same-ish range that Quinn used
# Klin = np.linspace(0, 10**5, 20000)
# Klog = np.logspace(-3, 5, 10000)
# Kvals = np.unique(np.concatenate((Klin, Klog)))
# #convert to little k -- units of inverse bp (this results in kmax = 332)
# kvals = Kvals / (2*wlc.default_lp)
# #chain of 50 nucleosomes with constant 38bp linkers and 20bp unwrapping
# links = np.tile(38, 50)
# with Pool(31) as pool:
# #returns a list of np arrays, each array is B000(k;N) for each of the Nvals
# bprops38 = pool.map(partial(wlc.Bprop_k_fixed_linkers_fixed_unwrap, links=links, unwrap=20), kvals)
# #bprops should now be a matrix where rows are k values and columns are N values
# bprops38 = np.array(bprops38)
# np.save(f'csvs/Bprops/20unwraps/38link/B0_k_given_N_38bplinkers_20unwraps_50nucs_30000Ks.npy', bprops38, allow_pickle=False)
# %%time
# from multiprocessing import Pool
# if __name__ == '__main__':
# #this is the same-ish range that Quinn used
# Klin = np.linspace(0, 10**5, 20000)
# Klog = np.logspace(-3, 5, 10000)
# Kvals = np.unique(np.concatenate((Klin, Klog)))
# #convert to little k -- units of inverse bp (this results in kmax = 332)
# kvals = Kvals / (2*wlc.default_lp)
# rvals = np.linspace(0.0, 1.0, 1000)
# Kprops_bareWLC = wlc.tabulate_bareWLC_propagators(Kvals)
# print('Tabulated K propagators for bare WLC!')
# unwrap = 0
#     ### Sample heterogeneous linkers from an entire period
# links36to47 = np.random.randint(36, 48, 50)
# with Pool(31) as pool:
# #returns a list of B000(k;N=1) for each of the kvals
# bprops36to47 = pool.map(partial(wlc.Bprop_k_given_L, links=links36to47, unwraps=0), kvals)
# #bprops should now be a matrix where rows are k values and columns are N values (50 of them)
# bprops36to47 = np.array(bprops36to47)
# np.save(f'csvs/Bprops/0unwraps/heterogenous/B0_k_given_N_links36to47_50nucs_30000Ks.npy', bprops36to47, allow_pickle=False)
# np.save(f'csvs/Bprops/0unwraps/heterogenous/linker_lengths_36to47_50nucs.npy', links36to47, allow_pickle=False)
# qprop36to47 = wlc.bareWLC_gprop(kvals, links36to47, unwrap, props=Kprops_bareWLC)
# integral36to47 = wlc.BRN_fourier_integrand_splines(kvals, links36to47, unwrap, Bprop=bprops36to47, rvals=rvals) #default: 1000 rvals
# qintegral36to47 = wlc.BRN_fourier_integrand_splines(kvals, links36to47, unwrap, Bprop=qprop36to47, rvals=rvals) #default: 1000 rvals
# #integral takes ~10 min to run, so prob worth saving
# np.save(f'csvs/Bprops/{unwrap}unwraps/heterogenous/kinkedWLC_greens_links36to47_{len(rvals)}rvals_50nucs.npy', integral36to47, allow_pickle=False)
# np.save(f'csvs/Bprops/{unwrap}unwraps/heterogenous/bareWLC_greens_links36to47_{len(rvals)}rvals_50nucs.npy', qintegral36to47, allow_pickle=False)
# print(f'Saved G(R;L) for 36to47 links, {unwrap} unwrap!')
# #and do it again!
# links36to47_r2 = np.random.randint(36, 48, 50)
# with Pool(31) as pool:
# #returns a list of B000(k;N=1) for each of the kvals
#         bprops36to47_r2 = pool.map(partial(wlc.Bprop_k_given_L, links=links36to47_r2, unwraps=0), kvals)
# #bprops should now be a matrix where rows are k values and columns are N values (50 of them)
# bprops36to47_r2 = np.array(bprops36to47_r2)
# np.save(f'csvs/Bprops/0unwraps/heterogenous/B0_k_given_N_links36to47_r2_50nucs_30000Ks.npy', bprops36to47_r2, allow_pickle=False)
# np.save(f'csvs/Bprops/0unwraps/heterogenous/linker_lengths_36to47_r2_50nucs.npy', links36to47_r2, allow_pickle=False)
# qprop36to47_r2 = wlc.bareWLC_gprop(kvals, links36to47_r2, unwrap, props=Kprops_bareWLC)
# integral36to47_r2 = wlc.BRN_fourier_integrand_splines(kvals, links36to47_r2, unwrap, Bprop=bprops36to47_r2, rvals=rvals) #default: 1000 rvals
# qintegral36to47_r2 = wlc.BRN_fourier_integrand_splines(kvals, links36to47_r2, unwrap, Bprop=qprop36to47_r2, rvals=rvals) #default: 1000 rvals
# #integral takes ~10 min to run, so prob worth saving
# np.save(f'csvs/Bprops/{unwrap}unwraps/heterogenous/kinkedWLC_greens_links36to47_r2_{len(rvals)}rvals_50nucs.npy', integral36to47_r2, allow_pickle=False)
# np.save(f'csvs/Bprops/{unwrap}unwraps/heterogenous/bareWLC_greens_links36to47_r2_{len(rvals)}rvals_50nucs.npy', qintegral36to47_r2, allow_pickle=False)
# print(f'Saved G(R;L) for 36to47 links, {unwrap} unwrap round 2!')
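# Loading sketch for the arrays saved above (paths exactly as written by these
# cells; shapes are an assumption: ~30000 k values by 50 nucleosomes):
# bprops = np.load('csvs/Bprops/0unwraps/heterogenous/B0_k_given_N_links36to47_50nucs_30000Ks.npy')
# greens = np.load('csvs/Bprops/0unwraps/heterogenous/kinkedWLC_greens_links36to47_1000rvals_50nucs.npy')
# print(bprops.shape, greens.shape)  # bprops: rows are k values, columns are nucleosomes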
|
Python
|
CL
|
ae5a6d93f59a732751230e316016b1f1f26aec92c0591e4f90dbeefc61d1f7c9
|
# Class adapted from:
# https://github.com/TheMrGhostman/InceptionTime-Pytorch
# and
# https://github.com/okrasolar/pytorch-timeseries
import numpy as np
from typing import cast, Union, List
import torch
from torch import nn
from DeepLearning.deep_learning_utils import Conv1dSamePadding
class ShortNetwork(nn.Module):
"""A PyTorch implementation of the InceptionTime model.
From https://arxiv.org/abs/1909.04939
Attributes
----------
num_blocks:
The number of inception blocks to use. One inception block consists
of 3 convolutional layers, (optionally) a bottleneck and (optionally) a residual
connector
in_channels:
The number of input channels (i.e. input.shape[-1])
number_channel_out_conv:
The number of "hidden channels" to use. Can be a list (for each block) or an
int, in which case the same value will be applied to each block
bottleneck_channels:
The number of channels to use for the bottleneck. Can be list or int. If 0, no
bottleneck is applied
kernel_sizes:
The size of the kernels to use for each inception block. Within each block, each
of the 3 convolutional layers will have kernel size
`[kernel_size // (2 ** i) for i in range(3)]`
num_pred_classes:
The number of output classes
"""
def __init__(self, num_blocks: int, in_channels: int, number_channel_out_conv: Union[List[int], int],
bottleneck_channels: Union[List[int], int], kernel_sizes: Union[List[int], int],
use_residuals: Union[List[bool], bool, str] = 'default',
num_pred_classes: int = 2, activation_function=nn.SiLU
) -> None:
super().__init__()
# for easier saving and loading
self._input_args = {
'num_blocks': num_blocks,
'in_channels': in_channels,
'out_channels': number_channel_out_conv,
'bottleneck_channels': bottleneck_channels,
'kernel_sizes': kernel_sizes,
'use_residuals': use_residuals,
'num_pred_classes': num_pred_classes
}
bottleneck_channels = cast(List[int], self._expand_to_blocks(bottleneck_channels,
num_blocks))
kernel_sizes = cast(List[int], self._expand_to_blocks(kernel_sizes, num_blocks))
if use_residuals == 'default':
use_residuals = [True if i % 3 == 2 else False for i in range(num_blocks)]
use_residuals = cast(List[bool], self._expand_to_blocks(
cast(Union[bool, List[bool]], use_residuals), num_blocks)
)
blocks = []
for i in range(num_blocks):
if i == 0:
blocks.append(InceptionBlock(in_channels=in_channels, out_channels=number_channel_out_conv,
residual=use_residuals[i], bottleneck_channels=bottleneck_channels[i],
kernel_size=kernel_sizes[i], activation_function=activation_function))
else:
blocks.append(
InceptionBlock(in_channels=number_channel_out_conv * 4, out_channels=number_channel_out_conv,
residual=use_residuals[i], bottleneck_channels=bottleneck_channels[i],
kernel_size=kernel_sizes[i], activation_function=activation_function))
self._blocks = nn.Sequential(*blocks)
self._linear = nn.Linear(in_features=number_channel_out_conv * 4, out_features=num_pred_classes)
print(self)
print("Number Parameters: ", self.get_n_params())
def get_n_params(self):
model_parameters = filter(lambda p: p.requires_grad, self.parameters())
number_params = sum([np.prod(p.size()) for p in model_parameters])
return number_params
    def forward(self, input: torch.Tensor, get_features=False) -> torch.Tensor:  # type: ignore
        x = input
        # input arrives as (batch, time, channels); Conv1d expects (batch, channels, time)
        x = torch.swapaxes(x, 2, 1)
        x = self._blocks(x).mean(dim=-1)  # mean over the time axis = global average pooling
if get_features:
return x
else:
return self._linear(x)
@staticmethod
def _expand_to_blocks(value: Union[int, bool, List[int], List[bool]],
num_blocks: int) -> Union[List[int], List[bool]]:
if isinstance(value, list):
assert len(value) == num_blocks, \
f'Length of inputs lists must be the same as num blocks, ' \
f'expected length {num_blocks}, got {len(value)}'
else:
value = [value] * num_blocks
return value
class InceptionBlock(nn.Module):
"""An inception block consists of an (optional) bottleneck, followed
by 3 conv1d layers. Optionally residual
"""
def __init__(self, in_channels: int, out_channels: int,
residual: bool, activation_function, stride: int = 1, bottleneck_channels: int = 32,
kernel_size: int = 41) -> None:
super().__init__()
self._use_bottleneck = bottleneck_channels > 0
if self._use_bottleneck:
self._bottleneck = Conv1dSamePadding(in_channels, bottleneck_channels,
kernel_size=1, bias=False)
        kernel_size_s = [kernel_size // (2 ** i) for i in range(3)]  # e.g. 41 -> [41, 20, 10]; the +1 below keeps the halved kernels odd
start_channels = bottleneck_channels if self._use_bottleneck else in_channels
self._conv1 = Conv1dSamePadding(in_channels=start_channels, out_channels=out_channels,
kernel_size=kernel_size_s[0], stride=stride, bias=False)
self._conv2 = Conv1dSamePadding(in_channels=start_channels, out_channels=out_channels,
kernel_size=kernel_size_s[1] + 1, stride=stride, bias=False)
self._conv3 = Conv1dSamePadding(in_channels=start_channels, out_channels=out_channels,
kernel_size=kernel_size_s[2] + 1, stride=stride, bias=False)
self._maxpool = nn.MaxPool1d(kernel_size=3, stride=1, padding=1)
self._conv_from_maxpool = Conv1dSamePadding(in_channels=start_channels, out_channels=out_channels,
kernel_size=1, stride=1, bias=False)
self._activation = activation_function()
self._batch_norm = nn.BatchNorm1d(num_features=out_channels * 4)
self._use_residual = residual
if residual:
self._residual = nn.Sequential(*[
Conv1dSamePadding(in_channels=in_channels, out_channels=out_channels * 4,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm1d(out_channels * 4),
activation_function()
])
def forward(self, x: torch.Tensor) -> torch.Tensor: # type: ignore
original_x = x
if self._use_bottleneck:
x = self._bottleneck(x)
z_maxpool = self._maxpool(x)
z1 = self._conv1(x)
z2 = self._conv2(x)
z3 = self._conv3(x)
z4 = self._conv_from_maxpool(z_maxpool)
z_concatenated = torch.cat([z1, z2, z3, z4], 1)
z_concatenated = self._activation(self._batch_norm(z_concatenated))
if self._use_residual:
z_concatenated = z_concatenated + self._residual(original_x)
return z_concatenated
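# Minimal usage sketch (shapes illustrative; assumes Conv1dSamePadding, imported
# above, preserves sequence length as its name suggests):
if __name__ == '__main__':
    model = ShortNetwork(num_blocks=3, in_channels=12, number_channel_out_conv=32,
                         bottleneck_channels=32, kernel_sizes=41, num_pred_classes=2)
    x = torch.randn(8, 500, 12)             # (batch, time steps, input channels)
    logits = model(x)                       # -> (8, 2) class scores
    features = model(x, get_features=True)  # -> (8, 32 * 4) pooled block features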
|
Python
|
CL
|
ff6fcfa2312245300dc4df93ddaea78752669a387e56075963a59b681fcd3ce1
|
import torch
import torch.nn as nn
from collections import OrderedDict
# import tensorboardX as tbx
from package.model.vgg import vgg16, vgg16_bn
class Flatten(nn.Module):
def __init__(self):
super(Flatten, self).__init__()
def forward(self, x):
x = x.view(x.size(0), -1)
return x
class L2Normalization(nn.Module):
def __init__(self):
super(L2Normalization, self).__init__()
    def forward(self, x):
        # normalise each row of x to unit L2 norm; the double transpose
        # broadcasts the per-row norm over the feature dimension
        div = torch.sqrt(torch.sum(x * x, 1))
        x = (x.T / (div + 1e-10)).T  # epsilon avoids division by zero
        return x
class D3Shape(nn.Module):
"""
We use vgg16 for the network.
"""
def __init__(self, from_pretrain=True, batch_normalization=True, debug=False):
super(D3Shape, self).__init__()
self.debug = debug
_vgg16 = vgg16_bn if batch_normalization else vgg16
# two structure-identical networks that share no parameters
self.features_sk = _vgg16(pretrained=from_pretrain, return_type=1)
self.features_imsk = _vgg16(pretrained=from_pretrain, return_type=1)
def forward(self, sk=None, imsk=None):
rets = []
if sk is not None:
rets.append(self.features_sk(sk) if not self.debug else torch.zeros(sk.size(0), 10))
if imsk is not None:
            rets.append(self.features_imsk(imsk) if not self.debug else torch.zeros(imsk.size(0), 10))
return rets
if __name__=='__main__':
pass
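# Minimal usage sketch (an assumption-laden illustration: package.model.vgg's
# vgg16/vgg16_bn must accept `pretrained` and `return_type` as imported above,
# and standard 3x224x224 inputs):
# model = D3Shape(from_pretrain=False, batch_normalization=True)
# sk = torch.randn(4, 3, 224, 224)              # batch of sketches
# imsk = torch.randn(4, 3, 224, 224)            # batch of image+sketch renderings
# feat_sk, feat_imsk = model(sk=sk, imsk=imsk)  # one embedding per twin branch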
|
Python
|
CL
|
70d9e66571fc9558945b7d35e33d51c3db5e90b1ae6a75a9814923068212360a
|
# PySNMP SMI module. Autogenerated from smidump -f python OSPF-MIB
# by libsmi2pysnmp-0.1.3 at Thu May 22 11:58:06 2014,
# Python version sys.version_info(major=2, minor=7, micro=2, releaselevel='final', serial=0)
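# Usage sketch: generated modules like this are not imported directly; pysnmp's
# MibBuilder executes them with `mibBuilder` injected into the namespace, e.g.:
# from pysnmp.smi import builder
# mibBuilder = builder.MibBuilder()
# mibBuilder.loadModules('OSPF-MIB')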
# Imports
( Integer, ObjectIdentifier, OctetString, ) = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
( NamedValues, ) = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
( ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ) = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint")
( InterfaceIndexOrZero, ) = mibBuilder.importSymbols("IF-MIB", "InterfaceIndexOrZero")
( ModuleCompliance, ObjectGroup, ) = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup")
( Bits, Counter32, Gauge32, Integer32, IpAddress, ModuleIdentity, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, Unsigned32, mib_2, ) = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Counter32", "Gauge32", "Integer32", "IpAddress", "ModuleIdentity", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "Unsigned32", "mib-2")
( RowStatus, TextualConvention, TimeStamp, TruthValue, ) = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "TextualConvention", "TimeStamp", "TruthValue")
# Types
class BigMetric(TextualConvention, Integer32):
displayHint = "d-0"
subtypeSpec = Integer32.subtypeSpec+ValueRangeConstraint(0,16777215)
class DesignatedRouterPriority(TextualConvention, Integer32):
displayHint = "d-0"
subtypeSpec = Integer32.subtypeSpec+ValueRangeConstraint(0,255)
class HelloRange(TextualConvention, Integer32):
displayHint = "d-0"
subtypeSpec = Integer32.subtypeSpec+ValueRangeConstraint(1,65535)
class Metric(TextualConvention, Integer32):
displayHint = "d-0"
subtypeSpec = Integer32.subtypeSpec+ValueRangeConstraint(0,65535)
class OspfAuthenticationType(Integer):
subtypeSpec = Integer.subtypeSpec+SingleValueConstraint(0,1,2,)
namedValues = NamedValues(("none", 0), ("simplePassword", 1), ("md5", 2), )
class PositiveInteger(TextualConvention, Integer32):
displayHint = "d-0"
subtypeSpec = Integer32.subtypeSpec+ValueRangeConstraint(0,2147483647)
class Status(Integer):
subtypeSpec = Integer.subtypeSpec+SingleValueConstraint(2,1,)
namedValues = NamedValues(("enabled", 1), ("disabled", 2), )
class TOSType(TextualConvention, Integer32):
displayHint = "d-0"
subtypeSpec = Integer32.subtypeSpec+ValueRangeConstraint(0,30)
class UpToMaxAge(TextualConvention, Integer32):
displayHint = "d-0"
subtypeSpec = Integer32.subtypeSpec+ValueRangeConstraint(0,3600)
class AreaID(IpAddress):
pass
class RouterID(IpAddress):
pass
# Objects
ospf = ModuleIdentity((1, 3, 6, 1, 2, 1, 14)).setRevisions(("2006-11-10 00:00","1995-01-20 12:25",))
if mibBuilder.loadTexts: ospf.setOrganization("IETF OSPF Working Group")
if mibBuilder.loadTexts: ospf.setContactInfo("WG E-Mail: ospf@ietf.org\n\nWG Chairs: acee@cisco.com\n rohit@gmail.com\n\nEditors: Dan Joyal\n Nortel\n 600 Technology Park Drive\n Billerica, MA 01821\n djoyal@nortel.com\n\n Piotr Galecki\n Airvana\n 19 Alpha Road\n Chelmsford, MA 01824\n pgalecki@airvana.com\n\n Spencer Giacalone\n CSFB\n Eleven Madison Ave\n New York, NY 10010-3629\n spencer.giacalone@gmail.com")
if mibBuilder.loadTexts: ospf.setDescription("The MIB module to describe the OSPF Version 2\nProtocol. Note that some objects in this MIB\nmodule may pose a significant security risk.\nRefer to the Security Considerations section\nin RFC 4750 for more information.\n\n\n\nCopyright (C) The IETF Trust (2006).\nThis version of this MIB module is part of\nRFC 4750; see the RFC itself for full legal\nnotices.")
ospfGeneralGroup = MibIdentifier((1, 3, 6, 1, 2, 1, 14, 1))
ospfRouterId = MibScalar((1, 3, 6, 1, 2, 1, 14, 1, 1), RouterID()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ospfRouterId.setDescription("A 32-bit integer uniquely identifying the\nrouter in the Autonomous System.\nBy convention, to ensure uniqueness, this\nshould default to the value of one of the\nrouter's IP interface addresses.\n\nThis object is persistent and when written\nthe entity SHOULD save the change to non-volatile storage.")
ospfAdminStat = MibScalar((1, 3, 6, 1, 2, 1, 14, 1, 2), Status()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ospfAdminStat.setDescription("The administrative status of OSPF in the\nrouter. The value 'enabled' denotes that the\nOSPF Process is active on at least one interface;\n'disabled' disables it on all interfaces.\n\nThis object is persistent and when written\nthe entity SHOULD save the change to non-volatile storage.")
ospfVersionNumber = MibScalar((1, 3, 6, 1, 2, 1, 14, 1, 3), Integer().subtype(subtypeSpec=SingleValueConstraint(2,)).subtype(namedValues=NamedValues(("version2", 2), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfVersionNumber.setDescription("The current version number of the OSPF protocol is 2.")
ospfAreaBdrRtrStatus = MibScalar((1, 3, 6, 1, 2, 1, 14, 1, 4), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfAreaBdrRtrStatus.setDescription("A flag to note whether this router is an Area\nBorder Router.")
ospfASBdrRtrStatus = MibScalar((1, 3, 6, 1, 2, 1, 14, 1, 5), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ospfASBdrRtrStatus.setDescription("A flag to note whether this router is configured as\nan Autonomous System Border Router.\n\nThis object is persistent and when written the\nentity SHOULD save the change to non-volatile storage.")
ospfExternLsaCount = MibScalar((1, 3, 6, 1, 2, 1, 14, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfExternLsaCount.setDescription("The number of external (LS type-5) link state\nadvertisements in the link state database.")
ospfExternLsaCksumSum = MibScalar((1, 3, 6, 1, 2, 1, 14, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfExternLsaCksumSum.setDescription("The 32-bit sum of the LS checksums of\nthe external link state advertisements\ncontained in the link state database. This sum\ncan be used to determine if there has been a\nchange in a router's link state database and\nto compare the link state database of two\nrouters. The value should be treated as unsigned\nwhen comparing two sums of checksums.")
ospfTOSSupport = MibScalar((1, 3, 6, 1, 2, 1, 14, 1, 8), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ospfTOSSupport.setDescription("The router's support for type-of-service routing.\n\nThis object is persistent and when written\nthe entity SHOULD save the change to non-volatile\nstorage.")
ospfOriginateNewLsas = MibScalar((1, 3, 6, 1, 2, 1, 14, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfOriginateNewLsas.setDescription("The number of new link state advertisements\nthat have been originated. This number is\nincremented each time the router originates a new\nLSA.\n\nDiscontinuities in the value of this counter can\noccur at re-initialization of the management system,\nand at other times as indicated by the value of\nospfDiscontinuityTime.")
ospfRxNewLsas = MibScalar((1, 3, 6, 1, 2, 1, 14, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfRxNewLsas.setDescription("The number of link state advertisements received\nthat are determined to be new instantiations.\nThis number does not include newer instantiations\nof self-originated link state advertisements.\n\nDiscontinuities in the value of this counter can\noccur at re-initialization of the management system,\nand at other times as indicated by the value of\nospfDiscontinuityTime.")
ospfExtLsdbLimit = MibScalar((1, 3, 6, 1, 2, 1, 14, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647)).clone(-1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ospfExtLsdbLimit.setDescription("The maximum number of non-default\nAS-external LSAs entries that can be stored in the\nlink state database. If the value is -1, then\nthere is no limit.\n\nWhen the number of non-default AS-external LSAs\nin a router's link state database reaches\nospfExtLsdbLimit, the router enters\noverflow state. The router never holds more than\nospfExtLsdbLimit non-default AS-external LSAs\nin its database. OspfExtLsdbLimit MUST be set\nidentically in all routers attached to the OSPF\nbackbone and/or any regular OSPF area (i.e.,\nOSPF stub areas and NSSAs are excluded).\n\nThis object is persistent and when written\nthe entity SHOULD save the change to non-volatile\nstorage.")
ospfMulticastExtensions = MibScalar((1, 3, 6, 1, 2, 1, 14, 1, 12), Integer32().clone(0)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ospfMulticastExtensions.setDescription("A bit mask indicating whether the router is\nforwarding IP multicast (Class D) datagrams\nbased on the algorithms defined in the\nmulticast extensions to OSPF.\n\nBit 0, if set, indicates that the router can\n\n\n\nforward IP multicast datagrams in the router's\ndirectly attached areas (called intra-area\nmulticast routing).\n\nBit 1, if set, indicates that the router can\nforward IP multicast datagrams between OSPF\nareas (called inter-area multicast routing).\n\nBit 2, if set, indicates that the router can\nforward IP multicast datagrams between\nAutonomous Systems (called inter-AS multicast\nrouting).\n\nOnly certain combinations of bit settings are\nallowed, namely: 0 (no multicast forwarding is\nenabled), 1 (intra-area multicasting only), 3\n(intra-area and inter-area multicasting), 5\n(intra-area and inter-AS multicasting), and 7\n(multicasting everywhere). By default, no\nmulticast forwarding is enabled.\n\nThis object is persistent and when written\nthe entity SHOULD save the change to non-volatile\nstorage.")
ospfExitOverflowInterval = MibScalar((1, 3, 6, 1, 2, 1, 14, 1, 13), PositiveInteger().clone('0')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ospfExitOverflowInterval.setDescription("The number of seconds that, after entering\nOverflowState, a router will attempt to leave\nOverflowState. This allows the router to again\noriginate non-default AS-external LSAs. When\nset to 0, the router will not leave\noverflow state until restarted.\n\nThis object is persistent and when written\nthe entity SHOULD save the change to non-volatile\nstorage.")
ospfDemandExtensions = MibScalar((1, 3, 6, 1, 2, 1, 14, 1, 14), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ospfDemandExtensions.setDescription("The router's support for demand routing.\nThis object is persistent and when written\nthe entity SHOULD save the change to non-volatile\nstorage.")
ospfRFC1583Compatibility = MibScalar((1, 3, 6, 1, 2, 1, 14, 1, 15), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ospfRFC1583Compatibility.setDescription("Indicates metrics used to choose among multiple\nAS-external LSAs. When RFC1583Compatibility is set to\nenabled, only cost will be used when choosing among\nmultiple AS-external LSAs advertising the same\ndestination. When RFC1583Compatibility is set to\ndisabled, preference will be driven first by type of\npath using cost only to break ties.\n\nThis object is persistent and when written\nthe entity SHOULD save the change to non-volatile\nstorage.")
ospfOpaqueLsaSupport = MibScalar((1, 3, 6, 1, 2, 1, 14, 1, 16), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfOpaqueLsaSupport.setDescription("The router's support for Opaque LSA types.")
ospfReferenceBandwidth = MibScalar((1, 3, 6, 1, 2, 1, 14, 1, 17), Unsigned32()).setMaxAccess("readwrite").setUnits("kilobits per second")
if mibBuilder.loadTexts: ospfReferenceBandwidth.setDescription("Reference bandwidth in kilobits/second for\n\n\n\ncalculating default interface metrics. The\ndefault value is 100,000 KBPS (100 MBPS).\n\nThis object is persistent and when written\nthe entity SHOULD save the change to non-volatile\nstorage.")
ospfRestartSupport = MibScalar((1, 3, 6, 1, 2, 1, 14, 1, 18), Integer().subtype(subtypeSpec=SingleValueConstraint(3,1,2,)).subtype(namedValues=NamedValues(("none", 1), ("plannedOnly", 2), ("plannedAndUnplanned", 3), ))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ospfRestartSupport.setDescription("The router's support for OSPF graceful restart.\nOptions include: no restart support, only planned\nrestarts, or both planned and unplanned restarts.\n\nThis object is persistent and when written\nthe entity SHOULD save the change to non-volatile\nstorage.")
ospfRestartInterval = MibScalar((1, 3, 6, 1, 2, 1, 14, 1, 19), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1800))).setMaxAccess("readwrite").setUnits("seconds")
if mibBuilder.loadTexts: ospfRestartInterval.setDescription("Configured OSPF graceful restart timeout interval.\n\nThis object is persistent and when written\nthe entity SHOULD save the change to non-volatile\nstorage.")
ospfRestartStrictLsaChecking = MibScalar((1, 3, 6, 1, 2, 1, 14, 1, 20), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ospfRestartStrictLsaChecking.setDescription("Indicates if strict LSA checking is enabled for\ngraceful restart.\n\nThis object is persistent and when written\nthe entity SHOULD save the change to non-volatile\n\n\n\nstorage.")
ospfRestartStatus = MibScalar((1, 3, 6, 1, 2, 1, 14, 1, 21), Integer().subtype(subtypeSpec=SingleValueConstraint(2,3,1,)).subtype(namedValues=NamedValues(("notRestarting", 1), ("plannedRestart", 2), ("unplannedRestart", 3), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfRestartStatus.setDescription("Current status of OSPF graceful restart.")
ospfRestartAge = MibScalar((1, 3, 6, 1, 2, 1, 14, 1, 22), Unsigned32()).setMaxAccess("readonly").setUnits("seconds")
if mibBuilder.loadTexts: ospfRestartAge.setDescription("Remaining time in current OSPF graceful restart\ninterval.")
ospfRestartExitReason = MibScalar((1, 3, 6, 1, 2, 1, 14, 1, 23), Integer().subtype(subtypeSpec=SingleValueConstraint(1,5,3,4,2,)).subtype(namedValues=NamedValues(("none", 1), ("inProgress", 2), ("completed", 3), ("timedOut", 4), ("topologyChanged", 5), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfRestartExitReason.setDescription("Describes the outcome of the last attempt at a\ngraceful restart. If the value is 'none', no restart\nhas yet been attempted. If the value is 'inProgress',\na restart attempt is currently underway.")
ospfAsLsaCount = MibScalar((1, 3, 6, 1, 2, 1, 14, 1, 24), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfAsLsaCount.setDescription("The number of AS-scope link state\nadvertisements in the AS-scope link state database.")
ospfAsLsaCksumSum = MibScalar((1, 3, 6, 1, 2, 1, 14, 1, 25), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfAsLsaCksumSum.setDescription("The 32-bit unsigned sum of the LS checksums of\nthe AS link state advertisements contained in the AS-scope\nlink state database. This sum can be used to determine\nif there has been a change in a router's AS-scope link\nstate database, and to compare the AS-scope link state\ndatabase of two routers.")
ospfStubRouterSupport = MibScalar((1, 3, 6, 1, 2, 1, 14, 1, 26), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfStubRouterSupport.setDescription("The router's support for stub router functionality.")
ospfStubRouterAdvertisement = MibScalar((1, 3, 6, 1, 2, 1, 14, 1, 27), Integer().subtype(subtypeSpec=SingleValueConstraint(2,1,)).subtype(namedValues=NamedValues(("doNotAdvertise", 1), ("advertise", 2), ))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ospfStubRouterAdvertisement.setDescription("This object controls the advertisement of\nstub router LSAs by the router. The value\ndoNotAdvertise will result in the advertisement\nof a standard router LSA and is the default value.\n\nThis object is persistent and when written\nthe entity SHOULD save the change to non-volatile\nstorage.")
ospfDiscontinuityTime = MibScalar((1, 3, 6, 1, 2, 1, 14, 1, 28), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfDiscontinuityTime.setDescription("The value of sysUpTime on the most recent occasion\nat which any one of this MIB's counters suffered\na discontinuity.\n\nIf no such discontinuities have occurred since the last\nre-initialization of the local management subsystem,\nthen this object contains a zero value.")
ospfAreaTable = MibTable((1, 3, 6, 1, 2, 1, 14, 2))
if mibBuilder.loadTexts: ospfAreaTable.setDescription("Information describing the configured parameters and\ncumulative statistics of the router's attached areas.\nThe interfaces and virtual links are configured\nas part of these areas. Area 0.0.0.0, by definition,\nis the backbone area.")
ospfAreaEntry = MibTableRow((1, 3, 6, 1, 2, 1, 14, 2, 1)).setIndexNames((0, "OSPF-MIB", "ospfAreaId"))
if mibBuilder.loadTexts: ospfAreaEntry.setDescription("Information describing the configured parameters and\ncumulative statistics of one of the router's attached areas.\nThe interfaces and virtual links are configured as part of\nthese areas. Area 0.0.0.0, by definition, is the backbone\narea.\n\nInformation in this table is persistent and when this object\nis written the entity SHOULD save the change to non-volatile\nstorage.")
ospfAreaId = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 2, 1, 1), AreaID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfAreaId.setDescription("A 32-bit integer uniquely identifying an area.\nArea ID 0.0.0.0 is used for the OSPF backbone.")
ospfAuthType = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 2, 1, 2), OspfAuthenticationType().clone('none')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfAuthType.setDescription("The authentication type specified for an area.")
ospfImportAsExtern = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 2, 1, 3), Integer().subtype(subtypeSpec=SingleValueConstraint(1,2,3,)).subtype(namedValues=NamedValues(("importExternal", 1), ("importNoExternal", 2), ("importNssa", 3), )).clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfImportAsExtern.setDescription("Indicates if an area is a stub area, NSSA, or standard\narea. Type-5 AS-external LSAs and type-11 Opaque LSAs are\nnot imported into stub areas or NSSAs. NSSAs import\nAS-external data as type-7 LSAs")
ospfSpfRuns = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 2, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfSpfRuns.setDescription("The number of times that the intra-area route\ntable has been calculated using this area's\nlink state database. This is typically done\nusing Dijkstra's algorithm.\n\nDiscontinuities in the value of this counter can occur\nat re-initialization of the management system, and at other\ntimes as indicated by the value of ospfDiscontinuityTime.")
ospfAreaBdrRtrCount = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 2, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfAreaBdrRtrCount.setDescription("The total number of Area Border Routers reachable\nwithin this area. This is initially zero and is\ncalculated in each Shortest Path First (SPF) pass.")
ospfAsBdrRtrCount = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 2, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfAsBdrRtrCount.setDescription("The total number of Autonomous System Border\nRouters reachable within this area. This is\ninitially zero and is calculated in each SPF\npass.")
ospfAreaLsaCount = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 2, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfAreaLsaCount.setDescription("The total number of link state advertisements\nin this area's link state database, excluding\nAS-external LSAs.")
ospfAreaLsaCksumSum = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 2, 1, 8), Integer32().clone(0)).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfAreaLsaCksumSum.setDescription("The 32-bit sum of the link state\nadvertisements' LS checksums contained in this\narea's link state database. This sum excludes\nexternal (LS type-5) link state advertisements.\nThe sum can be used to determine if there has\nbeen a change in a router's link state\ndatabase, and to compare the link state database of\ntwo routers. The value should be treated as unsigned\nwhen comparing two sums of checksums.")
ospfAreaSummary = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 2, 1, 9), Integer().subtype(subtypeSpec=SingleValueConstraint(2,1,)).subtype(namedValues=NamedValues(("noAreaSummary", 1), ("sendAreaSummary", 2), )).clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfAreaSummary.setDescription("The variable ospfAreaSummary controls the\nimport of summary LSAs into stub and NSSA areas.\nIt has no effect on other areas.\n\nIf it is noAreaSummary, the router will not\noriginate summary LSAs into the stub or NSSA area.\nIt will rely entirely on its default route.\n\nIf it is sendAreaSummary, the router will both\nsummarize and propagate summary LSAs.")
ospfAreaStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 2, 1, 10), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfAreaStatus.setDescription("This object permits management of the table by\nfacilitating actions such as row creation,\nconstruction, and destruction.\n\nThe value of this object has no effect on\nwhether other objects in this conceptual row can be\nmodified.")
ospfAreaNssaTranslatorRole = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 2, 1, 11), Integer().subtype(subtypeSpec=SingleValueConstraint(1,2,)).subtype(namedValues=NamedValues(("always", 1), ("candidate", 2), )).clone(2)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfAreaNssaTranslatorRole.setDescription("Indicates an NSSA border router's ability to\nperform NSSA translation of type-7 LSAs into\ntype-5 LSAs.")
ospfAreaNssaTranslatorState = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 2, 1, 12), Integer().subtype(subtypeSpec=SingleValueConstraint(3,2,1,)).subtype(namedValues=NamedValues(("enabled", 1), ("elected", 2), ("disabled", 3), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfAreaNssaTranslatorState.setDescription("Indicates if and how an NSSA border router is\nperforming NSSA translation of type-7 LSAs into type-5\n\n\n\nLSAs. When this object is set to enabled, the NSSA Border\nrouter's OspfAreaNssaExtTranslatorRole has been set to\nalways. When this object is set to elected, a candidate\nNSSA Border router is Translating type-7 LSAs into type-5.\nWhen this object is set to disabled, a candidate NSSA\nborder router is NOT translating type-7 LSAs into type-5.")
ospfAreaNssaTranslatorStabilityInterval = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 2, 1, 13), PositiveInteger().clone('40')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfAreaNssaTranslatorStabilityInterval.setDescription("The number of seconds after an elected translator\ndetermines its services are no longer required, that\nit should continue to perform its translation duties.")
ospfAreaNssaTranslatorEvents = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 2, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfAreaNssaTranslatorEvents.setDescription("Indicates the number of translator state changes\nthat have occurred since the last boot-up.\n\nDiscontinuities in the value of this counter can occur\nat re-initialization of the management system, and at other\ntimes as indicated by the value of ospfDiscontinuityTime.")
ospfStubAreaTable = MibTable((1, 3, 6, 1, 2, 1, 14, 3))
if mibBuilder.loadTexts: ospfStubAreaTable.setDescription("The set of metrics that will be advertised\nby a default Area Border Router into a stub area.")
ospfStubAreaEntry = MibTableRow((1, 3, 6, 1, 2, 1, 14, 3, 1)).setIndexNames((0, "OSPF-MIB", "ospfStubAreaId"), (0, "OSPF-MIB", "ospfStubTOS"))
if mibBuilder.loadTexts: ospfStubAreaEntry.setDescription("The metric for a given Type of Service that\nwill be advertised by a default Area Border\nRouter into a stub area.\n\nInformation in this table is persistent and when this object\nis written the entity SHOULD save the change to non-volatile\nstorage.")
ospfStubAreaId = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 3, 1, 1), AreaID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfStubAreaId.setDescription("The 32-bit identifier for the stub area. On\ncreation, this can be derived from the\ninstance.")
ospfStubTOS = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 3, 1, 2), TOSType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfStubTOS.setDescription("The Type of Service associated with the\nmetric. On creation, this can be derived from\n\n\n\nthe instance.")
ospfStubMetric = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 3, 1, 3), BigMetric()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfStubMetric.setDescription("The metric value applied at the indicated Type\nof Service. By default, this equals the least\nmetric at the Type of Service among the\ninterfaces to other areas.")
ospfStubStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 3, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfStubStatus.setDescription("This object permits management of the table by\nfacilitating actions such as row creation,\nconstruction, and destruction.\n\nThe value of this object has no effect on\nwhether other objects in this conceptual row can be\nmodified.")
ospfStubMetricType = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 3, 1, 5), Integer().subtype(subtypeSpec=SingleValueConstraint(3,2,1,)).subtype(namedValues=NamedValues(("ospfMetric", 1), ("comparableCost", 2), ("nonComparable", 3), )).clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfStubMetricType.setDescription("This variable displays the type of metric\nadvertised as a default route.")
ospfLsdbTable = MibTable((1, 3, 6, 1, 2, 1, 14, 4))
if mibBuilder.loadTexts: ospfLsdbTable.setDescription("The OSPF Process's link state database (LSDB).\nThe LSDB contains the link state advertisements\nfrom throughout the areas that the device is attached to.")
ospfLsdbEntry = MibTableRow((1, 3, 6, 1, 2, 1, 14, 4, 1)).setIndexNames((0, "OSPF-MIB", "ospfLsdbAreaId"), (0, "OSPF-MIB", "ospfLsdbType"), (0, "OSPF-MIB", "ospfLsdbLsid"), (0, "OSPF-MIB", "ospfLsdbRouterId"))
if mibBuilder.loadTexts: ospfLsdbEntry.setDescription("A single link state advertisement.")
ospfLsdbAreaId = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 4, 1, 1), AreaID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfLsdbAreaId.setDescription("The 32-bit identifier of the area from which\nthe LSA was received.")
ospfLsdbType = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 4, 1, 2), Integer().subtype(subtypeSpec=SingleValueConstraint(1,3,4,10,5,2,7,6,)).subtype(namedValues=NamedValues(("routerLink", 1), ("areaOpaqueLink", 10), ("networkLink", 2), ("summaryLink", 3), ("asSummaryLink", 4), ("asExternalLink", 5), ("multicastLink", 6), ("nssaExternalLink", 7), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfLsdbType.setDescription("The type of the link state advertisement.\nEach link state type has a separate advertisement\nformat.\n\nNote: External link state advertisements are permitted\nfor backward compatibility, but should be displayed\nin the ospfAsLsdbTable rather than here.")
ospfLsdbLsid = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 4, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfLsdbLsid.setDescription("The Link State ID is an LS Type Specific field\ncontaining either a Router ID or an IP address;\nit identifies the piece of the routing domain\nthat is being described by the advertisement.")
ospfLsdbRouterId = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 4, 1, 4), RouterID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfLsdbRouterId.setDescription("The 32-bit number that uniquely identifies the\noriginating router in the Autonomous System.")
ospfLsdbSequence = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 4, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfLsdbSequence.setDescription("The sequence number field is a signed 32-bit\ninteger. It starts with the value '80000001'h,\nor -'7FFFFFFF'h, and increments until '7FFFFFFF'h.\nThus, a typical sequence number will be very negative.\nIt is used to detect old and duplicate Link State\nAdvertisements. The space of sequence numbers is linearly\nordered. The larger the sequence number, the more recent\nthe advertisement.")
ospfLsdbAge = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 4, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfLsdbAge.setDescription("This field is the age of the link state advertisement\nin seconds.")
ospfLsdbChecksum = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 4, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfLsdbChecksum.setDescription("This field is the checksum of the complete contents of\nthe advertisement, excepting the age field. The age field\nis excepted so that an advertisement's age can be\nincremented without updating the checksum. The checksum\nused is the same that is used for ISO connectionless\n\n\n\ndatagrams; it is commonly referred to as the\nFletcher checksum.")
ospfLsdbAdvertisement = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 4, 1, 8), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfLsdbAdvertisement.setDescription("The entire link state advertisement, including\nits header.\n\nNote that for variable length LSAs, SNMP agents\nmay not be able to return the largest string size.")
ospfAreaRangeTable = MibTable((1, 3, 6, 1, 2, 1, 14, 5))
if mibBuilder.loadTexts: ospfAreaRangeTable.setDescription("The Address Range Table acts as an adjunct to the Area\nTable. It describes those Address Range Summaries that\nare configured to be propagated from an Area to reduce\nthe amount of information about it that is known beyond\nits borders. It contains a set of IP address ranges\nspecified by an IP address/IP network mask pair.\nFor example, class B address range of X.X.X.X\nwith a network mask of 255.255.0.0 includes all IP\naddresses from X.X.0.0 to X.X.255.255.\n\nNote that this table is obsoleted and is replaced\nby the Area Aggregate Table.")
ospfAreaRangeEntry = MibTableRow((1, 3, 6, 1, 2, 1, 14, 5, 1)).setIndexNames((0, "OSPF-MIB", "ospfAreaRangeAreaId"), (0, "OSPF-MIB", "ospfAreaRangeNet"))
if mibBuilder.loadTexts: ospfAreaRangeEntry.setDescription("A single area address range.\n\nInformation in this table is persistent and when this object\nis written the entity SHOULD save the change to non-volatile\nstorage.")
ospfAreaRangeAreaId = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 5, 1, 1), AreaID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfAreaRangeAreaId.setDescription("The area that the address range is to be found\nwithin.")
ospfAreaRangeNet = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 5, 1, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfAreaRangeNet.setDescription("The IP address of the net or subnet indicated\nby the range.")
ospfAreaRangeMask = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 5, 1, 3), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfAreaRangeMask.setDescription("The subnet mask that pertains to the net or\nsubnet.")
ospfAreaRangeStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 5, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfAreaRangeStatus.setDescription("This object permits management of the table by\nfacilitating actions such as row creation,\nconstruction, and destruction.\n\nThe value of this object has no effect on\nwhether other objects in this conceptual row can be\nmodified.")
ospfAreaRangeEffect = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 5, 1, 5), Integer().subtype(subtypeSpec=SingleValueConstraint(1,2,)).subtype(namedValues=NamedValues(("advertiseMatching", 1), ("doNotAdvertiseMatching", 2), )).clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfAreaRangeEffect.setDescription("Subnets subsumed by ranges either trigger the\nadvertisement of the indicated summary\n(advertiseMatching) or result in the subnet's not\nbeing advertised at all outside the area.")
ospfHostTable = MibTable((1, 3, 6, 1, 2, 1, 14, 6))
if mibBuilder.loadTexts: ospfHostTable.setDescription("The Host/Metric Table indicates what hosts are directly\n\n\n\nattached to the router, what metrics and types\nof service should be advertised for them,\nand what areas they are found within.")
ospfHostEntry = MibTableRow((1, 3, 6, 1, 2, 1, 14, 6, 1)).setIndexNames((0, "OSPF-MIB", "ospfHostIpAddress"), (0, "OSPF-MIB", "ospfHostTOS"))
if mibBuilder.loadTexts: ospfHostEntry.setDescription("A metric to be advertised, for a given type of\nservice, when a given host is reachable.\n\nInformation in this table is persistent and when this object\nis written the entity SHOULD save the change to non-volatile\nstorage.")
ospfHostIpAddress = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 6, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfHostIpAddress.setDescription("The IP address of the host.")
ospfHostTOS = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 6, 1, 2), TOSType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfHostTOS.setDescription("The Type of Service of the route being configured.")
ospfHostMetric = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 6, 1, 3), Metric()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfHostMetric.setDescription("The metric to be advertised.")
ospfHostStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 6, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfHostStatus.setDescription("This object permits management of the table by\nfacilitating actions such as row creation,\nconstruction, and destruction.\n\nThe value of this object has no effect on\nwhether other objects in this conceptual row can be\nmodified.")
ospfHostAreaID = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 6, 1, 5), AreaID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfHostAreaID.setDescription("The OSPF area to which the host belongs.\nDeprecated by ospfHostCfgAreaID.")
ospfHostCfgAreaID = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 6, 1, 6), AreaID()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfHostCfgAreaID.setDescription("To configure the OSPF area to which the host belongs.")
ospfIfTable = MibTable((1, 3, 6, 1, 2, 1, 14, 7))
if mibBuilder.loadTexts: ospfIfTable.setDescription("The OSPF Interface Table describes the interfaces\nfrom the viewpoint of OSPF.\nIt augments the ipAddrTable with OSPF specific information.")
ospfIfEntry = MibTableRow((1, 3, 6, 1, 2, 1, 14, 7, 1)).setIndexNames((0, "OSPF-MIB", "ospfIfIpAddress"), (0, "OSPF-MIB", "ospfAddressLessIf"))
if mibBuilder.loadTexts: ospfIfEntry.setDescription("The OSPF interface entry describes one interface\nfrom the viewpoint of OSPF.\n\nInformation in this table is persistent and when this object\nis written the entity SHOULD save the change to non-volatile\nstorage.")
ospfIfIpAddress = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 7, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfIfIpAddress.setDescription("The IP address of this OSPF interface.")
ospfAddressLessIf = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 7, 1, 2), InterfaceIndexOrZero()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfAddressLessIf.setDescription("For the purpose of easing the instancing of\naddressed and addressless interfaces; this\nvariable takes the value 0 on interfaces with\nIP addresses and the corresponding value of\nifIndex for interfaces having no IP address.")
ospfIfAreaId = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 7, 1, 3), AreaID().clone(hexValue='00000000')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfIfAreaId.setDescription("A 32-bit integer uniquely identifying the area\nto which the interface connects. Area ID\n0.0.0.0 is used for the OSPF backbone.")
ospfIfType = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 7, 1, 4), Integer().subtype(subtypeSpec=SingleValueConstraint(1,2,5,3,)).subtype(namedValues=NamedValues(("broadcast", 1), ("nbma", 2), ("pointToPoint", 3), ("pointToMultipoint", 5), ))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfIfType.setDescription("The OSPF interface type.\nBy way of a default, this field may be intuited\nfrom the corresponding value of ifType.\nBroadcast LANs, such as Ethernet and IEEE 802.5,\ntake the value 'broadcast', X.25 and similar\ntechnologies take the value 'nbma', and links\nthat are definitively point to point take the\nvalue 'pointToPoint'.")
ospfIfAdminStat = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 7, 1, 5), Status().clone('enabled')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfIfAdminStat.setDescription("The OSPF interface's administrative status.\nThe value formed on the interface, and the interface\nwill be advertised as an internal route to some area.\nThe value 'disabled' denotes that the interface is\nexternal to OSPF.")
ospfIfRtrPriority = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 7, 1, 6), DesignatedRouterPriority().clone('1')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfIfRtrPriority.setDescription("The priority of this interface. Used in\nmulti-access networks, this field is used in\nthe designated router election algorithm. The\nvalue 0 signifies that the router is not eligible\nto become the designated router on this particular\nnetwork. In the event of a tie in this value,\nrouters will use their Router ID as a tie breaker.")
ospfIfTransitDelay = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 7, 1, 7), UpToMaxAge().clone('1')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfIfTransitDelay.setDescription("The estimated number of seconds it takes to\ntransmit a link state update packet over this\ninterface. Note that the minimal value SHOULD be\n1 second.")
ospfIfRetransInterval = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 7, 1, 8), UpToMaxAge().clone('5')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfIfRetransInterval.setDescription("The number of seconds between link state advertisement\nretransmissions, for adjacencies belonging to this\ninterface. This value is also used when retransmitting\n\n\n\ndatabase description and Link State request packets.\nNote that minimal value SHOULD be 1 second.")
ospfIfHelloInterval = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 7, 1, 9), HelloRange().clone('10')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfIfHelloInterval.setDescription("The length of time, in seconds, between the Hello packets\nthat the router sends on the interface. This value must be\nthe same for all routers attached to a common network.")
ospfIfRtrDeadInterval = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 7, 1, 10), PositiveInteger().clone('40')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfIfRtrDeadInterval.setDescription("The number of seconds that a router's Hello packets have\nnot been seen before its neighbors declare the router down.\nThis should be some multiple of the Hello interval. This\nvalue must be the same for all routers attached to a common\nnetwork.")
ospfIfPollInterval = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 7, 1, 11), PositiveInteger().clone('120')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfIfPollInterval.setDescription("The larger time interval, in seconds, between the Hello\npackets sent to an inactive non-broadcast multi-access\nneighbor.")
ospfIfState = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 7, 1, 12), Integer().subtype(subtypeSpec=SingleValueConstraint(1,5,3,6,7,2,4,)).subtype(namedValues=NamedValues(("down", 1), ("loopback", 2), ("waiting", 3), ("pointToPoint", 4), ("designatedRouter", 5), ("backupDesignatedRouter", 6), ("otherDesignatedRouter", 7), )).clone(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfIfState.setDescription("The OSPF Interface State.")
ospfIfDesignatedRouter = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 7, 1, 13), IpAddress().clone("0.0.0.0")).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfIfDesignatedRouter.setDescription("The IP address of the designated router.")
ospfIfBackupDesignatedRouter = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 7, 1, 14), IpAddress().clone("0.0.0.0")).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfIfBackupDesignatedRouter.setDescription("The IP address of the backup designated\nrouter.")
ospfIfEvents = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 7, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfIfEvents.setDescription("The number of times this OSPF interface has\nchanged its state or an error has occurred.\n\nDiscontinuities in the value of this counter can occur\nat re-initialization of the management system, and at other\ntimes as indicated by the value of ospfDiscontinuityTime.")
ospfIfAuthKey = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 7, 1, 16), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 256)).clone(hexValue='0000000000000000')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfIfAuthKey.setDescription("The cleartext password used as an OSPF\nauthentication key when simplePassword security\nis enabled. This object does not access any OSPF\ncryptographic (e.g., MD5) authentication key under\nany circumstance.\n\nIf the key length is shorter than 8 octets, the\nagent will left adjust and zero fill to 8 octets.\n\nUnauthenticated interfaces need no authentication\nkey, and simple password authentication cannot use\na key of more than 8 octets.\n\nNote that the use of simplePassword authentication\nis NOT recommended when there is concern regarding\nattack upon the OSPF system. SimplePassword\nauthentication is only sufficient to protect against\naccidental misconfigurations because it re-uses\ncleartext passwords [RFC1704].\n\nWhen read, ospfIfAuthKey always returns an octet\nstring of length zero.")
ospfIfStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 7, 1, 17), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfIfStatus.setDescription("This object permits management of the table by\nfacilitating actions such as row creation,\nconstruction, and destruction.\n\nThe value of this object has no effect on\nwhether other objects in this conceptual row can be\nmodified.")
ospfIfMulticastForwarding = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 7, 1, 18), Integer().subtype(subtypeSpec=SingleValueConstraint(3,1,2,)).subtype(namedValues=NamedValues(("blocked", 1), ("multicast", 2), ("unicast", 3), )).clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfIfMulticastForwarding.setDescription("The way multicasts should be forwarded on this\ninterface: not forwarded, forwarded as data\nlink multicasts, or forwarded as data link\nunicasts. Data link multicasting is not\nmeaningful on point-to-point and NBMA interfaces,\nand setting ospfMulticastForwarding to 0 effectively\ndisables all multicast forwarding.")
ospfIfDemand = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 7, 1, 19), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfIfDemand.setDescription("Indicates whether Demand OSPF procedures (hello\nsuppression to FULL neighbors and setting the\nDoNotAge flag on propagated LSAs) should be\nperformed on this interface.")
ospfIfAuthType = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 7, 1, 20), OspfAuthenticationType().clone('none')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfIfAuthType.setDescription("The authentication type specified for an interface.\n\nNote that this object can be used to engage\nin significant attacks against an OSPF router.")
ospfIfLsaCount = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 7, 1, 21), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfIfLsaCount.setDescription("The total number of link-local link state advertisements\nin this interface's link-local link state database.")
ospfIfLsaCksumSum = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 7, 1, 22), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfIfLsaCksumSum.setDescription("The 32-bit unsigned sum of the Link State\nAdvertisements' LS checksums contained in this\ninterface's link-local link state database.\nThe sum can be used to determine if there has\nbeen a change in the interface's link state\ndatabase and to compare the interface link state\ndatabase of routers attached to the same subnet.")
ospfIfDesignatedRouterId = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 7, 1, 23), RouterID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfIfDesignatedRouterId.setDescription("The Router ID of the designated router.")
ospfIfBackupDesignatedRouterId = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 7, 1, 24), RouterID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfIfBackupDesignatedRouterId.setDescription("The Router ID of the backup designated router.")
ospfIfMetricTable = MibTable((1, 3, 6, 1, 2, 1, 14, 8))
if mibBuilder.loadTexts: ospfIfMetricTable.setDescription("The Metric Table describes the metrics to be advertised\nfor a specified interface at the various types of service.\nAs such, this table is an adjunct of the OSPF Interface\nTable.\n\nTypes of service, as defined by RFC 791, have the ability\nto request low delay, high bandwidth, or reliable linkage.\n\nFor the purposes of this specification, the measure of\nbandwidth:\n\n\n\n\nMetric = referenceBandwidth / ifSpeed\n\nis the default value.\nThe default reference bandwidth is 10^8.\nFor multiple link interfaces, note that ifSpeed is the sum\nof the individual link speeds. This yields a number having\nthe following typical values:\n\nNetwork Type/bit rate Metric\n\n>= 100 MBPS 1\nEthernet/802.3 10\nE1 48\nT1 (ESF) 65\n64 KBPS 1562\n56 KBPS 1785\n19.2 KBPS 5208\n9.6 KBPS 10416\n\nRoutes that are not specified use the default\n(TOS 0) metric.\n\nNote that the default reference bandwidth can be configured\nusing the general group object ospfReferenceBandwidth.")
ospfIfMetricEntry = MibTableRow((1, 3, 6, 1, 2, 1, 14, 8, 1)).setIndexNames((0, "OSPF-MIB", "ospfIfMetricIpAddress"), (0, "OSPF-MIB", "ospfIfMetricAddressLessIf"), (0, "OSPF-MIB", "ospfIfMetricTOS"))
if mibBuilder.loadTexts: ospfIfMetricEntry.setDescription("A particular TOS metric for a non-virtual interface\nidentified by the interface index.\n\nInformation in this table is persistent and when this object\nis written the entity SHOULD save the change to non-volatile\nstorage.")
ospfIfMetricIpAddress = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 8, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfIfMetricIpAddress.setDescription("The IP address of this OSPF interface. On row\ncreation, this can be derived from the instance.")
ospfIfMetricAddressLessIf = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 8, 1, 2), InterfaceIndexOrZero()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfIfMetricAddressLessIf.setDescription("For the purpose of easing the instancing of\naddressed and addressless interfaces; this\nvariable takes the value 0 on interfaces with\nIP addresses and the value of ifIndex for\ninterfaces having no IP address. On row\ncreation, this can be derived from the instance.")
ospfIfMetricTOS = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 8, 1, 3), TOSType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfIfMetricTOS.setDescription("The Type of Service metric being referenced.\nOn row creation, this can be derived from the\ninstance.")
ospfIfMetricValue = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 8, 1, 4), Metric()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfIfMetricValue.setDescription("The metric of using this Type of Service on\nthis interface. The default value of the TOS 0\nmetric is 10^8 / ifSpeed.")
ospfIfMetricStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 8, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfIfMetricStatus.setDescription("This object permits management of the table by\nfacilitating actions such as row creation,\nconstruction, and destruction.\n\nThe value of this object has no effect on\nwhether other objects in this conceptual row can be\nmodified.")
ospfVirtIfTable = MibTable((1, 3, 6, 1, 2, 1, 14, 9))
if mibBuilder.loadTexts: ospfVirtIfTable.setDescription("Information about this router's virtual interfaces\nthat the OSPF Process is configured to carry on.")
ospfVirtIfEntry = MibTableRow((1, 3, 6, 1, 2, 1, 14, 9, 1)).setIndexNames((0, "OSPF-MIB", "ospfVirtIfAreaId"), (0, "OSPF-MIB", "ospfVirtIfNeighbor"))
if mibBuilder.loadTexts: ospfVirtIfEntry.setDescription("Information about a single virtual interface.\n\nInformation in this table is persistent and when this object\nis written the entity SHOULD save the change to non-volatile\nstorage.")
ospfVirtIfAreaId = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 9, 1, 1), AreaID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfVirtIfAreaId.setDescription("The transit area that the virtual link\ntraverses. By definition, this is not 0.0.0.0.")
ospfVirtIfNeighbor = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 9, 1, 2), RouterID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfVirtIfNeighbor.setDescription("The Router ID of the virtual neighbor.")
ospfVirtIfTransitDelay = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 9, 1, 3), UpToMaxAge().clone('1')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfVirtIfTransitDelay.setDescription("The estimated number of seconds it takes to\ntransmit a Link State update packet over this\ninterface. Note that the minimal value SHOULD be\n1 second.")
ospfVirtIfRetransInterval = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 9, 1, 4), UpToMaxAge().clone('5')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfVirtIfRetransInterval.setDescription("The number of seconds between link state\nadvertisement retransmissions, for adjacencies\nbelonging to this interface. This value is\nalso used when retransmitting database\ndescription and Link State request packets. This\nvalue should be well over the expected\nround-trip time. Note that the minimal value SHOULD be\n1 second.")
ospfVirtIfHelloInterval = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 9, 1, 5), HelloRange().clone('10')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfVirtIfHelloInterval.setDescription("The length of time, in seconds, between the\nHello packets that the router sends on the\ninterface. This value must be the same for the\nvirtual neighbor.")
ospfVirtIfRtrDeadInterval = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 9, 1, 6), PositiveInteger().clone('60')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfVirtIfRtrDeadInterval.setDescription("The number of seconds that a router's Hello\npackets have not been seen before its\nneighbors declare the router down. This should be\nsome multiple of the Hello interval. This\nvalue must be the same for the virtual neighbor.")
ospfVirtIfState = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 9, 1, 7), Integer().subtype(subtypeSpec=SingleValueConstraint(1,4,)).subtype(namedValues=NamedValues(("down", 1), ("pointToPoint", 4), )).clone(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfVirtIfState.setDescription("OSPF virtual interface states.")
ospfVirtIfEvents = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 9, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfVirtIfEvents.setDescription("The number of state changes or error events on\nthis virtual link.\n\nDiscontinuities in the value of this counter can occur\nat re-initialization of the management system, and at other\ntimes as indicated by the value of ospfDiscontinuityTime.")
ospfVirtIfAuthKey = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 9, 1, 9), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 256)).clone(hexValue='0000000000000000')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfVirtIfAuthKey.setDescription("The cleartext password used as an OSPF\nauthentication key when simplePassword security\nis enabled. This object does not access any OSPF\ncryptographic (e.g., MD5) authentication key under\nany circumstance.\n\nIf the key length is shorter than 8 octets, the\nagent will left adjust and zero fill to 8 octets.\n\nUnauthenticated interfaces need no authentication\nkey, and simple password authentication cannot use\na key of more than 8 octets.\n\nNote that the use of simplePassword authentication\nis NOT recommended when there is concern regarding\nattack upon the OSPF system. SimplePassword\nauthentication is only sufficient to protect against\naccidental misconfigurations because it re-uses\ncleartext passwords. [RFC1704]\n\nWhen read, ospfVirtIfAuthKey always returns an octet\nstring of length zero.")
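# Illustrative sketch (assumption): the left-adjust/zero-fill rule for
# short simplePassword keys described above, as a hypothetical helper
# operating on bytes.
def _pad_simple_password(key):
    # Truncate to the 8-octet simplePassword maximum, then zero fill on
    # the right ("left adjust") up to 8 octets.
    return key[:8].ljust(8, b'\x00')
# _pad_simple_password(b'abc') -> b'abc\x00\x00\x00\x00\x00'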
ospfVirtIfStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 9, 1, 10), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfVirtIfStatus.setDescription("This object permits management of the table by\nfacilitating actions such as row creation,\nconstruction, and destruction.\n\nThe value of this object has no effect on\nwhether other objects in this conceptual row can be\nmodified.")
ospfVirtIfAuthType = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 9, 1, 11), OspfAuthenticationType().clone('none')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfVirtIfAuthType.setDescription("The authentication type specified for a virtual interface.\n\nNote that this object can be used to engage\nin significant attacks against an OSPF router.")
ospfVirtIfLsaCount = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 9, 1, 12), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfVirtIfLsaCount.setDescription("The total number of link-local link state advertisements\nin this virtual interface's link-local link state database.")
ospfVirtIfLsaCksumSum = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 9, 1, 13), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfVirtIfLsaCksumSum.setDescription("The 32-bit unsigned sum of the link state\nadvertisements' LS checksums contained in this\nvirtual interface's link-local link state database.\nThe sum can be used to determine if there has\nbeen a change in the virtual interface's link state\ndatabase, and to compare the virtual interface\nlink state database of the virtual neighbors.")
ospfNbrTable = MibTable((1, 3, 6, 1, 2, 1, 14, 10))
if mibBuilder.loadTexts: ospfNbrTable.setDescription("A table describing all non-virtual neighbors\nin the locality of the OSPF router.")
ospfNbrEntry = MibTableRow((1, 3, 6, 1, 2, 1, 14, 10, 1)).setIndexNames((0, "OSPF-MIB", "ospfNbrIpAddr"), (0, "OSPF-MIB", "ospfNbrAddressLessIndex"))
if mibBuilder.loadTexts: ospfNbrEntry.setDescription("The information regarding a single neighbor.\n\nInformation in this table is persistent and when this object\nis written the entity SHOULD save the change to non-volatile\nstorage.")
ospfNbrIpAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 10, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfNbrIpAddr.setDescription("The IP address this neighbor is using in its\nIP source address. Note that, on addressless\nlinks, this will not be 0.0.0.0 but the\naddress of another of the neighbor's interfaces.")
ospfNbrAddressLessIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 10, 1, 2), InterfaceIndexOrZero()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfNbrAddressLessIndex.setDescription("On an interface having an IP address, zero.\nOn addressless interfaces, the corresponding\nvalue of ifIndex in the Internet Standard MIB.\nOn row creation, this can be derived from the\ninstance.")
ospfNbrRtrId = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 10, 1, 3), RouterID().clone(hexValue='00000000')).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfNbrRtrId.setDescription("A 32-bit integer (represented as a type\nIpAddress) uniquely identifying the neighboring\nrouter in the Autonomous System.")
ospfNbrOptions = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 10, 1, 4), Integer32().clone(0)).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfNbrOptions.setDescription("A bit mask corresponding to the neighbor's\noptions field.\n\nBit 0, if set, indicates that the system will\noperate on Type of Service metrics other than\nTOS 0. If zero, the neighbor will ignore all\nmetrics except the TOS 0 metric.\n\nBit 1, if set, indicates that the associated\narea accepts and operates on external\ninformation; if zero, it is a stub area.\n\nBit 2, if set, indicates that the system is\ncapable of routing IP multicast datagrams, that is,\nthat it implements the multicast extensions to\nOSPF.\n\nBit 3, if set, indicates that the associated\narea is an NSSA. These areas are capable of\ncarrying type-7 external advertisements, which\nare translated into type-5 external advertisements\nat NSSA borders.")
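# Illustrative sketch (assumption): decoding the ospfNbrOptions bit mask
# per the description above; bit 0 is taken as the least significant bit.
def _decode_nbr_options(options):
    return {
        'tosCapable': bool(options & 0x01),        # bit 0: non-TOS-0 metrics
        'externalRouting': bool(options & 0x02),   # bit 1: not a stub area
        'multicastCapable': bool(options & 0x04),  # bit 2: MOSPF extensions
        'nssa': bool(options & 0x08),              # bit 3: NSSA area
    }
# _decode_nbr_options(0x02) -> only 'externalRouting' set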
ospfNbrPriority = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 10, 1, 5), DesignatedRouterPriority().clone('1')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfNbrPriority.setDescription("The priority of this neighbor in the designated\nrouter election algorithm. The value 0 signifies\nthat the neighbor is not eligible to become\nthe designated router on this particular network.")
ospfNbrState = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 10, 1, 6), Integer().subtype(subtypeSpec=SingleValueConstraint(1,5,3,7,2,8,6,4,)).subtype(namedValues=NamedValues(("down", 1), ("attempt", 2), ("init", 3), ("twoWay", 4), ("exchangeStart", 5), ("exchange", 6), ("loading", 7), ("full", 8), )).clone(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfNbrState.setDescription("The state of the relationship with this neighbor.")
ospfNbrEvents = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 10, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfNbrEvents.setDescription("The number of times this neighbor relationship\nhas changed state or an error has occurred.\n\nDiscontinuities in the value of this counter can occur\nat re-initialization of the management system, and at other\ntimes as indicated by the value of ospfDiscontinuityTime.")
ospfNbrLsRetransQLen = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 10, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfNbrLsRetransQLen.setDescription("The current length of the retransmission\nqueue.")
ospfNbmaNbrStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 10, 1, 9), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfNbmaNbrStatus.setDescription("This object permits management of the table by\nfacilitating actions such as row creation,\nconstruction, and destruction.\n\nThe value of this object has no effect on\nwhether other objects in this conceptual row can be\nmodified.")
ospfNbmaNbrPermanence = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 10, 1, 10), Integer().subtype(subtypeSpec=SingleValueConstraint(2,1,)).subtype(namedValues=NamedValues(("dynamic", 1), ("permanent", 2), )).clone(2)).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfNbmaNbrPermanence.setDescription("This variable displays the status of the entry;\n'dynamic' and 'permanent' refer to how the neighbor\nbecame known.")
ospfNbrHelloSuppressed = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 10, 1, 11), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfNbrHelloSuppressed.setDescription("Indicates whether Hellos are being suppressed\nto the neighbor.")
ospfNbrRestartHelperStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 10, 1, 12), Integer().subtype(subtypeSpec=SingleValueConstraint(2,1,)).subtype(namedValues=NamedValues(("notHelping", 1), ("helping", 2), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfNbrRestartHelperStatus.setDescription("Indicates whether the router is acting\nas a graceful restart helper for the neighbor.")
ospfNbrRestartHelperAge = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 10, 1, 13), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfNbrRestartHelperAge.setDescription("Remaining time in current OSPF graceful restart\ninterval, if the router is acting as a restart\nhelper for the neighbor.")
ospfNbrRestartHelperExitReason = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 10, 1, 14), Integer().subtype(subtypeSpec=SingleValueConstraint(1,5,3,4,2,)).subtype(namedValues=NamedValues(("none", 1), ("inProgress", 2), ("completed", 3), ("timedOut", 4), ("topologyChanged", 5), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfNbrRestartHelperExitReason.setDescription("Describes the outcome of the last attempt at acting\nas a graceful restart helper for the neighbor.")
ospfVirtNbrTable = MibTable((1, 3, 6, 1, 2, 1, 14, 11))
if mibBuilder.loadTexts: ospfVirtNbrTable.setDescription("This table describes all virtual neighbors.\nSince virtual links are configured\nin the Virtual Interface Table, this table is read-only.")
ospfVirtNbrEntry = MibTableRow((1, 3, 6, 1, 2, 1, 14, 11, 1)).setIndexNames((0, "OSPF-MIB", "ospfVirtNbrArea"), (0, "OSPF-MIB", "ospfVirtNbrRtrId"))
if mibBuilder.loadTexts: ospfVirtNbrEntry.setDescription("Virtual neighbor information.")
ospfVirtNbrArea = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 11, 1, 1), AreaID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfVirtNbrArea.setDescription("The Transit Area Identifier.")
ospfVirtNbrRtrId = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 11, 1, 2), RouterID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfVirtNbrRtrId.setDescription("A 32-bit integer uniquely identifying the\nneighboring router in the Autonomous System.")
ospfVirtNbrIpAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 11, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfVirtNbrIpAddr.setDescription("The IP address this virtual neighbor is using.")
ospfVirtNbrOptions = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 11, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfVirtNbrOptions.setDescription("A bit mask corresponding to the neighbor's\noptions field.\n\nBit 1, if set, indicates that the system will\noperate on Type of Service metrics other than\nTOS 0. If zero, the neighbor will ignore all\nmetrics except the TOS 0 metric.\n\nBit 2, if set, indicates that the system is\nnetwork multicast capable, i.e., that it\nimplements OSPF multicast routing.")
ospfVirtNbrState = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 11, 1, 5), Integer().subtype(subtypeSpec=SingleValueConstraint(1,5,3,7,2,8,6,4,)).subtype(namedValues=NamedValues(("down", 1), ("attempt", 2), ("init", 3), ("twoWay", 4), ("exchangeStart", 5), ("exchange", 6), ("loading", 7), ("full", 8), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfVirtNbrState.setDescription("The state of the virtual neighbor relationship.")
ospfVirtNbrEvents = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 11, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfVirtNbrEvents.setDescription("The number of times this virtual link has\nchanged its state or an error has occurred.\n\nDiscontinuities in the value of this counter can occur\nat re-initialization of the management system, and at other\ntimes as indicated by the value of ospfDiscontinuityTime.")
ospfVirtNbrLsRetransQLen = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 11, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfVirtNbrLsRetransQLen.setDescription("The current length of the retransmission\nqueue.")
ospfVirtNbrHelloSuppressed = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 11, 1, 8), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfVirtNbrHelloSuppressed.setDescription("Indicates whether Hellos are being suppressed\nto the neighbor.")
ospfVirtNbrRestartHelperStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 11, 1, 9), Integer().subtype(subtypeSpec=SingleValueConstraint(2,1,)).subtype(namedValues=NamedValues(("notHelping", 1), ("helping", 2), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfVirtNbrRestartHelperStatus.setDescription("Indicates whether the router is acting\nas a graceful restart helper for the neighbor.")
ospfVirtNbrRestartHelperAge = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 11, 1, 10), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfVirtNbrRestartHelperAge.setDescription("Remaining time in current OSPF graceful restart\ninterval, if the router is acting as a restart\nhelper for the neighbor.")
ospfVirtNbrRestartHelperExitReason = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 11, 1, 11), Integer().subtype(subtypeSpec=SingleValueConstraint(1,5,3,4,2,)).subtype(namedValues=NamedValues(("none", 1), ("inProgress", 2), ("completed", 3), ("timedOut", 4), ("topologyChanged", 5), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfVirtNbrRestartHelperExitReason.setDescription("Describes the outcome of the last attempt at acting\nas a graceful restart helper for the neighbor.")
ospfExtLsdbTable = MibTable((1, 3, 6, 1, 2, 1, 14, 12))
if mibBuilder.loadTexts: ospfExtLsdbTable.setDescription("The OSPF Process's external LSA link state database.\n\nThis table is identical to the OSPF LSDB Table\nin format, but contains only external link state\nadvertisements. The purpose is to allow external\nLSAs to be displayed once for the router rather\nthan once in each non-stub area.\n\nNote that external LSAs are also in the AS-scope link state\ndatabase.")
ospfExtLsdbEntry = MibTableRow((1, 3, 6, 1, 2, 1, 14, 12, 1)).setIndexNames((0, "OSPF-MIB", "ospfExtLsdbType"), (0, "OSPF-MIB", "ospfExtLsdbLsid"), (0, "OSPF-MIB", "ospfExtLsdbRouterId"))
if mibBuilder.loadTexts: ospfExtLsdbEntry.setDescription("A single link state advertisement.")
ospfExtLsdbType = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 12, 1, 1), Integer().subtype(subtypeSpec=SingleValueConstraint(5,)).subtype(namedValues=NamedValues(("asExternalLink", 5), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfExtLsdbType.setDescription("The type of the link state advertisement.\nEach link state type has a separate advertisement\nformat.")
ospfExtLsdbLsid = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 12, 1, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfExtLsdbLsid.setDescription("The Link State ID is an LS Type Specific field\ncontaining either a Router ID or an IP address;\nit identifies the piece of the routing domain\nthat is being described by the advertisement.")
ospfExtLsdbRouterId = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 12, 1, 3), RouterID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfExtLsdbRouterId.setDescription("The 32-bit number that uniquely identifies the\noriginating router in the Autonomous System.")
ospfExtLsdbSequence = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 12, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfExtLsdbSequence.setDescription("The sequence number field is a signed 32-bit\ninteger. It starts with the value '80000001'h,\nor -'7FFFFFFF'h, and increments until '7FFFFFFF'h.\nThus, a typical sequence number will be very negative.\nIt is used to detect old and duplicate link state\nadvertisements. The space of sequence numbers is linearly\nordered. The larger the sequence number, the more recent\nthe advertisement.")
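# Illustrative sketch (assumption): LS sequence numbers are signed 32-bit
# values starting at '80000001'h (very negative) and incrementing toward
# '7FFFFFFF'h; a larger signed value means a more recent LSA.
def _ls_sequence_as_signed(raw):
    # Reinterpret an unsigned 32-bit wire value as the signed sequence
    # number the description refers to.
    return raw - 0x100000000 if raw & 0x80000000 else raw
# _ls_sequence_as_signed(0x80000001) -> -2147483647 (initial sequence)
# _ls_sequence_as_signed(0x7FFFFFFF) -> 2147483647 (maximum sequence)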
ospfExtLsdbAge = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 12, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfExtLsdbAge.setDescription("This field is the age of the link state\nadvertisement in seconds.")
ospfExtLsdbChecksum = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 12, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfExtLsdbChecksum.setDescription("This field is the checksum of the complete\ncontents of the advertisement, excepting the\nage field. The age field is excepted so that\nan advertisement's age can be incremented\nwithout updating the checksum. The checksum\nused is the same that is used for ISO\nconnectionless datagrams; it is commonly referred\nto as the Fletcher checksum.")
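# Illustrative sketch (assumption): the description above refers to the
# Fletcher checksum of ISO connectionless datagrams. A minimal version of
# the running two-sum computation over a bytes object (without the ISO
# checkbyte placement an OSPF implementation applies when generating LSAs):
def _fletcher16(data):
    c0 = c1 = 0
    for byte in data:
        c0 = (c0 + byte) % 255
        c1 = (c1 + c0) % 255
    return (c1 << 8) | c0
# Verifying an LSA would run this over the advertisement with the age
# field excluded, as the description notes.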
ospfExtLsdbAdvertisement = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 12, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(36, 36)).setFixedLength(36)).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfExtLsdbAdvertisement.setDescription("The entire link state advertisement, including\nits header.")
ospfRouteGroup = MibIdentifier((1, 3, 6, 1, 2, 1, 14, 13))
ospfIntraArea = MibIdentifier((1, 3, 6, 1, 2, 1, 14, 13, 1))
ospfInterArea = MibIdentifier((1, 3, 6, 1, 2, 1, 14, 13, 2))
ospfExternalType1 = MibIdentifier((1, 3, 6, 1, 2, 1, 14, 13, 3))
ospfExternalType2 = MibIdentifier((1, 3, 6, 1, 2, 1, 14, 13, 4))
ospfAreaAggregateTable = MibTable((1, 3, 6, 1, 2, 1, 14, 14))
if mibBuilder.loadTexts: ospfAreaAggregateTable.setDescription("The Area Aggregate Table acts as an adjunct\nto the Area Table. It describes those address aggregates\nthat are configured to be propagated from an area.\nIts purpose is to reduce the amount of information\nthat is known beyond an Area's borders.\n\nIt contains a set of IP address ranges\nspecified by an IP address/IP network mask pair.\nFor example, a class B address range of X.X.X.X\nwith a network mask of 255.255.0.0 includes all IP\naddresses from X.X.0.0 to X.X.255.255.\n\nNote that if ranges are configured such that one range\nsubsumes another range (e.g., 10.0.0.0 mask 255.0.0.0\nand 10.1.0.0 mask 255.255.0.0),\nthe most specific match is the preferred one.")
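# Illustrative sketch (assumption, Python 3 stdlib): the most-specific-match
# rule described above, shown with the example ranges 10.0.0.0/255.0.0.0
# and 10.1.0.0/255.255.0.0.
def _most_specific_aggregate(address, ranges):
    import ipaddress
    addr = ipaddress.ip_address(address)
    matches = [n for n in map(ipaddress.ip_network, ranges) if addr in n]
    return max(matches, key=lambda n: n.prefixlen) if matches else None
# _most_specific_aggregate('10.1.2.3', ['10.0.0.0/8', '10.1.0.0/16'])
#   -> IPv4Network('10.1.0.0/16'), the preferred (more specific) range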
ospfAreaAggregateEntry = MibTableRow((1, 3, 6, 1, 2, 1, 14, 14, 1)).setIndexNames((0, "OSPF-MIB", "ospfAreaAggregateAreaID"), (0, "OSPF-MIB", "ospfAreaAggregateLsdbType"), (0, "OSPF-MIB", "ospfAreaAggregateNet"), (0, "OSPF-MIB", "ospfAreaAggregateMask"))
if mibBuilder.loadTexts: ospfAreaAggregateEntry.setDescription("A single area aggregate entry.\n\nInformation in this table is persistent and when this object\nis written the entity SHOULD save the change to non-volatile\nstorage.")
ospfAreaAggregateAreaID = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 14, 1, 1), AreaID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfAreaAggregateAreaID.setDescription("The area within which the address aggregate is to be\nfound.")
ospfAreaAggregateLsdbType = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 14, 1, 2), Integer().subtype(subtypeSpec=SingleValueConstraint(7,3,)).subtype(namedValues=NamedValues(("summaryLink", 3), ("nssaExternalLink", 7), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfAreaAggregateLsdbType.setDescription("The type of the address aggregate. This field\nspecifies the Lsdb type that this address\naggregate applies to.")
ospfAreaAggregateNet = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 14, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfAreaAggregateNet.setDescription("The IP address of the net or subnet indicated\nby the range.")
ospfAreaAggregateMask = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 14, 1, 4), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfAreaAggregateMask.setDescription("The subnet mask that pertains to the net or\nsubnet.")
ospfAreaAggregateStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 14, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfAreaAggregateStatus.setDescription("This object permits management of the table by\nfacilitating actions such as row creation,\nconstruction, and destruction.\n\nThe value of this object has no effect on\nwhether other objects in this conceptual row can be\nmodified.")
ospfAreaAggregateEffect = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 14, 1, 6), Integer().subtype(subtypeSpec=SingleValueConstraint(1,2,)).subtype(namedValues=NamedValues(("advertiseMatching", 1), ("doNotAdvertiseMatching", 2), )).clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfAreaAggregateEffect.setDescription("Subnets subsumed by ranges either trigger the\nadvertisement of the indicated aggregate\n(advertiseMatching) or result in the subnet's not\nbeing advertised at all outside the area.")
ospfAreaAggregateExtRouteTag = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 14, 1, 7), Unsigned32().clone(0)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ospfAreaAggregateExtRouteTag.setDescription("External route tag to be included in NSSA (type-7)\nLSAs.")
ospfConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 14, 15))
ospfGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 14, 15, 1))
ospfCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 14, 15, 2))
ospfLocalLsdbTable = MibTable((1, 3, 6, 1, 2, 1, 14, 17))
if mibBuilder.loadTexts: ospfLocalLsdbTable.setDescription("The OSPF Process's link-local link state database\nfor non-virtual links.\nThis table is identical to the OSPF LSDB Table\nin format, but contains only link-local Link State\nAdvertisements for non-virtual links. The purpose is\nto allow link-local LSAs to be displayed for each\nnon-virtual interface. This table is implemented to\nsupport type-9 LSAs that are defined\nin 'The OSPF Opaque LSA Option'.")
ospfLocalLsdbEntry = MibTableRow((1, 3, 6, 1, 2, 1, 14, 17, 1)).setIndexNames((0, "OSPF-MIB", "ospfLocalLsdbIpAddress"), (0, "OSPF-MIB", "ospfLocalLsdbAddressLessIf"), (0, "OSPF-MIB", "ospfLocalLsdbType"), (0, "OSPF-MIB", "ospfLocalLsdbLsid"), (0, "OSPF-MIB", "ospfLocalLsdbRouterId"))
if mibBuilder.loadTexts: ospfLocalLsdbEntry.setDescription("A single link state advertisement.")
ospfLocalLsdbIpAddress = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 17, 1, 1), IpAddress()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: ospfLocalLsdbIpAddress.setDescription("The IP address of the interface from\nwhich the LSA was received if the interface is\nnumbered.")
ospfLocalLsdbAddressLessIf = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 17, 1, 2), InterfaceIndexOrZero()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: ospfLocalLsdbAddressLessIf.setDescription("The interface index of the interface from\nwhich the LSA was received if the interface is\nunnumbered.")
ospfLocalLsdbType = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 17, 1, 3), Integer().subtype(subtypeSpec=SingleValueConstraint(9,)).subtype(namedValues=NamedValues(("localOpaqueLink", 9), ))).setMaxAccess("noaccess")
if mibBuilder.loadTexts: ospfLocalLsdbType.setDescription("The type of the link state advertisement.\nEach link state type has a separate\nadvertisement format.")
ospfLocalLsdbLsid = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 17, 1, 4), IpAddress()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: ospfLocalLsdbLsid.setDescription("The Link State ID is an LS Type Specific field\ncontaining a 32-bit identifier in IP address format;\nit identifies the piece of the routing domain\nthat is being described by the advertisement.")
ospfLocalLsdbRouterId = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 17, 1, 5), RouterID()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: ospfLocalLsdbRouterId.setDescription("The 32-bit number that uniquely identifies the\noriginating router in the Autonomous System.")
ospfLocalLsdbSequence = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 17, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfLocalLsdbSequence.setDescription("The sequence number field is a signed 32-bit\ninteger. It starts with the value '80000001'h,\nor -'7FFFFFFF'h, and increments until '7FFFFFFF'h.\nThus, a typical sequence number will be very negative.\nIt is used to detect old and duplicate link state\nadvertisements. The space of sequence numbers is linearly\nordered. The larger the sequence number, the more recent\nthe advertisement.")
ospfLocalLsdbAge = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 17, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfLocalLsdbAge.setDescription("This field is the age of the link state\nadvertisement in seconds.")
ospfLocalLsdbChecksum = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 17, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfLocalLsdbChecksum.setDescription("This field is the checksum of the complete\ncontents of the advertisement, excepting the\nage field. The age field is excepted so that\nan advertisement's age can be incremented\nwithout updating the checksum. The checksum\nused is the same that is used for ISO\nconnectionless datagrams; it is commonly referred\nto as the Fletcher checksum.")
ospfLocalLsdbAdvertisement = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 17, 1, 9), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfLocalLsdbAdvertisement.setDescription("The entire link state advertisement, including\nits header.\n\nNote that for variable length LSAs, SNMP agents\nmay not be able to return the largest string size.")
ospfVirtLocalLsdbTable = MibTable((1, 3, 6, 1, 2, 1, 14, 18))
if mibBuilder.loadTexts: ospfVirtLocalLsdbTable.setDescription("The OSPF Process's link-local link state database\nfor virtual links.\n\nThis table is identical to the OSPF LSDB Table\nin format, but contains only link-local Link State\nAdvertisements for virtual links. The purpose is to\nallow link-local LSAs to be displayed for each virtual\ninterface. This table is implemented to support type-9 LSAs\nthat are defined in 'The OSPF Opaque LSA Option'.")
ospfVirtLocalLsdbEntry = MibTableRow((1, 3, 6, 1, 2, 1, 14, 18, 1)).setIndexNames((0, "OSPF-MIB", "ospfVirtLocalLsdbTransitArea"), (0, "OSPF-MIB", "ospfVirtLocalLsdbNeighbor"), (0, "OSPF-MIB", "ospfVirtLocalLsdbType"), (0, "OSPF-MIB", "ospfVirtLocalLsdbLsid"), (0, "OSPF-MIB", "ospfVirtLocalLsdbRouterId"))
if mibBuilder.loadTexts: ospfVirtLocalLsdbEntry.setDescription("A single link state advertisement.")
ospfVirtLocalLsdbTransitArea = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 18, 1, 1), AreaID()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: ospfVirtLocalLsdbTransitArea.setDescription("The transit area that the virtual link\ntraverses. By definition, this is not 0.0.0.0.")
ospfVirtLocalLsdbNeighbor = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 18, 1, 2), RouterID()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: ospfVirtLocalLsdbNeighbor.setDescription("The Router ID of the virtual neighbor.")
ospfVirtLocalLsdbType = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 18, 1, 3), Integer().subtype(subtypeSpec=SingleValueConstraint(9,)).subtype(namedValues=NamedValues(("localOpaqueLink", 9), ))).setMaxAccess("noaccess")
if mibBuilder.loadTexts: ospfVirtLocalLsdbType.setDescription("The type of the link state advertisement.\nEach link state type has a separate\nadvertisement format.")
ospfVirtLocalLsdbLsid = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 18, 1, 4), IpAddress()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: ospfVirtLocalLsdbLsid.setDescription("The Link State ID is an LS Type Specific field\ncontaining a 32-bit identifier in IP address format;\nit identifies the piece of the routing domain\nthat is being described by the advertisement.")
ospfVirtLocalLsdbRouterId = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 18, 1, 5), RouterID()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: ospfVirtLocalLsdbRouterId.setDescription("The 32-bit number that uniquely identifies the\noriginating router in the Autonomous System.")
ospfVirtLocalLsdbSequence = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 18, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfVirtLocalLsdbSequence.setDescription("The sequence number field is a signed 32-bit\ninteger. It starts with the value '80000001'h,\nor -'7FFFFFFF'h, and increments until '7FFFFFFF'h.\nThus, a typical sequence number will be very negative.\nIt is used to detect old and duplicate link state\nadvertisements. The space of sequence numbers is linearly\nordered. The larger the sequence number, the more recent\nthe advertisement.")
ospfVirtLocalLsdbAge = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 18, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfVirtLocalLsdbAge.setDescription("This field is the age of the link state\nadvertisement in seconds.")
ospfVirtLocalLsdbChecksum = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 18, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfVirtLocalLsdbChecksum.setDescription("This field is the checksum of the complete\ncontents of the advertisement, excepting the\nage field. The age field is excepted so that\nan advertisement's age can be incremented\nwithout updating the checksum. The checksum\nused is the same that is used for ISO\nconnectionless datagrams; it is commonly\nreferred to as the Fletcher checksum.")
ospfVirtLocalLsdbAdvertisement = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 18, 1, 9), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfVirtLocalLsdbAdvertisement.setDescription("The entire link state advertisement, including\nits header.")
ospfAsLsdbTable = MibTable((1, 3, 6, 1, 2, 1, 14, 19))
if mibBuilder.loadTexts: ospfAsLsdbTable.setDescription("The OSPF Process's AS-scope LSA link state database.\nThe database contains the AS-scope Link State\nAdvertisements from throughout the areas that\nthe device is attached to.\n\nThis table is identical to the OSPF LSDB Table\nin format, but contains only AS-scope Link State\nAdvertisements. The purpose is to allow AS-scope\nLSAs to be displayed once for the router rather\nthan once in each non-stub area.")
ospfAsLsdbEntry = MibTableRow((1, 3, 6, 1, 2, 1, 14, 19, 1)).setIndexNames((0, "OSPF-MIB", "ospfAsLsdbType"), (0, "OSPF-MIB", "ospfAsLsdbLsid"), (0, "OSPF-MIB", "ospfAsLsdbRouterId"))
if mibBuilder.loadTexts: ospfAsLsdbEntry.setDescription("A single link state advertisement.")
ospfAsLsdbType = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 19, 1, 1), Integer().subtype(subtypeSpec=SingleValueConstraint(11,5,)).subtype(namedValues=NamedValues(("asOpaqueLink", 11), ("asExternalLink", 5), ))).setMaxAccess("noaccess")
if mibBuilder.loadTexts: ospfAsLsdbType.setDescription("The type of the link state advertisement.\nEach link state type has a separate\nadvertisement format.")
ospfAsLsdbLsid = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 19, 1, 2), IpAddress()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: ospfAsLsdbLsid.setDescription("The Link State ID is an LS Type Specific field\ncontaining either a Router ID or an IP address;\nit identifies the piece of the routing domain\nthat is being described by the advertisement.")
ospfAsLsdbRouterId = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 19, 1, 3), RouterID()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: ospfAsLsdbRouterId.setDescription("The 32-bit number that uniquely identifies the\noriginating router in the Autonomous System.")
ospfAsLsdbSequence = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 19, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfAsLsdbSequence.setDescription("The sequence number field is a signed 32-bit\ninteger. It starts with the value '80000001'h,\nor -'7FFFFFFF'h, and increments until '7FFFFFFF'h.\nThus, a typical sequence number will be very negative.\nIt is used to detect old and duplicate link state\nadvertisements. The space of sequence numbers is linearly\nordered. The larger the sequence number, the more recent\nthe advertisement.")
ospfAsLsdbAge = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 19, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfAsLsdbAge.setDescription("This field is the age of the link state\nadvertisement in seconds.")
ospfAsLsdbChecksum = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 19, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfAsLsdbChecksum.setDescription("This field is the checksum of the complete\ncontents of the advertisement, excepting the\nage field. The age field is excepted so that\nan advertisement's age can be incremented\nwithout updating the checksum. The checksum\nused is the same that is used for ISO\nconnectionless datagrams; it is commonly referred\nto as the Fletcher checksum.")
ospfAsLsdbAdvertisement = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 19, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfAsLsdbAdvertisement.setDescription("The entire link state advertisement, including\nits header.")
ospfAreaLsaCountTable = MibTable((1, 3, 6, 1, 2, 1, 14, 20))
if mibBuilder.loadTexts: ospfAreaLsaCountTable.setDescription("This table maintains per-area, per-LSA-type counters.")
ospfAreaLsaCountEntry = MibTableRow((1, 3, 6, 1, 2, 1, 14, 20, 1)).setIndexNames((0, "OSPF-MIB", "ospfAreaLsaCountAreaId"), (0, "OSPF-MIB", "ospfAreaLsaCountLsaType"))
if mibBuilder.loadTexts: ospfAreaLsaCountEntry.setDescription("An entry with a number of link advertisements\nof a given type for a given area.")
ospfAreaLsaCountAreaId = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 20, 1, 1), AreaID()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: ospfAreaLsaCountAreaId.setDescription("The Area ID of this entry.")
ospfAreaLsaCountLsaType = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 20, 1, 2), Integer().subtype(subtypeSpec=SingleValueConstraint(1,3,4,10,7,2,6,)).subtype(namedValues=NamedValues(("routerLink", 1), ("areaOpaqueLink", 10), ("networkLink", 2), ("summaryLink", 3), ("asSummaryLink", 4), ("multicastLink", 6), ("nssaExternalLink", 7), ))).setMaxAccess("noaccess")
if mibBuilder.loadTexts: ospfAreaLsaCountLsaType.setDescription("The LSA type of this entry.")
ospfAreaLsaCountNumber = MibTableColumn((1, 3, 6, 1, 2, 1, 14, 20, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfAreaLsaCountNumber.setDescription("Number of LSAs of a given type for a given area.")
# Augmentations
# Groups
ospfBasicGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 14, 15, 1, 1)).setObjects(*(("OSPF-MIB", "ospfExternLsaCksumSum"), ("OSPF-MIB", "ospfASBdrRtrStatus"), ("OSPF-MIB", "ospfRxNewLsas"), ("OSPF-MIB", "ospfTOSSupport"), ("OSPF-MIB", "ospfExternLsaCount"), ("OSPF-MIB", "ospfExtLsdbLimit"), ("OSPF-MIB", "ospfAreaBdrRtrStatus"), ("OSPF-MIB", "ospfExitOverflowInterval"), ("OSPF-MIB", "ospfOriginateNewLsas"), ("OSPF-MIB", "ospfVersionNumber"), ("OSPF-MIB", "ospfDemandExtensions"), ("OSPF-MIB", "ospfAdminStat"), ("OSPF-MIB", "ospfMulticastExtensions"), ("OSPF-MIB", "ospfRouterId"), ) )
if mibBuilder.loadTexts: ospfBasicGroup.setDescription("These objects are used to monitor/manage\nglobal OSPF parameters. This object group\nconforms to RFC 1850.")
ospfAreaGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 14, 15, 1, 2)).setObjects(*(("OSPF-MIB", "ospfSpfRuns"), ("OSPF-MIB", "ospfAreaLsaCksumSum"), ("OSPF-MIB", "ospfAsBdrRtrCount"), ("OSPF-MIB", "ospfAreaId"), ("OSPF-MIB", "ospfImportAsExtern"), ("OSPF-MIB", "ospfAreaLsaCount"), ("OSPF-MIB", "ospfAreaBdrRtrCount"), ("OSPF-MIB", "ospfAreaSummary"), ("OSPF-MIB", "ospfAreaStatus"), ) )
if mibBuilder.loadTexts: ospfAreaGroup.setDescription("These objects are used for OSPF systems\nsupporting areas per RFC 1850.")
ospfStubAreaGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 14, 15, 1, 3)).setObjects(*(("OSPF-MIB", "ospfStubMetricType"), ("OSPF-MIB", "ospfStubMetric"), ("OSPF-MIB", "ospfStubStatus"), ("OSPF-MIB", "ospfStubTOS"), ("OSPF-MIB", "ospfStubAreaId"), ) )
if mibBuilder.loadTexts: ospfStubAreaGroup.setDescription("These objects are used for OSPF systems\nsupporting stub areas.")
ospfLsdbGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 14, 15, 1, 4)).setObjects(*(("OSPF-MIB", "ospfLsdbChecksum"), ("OSPF-MIB", "ospfLsdbRouterId"), ("OSPF-MIB", "ospfLsdbAge"), ("OSPF-MIB", "ospfLsdbAreaId"), ("OSPF-MIB", "ospfLsdbLsid"), ("OSPF-MIB", "ospfLsdbType"), ("OSPF-MIB", "ospfLsdbSequence"), ("OSPF-MIB", "ospfLsdbAdvertisement"), ) )
if mibBuilder.loadTexts: ospfLsdbGroup.setDescription("These objects are used for OSPF systems\nthat display their link state database.")
ospfAreaRangeGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 14, 15, 1, 5)).setObjects(*(("OSPF-MIB", "ospfAreaRangeAreaId"), ("OSPF-MIB", "ospfAreaRangeEffect"), ("OSPF-MIB", "ospfAreaRangeNet"), ("OSPF-MIB", "ospfAreaRangeStatus"), ("OSPF-MIB", "ospfAreaRangeMask"), ) )
if mibBuilder.loadTexts: ospfAreaRangeGroup.setDescription("These objects are used for non-CIDR OSPF\nsystems that support multiple areas. This\nobject group is obsolete.")
ospfHostGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 14, 15, 1, 6)).setObjects(*(("OSPF-MIB", "ospfHostAreaID"), ("OSPF-MIB", "ospfHostIpAddress"), ("OSPF-MIB", "ospfHostMetric"), ("OSPF-MIB", "ospfHostStatus"), ("OSPF-MIB", "ospfHostTOS"), ) )
if mibBuilder.loadTexts: ospfHostGroup.setDescription("These objects are used for OSPF systems\nthat support attached hosts.")
ospfIfGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 14, 15, 1, 7)).setObjects(*(("OSPF-MIB", "ospfIfPollInterval"), ("OSPF-MIB", "ospfIfEvents"), ("OSPF-MIB", "ospfIfAdminStat"), ("OSPF-MIB", "ospfIfStatus"), ("OSPF-MIB", "ospfIfIpAddress"), ("OSPF-MIB", "ospfIfRtrPriority"), ("OSPF-MIB", "ospfIfTransitDelay"), ("OSPF-MIB", "ospfIfState"), ("OSPF-MIB", "ospfIfAuthKey"), ("OSPF-MIB", "ospfIfDesignatedRouter"), ("OSPF-MIB", "ospfAddressLessIf"), ("OSPF-MIB", "ospfIfDemand"), ("OSPF-MIB", "ospfIfRtrDeadInterval"), ("OSPF-MIB", "ospfIfAuthType"), ("OSPF-MIB", "ospfIfBackupDesignatedRouter"), ("OSPF-MIB", "ospfIfType"), ("OSPF-MIB", "ospfIfAreaId"), ("OSPF-MIB", "ospfIfHelloInterval"), ("OSPF-MIB", "ospfIfRetransInterval"), ("OSPF-MIB", "ospfIfMulticastForwarding"), ) )
if mibBuilder.loadTexts: ospfIfGroup.setDescription("These objects are used to monitor/manage OSPF\ninterfaces. This object group conforms to RFC 1850.")
ospfIfMetricGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 14, 15, 1, 8)).setObjects(*(("OSPF-MIB", "ospfIfMetricAddressLessIf"), ("OSPF-MIB", "ospfIfMetricValue"), ("OSPF-MIB", "ospfIfMetricTOS"), ("OSPF-MIB", "ospfIfMetricIpAddress"), ("OSPF-MIB", "ospfIfMetricStatus"), ) )
if mibBuilder.loadTexts: ospfIfMetricGroup.setDescription("These objects are used for OSPF systems for supporting\ninterface metrics.")
ospfVirtIfGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 14, 15, 1, 9)).setObjects(*(("OSPF-MIB", "ospfVirtIfState"), ("OSPF-MIB", "ospfVirtIfRtrDeadInterval"), ("OSPF-MIB", "ospfVirtIfRetransInterval"), ("OSPF-MIB", "ospfVirtIfAuthKey"), ("OSPF-MIB", "ospfVirtIfTransitDelay"), ("OSPF-MIB", "ospfVirtIfEvents"), ("OSPF-MIB", "ospfVirtIfHelloInterval"), ("OSPF-MIB", "ospfVirtIfNeighbor"), ("OSPF-MIB", "ospfVirtIfStatus"), ("OSPF-MIB", "ospfVirtIfAreaId"), ("OSPF-MIB", "ospfVirtIfAuthType"), ) )
if mibBuilder.loadTexts: ospfVirtIfGroup.setDescription("These objects are used for OSPF systems for supporting\nvirtual interfaces. This object group conforms\nto RFC 1850.")
ospfNbrGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 14, 15, 1, 10)).setObjects(*(("OSPF-MIB", "ospfNbrHelloSuppressed"), ("OSPF-MIB", "ospfNbrRtrId"), ("OSPF-MIB", "ospfNbrPriority"), ("OSPF-MIB", "ospfNbrLsRetransQLen"), ("OSPF-MIB", "ospfNbmaNbrPermanence"), ("OSPF-MIB", "ospfNbrOptions"), ("OSPF-MIB", "ospfNbrEvents"), ("OSPF-MIB", "ospfNbrState"), ("OSPF-MIB", "ospfNbmaNbrStatus"), ("OSPF-MIB", "ospfNbrAddressLessIndex"), ("OSPF-MIB", "ospfNbrIpAddr"), ) )
if mibBuilder.loadTexts: ospfNbrGroup.setDescription("These objects are used to monitor/manage OSPF neighbors.\nThis object group conforms to RFC 1850.")
ospfVirtNbrGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 14, 15, 1, 11)).setObjects(*(("OSPF-MIB", "ospfVirtNbrRtrId"), ("OSPF-MIB", "ospfVirtNbrIpAddr"), ("OSPF-MIB", "ospfVirtNbrEvents"), ("OSPF-MIB", "ospfVirtNbrArea"), ("OSPF-MIB", "ospfVirtNbrHelloSuppressed"), ("OSPF-MIB", "ospfVirtNbrLsRetransQLen"), ("OSPF-MIB", "ospfVirtNbrState"), ("OSPF-MIB", "ospfVirtNbrOptions"), ) )
if mibBuilder.loadTexts: ospfVirtNbrGroup.setDescription("These objects are used to monitor/manage OSPF virtual\nneighbors. This object group conforms to RFC 1850.")
ospfExtLsdbGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 14, 15, 1, 12)).setObjects(*(("OSPF-MIB", "ospfExtLsdbChecksum"), ("OSPF-MIB", "ospfExtLsdbType"), ("OSPF-MIB", "ospfExtLsdbSequence"), ("OSPF-MIB", "ospfExtLsdbAdvertisement"), ("OSPF-MIB", "ospfExtLsdbAge"), ("OSPF-MIB", "ospfExtLsdbRouterId"), ("OSPF-MIB", "ospfExtLsdbLsid"), ) )
if mibBuilder.loadTexts: ospfExtLsdbGroup.setDescription("These objects are used for OSPF systems that display\ntheir link state database. This object group\nconforms to RFC 1850.\n\nThis object group is replaced by the ospfAsLsdbGroup\nin order to support any AS-scope LSA type in a single\ntable.")
ospfAreaAggregateGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 14, 15, 1, 13)).setObjects(*(("OSPF-MIB", "ospfAreaAggregateLsdbType"), ("OSPF-MIB", "ospfAreaAggregateStatus"), ("OSPF-MIB", "ospfAreaAggregateAreaID"), ("OSPF-MIB", "ospfAreaAggregateEffect"), ("OSPF-MIB", "ospfAreaAggregateMask"), ("OSPF-MIB", "ospfAreaAggregateNet"), ) )
if mibBuilder.loadTexts: ospfAreaAggregateGroup.setDescription("These objects are used for OSPF systems to support\nnetwork prefix aggregation across areas.")
ospfLocalLsdbGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 14, 15, 1, 14)).setObjects(*(("OSPF-MIB", "ospfLocalLsdbAge"), ("OSPF-MIB", "ospfLocalLsdbSequence"), ("OSPF-MIB", "ospfLocalLsdbChecksum"), ("OSPF-MIB", "ospfLocalLsdbAdvertisement"), ) )
if mibBuilder.loadTexts: ospfLocalLsdbGroup.setDescription("These objects are used for OSPF systems\nthat display their link-local link state databases\nfor non-virtual links.")
ospfVirtLocalLsdbGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 14, 15, 1, 15)).setObjects(*(("OSPF-MIB", "ospfVirtLocalLsdbAdvertisement"), ("OSPF-MIB", "ospfVirtLocalLsdbChecksum"), ("OSPF-MIB", "ospfVirtLocalLsdbAge"), ("OSPF-MIB", "ospfVirtLocalLsdbSequence"), ) )
if mibBuilder.loadTexts: ospfVirtLocalLsdbGroup.setDescription("These objects are used for OSPF systems\nthat display their link-local link state databases\nfor virtual links.")
ospfAsLsdbGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 14, 15, 1, 16)).setObjects(*(("OSPF-MIB", "ospfAsLsdbChecksum"), ("OSPF-MIB", "ospfAsLsdbAge"), ("OSPF-MIB", "ospfAsLsdbSequence"), ("OSPF-MIB", "ospfAsLsdbAdvertisement"), ) )
if mibBuilder.loadTexts: ospfAsLsdbGroup.setDescription("These objects are used for OSPF systems\nthat display their AS-scope link state database.")
ospfBasicGroup2 = ObjectGroup((1, 3, 6, 1, 2, 1, 14, 15, 1, 17)).setObjects(*(("OSPF-MIB", "ospfReferenceBandwidth"), ("OSPF-MIB", "ospfRxNewLsas"), ("OSPF-MIB", "ospfRestartExitReason"), ("OSPF-MIB", "ospfExternLsaCount"), ("OSPF-MIB", "ospfExitOverflowInterval"), ("OSPF-MIB", "ospfOriginateNewLsas"), ("OSPF-MIB", "ospfAsLsaCount"), ("OSPF-MIB", "ospfStubRouterSupport"), ("OSPF-MIB", "ospfDemandExtensions"), ("OSPF-MIB", "ospfAsLsaCksumSum"), ("OSPF-MIB", "ospfMulticastExtensions"), ("OSPF-MIB", "ospfExternLsaCksumSum"), ("OSPF-MIB", "ospfASBdrRtrStatus"), ("OSPF-MIB", "ospfRestartStatus"), ("OSPF-MIB", "ospfTOSSupport"), ("OSPF-MIB", "ospfRFC1583Compatibility"), ("OSPF-MIB", "ospfRestartInterval"), ("OSPF-MIB", "ospfExtLsdbLimit"), ("OSPF-MIB", "ospfAreaBdrRtrStatus"), ("OSPF-MIB", "ospfRestartStrictLsaChecking"), ("OSPF-MIB", "ospfStubRouterAdvertisement"), ("OSPF-MIB", "ospfVersionNumber"), ("OSPF-MIB", "ospfRestartAge"), ("OSPF-MIB", "ospfDiscontinuityTime"), ("OSPF-MIB", "ospfOpaqueLsaSupport"), ("OSPF-MIB", "ospfRestartSupport"), ("OSPF-MIB", "ospfAdminStat"), ("OSPF-MIB", "ospfRouterId"), ) )
if mibBuilder.loadTexts: ospfBasicGroup2.setDescription("These objects are used to monitor/manage OSPF global\nparameters.")
ospfAreaGroup2 = ObjectGroup((1, 3, 6, 1, 2, 1, 14, 15, 1, 18)).setObjects(*(("OSPF-MIB", "ospfSpfRuns"), ("OSPF-MIB", "ospfAsBdrRtrCount"), ("OSPF-MIB", "ospfAreaNssaTranslatorRole"), ("OSPF-MIB", "ospfAreaSummary"), ("OSPF-MIB", "ospfAreaNssaTranslatorState"), ("OSPF-MIB", "ospfAreaId"), ("OSPF-MIB", "ospfAreaLsaCount"), ("OSPF-MIB", "ospfAreaStatus"), ("OSPF-MIB", "ospfAreaLsaCksumSum"), ("OSPF-MIB", "ospfImportAsExtern"), ("OSPF-MIB", "ospfAreaNssaTranslatorStabilityInterval"), ("OSPF-MIB", "ospfAreaBdrRtrCount"), ("OSPF-MIB", "ospfAreaNssaTranslatorEvents"), ) )
if mibBuilder.loadTexts: ospfAreaGroup2.setDescription("These objects are used by OSPF systems\nto support areas.")
ospfIfGroup2 = ObjectGroup((1, 3, 6, 1, 2, 1, 14, 15, 1, 19)).setObjects(*(("OSPF-MIB", "ospfIfStatus"), ("OSPF-MIB", "ospfIfDemand"), ("OSPF-MIB", "ospfIfPollInterval"), ("OSPF-MIB", "ospfIfAreaId"), ("OSPF-MIB", "ospfIfLsaCount"), ("OSPF-MIB", "ospfIfEvents"), ("OSPF-MIB", "ospfIfRtrDeadInterval"), ("OSPF-MIB", "ospfIfRtrPriority"), ("OSPF-MIB", "ospfAddressLessIf"), ("OSPF-MIB", "ospfIfAuthType"), ("OSPF-MIB", "ospfIfDesignatedRouter"), ("OSPF-MIB", "ospfIfLsaCksumSum"), ("OSPF-MIB", "ospfIfState"), ("OSPF-MIB", "ospfIfBackupDesignatedRouter"), ("OSPF-MIB", "ospfIfHelloInterval"), ("OSPF-MIB", "ospfIfRetransInterval"), ("OSPF-MIB", "ospfIfMulticastForwarding"), ("OSPF-MIB", "ospfIfAdminStat"), ("OSPF-MIB", "ospfIfIpAddress"), ("OSPF-MIB", "ospfIfTransitDelay"), ("OSPF-MIB", "ospfIfAuthKey"), ("OSPF-MIB", "ospfIfType"), ) )
if mibBuilder.loadTexts: ospfIfGroup2.setDescription("These objects are used to monitor/manage OSPF interfaces.")
ospfVirtIfGroup2 = ObjectGroup((1, 3, 6, 1, 2, 1, 14, 15, 1, 20)).setObjects(*(("OSPF-MIB", "ospfVirtIfState"), ("OSPF-MIB", "ospfVirtIfLsaCksumSum"), ("OSPF-MIB", "ospfVirtIfRtrDeadInterval"), ("OSPF-MIB", "ospfVirtIfRetransInterval"), ("OSPF-MIB", "ospfVirtIfAuthKey"), ("OSPF-MIB", "ospfVirtIfLsaCount"), ("OSPF-MIB", "ospfVirtIfTransitDelay"), ("OSPF-MIB", "ospfVirtIfEvents"), ("OSPF-MIB", "ospfVirtIfHelloInterval"), ("OSPF-MIB", "ospfVirtIfNeighbor"), ("OSPF-MIB", "ospfVirtIfStatus"), ("OSPF-MIB", "ospfIfDesignatedRouterId"), ("OSPF-MIB", "ospfVirtIfAreaId"), ("OSPF-MIB", "ospfVirtIfAuthType"), ("OSPF-MIB", "ospfIfBackupDesignatedRouterId"), ) )
if mibBuilder.loadTexts: ospfVirtIfGroup2.setDescription("These objects are used to monitor/manage OSPF\nvirtual interfaces.")
ospfNbrGroup2 = ObjectGroup((1, 3, 6, 1, 2, 1, 14, 15, 1, 21)).setObjects(*(("OSPF-MIB", "ospfNbrHelloSuppressed"), ("OSPF-MIB", "ospfNbrRestartHelperExitReason"), ("OSPF-MIB", "ospfNbrRtrId"), ("OSPF-MIB", "ospfNbrPriority"), ("OSPF-MIB", "ospfNbrLsRetransQLen"), ("OSPF-MIB", "ospfNbmaNbrPermanence"), ("OSPF-MIB", "ospfNbrRestartHelperAge"), ("OSPF-MIB", "ospfNbrRestartHelperStatus"), ("OSPF-MIB", "ospfNbrOptions"), ("OSPF-MIB", "ospfNbrEvents"), ("OSPF-MIB", "ospfNbrState"), ("OSPF-MIB", "ospfNbmaNbrStatus"), ("OSPF-MIB", "ospfNbrAddressLessIndex"), ("OSPF-MIB", "ospfNbrIpAddr"), ) )
if mibBuilder.loadTexts: ospfNbrGroup2.setDescription("These objects are used to monitor/manage OSPF\nneighbors.")
ospfVirtNbrGroup2 = ObjectGroup((1, 3, 6, 1, 2, 1, 14, 15, 1, 22)).setObjects(*(("OSPF-MIB", "ospfVirtNbrIpAddr"), ("OSPF-MIB", "ospfVirtNbrEvents"), ("OSPF-MIB", "ospfVirtNbrState"), ("OSPF-MIB", "ospfVirtNbrLsRetransQLen"), ("OSPF-MIB", "ospfVirtNbrRestartHelperAge"), ("OSPF-MIB", "ospfVirtNbrRestartHelperExitReason"), ("OSPF-MIB", "ospfVirtNbrOptions"), ("OSPF-MIB", "ospfVirtNbrRtrId"), ("OSPF-MIB", "ospfVirtNbrArea"), ("OSPF-MIB", "ospfVirtNbrHelloSuppressed"), ("OSPF-MIB", "ospfVirtNbrRestartHelperStatus"), ) )
if mibBuilder.loadTexts: ospfVirtNbrGroup2.setDescription("These objects are used to monitor/manage OSPF\nvirtual neighbors.")
ospfAreaAggregateGroup2 = ObjectGroup((1, 3, 6, 1, 2, 1, 14, 15, 1, 23)).setObjects(*(("OSPF-MIB", "ospfAreaAggregateExtRouteTag"), ("OSPF-MIB", "ospfAreaAggregateLsdbType"), ("OSPF-MIB", "ospfAreaAggregateStatus"), ("OSPF-MIB", "ospfAreaAggregateAreaID"), ("OSPF-MIB", "ospfAreaAggregateEffect"), ("OSPF-MIB", "ospfAreaAggregateMask"), ("OSPF-MIB", "ospfAreaAggregateNet"), ) )
if mibBuilder.loadTexts: ospfAreaAggregateGroup2.setDescription("These objects are used for OSPF systems to support\nnetwork prefix aggregation across areas.")
ospfAreaLsaCountGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 14, 15, 1, 24)).setObjects(*(("OSPF-MIB", "ospfAreaLsaCountNumber"), ) )
if mibBuilder.loadTexts: ospfAreaLsaCountGroup.setDescription("These objects are used for OSPF systems that display\nper-area, per-LSA-type counters.")
ospfHostGroup2 = ObjectGroup((1, 3, 6, 1, 2, 1, 14, 15, 1, 25)).setObjects(*(("OSPF-MIB", "ospfHostIpAddress"), ("OSPF-MIB", "ospfHostCfgAreaID"), ("OSPF-MIB", "ospfHostMetric"), ("OSPF-MIB", "ospfHostStatus"), ("OSPF-MIB", "ospfHostTOS"), ) )
if mibBuilder.loadTexts: ospfHostGroup2.setDescription("These objects are used for OSPF systems\nthat support attached hosts.")
ospfObsoleteGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 14, 15, 1, 26)).setObjects(*(("OSPF-MIB", "ospfAuthType"), ) )
if mibBuilder.loadTexts: ospfObsoleteGroup.setDescription("These objects are obsolete and are no longer required for\nOSPF systems. They are placed into this group for SMI\nconformance.")
# Compliances
ospfCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 14, 15, 2, 1)).setObjects(*(("OSPF-MIB", "ospfStubAreaGroup"), ("OSPF-MIB", "ospfIfMetricGroup"), ("OSPF-MIB", "ospfAreaAggregateGroup"), ("OSPF-MIB", "ospfBasicGroup"), ("OSPF-MIB", "ospfVirtIfGroup"), ("OSPF-MIB", "ospfLsdbGroup"), ("OSPF-MIB", "ospfVirtNbrGroup"), ("OSPF-MIB", "ospfExtLsdbGroup"), ("OSPF-MIB", "ospfAreaGroup"), ("OSPF-MIB", "ospfIfGroup"), ("OSPF-MIB", "ospfHostGroup"), ("OSPF-MIB", "ospfNbrGroup"), ) )
if mibBuilder.loadTexts: ospfCompliance.setDescription("The compliance statement for OSPF systems\nconforming to RFC 1850.")
ospfCompliance2 = ModuleCompliance((1, 3, 6, 1, 2, 1, 14, 15, 2, 2)).setObjects(*(("OSPF-MIB", "ospfHostGroup2"), ("OSPF-MIB", "ospfAreaGroup2"), ("OSPF-MIB", "ospfStubAreaGroup"), ("OSPF-MIB", "ospfVirtLocalLsdbGroup"), ("OSPF-MIB", "ospfVirtNbrGroup2"), ("OSPF-MIB", "ospfIfMetricGroup"), ("OSPF-MIB", "ospfIfGroup2"), ("OSPF-MIB", "ospfLsdbGroup"), ("OSPF-MIB", "ospfVirtIfGroup2"), ("OSPF-MIB", "ospfAreaLsaCountGroup"), ("OSPF-MIB", "ospfNbrGroup2"), ("OSPF-MIB", "ospfAreaAggregateGroup2"), ("OSPF-MIB", "ospfBasicGroup2"), ("OSPF-MIB", "ospfAsLsdbGroup"), ("OSPF-MIB", "ospfLocalLsdbGroup"), ) )
if mibBuilder.loadTexts: ospfCompliance2.setDescription("The compliance statement for OSPF systems\nconforming to this version of the OSPF-MIB.")
ospfComplianceObsolete = ModuleCompliance((1, 3, 6, 1, 2, 1, 14, 15, 2, 3)).setObjects(*(("OSPF-MIB", "ospfAreaRangeGroup"), ("OSPF-MIB", "ospfObsoleteGroup"), ) )
if mibBuilder.loadTexts: ospfComplianceObsolete.setDescription("Contains obsolete object groups.")
# Exports
# Module identity
mibBuilder.exportSymbols("OSPF-MIB", PYSNMP_MODULE_ID=ospf)
# Types
mibBuilder.exportSymbols("OSPF-MIB", BigMetric=BigMetric, DesignatedRouterPriority=DesignatedRouterPriority, HelloRange=HelloRange, Metric=Metric, OspfAuthenticationType=OspfAuthenticationType, PositiveInteger=PositiveInteger, Status=Status, TOSType=TOSType, UpToMaxAge=UpToMaxAge, AreaID=AreaID, RouterID=RouterID)
# Objects
mibBuilder.exportSymbols("OSPF-MIB", ospf=ospf, ospfGeneralGroup=ospfGeneralGroup, ospfRouterId=ospfRouterId, ospfAdminStat=ospfAdminStat, ospfVersionNumber=ospfVersionNumber, ospfAreaBdrRtrStatus=ospfAreaBdrRtrStatus, ospfASBdrRtrStatus=ospfASBdrRtrStatus, ospfExternLsaCount=ospfExternLsaCount, ospfExternLsaCksumSum=ospfExternLsaCksumSum, ospfTOSSupport=ospfTOSSupport, ospfOriginateNewLsas=ospfOriginateNewLsas, ospfRxNewLsas=ospfRxNewLsas, ospfExtLsdbLimit=ospfExtLsdbLimit, ospfMulticastExtensions=ospfMulticastExtensions, ospfExitOverflowInterval=ospfExitOverflowInterval, ospfDemandExtensions=ospfDemandExtensions, ospfRFC1583Compatibility=ospfRFC1583Compatibility, ospfOpaqueLsaSupport=ospfOpaqueLsaSupport, ospfReferenceBandwidth=ospfReferenceBandwidth, ospfRestartSupport=ospfRestartSupport, ospfRestartInterval=ospfRestartInterval, ospfRestartStrictLsaChecking=ospfRestartStrictLsaChecking, ospfRestartStatus=ospfRestartStatus, ospfRestartAge=ospfRestartAge, ospfRestartExitReason=ospfRestartExitReason, ospfAsLsaCount=ospfAsLsaCount, ospfAsLsaCksumSum=ospfAsLsaCksumSum, ospfStubRouterSupport=ospfStubRouterSupport, ospfStubRouterAdvertisement=ospfStubRouterAdvertisement, ospfDiscontinuityTime=ospfDiscontinuityTime, ospfAreaTable=ospfAreaTable, ospfAreaEntry=ospfAreaEntry, ospfAreaId=ospfAreaId, ospfAuthType=ospfAuthType, ospfImportAsExtern=ospfImportAsExtern, ospfSpfRuns=ospfSpfRuns, ospfAreaBdrRtrCount=ospfAreaBdrRtrCount, ospfAsBdrRtrCount=ospfAsBdrRtrCount, ospfAreaLsaCount=ospfAreaLsaCount, ospfAreaLsaCksumSum=ospfAreaLsaCksumSum, ospfAreaSummary=ospfAreaSummary, ospfAreaStatus=ospfAreaStatus, ospfAreaNssaTranslatorRole=ospfAreaNssaTranslatorRole, ospfAreaNssaTranslatorState=ospfAreaNssaTranslatorState, ospfAreaNssaTranslatorStabilityInterval=ospfAreaNssaTranslatorStabilityInterval, ospfAreaNssaTranslatorEvents=ospfAreaNssaTranslatorEvents, ospfStubAreaTable=ospfStubAreaTable, ospfStubAreaEntry=ospfStubAreaEntry, ospfStubAreaId=ospfStubAreaId, ospfStubTOS=ospfStubTOS, ospfStubMetric=ospfStubMetric, ospfStubStatus=ospfStubStatus, ospfStubMetricType=ospfStubMetricType, ospfLsdbTable=ospfLsdbTable, ospfLsdbEntry=ospfLsdbEntry, ospfLsdbAreaId=ospfLsdbAreaId, ospfLsdbType=ospfLsdbType, ospfLsdbLsid=ospfLsdbLsid, ospfLsdbRouterId=ospfLsdbRouterId, ospfLsdbSequence=ospfLsdbSequence, ospfLsdbAge=ospfLsdbAge, ospfLsdbChecksum=ospfLsdbChecksum, ospfLsdbAdvertisement=ospfLsdbAdvertisement, ospfAreaRangeTable=ospfAreaRangeTable, ospfAreaRangeEntry=ospfAreaRangeEntry, ospfAreaRangeAreaId=ospfAreaRangeAreaId, ospfAreaRangeNet=ospfAreaRangeNet, ospfAreaRangeMask=ospfAreaRangeMask, ospfAreaRangeStatus=ospfAreaRangeStatus, ospfAreaRangeEffect=ospfAreaRangeEffect, ospfHostTable=ospfHostTable, ospfHostEntry=ospfHostEntry, ospfHostIpAddress=ospfHostIpAddress, ospfHostTOS=ospfHostTOS, ospfHostMetric=ospfHostMetric, ospfHostStatus=ospfHostStatus, ospfHostAreaID=ospfHostAreaID, ospfHostCfgAreaID=ospfHostCfgAreaID, ospfIfTable=ospfIfTable, ospfIfEntry=ospfIfEntry, ospfIfIpAddress=ospfIfIpAddress, ospfAddressLessIf=ospfAddressLessIf, ospfIfAreaId=ospfIfAreaId, ospfIfType=ospfIfType, ospfIfAdminStat=ospfIfAdminStat, ospfIfRtrPriority=ospfIfRtrPriority, ospfIfTransitDelay=ospfIfTransitDelay, ospfIfRetransInterval=ospfIfRetransInterval, ospfIfHelloInterval=ospfIfHelloInterval, ospfIfRtrDeadInterval=ospfIfRtrDeadInterval, ospfIfPollInterval=ospfIfPollInterval, ospfIfState=ospfIfState, ospfIfDesignatedRouter=ospfIfDesignatedRouter, ospfIfBackupDesignatedRouter=ospfIfBackupDesignatedRouter, ospfIfEvents=ospfIfEvents, 
ospfIfAuthKey=ospfIfAuthKey, ospfIfStatus=ospfIfStatus, ospfIfMulticastForwarding=ospfIfMulticastForwarding, ospfIfDemand=ospfIfDemand, ospfIfAuthType=ospfIfAuthType, ospfIfLsaCount=ospfIfLsaCount, ospfIfLsaCksumSum=ospfIfLsaCksumSum, ospfIfDesignatedRouterId=ospfIfDesignatedRouterId, ospfIfBackupDesignatedRouterId=ospfIfBackupDesignatedRouterId, ospfIfMetricTable=ospfIfMetricTable, ospfIfMetricEntry=ospfIfMetricEntry, ospfIfMetricIpAddress=ospfIfMetricIpAddress, ospfIfMetricAddressLessIf=ospfIfMetricAddressLessIf, ospfIfMetricTOS=ospfIfMetricTOS, ospfIfMetricValue=ospfIfMetricValue, ospfIfMetricStatus=ospfIfMetricStatus, ospfVirtIfTable=ospfVirtIfTable, ospfVirtIfEntry=ospfVirtIfEntry, ospfVirtIfAreaId=ospfVirtIfAreaId, ospfVirtIfNeighbor=ospfVirtIfNeighbor, ospfVirtIfTransitDelay=ospfVirtIfTransitDelay, ospfVirtIfRetransInterval=ospfVirtIfRetransInterval, ospfVirtIfHelloInterval=ospfVirtIfHelloInterval, ospfVirtIfRtrDeadInterval=ospfVirtIfRtrDeadInterval, ospfVirtIfState=ospfVirtIfState, ospfVirtIfEvents=ospfVirtIfEvents, ospfVirtIfAuthKey=ospfVirtIfAuthKey, ospfVirtIfStatus=ospfVirtIfStatus, ospfVirtIfAuthType=ospfVirtIfAuthType, ospfVirtIfLsaCount=ospfVirtIfLsaCount, ospfVirtIfLsaCksumSum=ospfVirtIfLsaCksumSum)
mibBuilder.exportSymbols("OSPF-MIB", ospfNbrTable=ospfNbrTable, ospfNbrEntry=ospfNbrEntry, ospfNbrIpAddr=ospfNbrIpAddr, ospfNbrAddressLessIndex=ospfNbrAddressLessIndex, ospfNbrRtrId=ospfNbrRtrId, ospfNbrOptions=ospfNbrOptions, ospfNbrPriority=ospfNbrPriority, ospfNbrState=ospfNbrState, ospfNbrEvents=ospfNbrEvents, ospfNbrLsRetransQLen=ospfNbrLsRetransQLen, ospfNbmaNbrStatus=ospfNbmaNbrStatus, ospfNbmaNbrPermanence=ospfNbmaNbrPermanence, ospfNbrHelloSuppressed=ospfNbrHelloSuppressed, ospfNbrRestartHelperStatus=ospfNbrRestartHelperStatus, ospfNbrRestartHelperAge=ospfNbrRestartHelperAge, ospfNbrRestartHelperExitReason=ospfNbrRestartHelperExitReason, ospfVirtNbrTable=ospfVirtNbrTable, ospfVirtNbrEntry=ospfVirtNbrEntry, ospfVirtNbrArea=ospfVirtNbrArea, ospfVirtNbrRtrId=ospfVirtNbrRtrId, ospfVirtNbrIpAddr=ospfVirtNbrIpAddr, ospfVirtNbrOptions=ospfVirtNbrOptions, ospfVirtNbrState=ospfVirtNbrState, ospfVirtNbrEvents=ospfVirtNbrEvents, ospfVirtNbrLsRetransQLen=ospfVirtNbrLsRetransQLen, ospfVirtNbrHelloSuppressed=ospfVirtNbrHelloSuppressed, ospfVirtNbrRestartHelperStatus=ospfVirtNbrRestartHelperStatus, ospfVirtNbrRestartHelperAge=ospfVirtNbrRestartHelperAge, ospfVirtNbrRestartHelperExitReason=ospfVirtNbrRestartHelperExitReason, ospfExtLsdbTable=ospfExtLsdbTable, ospfExtLsdbEntry=ospfExtLsdbEntry, ospfExtLsdbType=ospfExtLsdbType, ospfExtLsdbLsid=ospfExtLsdbLsid, ospfExtLsdbRouterId=ospfExtLsdbRouterId, ospfExtLsdbSequence=ospfExtLsdbSequence, ospfExtLsdbAge=ospfExtLsdbAge, ospfExtLsdbChecksum=ospfExtLsdbChecksum, ospfExtLsdbAdvertisement=ospfExtLsdbAdvertisement, ospfRouteGroup=ospfRouteGroup, ospfIntraArea=ospfIntraArea, ospfInterArea=ospfInterArea, ospfExternalType1=ospfExternalType1, ospfExternalType2=ospfExternalType2, ospfAreaAggregateTable=ospfAreaAggregateTable, ospfAreaAggregateEntry=ospfAreaAggregateEntry, ospfAreaAggregateAreaID=ospfAreaAggregateAreaID, ospfAreaAggregateLsdbType=ospfAreaAggregateLsdbType, ospfAreaAggregateNet=ospfAreaAggregateNet, ospfAreaAggregateMask=ospfAreaAggregateMask, ospfAreaAggregateStatus=ospfAreaAggregateStatus, ospfAreaAggregateEffect=ospfAreaAggregateEffect, ospfAreaAggregateExtRouteTag=ospfAreaAggregateExtRouteTag, ospfConformance=ospfConformance, ospfGroups=ospfGroups, ospfCompliances=ospfCompliances, ospfLocalLsdbTable=ospfLocalLsdbTable, ospfLocalLsdbEntry=ospfLocalLsdbEntry, ospfLocalLsdbIpAddress=ospfLocalLsdbIpAddress, ospfLocalLsdbAddressLessIf=ospfLocalLsdbAddressLessIf, ospfLocalLsdbType=ospfLocalLsdbType, ospfLocalLsdbLsid=ospfLocalLsdbLsid, ospfLocalLsdbRouterId=ospfLocalLsdbRouterId, ospfLocalLsdbSequence=ospfLocalLsdbSequence, ospfLocalLsdbAge=ospfLocalLsdbAge, ospfLocalLsdbChecksum=ospfLocalLsdbChecksum, ospfLocalLsdbAdvertisement=ospfLocalLsdbAdvertisement, ospfVirtLocalLsdbTable=ospfVirtLocalLsdbTable, ospfVirtLocalLsdbEntry=ospfVirtLocalLsdbEntry, ospfVirtLocalLsdbTransitArea=ospfVirtLocalLsdbTransitArea, ospfVirtLocalLsdbNeighbor=ospfVirtLocalLsdbNeighbor, ospfVirtLocalLsdbType=ospfVirtLocalLsdbType, ospfVirtLocalLsdbLsid=ospfVirtLocalLsdbLsid, ospfVirtLocalLsdbRouterId=ospfVirtLocalLsdbRouterId, ospfVirtLocalLsdbSequence=ospfVirtLocalLsdbSequence, ospfVirtLocalLsdbAge=ospfVirtLocalLsdbAge, ospfVirtLocalLsdbChecksum=ospfVirtLocalLsdbChecksum, ospfVirtLocalLsdbAdvertisement=ospfVirtLocalLsdbAdvertisement, ospfAsLsdbTable=ospfAsLsdbTable, ospfAsLsdbEntry=ospfAsLsdbEntry, ospfAsLsdbType=ospfAsLsdbType, ospfAsLsdbLsid=ospfAsLsdbLsid, ospfAsLsdbRouterId=ospfAsLsdbRouterId, ospfAsLsdbSequence=ospfAsLsdbSequence, ospfAsLsdbAge=ospfAsLsdbAge, 
ospfAsLsdbChecksum=ospfAsLsdbChecksum, ospfAsLsdbAdvertisement=ospfAsLsdbAdvertisement, ospfAreaLsaCountTable=ospfAreaLsaCountTable, ospfAreaLsaCountEntry=ospfAreaLsaCountEntry, ospfAreaLsaCountAreaId=ospfAreaLsaCountAreaId, ospfAreaLsaCountLsaType=ospfAreaLsaCountLsaType, ospfAreaLsaCountNumber=ospfAreaLsaCountNumber)
# Groups
mibBuilder.exportSymbols("OSPF-MIB", ospfBasicGroup=ospfBasicGroup, ospfAreaGroup=ospfAreaGroup, ospfStubAreaGroup=ospfStubAreaGroup, ospfLsdbGroup=ospfLsdbGroup, ospfAreaRangeGroup=ospfAreaRangeGroup, ospfHostGroup=ospfHostGroup, ospfIfGroup=ospfIfGroup, ospfIfMetricGroup=ospfIfMetricGroup, ospfVirtIfGroup=ospfVirtIfGroup, ospfNbrGroup=ospfNbrGroup, ospfVirtNbrGroup=ospfVirtNbrGroup, ospfExtLsdbGroup=ospfExtLsdbGroup, ospfAreaAggregateGroup=ospfAreaAggregateGroup, ospfLocalLsdbGroup=ospfLocalLsdbGroup, ospfVirtLocalLsdbGroup=ospfVirtLocalLsdbGroup, ospfAsLsdbGroup=ospfAsLsdbGroup, ospfBasicGroup2=ospfBasicGroup2, ospfAreaGroup2=ospfAreaGroup2, ospfIfGroup2=ospfIfGroup2, ospfVirtIfGroup2=ospfVirtIfGroup2, ospfNbrGroup2=ospfNbrGroup2, ospfVirtNbrGroup2=ospfVirtNbrGroup2, ospfAreaAggregateGroup2=ospfAreaAggregateGroup2, ospfAreaLsaCountGroup=ospfAreaLsaCountGroup, ospfHostGroup2=ospfHostGroup2, ospfObsoleteGroup=ospfObsoleteGroup)
# Compliances
mibBuilder.exportSymbols("OSPF-MIB", ospfCompliance=ospfCompliance, ospfCompliance2=ospfCompliance2, ospfComplianceObsolete=ospfComplianceObsolete)
|
Python
|
CL
|
38f6a2b426600eb4e819eeff1553b2c81deeedb7b89cae639e9317722c89cf6e
|
#!/usr/bin/env python3.7
'''
Python >=3.7 script to generate a human-readable XLSX
dump of a specified Nutanix Prism Central instance
'''
import os
import os.path
import sys
import socket
import getpass
import argparse
from time import localtime, strftime
from string import Template
try:
import urllib3
import requests
import xlsxwriter
from requests.auth import HTTPBasicAuth
except ModuleNotFoundError as error:
# Output expected ImportErrors.
print(f'''
{error.__class__.__name__} exception has been caught.
This typically indicates a required module is not installed.
Please ensure you are running this script within a
virtual development environment and that you have run the
setup script as per the readme. Detailed exception info follows:
{error}
''')
sys.exit()
class DetailsMissingException(Exception):
'''
basic custom exception for when things "don't work"
this is something that has been added simply to make extending
the script much easier later
'''
pass
class EnvironmentOptions():
'''
this class is provided as an easy way to package the settings
the script will use
this isn't strictly necessary but does clean things up and removes
the need for a bunch of individual global variables
'''
def __init__(self):
self.cluster_ip = ""
self.username = ""
self.password = ""
self.debug = False
self.read_timeout = 3
self.entity_response_length = 20
# these are the supported entities for this environment
self.supported_entities = ['vm', 'subnet', 'cluster', 'project',
'network_security_rule', 'image',
'host', 'blueprint', 'app']
def __repr__(self):
'''
decent __repr__ for debuggability
this is something recommended by Raymond Hettinger
'''
return (f'{self.__class__.__name__}(cluster_ip={self.cluster_ip},'
f'username={self.username},password=<hidden>,'
f'entity_response_length={self.entity_response_length},'
f'read_timeout={self.read_timeout},debug={self.debug})')
def get_options(self):
'''
method to make sure our environment options class holds the
settings provided by the user
'''
parser = argparse.ArgumentParser()
'''
pc_ip is the only mandatory command-line parameter for this script
username and password have been left as optional so that we have
the opportunity to prompt for them later - this is better for
security and avoids the need to hard-code anything
'''
parser.add_argument(
'pc_ip',
help='Prism Central IP address'
)
parser.add_argument(
'-u',
'--username',
help='Prism Central username'
)
parser.add_argument(
'-p',
'--password',
help='Prism Central password'
)
parser.add_argument(
'-d',
'--debug',
            help='Set to "enable" to turn on debug mode'
)
args = parser.parse_args()
'''
do some checking to see which parameters we still need to prompt for
conditional statements make sense here because a) we're doing a few of
them and b) they're more 'Pythonic'
'''
self.username = (args.username if args.username else
input('Please enter your Prism Central username: '))
self.password = args.password if args.password else getpass.getpass()
'''
conditional statement isn't required for the Prism Central IP since
it is a required parameter managed by argparse
'''
self.cluster_ip = args.pc_ip
self.debug = bool(args.debug == 'enable')
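        # argparse stores the -d/--debug value as a string, so debug is True
        # only when the user passes exactly "enable"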
class ApiClient():
'''
the most important class in our script
here we carry out the actual API request and process the
    responses, as well as any errors returned in the response
'''
def __init__(self, cluster_ip, request, body,
username, password, timeout=10):
self.cluster_ip = cluster_ip
self.username = username
self.password = password
self.base_url = f'https://{self.cluster_ip}:9440/api/nutanix/v3'
self.entity_type = request
self.request_url = f'{self.base_url}/{request}'
self.timeout = timeout
self.body = body
def __repr__(self):
'''
decent __repr__ for debuggability
this is something recommended by Raymond Hettinger
'''
return (f'{self.__class__.__name__}(cluster_ip={self.cluster_ip},'
f'username={self.username},password=<hidden>,'
f'base_url={self.base_url},entity_type={self.entity_type},'
f'request_url={self.request_url},'
f'body (payload)={self.body})')
def send_request(self):
'''
send the API request based on the parameters we
have already collected
'''
headers = {'Content-Type': 'application/json; charset=utf-8'}
try:
api_request = requests.post(
self.request_url,
data=self.body,
verify=False,
headers=headers,
auth=HTTPBasicAuth(self.username, self.password),
timeout=self.timeout,
)
except requests.ConnectTimeout:
print('Connection timed out while connecting to '
f'{self.cluster_ip}. Please check your connection, '
'then try again.')
sys.exit()
except requests.ConnectionError:
print('An error occurred while connecting to '
f'{self.cluster_ip}. Please check your connection, '
'then try again.')
sys.exit()
except requests.HTTPError:
print('An HTTP error occurred while connecting to '
f'{self.cluster_ip}. Please check your connection, '
'then try again.')
sys.exit()
except Exception as error:
'''
catching generic Exception will throw warnings if you
are running the script through something like flake8
or pylint
that's fine for what we're doing here, though :)
'''
print(f'An unhandled exception has occurred: {error}')
print(f'Exception: {error.__class__.__name__}')
print('Exiting ...')
sys.exit()
if api_request.status_code >= 500:
print('An HTTP server error has occurred ('
f'{api_request.status_code})')
else:
if api_request.status_code == 401:
# authentication error
print('An authentication error occurred while connecting to '
f'{self.cluster_ip}. Please check your credentials, '
'then try again.')
sys.exit()
if api_request.status_code >= 401:
print('An HTTP client error has occurred ('
f'{api_request.status_code})')
sys.exit()
else:
print("Connected and authenticated successfully.")
return api_request.json()
HTML_ROWS = {}
ENTITY_TOTALS = {}
def generate_xlsx(json_results):
'''
generate the actual XLSX doc
this is where xlsxwriter is used
'''
day = strftime('%d-%b-%Y', localtime())
time = strftime('%H%M%S', localtime())
now = f'{day}_{time}'
xlsx_filename = f'{now}_prism_central.xlsx'
workbook = xlsxwriter.Workbook(xlsx_filename)
'''
the next block parses some of the Prism Central info that
currently exists as individual lists
'''
'''
these are entity types the script currently supports
if new features or entity types become available in future,
it should be a relatively simple task to update this list
to support those entities
'''
# supported_entities = [
# 'vm', 'subnet', 'cluster', 'project', 'network_security_rule',
# 'image', 'host', 'blueprint', 'app']
supported_entities = ['vm', 'host']
for row_label in supported_entities:
ENTITY_TOTALS[row_label] = 0
'''
    Create some worksheets. There is probably a better way to do this,
    but xlsxwriter doesn't like dictionaries.
'''
worksheetvm = workbook.add_worksheet("vm")
worksheetvm.write(0, 0, "VM Name")
worksheetvm.write(0, 1, "Cluster")
worksheetvm.write(0, 2, "Description")
worksheetvm.write(0, 3, "vCPU Sockets")
worksheetvm.write(0, 4, "Cores per Socket")
worksheetvm.write(0, 5, "Memory(MiB)")
worksheethost = workbook.add_worksheet("host")
worksheethost.write(0, 0, "Host Serial")
worksheethost.write(0, 1, "Host Name")
worksheethost.write(0, 2, "Host IP")
worksheethost.write(0, 3, "CVM IP")
worksheethost.write(0, 4, "Memory (MiB)")
worksheethost.write(0, 5, "CPU Sockets")
worksheethost.write(0, 6, "Cores Per Socket")
worksheethost.write(0, 7, "Hypervisor Version")
worksheethost.write(0, 8, "Num VMs")
print('\n')
for json_result in json_results:
# collect info that is common to all entity types
for entity in json_result:
if entity in supported_entities:
ENTITY_TOTALS[f'{entity}'] = (json_result[1]["metadata"]
["total_matches"])
print(f'Count of entity type {entity}: '
f'{json_result[1]["metadata"]["total_matches"]}')
'''
        note that the next long section seems a little repetitive, but the
        string formatting for each entity is different enough to justify doing
        it this way; if each entity's info 'block' were the same, we could set
        up an iterator or use common formatting, but then the generated XLSX
        wouldn't be very useful
'''
##########
# VM #
##########
if json_result[0] == 'vm':
wkscell = 1
try:
for vm in json_result[1]['entities']:
vm_name = vm["spec"]["name"]
cluster_name = vm["spec"]["cluster_reference"]["name"]
description = (vm['spec']['description']
if 'description' in vm['spec']
else 'None provided')
vcpus = vm['spec']['resources']['num_sockets']
coresper = vm['spec']['resources']['num_vcpus_per_socket']
memory = vm['spec']['resources']['memory_size_mib']
worksheetvm.write(wkscell, 0, vm_name)
worksheetvm.write(wkscell, 1, cluster_name)
worksheetvm.write(wkscell, 2, description)
worksheetvm.write(wkscell, 3, vcpus)
worksheetvm.write(wkscell, 4, coresper)
worksheetvm.write(wkscell, 5, memory)
wkscell += 1
except KeyError:
worksheetvm.write(wkscell, 0, "Data missing or malformed")
########
# HOST #
########
elif json_result[0] == 'host':
wkscell = 1
try:
for host in json_result[1]['entities']:
if 'name' in host['status']:
host_serial = (host["status"]["resources"]
["serial_number"])
host_name = (host["status"]["name"])
host_ip = (host["status"]["resources"]
["hypervisor"]["ip"])
hypervisor_ver = (host["status"]["resources"]
["hypervisor"]["hypervisor_full_name"])
cvm_ip = (host["status"]["resources"]
["controller_vm"]["ip"])
num_vms = (host["status"]["resources"]
["hypervisor"]["num_vms"])
memory = (host["status"]["resources"]
["memory_capacity_mib"])
cpusock = (host["status"]["resources"]
["num_cpu_sockets"])
corepersock = (host["status"]["resources"]
["num_cpu_cores"])
worksheethost.write(wkscell, 0, host_serial)
worksheethost.write(wkscell, 1, host_name)
worksheethost.write(wkscell, 2, host_ip)
worksheethost.write(wkscell, 3, cvm_ip)
worksheethost.write(wkscell, 4, memory)
worksheethost.write(wkscell, 5, cpusock)
worksheethost.write(wkscell, 6, corepersock)
worksheethost.write(wkscell, 7, hypervisor_ver)
worksheethost.write(wkscell, 8, num_vms)
wkscell += 1
else:
host_serial = (host["status"]
["resources"]["serial_number"])
cvm_ip = (host["status"]["resources"]
["controller_vm"]["ip"])
worksheethost.write(wkscell, 0, host_serial)
worksheethost.write(wkscell, 2, cvm_ip)
wkscell += 1
except KeyError:
worksheethost.write(wkscell, 0, "Data missing or malformed")
print('\n')
workbook.close()
def show_intro():
'''
function to simply show an extended help intro when the script
is run - definitely not required but useful for development
scripts like this one
'''
print(
f'''
{sys.argv[0]}:
Connect to a Nutanix Prism Central instance, grab some high-level details, then
generate an XLSX workbook from them
Intended to generate a very high-level and *unofficial* as-built document for
an existing Prism Central instance.
This script is GPL and there is *NO WARRANTY* provided with this script ...
AT ALL. You can use and modify this script as you wish, but please make
sure the changes are appropriate for the intended environment.
Formal documentation should always be generated using best-practice methods
that suit your environment.
'''
)
def main():
'''
main entry point into the 'app'
every function needs a Docstring in order to follow best
practices
'''
show_intro()
environment_options = EnvironmentOptions()
environment_options.get_options()
if environment_options.debug:
print(f'{environment_options}\n')
'''
disable insecure connection warnings
please be advised and aware of the implications of doing this
in a production environment!
'''
if environment_options.debug:
print('Disabling urllib3 insecure request warnings ...\n')
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# make sure all required info has been provided
if not environment_options.cluster_ip:
raise DetailsMissingException('Cluster IP is required.')
elif not environment_options.username:
raise DetailsMissingException('Username is required.')
elif not environment_options.password:
raise DetailsMissingException('Password is required.')
else:
if environment_options.debug:
print('All parameters OK.\n')
'''
'length' in Nutanix v3 API requests dictates how many entities
will be returned in each request
'''
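    # For example, with the default length of 20 the payload built below for
    # the "vm" endpoint is the JSON string '{ "kind": "vm","length": 20}'
    # (a sketch of the body assembled in the loop further down)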
length = environment_options.entity_response_length
if environment_options.debug:
print(f'{length} entities will be returned for each request.')
json_results = []
endpoints = []
for entity in environment_options.supported_entities:
entity_plural = f'{entity}s'
endpoints.append({'name': f'{entity}',
'name_plural': f'{entity_plural}',
'length': length})
if environment_options.debug:
print('Iterating over all supported endpoints ...\n')
for endpoint in endpoints:
client = ApiClient(
environment_options.cluster_ip,
f'{endpoint["name_plural"]}/list',
(f'{{ "kind": "{endpoint["""name"""]}",'
f'"length": {endpoint["""length"""]}}}'),
environment_options.username,
environment_options.password,
environment_options.read_timeout
)
if environment_options.debug:
print(f'Client info: {client}\n')
print(f'Requesting "{client.entity_type}" ...\n')
results = client.send_request()
json_results.append([endpoint['name'], results])
if environment_options.debug:
print('Generating XLSX ...\n')
generate_xlsx(json_results)
if __name__ == '__main__':
main()
|
Python
|
CL
|
b9420795be63d1f906125edc5fdb3a69a105f1ecec154da41397439eae4ceb99
|
import json
from vkwave.api.methods import APIOptionsRequestContext
from vkwave.bots.core.types.json_types import JSONDecoder
from vkwave.http import AbstractHTTPClient
from vkwave.types.responses import DocsSaveResponseModel
class VoiceUploader:
def __init__(
self, api_context: APIOptionsRequestContext, json_serialize: JSONDecoder = json.loads,
):
self.api_context = api_context
self.client: AbstractHTTPClient = api_context.api_options.get_client().http_client
self.json_serialize = json_serialize
async def get_server(self, peer_id: int) -> str:
server_data = await self.api_context.docs.get_messages_upload_server(
type="audio_message", peer_id=peer_id
)
return server_data.response.upload_url
async def upload(self, upload_url: str, file_data) -> DocsSaveResponseModel:
upload_data = self.json_serialize(
await self.client.request_text(method="POST", url=upload_url, data={"file": file_data})
)
photo_sizes = (await self.api_context.docs.save(file=upload_data["file"])).response
return photo_sizes
async def get_attachment_from_path(self, peer_id: int, file_path: str) -> str:
upload_url = await self.get_server(peer_id)
file_data = open(file_path, "rb")
doc = await self.upload(upload_url, file_data)
file_data.close()
if doc.audio_message is None:
raise TypeError("This is not a voice message")
return f"doc{doc.audio_message.owner_id}_{doc.audio_message.id}"
|
Python
|
CL
|
0c41be37f2a32504bddf217f368751f01e6693d58d60d935a8675be3f274e8a8
|
"""Create the object store groupworkspace names, gws manager, quota and email
from the elastic tape quotas and create user records, groupworkspace records
and object store quotas.
This script is run via ./manage runscript"""
import jdma_site.settings as settings
from jdma_control.models import User, Groupworkspace, StorageQuota
from jdma_control.scripts.import_et_gws import get_et_gws_from_url
from jdma_control.scripts.import_et_gws import create_user_entry, create_quota_entry
ET_EXPORT_URL = "http://cedadb.ceda.ac.uk/gws/etexport/"
def create_user_gws_quotas(data):
# Create the User, GroupWorkspace and StorageQuota from each line of the
# data
storageid = StorageQuota.get_storage_index("ftp")
for line in data:
if len(line) == 4:
# create user entry
new_gws = create_user_entry(line)
# create the new storage quota
create_quota_entry(storageid, new_gws, 32 * 10**12, 0)
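            # 32 * 10**12 bytes corresponds to a 32 TB default quota per groupworkspace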
def run():
data = get_et_gws_from_url(ET_EXPORT_URL)
create_user_gws_quotas(data)
|
Python
|
CL
|
7630837e5a2c8369370257741691147b4b0fb0c8f6970f5fdd9c41e141129f79
|
# -*- coding: utf-8 -*-
import re
import numpy as np
from typing import List
def append_EOS(texts: List[str], eos: str) -> List[str]:
"""
Appends EOS to each text.
>>> texts = ["a\\n", "b\\n"]
>>> eos = "<EOS>"
>>> append_EOS(texts=texts, eos=eos)
['a<EOS>', 'b<EOS>']
"""
return [text.strip() + eos for text in texts]
def preprocess(texts, puts_padding=False, padding_char="\t"):
"""
Preprocessor for text to char vector
:param texts: raw corpus
:param puts_padding: pad with padding_char to align text length.
:return: corpus: corpus
char_to_id: char in corpus to id map
id_to_char: id to char in corpus map
>>> texts = ['IKEA港北', 'IKEA新三郷']
>>> corpus, char_to_id, id_to_char = preprocess(texts)
>>> corpus
array([1, 2, 3, 4, 7, 8, 9])
>>> char_to_id
{'\\t': 0, 'I': 1, 'K': 2, 'E': 3, 'A': 4, '港': 5, '北': 6, '新': 7, '三': 8, '郷': 9}
>>> id_to_char
{0: '\\t', 1: 'I', 2: 'K', 3: 'E', 4: 'A', 5: '港', 6: '北', 7: '新', 8: '三', 9: '郷'}
>>> corpus, char_to_id, id_to_char = preprocess(texts, puts_padding=True)
>>> corpus
array([1, 2, 3, 4, 7, 8, 9])
>>> char_to_id
{'\\t': 0, 'I': 1, 'K': 2, 'E': 3, 'A': 4, '港': 5, '北': 6, '新': 7, '三': 8, '郷': 9}
>>> id_to_char
{0: '\\t', 1: 'I', 2: 'K', 3: 'E', 4: 'A', 5: '港', 6: '北', 7: '新', 8: '三', 9: '郷'}
"""
if puts_padding:
max_text_len = calc_max_len(texts)
texts = [put_padding(text, max_text_len, padding_char) for text in texts]
chars_list = [text_to_chars(text) for text in texts]
# Initialize with padding char.
char_to_id = {padding_char: 0}
id_to_char = {0 : padding_char}
for chars in chars_list:
for char in chars:
if char not in char_to_id:
new_id = len(char_to_id)
char_to_id[char] = new_id
id_to_char[new_id] = char
corpus = np.array([char_to_id[char] for char in chars])
return corpus, char_to_id, id_to_char
def text_to_chars(text):
"""
Splits text to chars list.
>>> text = "IKEA港北"
>>> text_to_chars(text)
['I', 'K', 'E', 'A', '港', '北']
"""
ignore_chars = ['']
# TODO: normalize Japanese.
return [char for char in list(text) if char not in ignore_chars]
def calc_max_len(texts):
"""
Calculates max length of texts.
:param texts: list of texts.
    :return: max length in texts.
>>> texts = ["1", "1234", "123456789"]
>>> calc_max_len(texts)
9
"""
return max(len(text) for text in texts)
def put_padding(text, max_length, filling_char="\t"):
"""
    Pads text with filling_char so that it aligns to max_length.
    :param text: text to be aligned
    :param max_length: length to align to
    :param filling_char: character to pad with.
    :return: text padded with :filling_char: to max_length.
>>> text = "1234"
>>> max_length = 10
>>> filling_char = "|"
>>> put_padding(text, max_length, filling_char)
'1234||||||'
"""
text_len = len(text)
if text_len >= max_length:
return text
filling_len = max_length - text_len
filling_chars = filling_char * filling_len
return text + filling_chars
def text_to_vec(text, term_to_vec):
"""
    Converts text to a vector using the given term-to-vector mapping.
    Characters that are not in term_to_vec are mapped to zero vectors.
    Facebook fastText treats whitespace as a term separator,
    so whitespace in the text is ignored.
    :param text: text to convert to a vector
    :param term_to_vec: dictionary mapping a character to its vector
    :return: vector (dim 2 numpy array, one row per character)
>>> text = "IKEA"
>>> term_to_vec = {'I': np.array([0]), 'K': np.array([1]), 'E': np.array([2]), 'A': np.array([3]), '港': np.array([4]), '北': np.array([5]), '\\t': np.array([6])}
>>> text_to_vec(text, term_to_vec)
array([[0],
[1],
[2],
[3]])
>>> text = "IKE!" # "!" is not in char_to_id.
>>> term_to_vec = {'I': np.array([0]), 'K': np.array([1]), 'E': np.array([2]), 'A': np.array([3]), '港': np.array([4]), '北': np.array([5]), '\\t': np.array([6])}
>>> text_to_vec(text, term_to_vec)
array([[0],
[1],
[2],
[0]])
"""
chars = [char for char in list(text) if char != ' ']
return np.array([char_to_vec(char, term_to_vec) for char in chars])
def char_to_vec(char, term_to_vec):
"""
>>> char = 'a'
>>> term_to_vec = {'a': np.array([[0], [1]])}
>>> char_to_vec(char, term_to_vec)
array([[0],
[1]])
>>> char = ' '
>>> char_to_vec(char, term_to_vec)
array([[0],
[0]])
"""
return term_to_vec.get(char, np.zeros_like(term_to_vec[list(term_to_vec)[0]]))
def convert_one_hot(corpus, vocabulary_size, max_length):
"""
Converts to one-hot vectors.
:param corpus: list of word ids (dim 1 of numpy array)
:param vocabulary_size: vocabulary size
:return: one-hot vectors (dim 2 of numpy array)
>>> corpus = np.array([1, 2, 1, 0])
>>> vocabulary_size = 5
>>> max_length = 10
>>> convert_one_hot(corpus, vocabulary_size, max_length)
array([[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]], dtype=int32)
>>> corpus = np.array([[1, 2, 1, 0], [1, 2, 3, 0]])
>>> vocabulary_size = 5
>>> max_length = 10
>>> convert_one_hot(corpus, vocabulary_size, max_length)
Traceback (most recent call last):
...
ValueError: Corpus must be dim 1, but actual is 2
>>> corpus = np.array([1, 2, 6])
>>> vocabulary_size = 5
>>> max_length = 10
>>> convert_one_hot(corpus, vocabulary_size, max_length)
Traceback (most recent call last):
...
IndexError: index 6 is out of bounds for axis 1 with size 5
"""
N = max_length
if corpus.ndim != 1:
raise ValueError("Corpus must be dim 1, but actual is %d" % corpus.ndim)
one_hot = np.zeros((N, vocabulary_size), dtype=np.int32)
for idx, word_id in enumerate(corpus):
one_hot[idx, word_id] = 1
return one_hot
def train_data_to_train_and_target_vector(train_data):
"""
Converts training data to training vectors (X) and target vectors (Y).
>>> train_data = np.array([[0, 1, 0], [0, 0, 1]])
>>> train_vectors, target_vectors = train_data_to_train_and_target_vector(train_data)
>>> train_vectors
[[array([0, 1])], [array([0, 0])]]
>>> train_vectors[0][0]
array([0, 1])
>>> target_vectors
[array([1, 0]), array([0, 1])]
"""
train_vectors = [[vector[:-1]] for vector in train_data]
target_vectors = [vector[1:] for vector in train_data]
return train_vectors, target_vectors
def char_to_one_hot(char: str, char_to_id, vocabulary_size):
"""
Convert char to one hot vector.
>>> char = 'a'
>>> char_to_id = {'a': 1, 'b': 2}
>>> vocabulary_size = 5
>>> char_to_one_hot(char, char_to_id, vocabulary_size)
array([[0, 1, 0, 0, 0]], dtype=int32)
"""
input_char_id = char_to_id[char]
return convert_one_hot(np.array([input_char_id]), vocabulary_size, max_length=1)
def generate_npy(texts, char_to_id, output_file):
"""
>>> texts = ["IKEA港北", "IKEA三郷", "IKEA立川"]
>>> _, char_to_id, _ = preprocess(texts)
>>> output_file = "/tmp/generate_npy.npy"
>>> generate_npy(texts, char_to_id, output_file)
array([[ 1, 2, 3, 4, 5, 6],
[ 1, 2, 3, 4, 7, 8],
[ 1, 2, 3, 4, 9, 10]])
"""
    # TODO: make this work even when the texts have different lengths
    # A plain Python list of np.array might also be sufficient
# corpus : [array([0, 1, 2, 3, 4, 5]), array([0, 1, 2, 3, 6, 7]), array([0, 1, 2, 3, 8, 9])]
texts_vec = [np.array([char_to_id[char] for char in text]).reshape(len(text)) for text in texts]
corpus = np.array(texts_vec)
np.save(output_file, corpus)
return corpus
|
Python
|
CL
|
d87842211d02e8779f4e1e2b97629d5086874566bebb2a0292a2f7aaff20bf1f
|
#!/usr/bin/env python3
"""Handle graphing of Azure network deployments.
Copyright 2021 Alexander Kuemmel
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__version__ = '0.1.0'
__author__ = 'Alexander Kuemmel <akisys@github>'
__license__ = 'Apache License 2.0'
import sys
assert sys.version_info >= (3, 7)
import json
import argparse
import shutil
import subprocess
import logging
import textwrap
import dataclasses
import zlib
from string import Template
from pathlib import Path
from contextlib import contextmanager
from collections import namedtuple
from pprint import pprint as pp
# mini semver setup
semver = namedtuple('semver', ['major', 'minor', 'patch'])
# mermaid templates and defaults
mmd_vnet_label = Template('$node{{"$label [p:$pc]"}}')
mmd_vnet_label_rnd = Template('$node("$label [p:$pc]")')
mmd_vnet_peering_wl = Template('$node ---|"$label"| $peer')
mmd_vnet_peering_nl = Template('$node --- $peer')
mmd_vnet_style = Template('style $node $style')
mmd_subgraph_begin = Template('subgraph "$label"')
mmd_subgraph_end = "end"
mmd_header_data = """
graph LR
"""
mmd_footer_data = """
"""
mmd_vnet_styles_by_peer_count = {
range(0,1): "fill:#8cbed6",
range(1,10): "fill:#f8de7e",
range(10,20): "fill:#ff8243",
range(20,50): "fill:#ff5349",
range(50,255): "fill:#c90016",
}
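# Example of the mermaid lines these templates expand to (hypothetical hashes
# and labels):
#   123456{{"hub-vnet [p:3]"}}
#   style 123456 fill:#f8de7e
#   123456 ---|"hub-to-spoke"| 654321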
# mini azure res setup
@dataclasses.dataclass
class az_res:
id: str
resourceGroup: str = None
name: str = None
hash: str = None
def __post_init__(self):
self.hash = str(zlib.adler32(self.id.encode('ascii')))
def __str__(self) -> str:
return self.hash
def _fields() -> list:
return [f.name for f in dataclasses.fields(__class__)]
@dataclasses.dataclass
class az_vnet(az_res):
resourceGroup: str = None
name: str = None
sub_id: str = None
peers: list = dataclasses.field(default_factory=list)
def __post_init__(self):
id_split = self.id.split('/')
self.resourceGroup = id_split[4]
self.name = id_split[8]
self.sub_id = id_split[2]
super().__post_init__()
@dataclasses.dataclass
class az_vnet_peering(az_res):
thisVnet: az_vnet = None
peeredVnet: az_vnet = None
def __post_init__(self):
super().__post_init__()
self.hash = str(int(self.thisVnet.hash)+int(self.peeredVnet.hash))
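        # the peering hash is the sum of both vnet hashes, so the A->B and
        # B->A records of the same peering collapse to a single entry in peer_map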
@dataclasses.dataclass
class runtime_data:
vnet_map: dict = dataclasses.field(default_factory=dict)
peer_map: dict = dataclasses.field(default_factory=dict)
subscription_data: list = dataclasses.field(default_factory=list)
render_as_subgraph: bool = False
render_with_edge_labels: bool = True
# logging setup
format = "%(asctime)s - %(levelname)s - %(message)s"
logging.basicConfig(format=format, level=logging.DEBUG, datefmt="%H:%M:%S")
# requirements presets
azcmd = "az"
azver = semver._make("2.27.0".split('.'))
@contextmanager
def use_azure_account(sub_id: str):
curr_sub_id = None
try:
logging.debug("Retrieving az default account")
current_account = json.loads(subprocess.check_output("az account show".split(), shell=False))
curr_sub_id = current_account.get('id')
if current_account.get('isDefault') and curr_sub_id == sub_id:
logging.debug(f"Skipping az account change, already active on correct subscription {curr_sub_id}")
else:
logging.debug(f"Changing az default account to subsription {sub_id}")
subprocess.check_call(f"az account set --subscription {sub_id}".split(), shell=False)
current_account = json.loads(subprocess.check_output("az account show".split(), shell=False))
yield current_account
finally:
logging.debug(f"Resetting az default to account subscription {curr_sub_id}")
subprocess.check_call(f"az account set --subscription {curr_sub_id}".split(), shell=False)
def check_local_requirements():
if not shutil.which(azcmd):
raise Exception(f"Could not find executable Azure CLI ({azcmd}) in $PATH")
cmd_base = f"{azcmd} version".split()
cmd_output = json.loads(subprocess.check_output(cmd_base, shell=False))
az_check_ver = semver._make(cmd_output.get('azure-cli-core').split('.'))
if not az_check_ver >= azver:
raise Exception(f"Minimum version requirement for 'az' command not met, expecting at least {azver._asdict()}, but got {az_check_ver._asdict()}")
def get_az_vnet(data: dict) -> az_vnet:
vnet_extract = {key: data.get(key) for key in data.keys() if key in az_vnet._fields()}
return az_vnet(**vnet_extract)
def get_az_vnet_peers(data: dict) -> az_vnet_peering:
azvnet = get_az_vnet(data)
for raw_peering in data.get('virtualNetworkPeerings'):
peered_vnet = get_az_vnet(raw_peering.get('remoteVirtualNetwork'))
peering_extract = {key: raw_peering.get(key) for key in raw_peering.keys() if key in az_vnet_peering._fields()}
peering = az_vnet_peering(**peering_extract, peeredVnet=peered_vnet, thisVnet=azvnet)
yield peering
def get_mmd_vnet_style(peer_count: int = 0):
logging.debug(f"Getting style for peer count {peer_count}")
for r,v in mmd_vnet_styles_by_peer_count.items():
if peer_count in r:
return v
return None
def render_data(runtime: runtime_data) -> str:
output_buffer = []
processed_vnet_refs = []
for subdata in runtime.subscription_data:
render_data = []
subscription = subdata.get('subscription')
vnet_refs = subdata.get('vnet_refs')
if runtime.render_as_subgraph:
render_data.append(mmd_subgraph_begin.substitute(label=subscription.get('name')))
for node_hash in vnet_refs:
node = runtime.vnet_map[node_hash]
            # render vnets differently if they don't have any peers
if len(node.peers) > 0:
node_label = mmd_vnet_label
else:
node_label = mmd_vnet_label_rnd
peer_count = len(node.peers)
render_data.append(node_label.substitute(node=node.hash, label=node.name, pc=peer_count))
render_data.append(mmd_vnet_style.substitute(node=node, style=get_mmd_vnet_style(peer_count)))
if runtime.render_as_subgraph:
render_data.append(mmd_subgraph_end)
output_buffer.extend(render_data)
processed_vnet_refs.extend(vnet_refs)
pass
# render all nodes not belonging to visited subscriptions
unprocessed_vnet_refs = set(runtime.vnet_map.keys()).difference(processed_vnet_refs)
if runtime.render_as_subgraph:
output_buffer.append(mmd_subgraph_begin.substitute(label="__EXTERNAL__"))
for node_hash in unprocessed_vnet_refs:
node = runtime.vnet_map[node_hash]
peer_count = len(node.peers)
node_label = mmd_vnet_label_rnd
output_buffer.append(node_label.substitute(node=node, label=node.name, pc=peer_count))
output_buffer.append(mmd_vnet_style.substitute(node=node, style=get_mmd_vnet_style(peer_count)))
if runtime.render_as_subgraph:
output_buffer.append(mmd_subgraph_end)
    # finally, render all peering connections
for peer in runtime.peer_map.values():
if runtime.render_with_edge_labels:
output_buffer.append(mmd_vnet_peering_wl.substitute(node=peer.thisVnet, label=peer.name, peer=peer.peeredVnet))
else:
output_buffer.append(mmd_vnet_peering_nl.substitute(node=peer.thisVnet, peer=peer.peeredVnet))
return "{0}\n{1}\n{2}".format(mmd_header_data, '\n'.join(output_buffer), mmd_footer_data)
def aggregate_subscription(sub_id: str, runtime: runtime_data):
data_map = {}
sub_vnet_refs = []
with use_azure_account(sub_id) as active_sub:
cmd_base = "az network vnet list".split()
cmd_output = json.loads(subprocess.check_output(cmd_base, shell=False))
for raw_vnet in cmd_output:
azvnet = get_az_vnet(raw_vnet)
if azvnet.hash not in runtime.vnet_map:
runtime.vnet_map[azvnet.hash] = azvnet
for peer in get_az_vnet_peers(raw_vnet):
if peer.hash not in runtime.vnet_map[azvnet.hash].peers:
runtime.vnet_map[azvnet.hash].peers.append(peer.hash)
if peer.peeredVnet.hash not in runtime.vnet_map:
runtime.vnet_map[peer.peeredVnet.hash] = peer.peeredVnet
if peer.hash not in runtime.vnet_map[peer.peeredVnet.hash].peers:
runtime.vnet_map[peer.peeredVnet.hash].peers.append(peer.hash)
if peer.hash not in runtime.peer_map:
runtime.peer_map[peer.hash] = peer
sub_vnet_refs.append(azvnet.hash)
data_map.update(subscription=active_sub, vnet_refs=set(sub_vnet_refs))
runtime.subscription_data.append(data_map)
if __name__ == "__main__":
logo = '''
__ __ (Iron) _ _
| \/ | ___ _ __ _ __ ___ __ _(_) __| | ___ _ __
| |\/| |/ _ \ '__| '_ ` _ \ / _` | |/ _` |/ _ \ '_ \
| | | | __/ | | | | | | | (_| | | (_| | __/ | | |
|_| |_|\___|_| |_| |_| |_|\__,_|_|\__,_|\___|_| |_|
by akisys
'''
formatter = lambda prog:argparse.RawDescriptionHelpFormatter(prog,max_help_position=70)
parser = argparse.ArgumentParser(description=textwrap.dedent(logo), formatter_class=formatter)
parser.add_argument('-v', dest="verbosity", action="count", help="Stackable verbosity level indicator, e.g. -vv")
subs_input_group = parser.add_mutually_exclusive_group()
subs_input_group.add_argument('-s', dest="subs", action="append", help="Subscription to render out, can be used multiple times")
subs_input_group.add_argument('-sf', dest="subs_file", type=argparse.FileType('r'), help="Subscriptions to render out, one ID per line")
parser.add_argument('-o', dest="outfile", type=Path, required=True)
parser.add_argument('-el', dest="render_edge_labels", action="store_true", default=False)
parser.add_argument('-sg', dest="render_sub_graphs", action="store_true", default=False)
try:
args = parser.parse_args()
# logging level setup
logger = logging.getLogger()
if not args.verbosity:
logger.setLevel(logging.WARN)
elif args.verbosity == 1:
logger.setLevel(logging.INFO)
elif args.verbosity >= 2:
logger.setLevel(logging.DEBUG)
check_local_requirements()
runtime = runtime_data()
sub_ids = args.subs or args.subs_file.readlines()
sub_ids = [line.strip() for line in sub_ids if not line.startswith('#')]
logging.info("Collecting data")
for sub_id in sub_ids:
logging.debug(f"Processing {sub_id}")
aggregate_subscription(sub_id=sub_id, runtime=runtime)
logging.info("Rendering data")
runtime.render_with_edge_labels = args.render_edge_labels
runtime.render_as_subgraph = args.render_sub_graphs
output_text = render_data(runtime)
logging.info("Writing output")
with open(args.outfile, "w") as fp:
fp.write(output_text)
pass
except Exception as ex:
logging.fatal(ex)
sys.exit(1)
|
Python
|
CL
|
3d1271c9b1e9c93110676f1ffd7039d75cd2f66050fcba2a76b9b1d4d2387b6a
|
from django.db.models import Count
from django.shortcuts import get_object_or_404
from django.http import Http404
from django.urls import reverse_lazy
from django.views.generic import DetailView, TemplateView, RedirectView, ListView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from blog.forms import PostForm
from blog.models import Post, Tag
from blog.permission import AdminPermissionMixin
class PostListView(ListView):
queryset = Post.objects.filter(published_at__isnull=False).order_by('-published_at')
context_object_name = 'posts'
paginate_by = 4
def get_context_data(self, **kwargs):
context = super(PostListView, self).get_context_data(**kwargs)
context['tags'] = Tag.objects.filter(posts__published_at__isnull=False).values('pk', 'name').annotate(
tags_count=Count('posts')).order_by('-tags_count')[:5]
return context
class PostDetailView(DetailView):
template_name = 'blog/post_detail.html'
model = Tag
def get_object(self, queryset=None):
post = get_object_or_404(Post, pk=self.kwargs['pk'])
return post
class TagMixin(object):
def save_tags(self, post_getlist_tags):
"""
        Return a list of Tag objects.
        Creates a tag if it doesn't already exist.
        This function matches tags by the name field.
        :param post_getlist_tags: the result of request.POST.getlist('tags', [])
"""
cleaned_tags = []
for name in post_getlist_tags:
if Tag.objects.filter(name=name).exists():
tag = Tag.objects.filter(name=name).first()
cleaned_tags.append(tag)
else:
if bool(name.strip()):
tag = Tag.objects.create(name=name)
tag.save()
cleaned_tags.append(tag)
return cleaned_tags
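    # Example (illustrative): save_tags(["django", "  ", "python"]) returns the
    # existing or newly created Tag objects for "django" and "python" and skips
    # the blank entry.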
class PostCreateView(AdminPermissionMixin, CreateView, TagMixin):
model = Post
form_class = PostForm
success_url = reverse_lazy('post_draft_list')
template_name_suffix = '_edit'
def form_valid(self, form):
post = form.save(commit=False)
post.author = self.request.user
post.save()
saved_tags = self.save_tags(self.request.POST.getlist('tags', []))
post.tags.add(*saved_tags)
return super(PostCreateView, self).form_valid(form)
class PostUpdateView(AdminPermissionMixin, UpdateView, TagMixin):
model = Post
form_class = PostForm
success_url = reverse_lazy('post_list')
template_name_suffix = '_edit'
def get_object(self, queryset=None):
post = get_object_or_404(Post, pk=self.kwargs['pk'])
return post
def form_valid(self, form):
post = form.save(commit=False)
post.author = self.request.user
post.save()
saved_tags = self.save_tags(self.request.POST.getlist('tags', []))
post.tags.add(*saved_tags)
return super(PostUpdateView, self).form_valid(form)
class PostDraftListView(AdminPermissionMixin, TemplateView):
model = Post
form_class = PostForm
success_url = reverse_lazy('post_draft_list')
template_name = 'blog/post_draft_list.html'
def get_context_data(self, **kwargs):
context = super(PostDraftListView, self).get_context_data(**kwargs)
context['posts'] = Post.objects.filter(published_at__isnull=True).order_by('created_at')
return context
class PostPublishView(AdminPermissionMixin, RedirectView):
url = reverse_lazy('post_list')
def get(self, request, *args, **kwargs):
try:
post = Post.objects.get(pk=self.kwargs['pk'])
post.publish()
        except Exception:
raise Http404('Post does not exist')
return super(PostPublishView, self).get(request, *args, **kwargs)
class PostDeleteView(AdminPermissionMixin, DeleteView):
model = Post
success_url = reverse_lazy('post_list')
def get(self, *args, **kwargs):
return self.post(*args, **kwargs)
|
Python
|
CL
|
88d78450a570a0b39aeb4f7fd6d82d142de87e81e2449e18cd52c403ebbd2b69
|
#! /usr/bin/env python3
#
import numpy as np
import matplotlib.pyplot as plt
import platform
import time
import sys
import os
import math
from mpl_toolkits.mplot3d import Axes3D
from sys import exit
sys.path.append(os.path.join("../"))
from base import plot2d, plotocc
from timestamp.timestamp import timestamp
from i4lib.i4vec_print import i4vec_print
from i4lib.i4mat_print import i4mat_print
from r8lib.r8vec_print import r8vec_print
from r8lib.r8mat_print import r8mat_print, r8mat_print_some
from r8lib.r8mat_write import r8mat_write
from monomial.mono_between_enum import mono_between_enum_test
from monomial.mono_between_next_grevlex import mono_between_next_grevlex_test
from monomial.mono_between_next_grlex import mono_between_next_grlex_test
from monomial.mono_between_random import mono_between_random_test
from monomial.mono_next_grevlex import mono_next_grevlex_test
from monomial.mono_next_grlex import mono_next_grlex_test
from monomial.mono_print import mono_print_test
from monomial.mono_rank_grlex import mono_rank_grlex_test
from monomial.mono_total_enum import mono_total_enum_test
from monomial.mono_total_next_grevlex import mono_total_next_grevlex_test
from monomial.mono_total_next_grlex import mono_total_next_grlex_test
from monomial.mono_total_random import mono_total_random_test
from monomial.mono_unrank_grlex import mono_unrank_grlex_test
from monomial.mono_upto_enum import mono_upto_enum_test
from monomial.mono_upto_next_grevlex import mono_upto_next_grevlex_test
from monomial.mono_upto_next_grlex import mono_upto_next_grlex_test
from monomial.mono_upto_random import mono_upto_random_test
from monomial.mono_value import mono_value_test
def monomial_test():
# *****************************************************************************80
#
# MONOMIAL_TEST tests the MONOMIAL library.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 27 October 2014
#
# Author:
#
# John Burkardt
#
print('')
print('MONOMIAL_TEST')
print(' Python version: %s' % (platform.python_version()))
print(' Test the MONOMIAL library.')
mono_between_enum_test()
mono_total_enum_test()
mono_upto_enum_test()
mono_next_grevlex_test()
mono_next_grlex_test()
mono_between_next_grevlex_test()
mono_between_next_grlex_test()
mono_total_next_grevlex_test()
mono_total_next_grlex_test()
mono_upto_next_grevlex_test()
mono_upto_next_grlex_test()
mono_rank_grlex_test()
mono_unrank_grlex_test()
mono_between_random_test()
mono_total_random_test()
mono_upto_random_test()
mono_value_test()
mono_print_test()
print('')
print('MONOMIAL_TEST:')
print(' Normal end of execution.')
if (__name__ == '__main__'):
timestamp()
monomial_test()
timestamp()
|
Python
|
CL
|
3a01b451ac800031e3112891b2fcccc27674f08ebecc211f6c22311b928f3a88
|
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Apache License, version 2.0.
# If a copy of the Apache License, version 2.0 was not distributed with this file, you can obtain one at http://www.apache.org/licenses/LICENSE-2.0.
# SPDX-License-Identifier: Apache-2.0
# This file is part of hadar-simulator, a python adequacy library for everyone.
import sys
import click
from typing import List
import nbformat
import os
from nbconvert import RSTExporter
from nbconvert.preprocessors import ExecutePreprocessor
exporter = RSTExporter()
ep = ExecutePreprocessor(timeout=600, kernel_name="python3", store_widget_state=True)
def open_nb(name: str, src: str) -> nbformat:
"""
Open notebook file
:param name: name of notebook to open
:param src: source directory
:return: notebook object
"""
print("Reading...", end=" ")
nb = nbformat.read(
"{src}/{name}/{name}.ipynb".format(name=name, src=src), as_version=4
)
print("OK", end=" ")
return nb
def execute(nb: nbformat, name: str, src: str) -> nbformat:
"""
Execute notebook and store widget state.
:param nb: notebook object to execute
:param name: notebook name (for setup directory context purpose)
:param src: notebook source directory (for setup context)
:return: notebook object with computed and stored output widget state
"""
print("Executing...", end=" ")
ep.preprocess(nb, {"metadata": {"path": "%s/%s/" % (src, name)}})
print("OK", end=" ")
return nb
def copy_image(name: str, export: str, src: str):
"""
Copy images present next to notebook file to exported folder.
:param name: notebook name
:param export: export directory
:param src: source directory
:return: None
"""
src = "%s/%s" % (src, name)
dest = "%s/%s" % (export, name)
images = [f for f in os.listdir(src) if f.split(".")[-1] in ["png"]]
for img in images:
os.rename("%s/%s" % (src, img), "%s/%s" % (dest, img))
def to_export(nb: nbformat, name: str, export: str):
"""
    Export notebook into RST format.
:param nb: notebook with result state
:param name: notebook name
:param export: directory to export
:return: None
"""
print("Exporting...", end=" ")
rst, _ = exporter.from_notebook_node(nb)
path = "%s/%s" % (export, name)
if not os.path.exists(path):
os.makedirs(path)
with open("%s/%s.rst" % (path, name), "w") as f:
f.write(rst)
print("OK", end=" ")
def list_notebook(src: str) -> List[str]:
"""
    List available notebooks in the directory.
    :return: list of notebook directory names
"""
dirs = os.listdir(src)
return [
d
for d in dirs
if os.path.isfile("{src}/{name}/{name}.ipynb".format(name=d, src=src))
]
@click.command("Check and export notebooks")
@click.option("--src", nargs=1, help="Notebook directory")
@click.option("--check", nargs=1, help="check notebook according to result file given")
@click.option("--export", nargs=1, help="export notebooks to directory given")
def main(src: str, check: str, export: str):
for name in list_notebook(src):
print("{:30}".format(name), ":", end="")
nb = open_nb(name, src)
nb = execute(nb, name, src)
if check:
pass # Implement check
if export:
to_export(nb, name, export)
copy_image(name, export, src)
print("")
if __name__ == "__main__":
main()
|
Python
|
CL
|
0160a1441a243427d7edaa7bb4342008d090d4d46627a295a66bce48173b26ad
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
import xml.etree.ElementTree as ET
__all__ = ["Message", "User", "Location"]
def pop_from_etree(xml, tag):
ele = xml.find(tag)
if ele is not None:
xml.remove(ele)
return ele
return None
class DictAccess(object):
__availabe_keys__ = set()
def __init__(self, *args, **kwargs):
for key in self.__availabe_keys__:
setattr(self, key, kwargs.pop(key, None))
def __getitem__(self, key):
if key in self.__availabe_keys__:
return getattr(self, key, None)
raise AttributeError()
def __setitem__(self, key, value):
if key in self.__availabe_keys__:
return setattr(self, key, value)
raise AttributeError()
def __iter__(self):
return iter(self.__availabe_keys__)
def __contains__(self, key):
return key in self.__availabe_keys__
def __repr__(self):
d = dict((k, getattr(self, k, None)) for k in self.__availabe_keys__)
return repr(d)
def __eq__(self, value):
return (
isinstance(value, self.__class__) and
all(getattr(self, k) == getattr(value, k)
for k in self.__availabe_keys__)
)
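# Example (illustrative): a DictAccess subclass exposes dict-style access over
# a fixed key set, e.g. with __availabe_keys__ = {"x", "y"}, Point(x=1)["x"]
# returns 1 and Point(x=1)["z"] raises AttributeError.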
class Message(DictAccess):
    '''WeChat message class, covering both received and sent messages;
    events are also treated as messages.
    :ivar to_id: for a message received from a user, this is the official
        account's appid; for a message sent to a user, it is the user's openid
    :ivar from_id: for a message received from a user, this is the user's
        openid; for a message sent to a user, it is the official account's appid
    :ivar create_time: creation time of the message, an integer unix timestamp
    :ivar msg_type: message type, such as text or voice; event messages use
        "event_" plus the event type, e.g. event_LOCATION, event_VIEW
    :ivar content: the xml string formed by the message with the common header
        removed, for example:
    .. code-block:: xml
        <Location_X>39.915119</Location_X>
        <Location_Y>116.403963</Location_Y>
        <Scale>16</Scale>
        <Label><![CDATA[北京市东城区东长安街]]></Label>
    '''
__availabe_keys__ = set([
"to_id", "from_id", "create_time",
"msg_id", "msg_type", "content"])
def __init__(self, to_id, from_id, msg_type,
content, msg_id=None, create_time=None):
self.to_id = to_id
self.from_id = from_id
self.msg_id = msg_id
self.msg_type = msg_type
self.content = content
self.create_time = create_time or int(time.time())
def __str__(self):
return self.build_xml()
@classmethod
def from_string(cls, xml_str):
        '''Construct a :class:`Message` object from an xml string
        :param xml_str: xml text string
:rtype: Message
'''
if not isinstance(xml_str, bytes):
xml_str = xml_str.encode("utf8")
xml = ET.fromstring(xml_str)
to_id = pop_from_etree(xml, 'ToUserName').text
from_id = pop_from_etree(xml, 'FromUserName').text
msg_type = pop_from_etree(xml, 'MsgType').text
if msg_type == "event":
sub_type = pop_from_etree(xml, 'Event').text
msg_type = "event_%s" % sub_type
create_time = int(pop_from_etree(xml, 'CreateTime').text)
msg_id_node = pop_from_etree(xml, 'MsgId')
if msg_id_node is not None:
msg_id = int(msg_id_node.text)
else:
msg_id = None
return cls(to_id, from_id, msg_type, ET.tostring(xml).decode(),
msg_id, create_time=create_time)
def build_xml(self):
        '''Build the xml text of this message
        :returns: the WeChat xml-format message string corresponding to this message
:rtype: str
'''
texts = [
'''<xml>
<ToUserName><![CDATA[%s]]></ToUserName>
<FromUserName><![CDATA[%s]]></FromUserName>
<CreateTime>%s</CreateTime>''' % (
self.to_id,
self.from_id, self.create_time)]
if self.msg_type.startswith("event_"):
msg_type = "event"
sub_type = self.msg_type[6:]
texts.append('''<MsgType><![CDATA[%s]]></MsgType>
<Event><![CDATA[%s]]></Event>''' % (msg_type, sub_type))
else:
texts.append(
'''<MsgType><![CDATA[%s]]></MsgType>''' % self.msg_type)
if self.msg_id is not None:
texts.append("<MsgId>%s</MsgId>" % self.msg_id)
texts.append(self.content)
texts.append("</xml>")
return "\n".join(texts)
class User(DictAccess):
    '''Official account user class
    :ivar subscribe: whether the user follows this official account
    :ivar openid: the user's identifier
    :ivar nickname: the user's nickname
    :ivar sex: the user's gender; 1 means male, 2 means female,
        0 means unknown
    :ivar city: the city the user is in
    :ivar country: the country the user is in
    :ivar province: the province the user is in
    :ivar language: the user's language, zh_CN for simplified Chinese
    :ivar headimgurl: the user's avatar URL; the last number selects the
        square avatar size (0, 46, 64, 96 or 132 are available, 0 meaning a
        640*640 square avatar); empty if the user has no avatar.
        If the user changes their avatar, the original URL becomes invalid.
    :ivar subscribe_time: the time the user followed the account, as a timestamp
    :ivar unionid: only present after the user has bound the official account
        to a WeChat open-platform account
    :ivar remark: the operator's remark for this follower
    :ivar groupid: the id of the group the user belongs to
    :ivar tagid_list: the ids of the tags applied to the user, as a
        comma-separated string
    '''
__availabe_keys__ = set([
"subscribe", "openid", "nickname", "sex", "city",
"country", "province", "headimgurl", "subscribe_time",
"unionid", "remark", "groupid", "tagid_list", "language"
])
def __init__(self, _d=None, **kwargs):
if _d is None:
_d = {}
if kwargs:
_d.update(kwargs)
super(User, self).__init__(**_d)
@property
def tagids(self):
        '''Tag ids as a list of ints, e.g. ``[1,2,3]``, for which the corresponding :attr:`tagid_list` is `'1,2,3'`'''
return (
list(map(int, self.tagid_list.split(",")))
if self.tagid_list else []
)
def __setattr__(self, name, value):
if name == "tagid_list":
if isinstance(value, list):
value = ",".join(map(str, value))
elif value is not None:
value = str(value)
super(User, self).__setattr__(name, value)
def update(self, info):
        '''Update the user info'''
for key in self.__availabe_keys__:
if key in info:
val = info[key]
if val is not None and val != getattr(self, key):
setattr(self, key, val)
class Location(DictAccess):
    '''User-reported location class
    :ivar latitude: latitude of the location
    :ivar longitude: longitude of the location
    :ivar precision: accuracy of the location
    :ivar openid: openid of the user at this location
    :ivar create_time: the time the user was at this location
'''
__availabe_keys__ = set([
"latitude", "longitude", "precision",
"openid", "create_time"])
def __init__(self, latitude, longitude, precision=None,
openid=None, create_time=None):
self.latitude = latitude
self.longitude = longitude
self.precision = precision or 50
self.openid = openid
self.create_time = create_time or time.time()
|
Python
|
CL
|
dc5bb4500e3d0200208b420a95463ac3f0d8355c8a95628f6bddbb7cf6bbc62d
|
# use dig to validate DNS record then add valid records to Smokeping Target
# convert valid FQDNs from `dig -f +noall +answer` to SmokePing targets
#
# example input format
# dl.t6.lixian.vip.xunlei.com. 1783 IN CNAME t0540.sandai.net.
# t0540.sandai.net. 1783 IN A 61.147.76.41
# dl.t13.lixian.vip.xunlei.com. 1785 IN CNAME t30a14.sandai.net.
# t30a14.sandai.net. 1785 IN A 180.153.91.143
#
# Usage
# python3 smokeping-target-generator.py dig_output.txt target_output.txt
#
# 20161009
#
# To do
# update/replace on Target file
# customized node description
# make it more extensible/usable for other DNS records
import sys,logging,re
from more_itertools import unique_everseen
logging.basicConfig(level=logging.DEBUG)
def read_dig_output(filename:str) -> str:
"""convert dig output text file to string
#
"""
file_dig_output = open(filename, 'r')
dig_output = file_dig_output.read()
file_dig_output.close()
logging.debug('dig_output: \n%s', dig_output)
return dig_output
def get_cdn_nodes(string_dig_output:str)->list:
"""use regex to match URL pattern, put unique nodes into list of
dict {'nodename':'t6', 'fqdn':'dl.t6.lixian.vip.xunlei.com.'}
"""
list_nodes = []
list_nodes_raw = list(unique_everseen(re.findall(r'dl\.\w+\.lixian\.vip\.xunlei\.com', string_dig_output)))
for node_entry in list_nodes_raw:
list_nodes.append({'node_name':re.search(r'dl\.(\w+)\.lixian\.vip\.xunlei\.com',node_entry).group(1),
'fqdn':node_entry})
print(len(list_nodes), 'nodes found', )
return list_nodes
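# For the sample dig output shown in the header, get_cdn_nodes() yields entries
# such as {'node_name': 't6', 'fqdn': 'dl.t6.lixian.vip.xunlei.com'} and
# {'node_name': 't13', 'fqdn': 'dl.t13.lixian.vip.xunlei.com'}.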
def generate_target(cdn_nodes:list)->str:
"""generate SmokePing Target configuration block"""
string_target = '''+ Xunlei_lixian
menu = Xunlei lixian
title = Xunlei lixian CDN nodes\n\n'''
for cdn_nodes_entry in cdn_nodes:
target_block_template = '''++ {0}
menu = {0}
title = Xunlei lixian CDN node {0}
host = {1}\n\n'''
target_block=target_block_template.format(cdn_nodes_entry['node_name'], cdn_nodes_entry['fqdn'])
logging.debug('target_block:\n%s',target_block)
string_target += target_block
return string_target
def write_target(path_target:str, string_target:str):
"""write Target config block to file"""
file_target_output = open(path_target, 'w')
file_target_output.write(string_target)
file_target_output.close()
if __name__=='__main__':
"""
"""
write_target(sys.argv[2], generate_target(get_cdn_nodes(read_dig_output(sys.argv[1]))))
|
Python
|
CL
|
62cebdd374ec14563a12f2999581f91674d0cf69f53905427cea5370a59b005b
|
import untangle
import collections
import json
import operator
import os
import csv
import io
cube_visualstudio_path = os.path.join('..', '..', 'Contract_r')
output_folder = os.path.join('.', 'output')
def enumerable_diffs(ea, eb):
return [a for a in ea if a not in eb]
create_tables_using_friendly_names = True
stats_num_tables = None
stats_num_columns = None
stats_num_foreign_keys = None
stats_num_primary_keys = 0
stats_sql_lines_drop_tables = None
stats_sql_lines_create_tables = None
stats_sql_lines_add_foreign_keys = None
SSAS_DSV_TableDef = collections.namedtuple('SSAS_DSV_TableDef', ['dsv_table_id', 'Name', 'FriendlyName', 'DbTableName', 'QueryDefinition', 'Columns'])
SSAS_DSV_ColumnDef = collections.namedtuple('SSAS_DSV_ColumnDef', ['dsv_column_id', 'dsv_table_id', 'Name', 'FriendlyName', 'DataType', 'Description', 'AllowNull', 'Length', 'DbColumnName'])
SSAS_DSV_ForeignKeyDef = collections.namedtuple('SSAS_DSV_ForeignKeyDef', ['Name', 'Parent', 'Child', 'ParentKey', 'ChildKey', 'Description'])
SSAS_DIM_TableDef = collections.namedtuple('SSAS_DIM_TableDef', ['dim_table_id', 'ID', 'Name', 'FileName', 'UnknownMember', 'UnknownMemberName', 'Columns', 'Type', 'ErrorConfiguration', 'Hierarchies'])
SSAS_DIM_ErrorConfigurationDef = collections.namedtuple('SSAS_DIM_ErrorConfigurationDef', ['dim_table_id', 'Name', 'Value'])
SSAS_DIM_ColumnDef = collections.namedtuple('SSAS_DIM_ColumnDef', ['dim_column_id', 'dim_table_id', 'ID', 'Name', 'Usage', 'OrderBy', 'IsKeyColumn', 'KeyColumns', 'NameColumn_DataType', 'NameColumn_DataSize', 'NameColumn_NullProcessing', 'NameColumn_TableName', 'NameColumn_ColumnName'])
SSAS_DIM_KeyColumnDef = collections.namedtuple('SSAS_DIM_KeyColumnDef', ['dim_column_id', 'sort_order', 'DataType', 'TableId', 'ColumnID'])
SSAS_DIM_HierarchyDef = collections.namedtuple('SSAS_DIM_HierarchyDef', ['dim_hierarchy_id', 'dim_table_id', 'sort_order', 'ID', 'Name', 'Levels'])
SSAS_DIM_LevelDef = collections.namedtuple('SSAS_DIM_LevelDef', ['dim_hierarchy_id', 'sort_order', 'ID', 'Name', 'ColumnName'])
def ParseDimensionFiles(folder_path, dimension_files: list):
dim_table_id = 0
dim_column_id = 0
dim_hierarchy_id = 0
dim_tables_asdict = collections.OrderedDict()
for f in dimension_files:
obj = untangle.parse(os.path.join(folder_path, f))
dim_table_id += 1
dim_table_def_asdict = collections.OrderedDict()
dim_table_def_asdict['dim_table_id'] = dim_table_id
dim_table_def_asdict['ID'] = obj.Dimension.ID.cdata
dim_table_def_asdict['Name'] = obj.Dimension.Name.cdata
dim_table_def_asdict['FileName'] = f
dim_table_def_asdict['UnknownMember'] = obj.Dimension.UnknownMember.cdata if obj.Dimension.__hasattribute__('UnknownMember') else 'None'
dim_table_def_asdict['UnknownMemberName'] = obj.Dimension.UnknownMemberName.cdata if obj.Dimension.__hasattribute__('UnknownMemberName') else None
dim_columns_asdict = collections.OrderedDict()
for c in obj.Dimension.Attributes.Attribute:
dim_column_id += 1
dim_column_def_asdict = collections.OrderedDict()
dim_column_def_asdict['dim_column_id'] = dim_column_id
dim_column_def_asdict['dim_table_id'] = dim_table_id
dim_column_def_asdict['ID'] = c.ID.cdata
dim_column_def_asdict['Name'] = c.Name.cdata
dim_column_def_asdict['Usage'] = c.Usage.cdata if c.__hasattribute__('Usage') else 'Regular'
dim_column_def_asdict['OrderBy'] = c.OrderBy.cdata if c.__hasattribute__('OrderBy') else 'Name'
dim_column_def_asdict['IsKeyColumn'] =(1 if dim_column_def_asdict['Usage'] == 'Key' else 0)
dim_keycolumns_def = []
sort_order = 0
for kc in c.KeyColumns.KeyColumn:
sort_order += 1
dim_keycolumn_def_asdict = collections.OrderedDict()
dim_keycolumn_def_asdict['dim_column_id'] = dim_column_id
dim_keycolumn_def_asdict['sort_order'] = sort_order
dim_keycolumn_def_asdict['DataType'] = kc.DataType.cdata
dim_keycolumn_def_asdict['TableID'] = kc.Source.TableID.cdata
dim_keycolumn_def_asdict['ColumnID'] = kc.Source.ColumnID.cdata
dim_keycolumns_def.append(dim_keycolumn_def_asdict)
dim_column_def_asdict['KeyColumns'] = dim_keycolumns_def
dim_column_def_asdict['NameColumn_DataType'] = None
dim_column_def_asdict['NameColumn_DataSize'] = None
dim_column_def_asdict['NameColumn_NullProcessing'] = None
dim_column_def_asdict['NameColumn_TableName'] = None
dim_column_def_asdict['NameColumn_ColumnName'] = None
for nc in c.get_elements('NameColumn'):
dim_column_def_asdict['NameColumn_DataType'] = nc.DataType.cdata if nc.__hasattribute__('DataType') else None
dim_column_def_asdict['NameColumn_DataSize'] = nc.DataSize.cdata if nc.__hasattribute__('DataSize') else None
dim_column_def_asdict['NameColumn_NullProcessing'] = nc.NullProcessing.cdata if nc.__hasattribute__('NullProcessing') else None
dim_column_def_asdict['NameColumn_TableName'] = nc.Source.TableID.cdata
dim_column_def_asdict['NameColumn_ColumnName'] = nc.Source.ColumnID.cdata
dim_columns_asdict[ dim_column_def_asdict['ID'] ] = dim_column_def_asdict
dim_table_def_asdict['Columns'] = dim_columns_asdict
        dim_table_def_asdict['Type'] = obj.Dimension.Type.cdata if obj.Dimension.__hasattribute__('Type') else None
errorconfigs = []
for ec in obj.Dimension.ErrorConfiguration.children:
errorconfig = collections.OrderedDict()
errorconfig['dim_table_id'] = dim_table_id
errorconfig['Name'] = ec._name
errorconfig['Value'] = ec.cdata
errorconfigs.append(errorconfig)
dim_table_def_asdict['ErrorConfiguration'] = errorconfigs
hierarchies = []
for hs in obj.Dimension.get_elements('Hierarchies'):
h_sort_order = 0
for h in hs.Hierarchy:
h_sort_order += 1
dim_hierarchy_id += 1
dim_hierachy_def_asdict = collections.OrderedDict()
dim_hierachy_def_asdict['dim_hierarchy_id'] = dim_hierarchy_id
dim_hierachy_def_asdict['dim_table_id'] = dim_table_id
                dim_hierachy_def_asdict['sort_order'] = h_sort_order
dim_hierachy_def_asdict['ID'] = h.ID.cdata
dim_hierachy_def_asdict['Name'] = h.Name.cdata
l_sort_order = 0
levels = []
for l in h.Levels.Level:
l_sort_order += 1
                    dim_level_def_asdict = collections.OrderedDict([('dim_hierarchy_id', dim_hierarchy_id), ('sort_order', l_sort_order),
('ID', l.ID.cdata), ('Name', l.Name.cdata), ('ColumnName', l.SourceAttributeID.cdata)])
levels.append(dim_level_def_asdict)
dim_hierachy_def_asdict['Levels'] = levels
hierarchies.append(dim_hierachy_def_asdict)
dim_table_def_asdict['Hierarchies'] = hierarchies
dim_tables_asdict[f] = dim_table_def_asdict
return dim_tables_asdict
dimension_files = [f for f in os.listdir(cube_visualstudio_path) if f.lower().endswith(r'.dim') and os.path.isfile(os.path.join(cube_visualstudio_path, f))]
dimension_files.sort()
#dimension_files = ['date.dim']
dim_tables_asdict = ParseDimensionFiles(cube_visualstudio_path, dimension_files)
obj = untangle.parse( os.path.join(cube_visualstudio_path, 'Carilion.dsv') )
primary_keys = {}
foreign_keys = []
foreign_keys_asdict = []
for xe in obj.DataSourceView.Schema.xs_schema.xs_annotation.xs_appinfo.msdata_Relationship:
fkey = SSAS_DSV_ForeignKeyDef(Name=xe['name'], Parent=xe['msdata:parent'], Child=xe['msdata:child'],
ParentKey=xe['msdata:parentkey'], ChildKey=xe['msdata:childkey'], Description=xe['msprop:Description']
)
foreign_keys.append(fkey)
foreign_keys_asdict.append(fkey._asdict())
primary_keys[fkey.Parent] = fkey.ParentKey
#print(fkey)
foreign_keys.sort(key=lambda x: x.Name)
foreign_keys_asdict.sort(key=lambda x: x['Name'])
tables = []
tables_asdict = []
dsv_table_id = 0
dsv_column_id = 0
for xe in obj.DataSourceView.Schema.xs_schema.xs_element.xs_complexType.xs_choice.xs_element:
dsv_table_id += 1
cols = []
cols_asdict = []
for xcol in xe.xs_complexType.xs_sequence.xs_element:
xDescription = None
xAllowNull = 0
xLength = -1
xDataType = None
if xcol.get_attribute('msprop:Description') != None:
            xDescription = xcol['msprop:Description']
if xcol.get_attribute('minOccurs') != None:
xAllowNull = 1
if xcol.get_attribute('type') != None:
xDataType = xcol['type']
else:
xDataType = xcol.xs_simpleType.xs_restriction['base']
xLength = int(xcol.xs_simpleType.xs_restriction.xs_maxLength['value'])
# omit the leading "xs:"
xDataType = xDataType[3:]
dsv_column_id += 1
col_def = SSAS_DSV_ColumnDef(dsv_column_id=dsv_column_id, dsv_table_id=dsv_table_id, Name=xcol['name'],
FriendlyName=xcol['msprop:FriendlyName'], DataType=xDataType,
Description=xDescription, AllowNull=xAllowNull, Length=xLength, DbColumnName=xcol['msprop:DbColumnName']
)
cols.append(col_def)
cols_asdict.append(col_def._asdict())
#print(col_def)
tabdef = SSAS_DSV_TableDef(
dsv_table_id=dsv_table_id, Name=xe['name'], FriendlyName=xe['msprop:FriendlyName'], DbTableName=xe['msprop:DbTableName'],
QueryDefinition=xe['msprop:QueryDefinition'], Columns=cols
)
tabdef_asdict = SSAS_DSV_TableDef(
dsv_table_id=dsv_table_id, Name=tabdef.Name, FriendlyName=tabdef.FriendlyName, DbTableName=tabdef.DbTableName,
QueryDefinition=tabdef.QueryDefinition, Columns=cols_asdict
)
tables.append(tabdef)
tables_asdict.append(tabdef_asdict._asdict())
#print(tabdef)
tables.sort(key=lambda x: x.FriendlyName)
tables_asdict.sort(key=lambda x: x['FriendlyName'])
# tables.sort(key=lambda x: x.Name)
# tables_asdict.sort(key=lambda x: x['Name'])
with open(os.path.join(output_folder, 'tables.json'), 'w') as f:
json.dump(tables_asdict, fp=f, indent=4)
with open(os.path.join(output_folder, 'dimensions.json'), 'w') as f:
json.dump(dim_tables_asdict, fp=f, indent=4)
with open(os.path.join(output_folder, 'foreignkeys.json'), 'w') as f:
json.dump(foreign_keys_asdict, fp=f, indent=4)
datatypes = set()
for t in tables:
for c in t.Columns:
datatypes.add(c.DataType)
datatypes = list(datatypes)
datatypes.sort()
with open(os.path.join(output_folder, 'datatypes.txt'), 'w') as f:
f.write('\n'.join(datatypes))
def GetFriendlyTableName(o):
if isinstance(o, str):
if create_tables_using_friendly_names:
for t in tables:
if o == t.Name:
return t.FriendlyName
raise Exception(str.format('Could not find table with name [{0}]', o))
else:
return o
elif isinstance(o, SSAS_DSV_TableDef):
if create_tables_using_friendly_names:
return o.FriendlyName
else:
            return o.Name
print('=======================ERROR=======================')
print(o)
print('=======================ERROR=======================')
raise Exception(str.format('Unhandled object type [{0}] type [{1}]', o, type(o)))
def encode_ssas_datatype_to_sql(c):
'''
Map the SSAS data types to the SQL data types.
Based on the data type currently in use.
'''
if c.DataType == 'boolean':
return 'bit'
elif c.DataType == 'dateTime':
return 'datetime'
elif c.DataType == 'decimal':
return 'numeric(18,2)'
elif c.DataType == 'double':
return 'float'
elif c.DataType == 'duration':
return 'time(0)'
elif c.DataType == 'int':
return 'int'
elif c.DataType == 'long':
return 'bigint'
elif c.DataType == 'currency':
return 'money'
elif c.DataType == 'short':
return 'smallint'
elif c.DataType == 'unsignedByte':
return 'tinyint'
elif c.DataType == 'string':
return str.format('varchar({0})', c.Length)
else:
raise Exception(str.format('Unhandled data type [{0}]. Needs to be mapped to a SQL data type.', c.DataType))
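# Illustrative only: with the mapping above, a 'string' column whose Length is 50
# becomes varchar(50), 'decimal' becomes numeric(18,2), and any type not listed
# raises an Exception so it can be added to the mapping explicitly.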
def IsIdentityColumn(c, primary_key_column_name):
if c.DbColumnName == primary_key_column_name and c.DataType in ['long', 'int', 'short', 'unsignedByte']:
return True
else:
return False
#Build SQL create statements
sql_drop_tables = []
sql_drop_tables.append('use carilion_dw')
sql_drop_tables.append('go')
sql_drop_tables.append('\n\n\n')
sql_drop_tables.append('-- drop all foreign keys')
sql_drop_tables.append('exec dbo.usp_DropAllForeignKeyConstraints')
sql_drop_tables.append('\n\n\n')
sql_drop_tables.append('-- drop table statements')
for t in tables:
sql_drop_tables.append(str.format("drop table if exists {0}", GetFriendlyTableName(t.Name)))
sql_create_tables = []
sql_create_tables.append('use carilion_dw')
sql_create_tables.append('go')
sql_create_tables.append('\n\n\n')
sql_create_tables.append('-- create table statements')
for t in tables:
# if 'LabTestFact' in [t.Name, t.FriendlyName]:
# print('break')
if t.Name in primary_keys.keys():
primary_key_column_name = primary_keys[t.Name]
else:
primary_key_column_name = None
sql_create_tables.append(str.format("create table {0}(", GetFriendlyTableName(t.Name)))
for c in t.Columns:
sql_create_tables.append(str.format("\t{0.DbColumnName} {1} {2} {3} null,",
c,
encode_ssas_datatype_to_sql(c),
'identity(1,1)' if IsIdentityColumn(c, primary_key_column_name) else '',
'not' if c.AllowNull == 0 or IsIdentityColumn(c, primary_key_column_name) else ''
))
if primary_key_column_name != None:
sql_create_tables.append(str.format('\tCONSTRAINT PK_{0}_{1} PRIMARY KEY CLUSTERED ({1})', GetFriendlyTableName(t.Name), primary_key_column_name))
stats_num_primary_keys += 1
sql_create_tables.append(');\n\n')
sql_add_fkeys = []
sql_add_fkeys.append('use carilion_dw')
sql_add_fkeys.append('go')
sql_add_fkeys.append('\n\n\n')
sql_add_fkeys.append('-- drop all foreign keys')
sql_add_fkeys.append('exec dbo.usp_DropAllForeignKeyConstraints')
sql_add_fkeys.append('\n\n\n')
sql_add_fkeys.append('-- add foreign keys')
for fk in foreign_keys:
if fk.Parent in primary_keys.keys() and fk.ParentKey == primary_keys[fk.Parent]:
sql_add_fkeys.append(str.format("ALTER TABLE {1} ADD CONSTRAINT [{0.Name}] FOREIGN KEY ({0.ChildKey}) "
"REFERENCES {2} ({0.ParentKey});", fk, GetFriendlyTableName(fk.Child), GetFriendlyTableName(fk.Parent)))
foreign_keys.sort(key=lambda fk: (GetFriendlyTableName(fk.Parent), fk.ParentKey, GetFriendlyTableName(fk.Child), fk.ChildKey))
sql_update = []
sql_update.append('use carilion_dw')
sql_update.append('go')
sql_update.append('\n\n\n')
sql_update.append('-- fix foreign keys that are not mapped to the primary key')
for fk in foreign_keys:
#if fk.Parent in primary_keys.keys() and fk.ParentKey != primary_keys[fk.Parent]:
sql_update.append(str.format("exec dbo.usp_FixForeignKeys '{0}', '{1}', '{2}', '{3}', '{4}', '{5}'",
'dbo', GetFriendlyTableName(fk.Parent), fk.ParentKey, 'dbo', GetFriendlyTableName(fk.Child), fk.ChildKey))
sql_update.append('\n\n\n')
with open(os.path.join(output_folder, 'drop_tables.sql'), 'w') as f:
for s in sql_drop_tables:
f.write(s + '\n')
with open(os.path.join(output_folder, 'create_src_tables.sql'), 'w') as f:
for s in sql_create_tables:
f.write(s + '\n')
with open(os.path.join(output_folder, 'add_foreign_keys.sql'), 'w') as f:
for s in sql_add_fkeys:
f.write(s + '\n')
with open(os.path.join(output_folder, 'fix_foreign_keys.sql'), 'w') as f:
for s in sql_update:
f.write(s + '\n')
stats_num_tables = len(tables)
stats_num_columns = sum([len(t.Columns) for t in tables])
stats_num_foreign_keys = len(foreign_keys)
stats_sql_lines_drop_tables = len(sql_drop_tables)
stats_sql_lines_create_tables = len(sql_create_tables)
stats_sql_lines_add_foreign_keys = len(sql_add_fkeys)
with open(os.path.join(output_folder, 'stats.txt'), 'w') as f:
f.write(str.format('{0}: {1}\n', 'stats_num_tables', stats_num_tables))
f.write(str.format('{0}: {1}\n', 'stats_num_columns', stats_num_columns))
f.write(str.format('{0}: {1}\n', 'stats_num_foreign_keys', stats_num_foreign_keys))
f.write(str.format('{0}: {1}\n', 'stats_num_primary_keys', stats_num_primary_keys))
f.write(str.format('{0}: {1}\n', 'stats_sql_lines_drop_tables', stats_sql_lines_drop_tables))
f.write(str.format('{0}: {1}\n', 'stats_sql_lines_create_tables', stats_sql_lines_create_tables))
f.write(str.format('{0}: {1}\n', 'stats_sql_lines_add_foreign_keys', stats_sql_lines_add_foreign_keys))
class SqlDictWriter:
_is_first = True
_dictwriter = None
_f = None
_dialect = None
_membuf = None
_lineterminator = '\n'
_num_lines_written = 0
    # SQL Server allows at most 1000 value rows per INSERT statement, so keep
    # track of the rows written and re-emit the INSERT header at that limit.
_rewrite_header_at_line = 1000
_table_name = None
def __init__(self, f, fieldnames, restval="", extrasaction="raise",
dialect="excel", *args, **kwds):
self._membuf = io.StringIO()
self._dictwriter = csv.DictWriter(self._membuf, fieldnames, restval, extrasaction, dialect, *args, **kwds)
self._f = f
self._dialect = csv.get_dialect(dialect)
def writeheader(self, table_name):
self._membuf.seek(io.SEEK_SET)
self._membuf.truncate()
if table_name != None:
self._table_name = table_name
if self._num_lines_written == 0:
header = str.format('truncate table {0}{1}{1}', self._table_name, self._lineterminator)
self._f.write(header)
header = str.format('insert into {0}({1}) values{2}',
self._table_name,
', '.join(['[' + x + ']' for x in self._dictwriter.fieldnames]),
self._lineterminator
)
self._f.write(header)
def writerow(self, rowdict):
if self._num_lines_written > 0 and (self._num_lines_written % self._rewrite_header_at_line == 0):
self._f.write(self._lineterminator)
self.writeheader(None)
self._is_first = True
self._membuf.seek(io.SEEK_SET)
self._membuf.truncate()
self._dictwriter.writerow(rowdict)
if self._is_first:
self._f.write('(' + self._membuf.getvalue() + ')' + self._lineterminator)
else:
self._f.write(',(' + self._membuf.getvalue() + ')' + self._lineterminator)
self._is_first = False
self._num_lines_written += 1
csv.register_dialect('sql_values',
delimiter = ',',
skipinitialspace = 0,
doublequote = 1,
quoting = csv.QUOTE_NONNUMERIC,
quotechar = "'",
lineterminator = '',
escapechar = None
)
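# Illustrative usage sketch for SqlDictWriter (not executed; the table and field
# names are made up). With the 'sql_values' dialect registered above:
#
#   buf = io.StringIO()
#   w = SqlDictWriter(buf, ['id', 'name'], extrasaction='ignore', dialect='sql_values')
#   w.writeheader('[dbo].[tblExample]')
#   w.writerow({'id': 1, 'name': 'a'})
#   w.writerow({'id': 2, 'name': 'b'})
#
# buf.getvalue() would then contain:
#
#   truncate table [dbo].[tblExample]
#
#   insert into [dbo].[tblExample]([id], [name]) values
#   (1,'a')
#   ,(2,'b')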
def WriteToCsvSql_DimensionTables_Helper(dim_tables_asdict,
ftables, ferror_configs, fcolumns, fkey_columns, fhierarchies, flevels,
ftables_sql, ferror_configs_sql, fcolumns_sql, fkey_columns_sql, fhierarchies_sql, flevels_sql
):
fields = enumerable_diffs(SSAS_DIM_TableDef._fields, ['Columns', 'ErrorConfiguration', 'Hierarchies'])
csv_tables = csv.DictWriter(ftables, fields, extrasaction='ignore', dialect=csv.unix_dialect)
csv_tables.writeheader()
sql_tables = SqlDictWriter(ftables_sql, fields, extrasaction='ignore', dialect='sql_values')
sql_tables.writeheader('[dbo].[tblVS_dim_tables]')
fields = SSAS_DIM_ErrorConfigurationDef._fields
csv_error_configs = csv.DictWriter(ferror_configs, fields, extrasaction='ignore', dialect=csv.unix_dialect)
csv_error_configs.writeheader()
sql_error_configs = SqlDictWriter(ferror_configs_sql, fields, extrasaction='ignore', dialect='sql_values')
sql_error_configs.writeheader('[dbo].[tblVS_dim_error_configs]')
fields = enumerable_diffs(SSAS_DIM_ColumnDef._fields, ['KeyColumns'])
csv_columns = csv.DictWriter(fcolumns, fields, extrasaction='ignore', dialect=csv.unix_dialect)
csv_columns.writeheader()
sql_columns = SqlDictWriter(fcolumns_sql, fields, extrasaction='ignore', dialect='sql_values')
sql_columns.writeheader('[dbo].[tblVS_dim_columns]')
fields = SSAS_DIM_KeyColumnDef._fields
csv_key_columns = csv.DictWriter(fkey_columns, fields, extrasaction='ignore', dialect=csv.unix_dialect)
csv_key_columns.writeheader()
sql_key_columns = SqlDictWriter(fkey_columns_sql, fields, extrasaction='ignore', dialect='sql_values')
sql_key_columns.writeheader('[dbo].[tblVS_dim_key_columns]')
fields = enumerable_diffs(SSAS_DIM_HierarchyDef._fields, ['Levels'])
csv_hierarchies = csv.DictWriter(fhierarchies, fields, extrasaction='ignore', dialect=csv.unix_dialect)
csv_hierarchies.writeheader()
sql_hierarchies = SqlDictWriter(fhierarchies_sql, fields, extrasaction='ignore', dialect='sql_values')
sql_hierarchies.writeheader('[dbo].[tblVS_dim_hierarchies]')
fields = SSAS_DIM_LevelDef._fields
csv_levels = csv.DictWriter(flevels, fields, extrasaction='ignore', dialect=csv.unix_dialect)
csv_levels.writeheader()
sql_levels = SqlDictWriter(flevels_sql, fields, extrasaction='ignore', dialect='sql_values')
sql_levels.writeheader('[dbo].[tblVS_dim_levels]')
for tk, tv in dim_tables_asdict.items():
csv_tables.writerow(tv)
sql_tables.writerow(tv)
for ec in tv['ErrorConfiguration']:
csv_error_configs.writerow(ec)
sql_error_configs.writerow(ec)
for ck, cv in tv['Columns'].items():
csv_columns.writerow(cv)
sql_columns.writerow(cv)
for k in cv['KeyColumns']:
csv_key_columns.writerow(k)
sql_key_columns.writerow(k)
for h in tv['Hierarchies']:
csv_hierarchies.writerow(h)
sql_hierarchies.writerow(h)
for l in h['Levels']:
csv_levels.writerow(l)
sql_levels.writerow(l)
def WriteToCsv_DimensionTables(dim_tables_asdict, output_folder):
with open(os.path.join(output_folder, 'dim_tables.csv'), 'w') as ftables:
with open(os.path.join(output_folder, 'dim_error_configs.csv'), 'w') as ferror_configs:
with open(os.path.join(output_folder, 'dim_columns.csv'), 'w') as fcolumns:
with open(os.path.join(output_folder, 'dim_key_columns.csv'), 'w') as fkey_columns:
with open(os.path.join(output_folder, 'dim_hierarchies.csv'), 'w') as fhierarchies:
with open(os.path.join(output_folder, 'dim_levels.csv'), 'w') as flevels:
with open(os.path.join(output_folder, 'dim_tables.sql'), 'w') as ftables_sql:
with open(os.path.join(output_folder, 'dim_error_configs.sql'), 'w') as ferror_configs_sql:
with open(os.path.join(output_folder, 'dim_columns.sql'), 'w') as fcolumns_sql:
with open(os.path.join(output_folder, 'dim_key_columns.sql'), 'w') as fkey_columns_sql:
with open(os.path.join(output_folder, 'dim_hierarchies.sql'), 'w') as fhierarchies_sql:
with open(os.path.join(output_folder, 'dim_levels.sql'), 'w') as flevels_sql:
WriteToCsvSql_DimensionTables_Helper(dim_tables_asdict,
ftables, ferror_configs, fcolumns, fkey_columns, fhierarchies, flevels,
ftables_sql, ferror_configs_sql, fcolumns_sql, fkey_columns_sql, fhierarchies_sql, flevels_sql
)
WriteToCsv_DimensionTables(dim_tables_asdict, output_folder)
def WriteToCsvSql_DsvTables_Helper(tables_asdict, foreign_keys_asdict,
ftables, fcolumns, fforeign_keys,
ftables_sql, fcolumns_sql, fforeign_keys_sql
):
fields = enumerable_diffs(SSAS_DSV_TableDef._fields, ['Columns'])
csv_tables = csv.DictWriter(ftables, fields, extrasaction='ignore', dialect=csv.unix_dialect)
csv_tables.writeheader()
sql_tables = SqlDictWriter(ftables_sql, fields, extrasaction='ignore', dialect='sql_values')
sql_tables.writeheader('[dbo].[tblVS_dsv_tables]')
fields = SSAS_DSV_ColumnDef._fields
csv_columns = csv.DictWriter(fcolumns, fields, extrasaction='ignore', dialect=csv.unix_dialect)
csv_columns.writeheader()
sql_columns = SqlDictWriter(fcolumns_sql, fields, extrasaction='ignore', dialect='sql_values')
sql_columns.writeheader('[dbo].[tblVS_dsv_columns]')
fields = SSAS_DSV_ForeignKeyDef._fields
csv_foreign_keys = csv.DictWriter(fforeign_keys, fields, extrasaction='ignore', dialect=csv.unix_dialect)
csv_foreign_keys.writeheader()
sql_foreign_keys = SqlDictWriter(fforeign_keys_sql, fields, extrasaction='ignore', dialect='sql_values')
sql_foreign_keys.writeheader('[dbo].[tblVS_dsv_foreign_keys]')
for fk in foreign_keys_asdict:
csv_foreign_keys.writerow(fk)
sql_foreign_keys.writerow(fk)
for t in tables_asdict:
csv_tables.writerow(t)
sql_tables.writerow(t)
for c in t['Columns']:
csv_columns.writerow(c)
sql_columns.writerow(c)
def WriteToCsvSql_DsvTables(tables_asdict, foreign_keys_asdict, output_folder):
with open(os.path.join(output_folder, 'dsv_tables.csv'), 'w') as ftables:
with open(os.path.join(output_folder, 'dsv_columns.csv'), 'w') as fcolumns:
with open(os.path.join(output_folder, 'dsv_foreign_keys.csv'), 'w') as fforeign_keys:
with open(os.path.join(output_folder, 'dsv_tables.sql'), 'w') as ftables_sql:
with open(os.path.join(output_folder, 'dsv_columns.sql'), 'w') as fcolumns_sql:
with open(os.path.join(output_folder, 'dsv_foreign_keys.sql'), 'w') as fforeign_keys_sql:
WriteToCsvSql_DsvTables_Helper(tables_asdict, foreign_keys_asdict,
ftables, fcolumns, fforeign_keys,
ftables_sql, fcolumns_sql, fforeign_keys_sql
)
WriteToCsvSql_DsvTables(tables_asdict, foreign_keys_asdict, output_folder)
|
Python
|
CL
|
1f5cfa35126402e58b2655d098ee7637981ad2e23610c626baa7f43b40b494ae
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import gym
import matplotlib.pyplot as plt
# @misc{pytorchrl,
# author = {Kostrikov, Ilya},
# title = {PyTorch Implementations of Reinforcement Learning Algorithms},
# year = {2018},
# publisher = {GitHub},
# journal = {GitHub repository},
# howpublished = {\url{https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail}},
# }
RENDER = False
# use the categorical distribution class from torch.distributions
FixedCategorical = torch.distributions.Categorical
# redefine the member functions of the original distribution class
old_sample = FixedCategorical.sample
FixedCategorical.sample = lambda self:old_sample(self).unsqueeze(-1)
log_prob_cat = FixedCategorical.log_prob
FixedCategorical.log_probs=lambda self, actions:log_prob_cat(self,actions.squeeze(-1)).view(actions.size(0),-1).sum(-1).unsqueeze(-1)
FixedCategorical.mode = lambda self:self.probs.argmax(dim=-1,keepdim=True)
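# Illustrative sketch of the patched behaviour (shapes assumed): with logits of
# shape [1, n_actions], dist.sample() now returns an action of shape [1, 1]
# instead of [1], and dist.log_probs(action) returns the summed log-probability
# with shape [1, 1], matching the [batch, 1] return tensors used below.
#   dist = FixedCategorical(logits=torch.zeros(1, 2))
#   a = dist.sample()        # shape [1, 1]
#   lp = dist.log_probs(a)   # shape [1, 1]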
# sample with the current policy to generate training data
class Sample():
def __init__(self,env, policy_net):
self.env = env
self.policy_net=policy_net
self.gamma = 0.98
def sample_episodes(self, num_episodes):
        # generate num_episodes trajectories
batch_obs=[]
batch_actions=[]
batch_rs =[]
for i in range(num_episodes):
observation = self.env.reset()
            # store the rewards of one episode
reward_episode = []
while True:
if RENDER:self.env.render()
                # produce an action from the policy network
state = np.reshape(observation,[1,4])
                # convert the numpy data to a torch tensor
state = torch.from_numpy(state)
state = torch.as_tensor(state, dtype=torch.float32)
action = self.policy_net.act(state)
action = action.numpy()[0, 0]
observation_, reward, done, info = self.env.step(action)
batch_obs.append(observation)
batch_actions.append(action)
reward_episode.append(reward)
                # the episode has ended
if done:
                    # compute the discounted returns
reward_sum = 0
discouted_sum_reward = np.zeros_like(reward_episode)
for t in reversed(range(0, len(reward_episode))):
reward_sum = reward_sum*self.gamma + reward_episode[t]
discouted_sum_reward[t] = reward_sum
                    # normalize the discounted returns
discouted_sum_reward -= np.mean(discouted_sum_reward)
discouted_sum_reward/= np.std(discouted_sum_reward)
#discouted_sum_reward+=0.05
                    # store the normalized returns in the batch
for t in range(len(reward_episode)):
batch_rs.append(discouted_sum_reward[t])
break
                # the agent advances one step
observation = observation_
        # store observations and returns
batch_obs = np.reshape(batch_obs, [len(batch_obs), self.policy_net.n_features])
batch_actions = np.reshape(batch_actions,[len(batch_actions),])
batch_rs = np.reshape(batch_rs,[len(batch_rs),])
        # convert the data to torch tensors
batch_obs = torch.as_tensor(batch_obs, dtype=torch.float32)
batch_actions = torch.as_tensor(batch_actions,dtype=torch.float32)
batch_rs = torch.as_tensor(batch_rs,dtype=torch.float32)
batch_rs = batch_rs.view(-1,1)
return batch_obs, batch_actions, batch_rs
# build the basic linear (MLP) layer
class MLPBase(nn.Module):
def __init__(self,num_inputs, hidden_size=20):
super(MLPBase,self).__init__()
self.hidden_size = hidden_size
self.linear_1 = nn.Linear(num_inputs,hidden_size)
nn.init.normal_(self.linear_1.weight,mean=0,std=0.1)
nn.init.constant_(self.linear_1.bias,0.1)
#self.actor = nn.Sequential(nn.Linear(num_inputs,hidden_size),nn.ReLU())
def forward(self,inputs):
x = inputs
x = self.linear_1(x)
hidden_actor = F.relu(x)
return hidden_actor
@property
def output_size(self):
return self.hidden_size
# build the distribution layer
class Categorical(nn.Module):
def __init__(self,num_inputs,num_outputs):
super(Categorical,self).__init__()
self.linear_2 = nn.Linear(num_inputs,num_outputs)
nn.init.normal_(self.linear_2.weight,mean=0,std=0.1)
nn.init.constant_(self.linear_2.bias, 0.1)
def forward(self,x):
x = self.linear_2(x)
return FixedCategorical(logits=x)
# define the policy and related operations
class Policy(nn.Module):
def __init__(self, env, model_file=None):
super(Policy,self).__init__()
self.learning_rate = 0.01
        # dimension of the input features
self.n_features = env.observation_space.shape[0]
print(self.n_features)
        # dimension of the action space
self.n_actions = env.action_space.n
        # define the forward neural network model
        # 1.1 feature extraction for the actor network
self.base=MLPBase(self.n_features)
        # 1.2 action distribution, create a class instance
self.dist = Categorical(self.base.output_size,self.n_actions)
def act(self, inputs, deterministic=False):
        # output the action features
actor_features = self.base(inputs)
        # output a distribution (an nn.Module instance)
dist = self.dist(actor_features)
if deterministic:
action = dist.mode()
else:
action = dist.sample()
action_log_probs = dist.log_probs(action)
return action
def evaluate_actions(self,inputs,action):
        # output the action feature layer
actor_features = self.base(inputs)
        # output the distribution layer
dist = self.dist(actor_features)
# print("概率分布:", dist.probs)
# print("action", action)
        # compute the log-probability of the current action
action_log_probs = dist.log_probs(action)
        # compute the cross entropy of the current action
        # print("probability distribution:", dist.probs)
# print("action",action)
# action=action.long()
# loss1 = nn.CrossEntropyLoss()
# cross_entropy = loss1(dist.probs, action)
# print("交叉熵",cross_entropy)
# return cross_entropy
return action_log_probs
# policy training method
class Policy_Gradient():
def __init__(self,actor, lr = 0.01):
        # 1. define the network model
self.actor_net = actor
        # 2. define the optimizer
self.optimizer = optim.Adam(self.actor_net.parameters(),lr = lr)
def update(self,obs_batch,actions_batch,reward_batch):
#obs_batch, actions,reward_batch,= rollouts
# print("action",actions_batch)
#action_cross_entropy = self.actor_net.evaluate_actions(obs_batch,actions_batch)
action_log_probs = -self.actor_net.evaluate_actions(obs_batch, actions_batch)
#print("action_log_probs",action_log_probs)
#reward_batch=reward_batch.view(-1,1)
#print("reward",reward_batch)
        # take the mean of the loss
#loss =(action_cross_entropy*reward_batch).mean()
loss_1 = action_log_probs*reward_batch
#print("loss_1",loss_1.mean())
loss = (action_log_probs * reward_batch).mean()
#print("loss",action_log_probs*reward_batch)
#print("loss",loss)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
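    # Note (added for clarity): evaluate_actions returns log pi(a|s) and update()
    # negates it, so the minimized loss is -mean(log pi(a|s) * G_t), i.e. the
    # standard REINFORCE objective weighted by the normalized discounted return.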
# policy training
def policy_train(env, alg, training_num):
reward_sum = 0.0
reward_sum_line = []
training_time = []
for i in range(training_num):
sampler = Sample(env, alg.actor_net)
temp = 0
training_time.append(i)
        # sample episodes with the current policy (one per update here)
train_obs, train_actions, train_rs = sampler.sample_episodes(1)
        # use the sampled data for a gradient update
#print("train_rs",train_rs)
alg.update(train_obs,train_actions,train_rs)
# print("current loss is %f"%loss)
if i == 0:
reward_sum = policy_test(env, alg.actor_net,False,1)
else:
reward_sum = 0.9 * reward_sum + 0.1 * policy_test(env, alg.actor_net,False, 1)
# print(policy_test(env, brain,False,1))
reward_sum_line.append(reward_sum)
print(reward_sum)
print("training episodes is %d,trained reward_sum is %f" % (i, reward_sum))
if reward_sum > 199:
break
#brain.save_model('./current_bset_pg_cartpole')
plt.plot(training_time, reward_sum_line)
plt.xlabel("training number")
plt.ylabel("score")
plt.show()
def policy_test(env, actor, render, test_num):
for i in range(test_num):
observation = env.reset()
reward_sum = 0
        # accumulate the reward of one episode
while True:
if render: env.render()
            # produce an action from the policy network
state = np.reshape(observation, [1, 4])
state=torch.as_tensor(state,dtype=torch.float32)
action = actor.act(state,deterministic=True)
action = action.numpy()[0, 0]
observation_, reward, done, info = env.step(action)
reward_sum += reward
if done:
break
observation = observation_
return reward_sum
if __name__=='__main__':
    # environment name
env_name = 'CartPole-v0'
    # create the gym environment
env = gym.make(env_name)
env.unwrapped
env.seed(1)
    # load the current best model
# brain = Policy_Net(env,'./current_bset_pg_cartpole')
    # instantiate the policy network
actor_1 = Policy(env)
# observation = env.reset()
# done = False
# env.render()
# while(done==False):
# env.render()
# state = np.reshape(observation,[1,4])
# state = torch.as_tensor(state, dtype=torch.float32)
# action = actor_net.act(state)
# action = action.numpy()[0,0]
# print("action",action)
# observation_, reward, done, info = env.step(action)
    # store the rewards of one episode
    # instantiate the policy-gradient trainer
pg = Policy_Gradient(actor_1,lr=0.01)
    # number of training iterations
training_num = 15000
    # train the policy network
policy_train(env, alg=pg, training_num=training_num)
    # test the policy network on 10 randomly generated initial states
reward_sum = policy_test(env, actor_1, True, 10)
|
Python
|
CL
|
88b0dbc06f936dc94c4293e048aae50a08994a5dc648b5f9e9d63ae218a6da29
|
from get_eigen_space import get_eigen_space
from datagen import get_face94_male
import pickle
import os
from test import test
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file_path', help="file path to image data", type=str, default='data/faces94/male/')
parser.add_argument('-e', '--epsilon', help='tolerance value for svd update', type=float, default=0.3)
parser.add_argument('-n', '--num_image', help='number of images per class', type = int, default=3)
parser.add_argument('-ntr', '--num_train', help='number of training sample per object', type=int, default=2)
args = parser.parse_args()
file_path = args.file_path
eps = args.epsilon
num_image = args.num_image
num_train = args.num_train
print("Getting Data")
data = get_face94_male(file_path, num_image)
print("Getting eigenfaces")
eigen = get_eigen_space(data, eps, num_train)
U, sigma, V, wt = eigen.eigenSpaceUpdate()
fts = ['U', 'sigma', 'V', 'wt']
ft_dict = dict(zip(fts, [U, sigma, V, wt]))
    print('Testing on test set')
test = test(data, ft_dict.values(), num_train)
acc = test.test()
print("Test accuracy at eps {} = {}".format(eps, acc))
if not os.path.exists('data/face94ft'):
os.makedirs('data/face94ft/')
with open('data/face94ft/feat{}.pickle'.format(eps), 'wb') as f:
pickle.dump(ft_dict, f, protocol=pickle.HIGHEST_PROTOCOL)
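# Example invocation (the script name and data layout are assumptions based on
# the argparse defaults above):
#   python main.py -f data/faces94/male/ -e 0.3 -n 3 -ntr 2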
|
Python
|
CL
|
92406f51e7073c024d7b20aff8ed83e1550cfb79be9ad01ef97fc34f0b90a3f7
|
# -*- coding: utf-8 -*-
import json
import os
import unittest
from configparser import ConfigParser
from unittest.mock import Mock
from installed_clients.WorkspaceClient import Workspace
from kb_ModelIndexer.kb_ModelIndexerImpl import kb_ModelIndexer
from kb_ModelIndexer.kb_ModelIndexerServer import MethodContext
class kb_ModelIndexerTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
token = os.environ.get('KB_AUTH_TOKEN', None)
config_file = os.environ.get('KB_DEPLOYMENT_CONFIG', None)
cls.cfg = {}
config = ConfigParser()
config.read(config_file)
for nameval in config.items('kb_ModelIndexer'):
cls.cfg[nameval[0]] = nameval[1]
cls.cfg['workspace-admin-token'] = token
# Getting username from Auth profile for token
# authServiceUrl = cls.cfg['auth-service-url']
# auth_client = _KBaseAuth(authServiceUrl)
# user_id = auth_client.get_user(token)
user_id = 'bogus'
# WARNING: don't call any logging methods on the context object,
# it'll result in a NoneType error
cls.ctx = MethodContext(None)
cls.ctx.update({'token': token,
'user_id': user_id,
'provenance': [
{'service': 'kb_ModelIndexer',
'method': 'please_never_use_it_in_production',
'method_params': []
}],
'authenticated': 1})
cls.wsURL = cls.cfg['workspace-url']
cls.wsClient = Workspace(cls.wsURL)
cls.serviceImpl = kb_ModelIndexer(cls.cfg)
cls.scratch = cls.cfg['scratch']
cls.test_dir = os.path.dirname(os.path.abspath(__file__))
cls.mock_dir = os.path.join(cls.test_dir, 'mock_data')
cls.wsinfo = cls.read_mock('get_workspace_info.json')
cls.mediaobj = cls.read_mock('media_object.json')
cls.fbamodelobj = cls.read_mock('fbamodel_object.json')
cls.gensubobj = cls.read_mock('genome_sub_object.json')
cls.schema_dir = cls.cfg['schema-dir']
cls.params = {'upa': '1/2/3'}
cls.serviceImpl.indexer.ws.get_objects2 = Mock()
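        # Note: get_objects2 is mocked here, so each test sets return_value for
        # single-fetch indexers, or side_effect (a list of objects) when an
        # indexer issues consecutive workspace calls.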
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'wsName'):
cls.wsClient.delete_workspace({'workspace': cls.wsName})
print('Test workspace was deleted')
@classmethod
def read_mock(cls, filename):
with open(os.path.join(cls.mock_dir, filename)) as f:
obj = json.loads(f.read())
return obj
def getWsClient(self):
return self.__class__.wsClient
def _validate(self, sfile, data):
with open(os.path.join(self.schema_dir, sfile)) as f:
d = f.read()
schema = json.loads(d)
for key in schema['schema'].keys():
self.assertIn(key, data)
def _validate_features(self, sfile, data, plist):
with open(os.path.join(self.schema_dir, sfile)) as f:
d = f.read()
feature = data['features'][0]
parent = data['parent']
schema = json.loads(d)
for key in schema['schema'].keys():
if key in plist:
self.assertIn(key, parent)
else:
self.assertIn(key, feature)
# NOTE: According to Python unittest naming rules test method names should start from 'test'. # noqa
def test_media_index_1(self):
self.serviceImpl.indexer.ws.get_objects2.return_value = self.mediaobj
self.serviceImpl.indexer.ws.get_objects2.side_effect = None
ret = self.serviceImpl.media_index(self.ctx, self.params)
self.assertIsNotNone(ret[0])
self.assertIn('data', ret[0])
self.assertIn('schema', ret[0])
self._validate('media_schema.json', ret[0]['data'])
ret = self.serviceImpl.media_compound_index(self.ctx, self.params)
self.assertIsNotNone(ret[0])
self.assertIn('features', ret[0])
self.assertIn('schema', ret[0])
self._validate_features('media_compound_schema.json', ret[0], [])
def test_media_index_2(self):
m2 = self.read_mock('media2_object.json')
self.serviceImpl.indexer.ws.get_objects2.return_value = m2
self.serviceImpl.indexer.ws.get_objects2.side_effect = None
ret = self.serviceImpl.media_index(self.ctx, self.params)
self.assertIsNotNone(ret[0])
self.assertIn('data', ret[0])
self.assertIn('schema', ret[0])
self._validate('media_schema.json', ret[0]['data'])
ret = self.serviceImpl.media_compound_index(self.ctx, self.params)
self.assertIsNotNone(ret[0])
self.assertIn('features', ret[0])
self.assertIn('schema', ret[0])
self._validate_features('media_compound_schema.json', ret[0], [])
def test_fbamodel_index(self):
self.serviceImpl.indexer.ws.get_objects2.return_value = self.fbamodelobj
self.serviceImpl.indexer.ws.get_objects2.side_effect = [self.fbamodelobj, self.gensubobj]
ret = self.serviceImpl.fbamodel_index(self.ctx, self.params)
self.assertIsNotNone(ret[0])
self.assertIn('data', ret[0])
self.assertIn('schema', ret[0])
self._validate('fbamodel_schema.json', ret[0]['data'])
def test_modelcompound_index(self):
self.serviceImpl.indexer.ws.get_objects2.return_value = self.fbamodelobj
self.serviceImpl.indexer.ws.get_objects2.side_effect = None
ret = self.serviceImpl.modelcompound_index(self.ctx, self.params)
self.assertIsNotNone(ret[0])
self.assertIn('features', ret[0])
self.assertIn('schema', ret[0])
self._validate_features('modelcompound_schema.json', ret[0], [])
def test_modelreaction_index(self):
self.serviceImpl.indexer.ws.get_objects2.return_value = self.fbamodelobj
self.serviceImpl.indexer.ws.get_objects2.side_effect = None
ret = self.serviceImpl.modelreaction_index(self.ctx, self.params)
self.serviceImpl.indexer.ws.get_objects2.side_effect = None
self.assertIsNotNone(ret[0])
self.assertIn('features', ret[0])
self.assertIn('schema', ret[0])
self._validate_features('modelreaction_schema.json', ret[0], [])
def test_modelreactionproteinsubunit_index(self):
self.serviceImpl.indexer.ws.get_objects2.return_value = self.fbamodelobj
self.serviceImpl.indexer.ws.get_objects2.side_effect = None
ret = self.serviceImpl.modelreactionproteinsubunit_index(self.ctx, self.params)
self.assertIsNotNone(ret[0])
self.assertIn('features', ret[0])
self.assertIn('schema', ret[0])
schema_file = 'modelreactionproteinsubunit_schema.json'
self._validate_features(schema_file, ret[0], [])
|
Python
|
CL
|
ea0c3ec77f98e267c3b21330bd331eb487a6a7234ca758032da3557b08bd405c
|
import os
import sys
import logging
import argparse
sys.path.append("libs")
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from utils import Init_logging
from cvae import CollaborativeVAE
from data import CVaeDataGenerator
from pretrain_vae import get_lp_vae
from layers import binary_crossentropy
class Params():
def __init__(self):
self.A = 1.
self.B = 0.01
self.lambda_U = 0.1
self.lambda_V = 10
self.lambda_W = 2e-4
self.max_iter = 1
self.min_iter = 1
if __name__ == '__main__':
### Parse the console arguments.
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", type=str, choices=["citeulike-a"],
help="use which dataset for experiment")
parser.add_argument("--p_value", type=int, default=1,
help="num of interactions available during the training process")
parser.add_argument("--pretrained_path", type=str, default=None,
help="path where the weights pretrained vae are stored")
parser.add_argument("--batch_size", type=int, default=128,
help="batch size for updating vae in the EM like optimization")
parser.add_argument("--epochs", type=int, default=100,
help="epochs for the EM like optimization of cvae")
parser.add_argument("--device" , type=str, default="0",
help="use which GPU device")
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.device
### Set up the tensorflow session.
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
K.set_session(sess)
### Fix the random seeds.
np.random.seed(0)
tf.set_random_seed(0)
### Initialize the logging.
Init_logging()
### Setup the pretrained vae model.
content_path = "data/{}/mult_nor.npy".format(args.dataset)
feature_dim = np.load(content_path).shape[-1]
pretrained_vae = get_lp_vae(feature_dim)
if args.pretrained_path is None:
pretrained_path = "models/{}/pretrained/weights.h5".format(args.dataset)
else:
pretrained_path = args.pretrained_path
pretrained_vae.load_weights(pretrained_path)
    ### Get the data generator.
user_record_path = "data/{}/cf-train-{}-users.dat".format(args.dataset, args.p_value)
item_record_path = "data/{}/cf-train-{}-items.dat".format(args.dataset, args.p_value)
cvae_datagen = CVaeDataGenerator(
user_record_path = user_record_path,
item_record_path = item_record_path,
content_path = content_path,
latent_size = pretrained_vae.latent_size,
batch_size = args.batch_size,
params = Params(), shuffle = True
)
### Set up the collaborative vae model.
cvae_model = CollaborativeVAE(
cvae_datagen = cvae_datagen,
cvae_loss = binary_crossentropy,
learning_rate = 0.001,
pretrained_vae = pretrained_vae
)
### Use predict_on_batch or memory will leak.
init_item_embeddings = cvae_model.encoder.predict_on_batch(cvae_datagen.contents)
cvae_datagen.update_V(init_item_embeddings)
cvae_datagen.update_Theta(init_item_embeddings)
    ### Set up the output paths for saving embeddings and weights.
save_root = os.path.join("models", args.dataset, "pvalue_{}".format(args.p_value))
embedding_fmt = os.path.join(save_root, "embeddings", "epoch_{}")
weight_fmt = os.path.join(save_root, "cvae", "epoch_{}")
### Train the cvae model in an EM manner.
for epoch in np.arange(args.epochs):
logging.info("-"*10 + "Epoch: {}".format(epoch) + "-"*10)
cvae_model.update_vae()
neg_loglld = cvae_model.update_embeddings()
logging.info("PMF step. Neg log-likelihood: {}".format(neg_loglld))
if epoch >= 50 and (epoch+1) % 50 == 0:
embedding_root = embedding_fmt.format(epoch+1)
weight_root = weight_fmt.format(epoch+1)
if not os.path.exists(embedding_root):
os.makedirs(embedding_root)
if not os.path.exists(weight_root):
os.makedirs(weight_root)
cvae_model.save_embeddings(embedding_root, save_mat=True)
cvae_model.vae.save_weights(os.path.join(weight_root, "weights.h5"))
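# Example invocation (the script name is an assumption; data and model paths
# follow the defaults used above):
#   python train_cvae.py --dataset citeulike-a --p_value 1 --batch_size 128 --epochs 100 --device 0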
|
Python
|
CL
|
577290b6880d6a8d2578c6d4e2ce0315bc84800c02a9956503a2d894d7438c5c
|
"""
The long short term Memory (LSTM) neural network, which can capture temporal information,
has a strong ability to capture long information with its own Memory attributes
"""
# text classification with TensorFlow (a bag-of-words model is used below)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import jieba
import pandas as pd
import argparse
import sys
import tensorflow as tf
import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow.contrib.layers.python.layers import encoders
from sklearn import metrics
data_file_technology = pd.read_csv("D:technology_news.csv", encoding='utf-8')
data_file_technology = data_file_technology.dropna()
data_file_car = pd.read_csv("D:car_news.csv", encoding='utf-8')
data_file_car = data_file_car.dropna()
data_file_entertainment = pd.read_csv("D:entertainment_news.csv", encoding='utf-8')
data_file_entertainment = data_file_entertainment.dropna()
data_file_military = pd.read_csv("D:military_news.csv", encoding='utf-8')
data_file_military = data_file_military.dropna()
data_file_sports = pd.read_csv("D:sports_news.csv", encoding='utf-8')
data_file_sports = data_file_sports.dropna()
technology = data_file_technology.content.values.tolist()[1000:21000]
car = data_file_car.content.values.tolist()[1000:21000]
entertainment = data_file_entertainment.content.values.tolist()[2000:22000]
military = data_file_military.content.values.tolist()[2000:22000]
sports = data_file_sports.content.values.tolist()[:20000]
# load the stopwords
stopwords = pd.read_csv("D:stopwords.txt", index_col=False, quoting=3,
sep="\t", names=['stopword'], encoding='utf-8')
stopwords = stopwords['stopword'].values
# preprocessing data
def preprocess_text(content_lines, sentences, category):
for line in content_lines:
try:
segments = jieba.lcut(line)
segments = filter(lambda x: len(x) > 1, segments)
segments = filter(lambda x: x not in stopwords, segments)
sentences.append(" ".join(segments), category)
except (OSError, TypeError) as reason:
print("the error info is:", str(reason))
print(line)
continue
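# Note: each entry appended above is a (segmented_text, label) tuple, which is
# what the zip(*sentences) unpacking into text and label below expects.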
# generate unsupervised train data
sentences = []
preprocess_text(technology, sentences, 'technology')
preprocess_text(car, sentences, 'car')
preprocess_text(entertainment, sentences, 'entertainment')
preprocess_text(military, sentences, 'military')
preprocess_text(sports, sentences, 'sports')
# Unzip the (text, label) pairs, then split into training and test sets with sklearn's train_test_split
text, label = zip(*sentences)
train_data, test_data, train_target, test_target = train_test_split(text, label, random_state=1234)
learn = tf.contrib.learn
FLAGS = None
# the longest length of document
MAX_DOCUMENT_LENGTH = 15
# the minimum frequency of words
MIN_WORD_FREQUENCY = 1
# the word embedding dimension
EMBEDDING_SIZE = 50
global n_words
# process the vocabulary
vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH, min_frequency=MIN_WORD_FREQUENCY)
text_train = np.array(list(vocab_processor.fit_transform(train_data)))
text_test = np.array(list(vocab_processor.transform(test_data)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
def bag_of_words_model(features, target):
""" first turn into bag of words model """
target = tf.one_hot(target, 15, 1, 0)
features = encoders.bow_encoder(features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
logits = tf.contrib.layers.fully_connected(features, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
train_operation = tf.contrib.layers.optimize_loss(loss, tf.contrib.framework.get_global_step(),
optimizer='Adam', learning_rate=0.01)
return({'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)}, loss, train_operation)
category_dictionary = {'technology': 1, 'car': 2, 'entertainment': 3, 'military': 4, 'sports': 5}
train_target = list(map(lambda x: category_dictionary[x], train_target))
test_target = list(map(lambda x: category_dictionary[x], test_target))
label_train = pd.Series(train_target)
label_test = pd.Series(test_target)
model = bag_of_words_model
classifier = learn.SKCompat(learn.Estimator(model_fn=model))
# train and predict
classifier.fit(text_train, label_train, steps=1000)
label_predicted = classifier.predict(text_test)['class']
score = metrics.accuracy_score(label_test, label_predicted)
print('Accuracy: {0:f}'.format(score))
|
Python
|
CL
|
d752c02071c581eaad4c552bd3fffe5f32aaf836b50ca1b93b4340d14c5d1e3b
|
"""ProjectAuto URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from AppAuto import views
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$',views.Index.as_view(),name='index'),
url(r'^cars/',include('AppAuto.urls')),
url(r'^email-sent/$',views.email_sent, name='email_sent'),
url(r'^about/$',views.about.as_view(),name='about'),
url(r'^contact/$',views.contact,name='contact'),
url(r'^bob/$',views.bob.as_view(),name='bob'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
import debug_toolbar
urlpatterns = [url(r'^__debug__/', include(debug_toolbar.urls)), ] + urlpatterns
|
Python
|
CL
|
cb32cf4feb09bba132e1f49b9ba21ede35f99ca31191ffcd44dd86a9015d903f
|
""" Useful tools. Stolen from here: https://github.com/Swall0w/torchstat"""
import numpy as np
import torch
import torch.nn as nn
def compute_flops(module, inp, out):
if isinstance(module, nn.Conv1d):
return compute_Conv1d_flops(module, inp[0], out)
if isinstance(module, nn.Conv2d):
return compute_Conv2d_flops(module, inp[0], out)
if isinstance(module, nn.Conv3d):
return compute_Conv3d_flops(module, inp[0], out)
elif isinstance(module, nn.BatchNorm1d):
return compute_BatchNorm1d_flops(module, inp[0], out)
elif isinstance(module, (nn.BatchNorm2d, nn.LayerNorm, nn.GroupNorm, nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d)):
return compute_BatchNorm2d_flops(module, inp[0], out)
elif isinstance(module, nn.BatchNorm3d):
return compute_BatchNorm3d_flops(module, inp[0], out)
elif isinstance(
module, (nn.AvgPool2d, nn.MaxPool2d, nn.AdaptiveAvgPool2d, nn.AdaptiveMaxPool2d)
):
return compute_Pool2d_flops(module, inp[0], out)
elif isinstance(module, (nn.ReLU, nn.ReLU6, nn.PReLU, nn.ELU, nn.LeakyReLU, nn.GELU, nn.CELU, nn.SELU)):
return compute_ReLU_flops(module, inp[0], out)
elif isinstance(module, nn.Upsample):
return compute_Upsample_flops(module, inp[0], out)
elif isinstance(module, nn.Linear):
return compute_Linear_flops(module, inp[0], out)
elif isinstance(module, nn.Dropout):
return cat_out(0, inp[0], out)
elif isinstance(module, nn.Sigmoid):
return cat_out(0, inp[0], out)
elif isinstance(module, nn.Hardtanh):
return cat_out(0, inp[0], out)
elif isinstance(module, nn.Hardswish):
return compute_Hardswich_flops(module, inp[0], out)
elif isinstance(module, nn.Hardsigmoid):
return compute_Hardsigmoid_flops(module, inp[0], out)
elif isinstance(module, nn.Identity):
return cat_out(0, inp[0], out)
else:
print("Op {} is not supported at now, set FLOPs of it to zero.".format(module.__class__.__name__))
return cat_out(0, inp[0], out)
pass
def cat_out(total_flops, inp, out):
in_size_list = [-1, -1, -1, -1, -1]
out_size_list = [-1, -1, -1, -1, -1]
for idx, val in enumerate(inp.size()[1:]):
in_size_list[idx] = val
for idx, val in enumerate(out.size()[1:]):
out_size_list[idx] = val
return [total_flops] + in_size_list + out_size_list
def compute_Conv1d_flops(module, inp, out):
# Can have multiple inputs, getting the first one
assert isinstance(module, nn.Conv1d)
assert len(inp.size()) == 3 and len(inp.size()) == len(out.size())
batch_size = inp.size()[0]
in_c = inp.size()[1]
k_h = module.kernel_size[0]
out_c, out_h = out.size()[1:]
groups = module.groups
filters_per_channel = out_c // groups
conv_per_position_flops = k_h * in_c * filters_per_channel
active_elements_count = batch_size * out_h
total_conv_flops = conv_per_position_flops * active_elements_count
bias_flops = 0
if module.bias is not None:
bias_flops = out_c * active_elements_count
total_flops = total_conv_flops + bias_flops
return cat_out(total_flops, inp, out)
def compute_Conv2d_flops(module, inp, out):
# Can have multiple inputs, getting the first one
assert isinstance(module, nn.Conv2d)
assert len(inp.size()) == 4 and len(inp.size()) == len(out.size())
batch_size = inp.size()[0]
in_c = inp.size()[1]
k_h, k_w = module.kernel_size
out_c, out_h, out_w = out.size()[1:]
groups = module.groups
filters_per_channel = out_c // groups
conv_per_position_flops = k_h * k_w * in_c * filters_per_channel
active_elements_count = batch_size * out_h * out_w
total_conv_flops = conv_per_position_flops * active_elements_count
bias_flops = 0
if module.bias is not None:
bias_flops = out_c * active_elements_count
total_flops = total_conv_flops + bias_flops
return cat_out(total_flops, inp, out)
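# Worked example (shapes assumed): for nn.Conv2d(3, 16, kernel_size=3, padding=1)
# with its default bias on a 1x3x32x32 input producing a 1x16x32x32 output:
#   conv_per_position_flops = 3*3*3*16         = 432
#   active_elements_count   = 1*32*32          = 1024
#   bias_flops              = 16*1024          = 16384
#   total_flops             = 432*1024 + 16384 = 458752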
def compute_Conv3d_flops(module, inp, out):
# Can have multiple inputs, getting the first one
assert isinstance(module, nn.Conv3d)
assert len(inp.size()) == 5 and len(inp.size()) == len(out.size())
batch_size = inp.size()[0]
in_c = inp.size()[1]
k_h, k_w, k_d = module.kernel_size
out_c, out_h, out_w, out_d = out.size()[1:]
groups = module.groups
filters_per_channel = out_c // groups
conv_per_position_flops = k_h * k_w * k_d * in_c * filters_per_channel
active_elements_count = batch_size * out_h * out_w * out_d
total_conv_flops = conv_per_position_flops * active_elements_count
bias_flops = 0
if module.bias is not None:
bias_flops = out_c * active_elements_count
total_flops = total_conv_flops + bias_flops
return cat_out(total_flops, inp, out)
def compute_BatchNorm1d_flops(module, inp, out):
assert isinstance(module, nn.BatchNorm1d)
# assert len(inp.size()) == 3 and len(inp.size()) == len(out.size())
batch_flops = np.prod(inp.shape)
if module.affine:
batch_flops *= 2
return cat_out(batch_flops, inp, out)
def compute_BatchNorm2d_flops(module, inp, out):
# assert isinstance(module, nn.BatchNorm2d)
# assert len(inp.size()) == 4 and len(inp.size()) == len(out.size())
batch_flops = np.prod(inp.shape)
if hasattr(module, 'affine') and module.affine:
batch_flops *= 2
# return batch_flops
return cat_out(batch_flops, inp, out)
def compute_BatchNorm3d_flops(module, inp, out):
assert isinstance(module, nn.BatchNorm3d)
assert len(inp.size()) == 5 and len(inp.size()) == len(out.size())
batch_flops = np.prod(inp.shape)
if module.affine:
batch_flops *= 2
# return batch_flops
return cat_out(batch_flops, inp, out)
def compute_ReLU_flops(module, inp, out):
assert isinstance(module, (nn.ReLU, nn.ReLU6, nn.PReLU, nn.ELU, nn.LeakyReLU, nn.GELU, nn.CELU, nn.SELU))
batch_size = inp.size()[0]
active_elements_count = batch_size
for s in inp.size()[1:]:
active_elements_count *= s
# return active_elements_count
return cat_out(active_elements_count, inp, out)
def compute_Pool2d_flops(module, inp, out):
assert len(inp.size()) == 4 and len(inp.size()) == len(out.size())
total_flops = np.prod(inp.shape)
return cat_out(total_flops, inp, out)
def compute_Linear_flops(module, inp, out):
assert isinstance(module, nn.Linear)
# assert len(inp.size()) == 2 and len(out.size()) == 2
batch_size = inp.size()[0]
total_flops = batch_size * inp.size()[1] * out.size()[1]
return cat_out(total_flops, inp, out)
def compute_Upsample_flops(module, inp, out):
assert isinstance(module, nn.Upsample)
output_size = out[0]
batch_size = inp.size()[0]
output_elements_count = batch_size
for s in output_size.shape[1:]:
output_elements_count *= s
total_flops = output_elements_count
return cat_out(total_flops, inp, out)
def compute_Hardswich_flops(module, inp, out):
""" hardswish: https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish
"""
    # Since the hardswish FLOPs depend on the input values,
    # we use an estimate similar to ReLU here.
total_flops = out.numel()
return cat_out(total_flops, inp, out)
def compute_Hardsigmoid_flops(module, inp, out):
""" Hardsigmoid: https://pytorch.org/docs/stable/generated/torch.nn.Hardsigmoid.html#torch.nn.Hardsigmoid
"""
    # Since the hardsigmoid FLOPs depend on the input values,
    # we use an estimate similar to ReLU here.
total_flops = out.numel()
return cat_out(total_flops, inp, out)
|
Python
|
CL
|
2c2f815216399c33b1827d63ad6a0d67245d3379ffa5df0a7817bccfb2495476
|
from cs import CloudStack
from cs import CloudStackException
from heat.engine import properties
from heat.engine import resource
from gettext import gettext as _
from time import sleep
__author__ = 'cima'
class CloudstackSecurityGroup(resource.Resource):
PROPERTIES = (
API_ENDPOINT,
API_KEY,
API_SECRET,
NAME,
RULES) = (
'api_endpoint',
'api_key',
'api_secret',
'name',
'rules')
properties_schema = {
API_ENDPOINT: properties.Schema(
data_type=properties.Schema.STRING,
description=_('Cloudstack API endpoint'),
required=True
),
API_KEY: properties.Schema(
data_type=properties.Schema.STRING,
description=_('API key'),
required=True
),
API_SECRET: properties.Schema(
data_type=properties.Schema.STRING,
description=_('API secret key'),
required=True
),
NAME: properties.Schema(
data_type=properties.Schema.STRING,
description=_('The name of the security group'),
required=True
),
RULES: properties.Schema(
data_type=properties.Schema.LIST,
description=_('List of ingress / egress rules'),
required=False
)
}
def _get_cloudstack(self):
cs = CloudStack(endpoint=self.properties.get(self.API_ENDPOINT),
key=self.properties.get(self.API_KEY),
secret=self.properties.get(self.API_SECRET))
return cs
def handle_create(self):
cs = self._get_cloudstack()
name = self.properties.get(self.NAME)
rules = self.properties.get(self.RULES)
sg = cs.createSecurityGroup(name=name)
sg_id = sg['securitygroup']['id']
for rule in rules:
if rule.get('direction', 'ingress') == 'ingress':
cs.authorizeSecurityGroupIngress(
securitygroupid=sg_id,
startport=rule.get('startport', None),
endport=rule.get('endport', None),
cidrlist=rule.get('cidr', '0.0.0.0/0'),
protocol=rule.get('protocol', 'tcp'))
elif rule.get('direction', 'ingress') == 'egress':
cs.authorizeSecurityGroupEgress(
securitygroupid=sg_id,
startport=rule.get('startport', None),
endport=rule.get('endport', None),
cidrlist=rule.get('cidr', '0.0.0.0/0'),
protocol=rule.get('protocol', 'tcp'))
self.resource_id_set(sg_id)
return sg_id
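    # Illustrative only: the RULES property is consumed as a list of dicts, e.g.
    #   [{'direction': 'ingress', 'protocol': 'tcp',
    #     'startport': 22, 'endport': 22, 'cidr': '0.0.0.0/0'}]
    # Missing keys fall back to the defaults above (ingress, tcp, 0.0.0.0/0).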
def check_create_complete(self, _compute_id):
cs = self._get_cloudstack()
sg = cs.listSecurityGroups(id=self.resource_id)
if sg:
return True
return False
def handle_update(self, json_snippet=None, tmpl_diff=None, prop_diff=None):
# TODO
pass
def check_update_complete(self):
# TODO
pass
def handle_delete(self):
cs = self._get_cloudstack()
if self.resource_id is None:
return
try:
cs.deleteSecurityGroup(id=self.resource_id)
except CloudStackException as e:
if e.args[2]['errorcode'] == 536:
# Delete failed - cannot delete group when
# it's in use by virtual machines
# just wait for a while and try again
sleep(10)
self.handle_delete()
def check_delete_complete(self, _compute_id):
cs = self._get_cloudstack()
sg = None
try:
sg = cs.listSecurityGroups(id=self.resource_id)
except CloudStackException as e:
if e.args[2]['errorcode'] == 431:
# Resource cannot be found
# One thing less...
return True
if sg:
return False
else:
return True
def _resolve_attribute(self, name):
cs = self._get_cloudstack()
sg = cs.listSecurityGroups(id=self.resource_id)
if sg:
if name == 'id':
return sg['securitygroup'][0]['id']
            return sg['securitygroup'][0].get(name)
attributes_schema = {
'id': _('id')
}
def resource_mapping():
mappings = {}
mappings['Cloudstack::Network::SecurityGroup'] = CloudstackSecurityGroup
return mappings
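# --- Illustrative usage (not part of the original module) ---
# A hypothetical Heat template snippet exercising this resource; the property
# values are placeholders.
#
# resources:
#   web_sg:
#     type: Cloudstack::Network::SecurityGroup
#     properties:
#       api_endpoint: https://cloudstack.example.com/client/api
#       api_key: MY_API_KEY
#       api_secret: MY_API_SECRET
#       name: web-sg
#       rules:
#         - direction: ingress
#           protocol: tcp
#           startport: 80
#           endport: 80
#           cidr: 0.0.0.0/0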
|
Python
|
CL
|
1883b7b14959e02224595dcd2e0e3fb1cbb09c253f2d9995659b9ecaa0ea0fe4
|
from distutils.core import setup
from setuptools import find_packages
setup(name='tezos-hd-util',
version='0.1.7',
packages=find_packages(),
install_requires=[
'chainside-btcpy-multi>=0.2.78,<0.3.0',
'pyblake2>=1.1.2,<2.0.0',
'secp256k1new>=0.13.2,<0.14.0',
],
      description='Python tool for tezos hd generation',
author='Oskar Hladky',
author_email='oskyks1@gmail.com',
url='https://github.com/oskyk/tezos-hd-util',
      download_url='https://github.com/oskyk/tezos-hd-util/archive/0.1.7.tar.gz',
python_requires='>=3',
keywords=['tezos', 'hd', 'address'],
classifiers=[
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
],
)
|
Python
|
CL
|
6aaab345d1d3bf3bb1ce9c82f8d57d24203c7a129ea4298cc5f2eb64c17b03ba
|
from data_reader.utils import get_dicts, get_schema_emb
from data_reader.read_data import get_dialogues, get_seqs, get_batch, get_frame_level_data
from models.utils import get_bert_tokenizer
from models.model import Model
import numpy as np
from loss import LossFn
from transformers import AdamW, get_linear_schedule_with_warmup
import torch
import os
from tqdm import tqdm
import config
from evaluate import evaluate
def main(train, dev, test):
d_id_train, utt_true_train, x_train, x_train_len, y_train, inv_align_train, prev_sys_frame_train = train
d_id_dev, utt_true_dev, x_dev, x_dev_len, y_dev, inv_align_dev, prev_sys_frame_dev = dev
d_id_test, utt_true_test, x_test, x_test_len, y_test, inv_align_test, prev_sys_frame_test = test
model = Model(bert_dir=os.path.join(config.BERT_DIR, config.BERT_MODEL), device=config.DEVICE)
model.to(config.DEVICE)
n_iterations = int(np.ceil(len(x_train) / config.BATCH_SIZE))
num_total_steps = int(n_iterations // config.GRAD_STEPS * config.BATCH_SIZE)
warmup_proportion = 0.1
max_grad_norm = 1.0
num_warmup_steps = int(num_total_steps * warmup_proportion)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=config.LEARNING_RATE, correct_bias=False)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_total_steps)
train_gen = get_batch(d_id_train, utt_true_train, x_train, x_train_len, y_train, inv_align_train, prev_sys_frame_train, tokenizer, schema_emb, batch_size=config.BATCH_SIZE, shuffle=True, typ='train')
dev_gen = get_batch(d_id_dev, utt_true_dev, x_dev, x_dev_len, y_dev, inv_align_dev, prev_sys_frame_dev, tokenizer, schema_emb, batch_size=config.BATCH_SIZE, shuffle=False, typ='dev')
test_gen = get_batch(d_id_test, utt_true_test, x_test, x_test_len, y_test, inv_align_test, prev_sys_frame_test, tokenizer, schema_emb, batch_size=config.BATCH_SIZE, shuffle=False, typ='test')
best_dev_acc = 0.
lossfn = LossFn()
for epoch in range(config.EPOCHS):
total_loss, intent_loss, req_slot_loss, cat_value_loss, cat_status_loss, non_cat_status_loss, non_cat_start_span_loss, non_cat_end_span_loss, cnt = 0., 0., 0., 0., 0., 0., 0., 0., 0
pbar = tqdm(range(n_iterations))
model.train()
print('\nEpoch :{}'.format(epoch+1))
for b_id in pbar:
            d_id, utt_true, x, x_len, attn_mask, y, dial_lens, inv_align, prev_sys_frame = next(train_gen)
batch_services = [list(y[i].keys()) for i in range(len(y))]
for frame in range(len(y[0])):
service_id = [batch_services[i][frame] for i in range(len(y))]
possible, masking, true_labels = get_frame_level_data(service_id, y, schema_emb)
scores = model(x, x_len, attn_mask, possible, masking)
intent_score, req_slot_score, cat_status_score, cat_value_score, non_cat_status_score, non_cat_value_score = scores
loss = {}
loss['intents'] = lossfn.get_intent_loss(intent_score.view(-1, intent_score.size(-1)), true_labels['intents'].view(-1))
loss['req_slots'] = lossfn.get_req_slot_loss(req_slot_score, true_labels['req_slots'], masking['req_slots'])
n_dials = x.size(0)
n_tokens = x.size(1)
n_cat_slots = possible['cat_slots'].size(-2)
n_non_cat_slots = possible['non_cat_slots'].size(-2)
n_values = possible['cat_values'].size(-2)
n_req_slots = possible['req_slots'].size(-2)
cat_values_tmp = torch.zeros((n_dials, n_cat_slots, n_values-1)).to(config.DEVICE)
for i, d in enumerate(true_labels['cat_values']):
for j, t in enumerate(d):
if t > 0:
cat_values_tmp[i, j, t-1] = 1.
loss['cat_slots'] = lossfn.get_cat_value_loss(cat_value_score[:, :, 1:].contiguous().view(-1), cat_values_tmp.view(-1), masking['cat_values'][:, :, 1:].contiguous().view(-1))
true_labels['cat_values'][true_labels['cat_values']>1] = 2
loss['cat_status'] = lossfn.get_status_loss(cat_status_score.view(-1, 3), true_labels['cat_values'].view(-1), masking['cat_slots'])
loss['non_cat_status'] = lossfn.get_status_loss(non_cat_status_score.view(-1, 3), true_labels['non_cat_values_status'].view(-1), masking['non_cat_slots'])
loss['non_cat_start_span'] = lossfn.get_noncat_value_loss(non_cat_value_score[:, :, :, 0].view(-1, n_tokens), true_labels['non_cat_values_start'].view(-1))
loss['non_cat_end_span'] = lossfn.get_noncat_value_loss(non_cat_value_score[:, :, :, 1].view(-1, n_tokens), true_labels['non_cat_values_end'].view(-1))
intent_loss += loss['intents'].item()
req_slot_loss += loss['req_slots'].item()
cat_value_loss += loss['cat_slots'].item()
cat_status_loss += loss['cat_status'].item()
non_cat_status_loss += loss['non_cat_status'].item()
non_cat_start_span_loss += loss['non_cat_start_span'].item()
non_cat_end_span_loss += loss['non_cat_end_span'].item()
loss = sum(loss.values())
total_loss += loss.item()
cnt += 1
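                # Scale the loss down by GRAD_STEPS before backprop so gradients
                # accumulated across steps match the magnitude of one larger batch.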
grad = loss / config.GRAD_STEPS
grad.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
optimizer.step()
optimizer.zero_grad()
scheduler.step()
pbar.set_description('TL:{:.4f}, IL:{:.4f}, RL:{:.4f}, CS:{:.4f}, CL:{:.4f}, NSL:{:.4f}, NSP:{:.4f}, NEP:{:.4f}'.format(total_loss/cnt, intent_loss/cnt, req_slot_loss/cnt, cat_status_loss/cnt, cat_value_loss/cnt, non_cat_status_loss/cnt, non_cat_start_span_loss/cnt, non_cat_end_span_loss/cnt))
        print('Evaluating...')
dev_pred, jnt_gl, avg_gl = evaluate(model, dev_gen, length=int(np.ceil(len(x_dev)/config.BATCH_SIZE)), schema_dict=schema_dict, schema_emb=schema_emb)
if avg_gl > best_dev_acc:
# test_pred, jnt_gl_test, avg_gl_test = evaluate(model, test_gen, length=int(np.ceil(len(x_test)/config.BATCH_SIZE)), schema_dict=schema_dict, schema_emb=schema_emb, typ='test')
torch.save(model.state_dict(), config.OUT_DIR + 'model.pt')
best_dev_acc = avg_gl
model.load_state_dict(torch.load(config.OUT_DIR + 'model.pt'))
test_pred, jnt_gl_test, avg_gl_test = evaluate(model, test_gen, length=int(np.ceil(len(x_test)/config.BATCH_SIZE)), schema_dict=schema_dict, schema_emb=schema_emb, typ='test')
if __name__ == "__main__":
schema_dict = get_dicts()
schema_emb = get_schema_emb()
train_dialogues, dev_dialogues, test_dialogues = get_dialogues()
tokenizer = get_bert_tokenizer()
train = get_seqs(train_dialogues, tokenizer, schema_dict, schema_emb, typ='train')
dev = get_seqs(dev_dialogues, tokenizer, schema_dict, schema_emb, typ='dev')
test = get_seqs(test_dialogues, tokenizer, schema_dict, schema_emb, typ='test')
main(train, dev, test)
|
Python
|
CL
|
4c5e3bf479164926b49627915a8d9d485fab07a49c44ea5f25e8b191b96f9589
|
# read genIO CCI data and write it to csv timeseries
import numpy as np
from rsdata.ESA_CCI_SM.interface import ESA_CCI_SM
from pygeogrids.grids import CellGrid
import pygrids.ESA_CCI_SM as cci_grid
from datetime import datetime
from pytesmo.timedate import julian
import matplotlib.pyplot as plt
def read_gpis():
fpath = '/media/sf_H/CCI_markus/coordinates.csv'
    coords = np.genfromtxt(fpath, dtype=[('name', 'S10'), ('lat', float),
                                         ('lon', float)], delimiter=',',
skip_header=1)
return coords
def CCI_genIO(valid_gpis, start_date, end_date, plot=False):
start_jd = julian.julday(start_date.month, start_date.day, start_date.year,
start_date.hour, start_date.minute,
start_date.second)
end_jd = julian.julday(end_date.month, end_date.day, end_date.year,
end_date.hour, end_date.minute,
end_date.second)
parent_grid = cci_grid.ESA_CCI_SM_grid_v4_1_indl()
nearest_gpis = parent_grid.find_nearest_gpi(valid_gpis['lon'],
valid_gpis['lat'])
nearest_gpis = np.unique(nearest_gpis[0])
cells = parent_grid.gpi2cell(nearest_gpis)
header = 'jd,sm,sm_noise,sensor,freqband,nobs,year,month,day'
descr = [('year', np.uint), ('month', np.uint), ('day', np.uint)]
for cell in sorted(np.unique(cells)):
gpis, lons, lats = parent_grid.grid_points_for_cell(cell)
grid = CellGrid(lons, lats,
np.ones_like(lons, dtype=np.int16) * cell, gpis=gpis)
cfg_path = ('/home/ipfeil/GitRepos/rs-data-readers/rsdata/'+
'ESA_CCI_SM/datasets/')
version = 'ESA_CCI_SM_v02.3'
param = 'esa_cci_sm_monthly'
cci_io = ESA_CCI_SM(version=version, parameter=param, grid=grid,
cfg_path=cfg_path)
for ts, gp in cci_io.iter_ts():
if gp not in nearest_gpis:
continue
valid_date_idx = np.where((ts['jd']>=start_jd) &
(ts['jd']<=end_jd))[0]
ts_valid_dates = ts[valid_date_idx]
ts_dates = add_field(ts_valid_dates, descr)
dates = julian.julian2datetime(ts_dates['jd'])
years = [date.year for date in dates]
ts_dates['year'] = years
ts_dates['month'] = [date.month for date in dates]
ts_dates['day'] = [date.day for date in dates]
np.savetxt('/media/sf_D/CCI_csv/'+str(gp)+'.csv',
ts_dates, delimiter=',', header=header)
            if plot:
valid_ind = np.where(ts_valid_dates['sm'] != -999999)
dates = julian.julian2datetime(ts_valid_dates['jd'][valid_ind])
plt.plot(dates, ts_valid_dates['sm'][valid_ind])
plt.title('ESA CCI SM combined monthly average, gpi: '+str(gp))
plt.xlabel('date')
plt.ylabel('soil moisture [%]')
plt.show()
def add_field(a, descr):
"""Return a new array that is like "a", but has additional fields.
Arguments:
a -- a structured numpy array
descr -- a numpy type description of the new fields
The contents of "a" are copied over to the appropriate fields in
the new array, whereas the new fields are uninitialized. The
arguments are not modified.
    >>> sa = np.array([(1, 'Foo'), (2, 'Bar')], \
            dtype=[('id', int), ('name', 'S3')])
    >>> sa.dtype.descr == np.dtype([('id', int), ('name', 'S3')])
    True
    >>> sb = add_field(sa, [('score', float)])
    >>> sb.dtype.descr == np.dtype([('id', int), ('name', 'S3'), \
            ('score', float)])
    True
    >>> np.all(sa['id'] == sb['id'])
    True
    >>> np.all(sa['name'] == sb['name'])
    True
"""
if a.dtype.fields is None:
        raise ValueError("'A' must be a structured numpy array")
b = np.empty(a.shape, dtype=a.dtype.descr + descr)
for name in a.dtype.names:
b[name] = a[name]
return b
if __name__ == '__main__':
valid_gpis = read_gpis()
start_date = datetime(1991, 1, 1)
end_date = datetime.today()
CCI_genIO(valid_gpis, start_date, end_date, plot=False)
|
Python
|
CL
|
23c1652ccf639f9274c910a3016bac56315a5e06daa9e1b584cde0ac2bc987ed
|
"""Miscellaneous utility methods for this repository."""
import os
import errno
import functools
import inspect
import warnings
def ensure_dir(path):
"""Ensure that the directory specified exists, and if not, create it."""
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise # pragma: no cover
return path
def deprecated(base, new_path):
"""Print a deprecation warning.
This is a decorator which can be used to mark functions as deprecated. It
will result in a warning being emitted when the function is used.
"""
def decorator(func1):
if inspect.isclass(func1):
fmt1 = "The class {base}.{name} is deprecated, use " \
"{new_path} instead."
else:
fmt1 = "The function {base}.{name} is deprecated, use " \
"{new_path} instead."
@functools.wraps(func1)
def new_func1(*args, **kwargs):
warnings.simplefilter('always', PendingDeprecationWarning)
warnings.warn(
fmt1.format(
base=base,
name=func1.__name__,
new_path=new_path
),
category=PendingDeprecationWarning,
stacklevel=2
)
warnings.simplefilter('default', PendingDeprecationWarning)
return func1(*args, **kwargs)
return new_func1
return decorator
def recursive_update(d, u):
"""Update a nested dictionary recursively recursively."""
for k, v in u.items():
if isinstance(v, dict):
d[k] = recursive_update(d.get(k, {}), v)
else:
d[k] = v
return d
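# --- Illustrative usage (not part of the original module) ---
# recursive_update merges nested dicts in place:
# recursive_update({'a': {'x': 1}}, {'a': {'y': 2}}) -> {'a': {'x': 1, 'y': 2}}
# The deprecated decorator would typically be applied as:
# @deprecated('old.package', 'new.package.func')
# def func(): ...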
|
Python
|
CL
|
bf61dcb530bbf06e9b9483646e79d47f22542aca2f6e614fcb4f17bedb02a29a
|
import numpy as np
from growth_rate import growth_factor
from pmh import Pmm, get_PkInterps, linz_bz
from prep_camb import CAMB, Clxy
from scipy.integrate import simps
from params import get_params
from scipy.interpolate import interp1d
from tinker import tinker_sigma, tinker_fsigma
params = get_params()
params['om_m'] = 0.30851
rho_b = 2.78e11 * params['om_m'] * params['h_100'] ** 2. / params['h_100'] ** 3. ## [Msol / (Mpc/h)^3]
## Table 3 of https://arxiv.org/pdf/1005.2239.pdf
def jenkins(sigma):
## Valid for 0 < z < 5.
arg = np.log(1. / sigma)
exponent = - np.abs(arg + 0.61) ** 3.8
result = 0.315 * np.exp(exponent)
    print('Jenkins mass fn. limited to -1.2 <= ln(1/sigma) <= 1.05 and 0 < z < 5.')
result[arg < -1.20] = np.nan
result[arg > 1.05] = np.nan
return result
def sheth_tormen(sigma):
deltac = 1.686
fsigma = 0.3222 * np.sqrt( 2. * 0.75 / np.pi) * np.exp( - 0.75 * deltac ** 2. / 2. / sigma ** 2.) * (1. + (sigma**2. / 0.75 / deltac ** 2.) ** 0.3) * deltac / sigma
return fsigma
def W(x):
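    ## Fourier transform of a spherical top-hat window.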
return 3. * (np.sin(x) - x * np.cos(x)) / x**3.
def sigma(Pk_interps, Ms, z):
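    ## sigma^2(M, z) = D^2(z) / (2 pi^2) * int k^2 P_lin(k) W^2(k R_M) dk,
    ## with R_M = (3 M / (4 pi rho_b))^(1/3) and D(z) the linear growth factor.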
result = []
## Lift Martin's linear z=0 P(k).
data = np.loadtxt('../dat/pklin_1.0000.txt')
PMW = interp1d(data[:,0], data[:,1], kind='linear', copy=True, bounds_error=False, fill_value=0.0, assume_sorted=False)
for M in Ms:
## Implicitly a fn. of z by rho_b, Pmm and the growth factor.
ks = np.arange(1.e-3, 1.e1, 1.e-3)
Rms = (3. * M / 4 / np.pi / rho_b) ** (1. / 3.)
## Ps = Pmm(Pk_interps, ks, 0.0, type='linear')
Ps = PMW(ks)
Ws = W(ks * Rms)
## Defined by linear or non-linear.
integrand = ks * ks * Ps * Ws * Ws
sig2 = simps(integrand, dx = ks[1] - ks[0])
sig2 *= growth_factor(z) ** 2.
sig2 /= (2. * np.pi ** 2.)
result.append(np.sqrt(sig2))
return np.array(result)
def tinker_bias(nu, Delta=200):
## Tinker bias fn. (https://arxiv.org/pdf/1001.3162.pdf)
y = np.log10(Delta)
A = 1.0 + 0.24 * y * np.exp(- (4/y)**4.)
a = 0.44 * y - 0.88
B = 0.183
b = 1.5
C = 0.019 + 0.107 * y + 0.19 * np.exp(-(4./y)**4.)
c = 2.4
return 1. - A * nu ** a /(nu ** a + 1.686 ** a) + B * nu ** b + C * nu **c
if __name__ == "__main__":
import matplotlib as mpl
import matplotlib.pyplot as plt
import pylab as pl
import matplotlib.patches as patches
from prep_camb import CAMB, Clxy
## from nbar import comovdensity
## from specs import samplestats
from pylab import rcParams
## plt.style.use('ggplot')
mpl.rc('text', usetex = True)
rcParams['figure.figsize'] = (3.5, 3.5)
print("\n\nWelcome to massfn.\n\n")
z = 4.0
Ms = 1.e11 * 10.** np.arange(0.0, 3.8, 0.05)
## Prepare pycamb module; linear, non-linear matter P(k) and Cls.
cambx = CAMB()
Pk_interps = get_PkInterps(cambx)
for lsigma, lfsigma, label in zip([sigma, sigma, tinker_sigma], [sheth_tormen, jenkins, tinker_fsigma], ['ST', 'Jenkins', 'Tinker']):
sigmas = lsigma(Pk_interps, Ms, z)
fsigmas = lfsigma(sigmas)
## nus = 1.686 / sigmas
## tinker_bs = tinker_bias(nus, Delta=200)
dndsig = - rho_b * fsigmas / Ms / sigmas
ns = []
## Meff = []
for M in Ms[:-1]:
ns.append(simps(-dndsig[Ms > M], dx = np.abs(sigmas[1] - sigmas[0])))
## Meff.append( np.abs( simps(-dndsig[Ms > M] * Ms[Ms > M][::-1], dx = np.abs(sigmas[1] - sigmas[0])) ) )
ns = np.array(ns)
## Meff = np.array(Meff) / ns
pl.loglog(Ms[:-1], ns, label=label)
'''
band = 'g'
stats = samplestats()
phi_star = stats[band]['schechter']['phi_star']
M_star = stats[band]['schechter']['M_star']
alpha = stats[band]['schechter']['alpha']
## Second y-axis with app. mag. limits for GoldRush.
mags = np.arange(10.0, 35.0, 0.5)
nbars = []
for mag in mags:
nbars.append( 10. ** comovdensity(z, phi_star, M_star, alpha, type='app', mlim=mag, printit=False) )
interp_nbar = interp1d(nbars, mags, bounds_error=True)
mags_ax = interp_nbar(ns)
'''
ax = pl.gca()
'''
## Add second mag_axis.
ax2 = ax.twinx()
ax2.semilogx(Ms[:-1], mags_ax, alpha=0.0)
ax2.set_ylabel(r'$i_{{\rm{AB}}}$')
## Add b_eff axis on top.
ax3 = ax.twiny()
ax3.semilogy(tinker_bs[:-1], ns, alpha=0.0)
ax3.set_xlabel(r'$b_{\rm{min}}$')
'''
## and original
ax.set_xlabel(r'$M_{\rm{min}} \ [M_\odot]$')
ax.set_ylabel(r'$\bar n \ [(h^{-1} \ \rm{Mpc})^{-3}]$')
'''
## Add arrows
style = 'Simple, tail_width=0.5, head_width=4, head_length=8'
kw = dict(arrowstyle=style, color='k', alpha=0.3)
## Schechter fn.
arrow = patches.FancyArrowPatch((ax.get_xlim()[1], 1.e-3), (ax.get_xlim()[0], 1.e-3), connectionstyle = 'arc3, rad=-.3', **kw)
ax.add_patch(arrow)
ax.annotate('Schechter fn.', xy=(3.e14, 4.e-5), xycoords='data', va='bottom', ha='left') ## bbox=dict(fc='w')
## Jenkins mass fn.
arrow = patches.FancyArrowPatch((ax.get_xlim()[0], 1.e-3), (1.e13, ax.get_ylim()[0]), connectionstyle = 'arc3, rad=-.3', **kw)
ax.add_patch(arrow)
ax.annotate('Jenkins\n(2001)', xy=(9.e11, 5.e-6), xycoords='data', va='bottom', ha='left') ## bbox=dict(fc='w')
## Tinker bias.
arrow = patches.FancyArrowPatch((1.e13, ax.get_ylim()[0]), (1.e14, ax.get_ylim()[1]), connectionstyle = 'arc3, rad=-.3', **kw)
ax.add_patch(arrow)
ax.annotate('Tinker\n(2010)', xy=(1.4e12, 1.e-7), xycoords='data', va='bottom', ha='left') ## bbox=dict(fc='w')
## Harikane 17.
kw = dict(arrowstyle=style, color='g')
arrow = patches.FancyArrowPatch((ax.get_xlim()[1], 1.e-3), (1.5e13, ax.get_ylim()[1]), connectionstyle = 'arc3, rad=-.3', **kw)
ax.add_patch(arrow)
ax.annotate('Harikane (2017)', xy=(2.5e14, 1.3e-3), xycoords='data', va='bottom', ha='left') ## bbox=dict(fc='w')
## Straight line connection.
ax.axhline(y = 1.e-3, xmin = ax.get_xlim()[0], xmax = ax.get_xlim()[1], c='k')
'''
pl.legend(loc=1)
pl.savefig('../plots/abmatch_linb.pdf', bbox_inches='tight')
print("\n\nDone.\n\n")
|
Python
|
CL
|
4e4569f804103028ff3fe3f3caf2c5aff84ab098502636c7f4e3fce6a005f716
|
#!/usr/bin/python2
#
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Obtain objdump and gas binaries.
Usage:
obtain_binutils.py <objdump> <gas>
Check out appropriate version of binutils, compile it and
copy objdump and gas binaries to specified places.
"""
import os
import shutil
import sys
CHECKOUT_DIR = '/dev/shm/binutils'
BINUTILS_REPO = 'http://git.chromium.org/native_client/nacl-binutils.git'
BINUTILS_REVISION = '8c8125b61c560e6ae47d4f3e48786121f944e364'
# We need a specific revision of binutils, and it has to include the
# following patches:
# Add prefetch_modified - non-canonical encoding of prefetch
#
# Properly handle data16+rex.w instructions, such as
# 66 48 68 01 02 03 04 data32 pushq $0x4030201
#
# We are not using the head revision because the decoder test depends
# on the precise format of objdump output.
def Command(cmd):
print
print 'Running:', cmd
print
result = os.system(cmd)
if result != 0:
print 'Command returned', result
sys.exit(1)
def main():
if len(sys.argv) != 3:
print __doc__
sys.exit(1)
# These are required to make binutils,
# and when they are missing binutils's make
# error messages are cryptic, so we better fail early.
Command('flex --version')
Command('bison --version')
Command('makeinfo --version')
if os.path.exists(CHECKOUT_DIR):
shutil.rmtree(CHECKOUT_DIR)
Command('git clone %s %s' % (BINUTILS_REPO, CHECKOUT_DIR))
try:
old_dir = os.getcwd()
os.chdir(CHECKOUT_DIR)
Command('git checkout %s' % BINUTILS_REVISION)
Command('./configure')
Command('make')
os.chdir(old_dir)
objdump, gas = sys.argv[1:]
shutil.copy(os.path.join(CHECKOUT_DIR, 'binutils', 'objdump'), objdump)
shutil.copy(os.path.join(CHECKOUT_DIR, 'gas', 'as-new'), gas)
finally:
shutil.rmtree(CHECKOUT_DIR)
print 'ok'
if __name__ == '__main__':
main()
|
Python
|
CL
|
1e5ebc9d84fb6cc0cdce89a88ba6bb8de7d7a003032a177bde88c911cb70704b
|
#!/usr/bin/env python
"""
WSGI config for mysite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from os.path import dirname,abspath
PROJECT_DIR = dirname(dirname(abspath(__file__)))
# import sys
#sys.path.insert(0,PROJECT_DIR)
#sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../../")))
#sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../")))
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")
#sys.path.append('/home/ruixif/database/mysite')
#sys.path.append('/home/ruixif/database')
application = get_wsgi_application()
|
Python
|
CL
|
8edb30ce5200a421600e344cd6442298a69f8f4d573fa232605bccd68b57594a
|
"""
Chains find_files with a .get request:
A script to list files, pulled from: https://developers.google.com/drive/api/v3/reference/files/list
"""
import os
from ..CLI import CLI
from ..Services import FilesService as Files
from ..Utilities import Downloader
params = CLI.get_parse_dict(
(
"--dir",
dict(
default="", type=str, dest='dir',
help="Directory to download to"
)
),
# file search parameter
(
"--corpora",
dict(
default="", type=str, dest='corpora',
help="Bodies of items (files/documents) to which the query applies. Supported bodies are 'user', 'domain', 'drive' and 'allDrives'. Prefer 'user' or 'drive' to 'allDrives' for efficiency.'"
)
),
(
"--driveId",
dict(
default="", type=str, dest='driveId',
help='ID of the shared drive to search'
)
),
(
'--orderBy',
dict(
default="", type=str, dest='orderBy',
help="A comma-separated list of sort keys. Valid keys are 'createdTime', 'folder', 'modifiedByMeTime', 'modifiedTime', 'name', 'name_natural', 'quotaBytesUsed', 'recency', 'sharedWithMeTime', 'starred', and 'viewedByMeTime'. Each key sorts ascending by default, but may be reversed with the 'desc' modifier. Example usage: ?orderBy=folder,modifiedTime desc,name. Please note that there is a current limitation for users with approximately one million files in which the requested sort order is ignored."
)
),
(
'--pageSize',
dict(
default=25, type=int, dest='pageSize',
help="The maximum number of files to return per page. Partial or empty result pages are possible even before the end of the files list has been reached. Acceptable values are 1 to 1000, inclusive. (Default: 100"
)
),
(
'--pageToken',
dict(
default="", type=str, dest='pageToken',
help="The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response"
)
),
(
'--q',
dict(
default="", type=str, dest='q',
help="A query for filtering the file results. 'See the Search for files' guide for the supported syntax."
)
),
(
'--spaces',
dict(
default="", type=str, dest='spaces',
help="A comma-separated list of spaces to query within the corpus. Supported values are 'drive', 'appDataFolder' and 'photos'."
)
),
(
'--supportsAllDrives',
dict(
default=True, action='store_const', const=True, dest='supportsAllDrives',
help="Deprecated - Whether the requesting application supports both My Drives and shared drives. This parameter will only be effective until June 1, 2020. Afterwards all applications are assumed to support shared drives."
)
)
)
files_api = Files()
try:
d = params['dir']
except KeyError:
d = None
else:
del params['dir']
file_listing = files_api.list(fields='files(id,name,mimeType)', **params)
if d is None:
d = os.path.join(os.getcwd(), 'GDrive Downloads')
os.makedirs(d, exist_ok=True)
for f in file_listing:
# print(f)
print(
"Downloaded {} to {}".format(
f['name'],
Downloader(**f).download(os.path.join(d, f['name']))
)
)
|
Python
|
CL
|
7fdfb8bb73733464a1907e3de1b16243f93fd70e0ced88ae98091ded991951fe
|
# ***************************************************************
# Copyright (c) 2019 Dun Liang <randonlang@gmail.com>. All Rights Reserved.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import unittest
import jittor as jt
from jittor.nn import Pool, pool, AvgPool2d, avg_pool2d
from jittor.nn import MaxPool2d as j_MaxPool2d
from jittor.nn import max_pool2d as j_max_pool2d
import numpy as np
from .test_core import expect_error
from .test_grad import ngrad
from itertools import permutations
from jittor import compile_extern, Module
from .test_log import find_log_with_re
import random
import pickle as pk
skip_this_test = False
try:
jt.dirty_fix_pytorch_runtime_error()
import torch
from torch.nn import MaxPool2d, Sequential
except:
skip_this_test = True
class OldPool(Module):
def __init__(self, kernel_size, stride=None, padding=0, dilation=None, return_indices=None, ceil_mode=False, count_include_pad=True, op="maximum"):
        assert dilation is None
        assert return_indices is None
self.kernel_size = kernel_size
self.op = op
self.stride = stride if stride else kernel_size
self.padding = padding
self.ceil_mode = ceil_mode
self.count_include_pad = count_include_pad and padding != 0
def execute(self, x):
N,C,H,W = x.shape
if self.ceil_mode == False:
h = (H+self.padding*2-self.kernel_size)//self.stride+1
w = (W+self.padding*2-self.kernel_size)//self.stride+1
else:
h = (H+self.padding*2-self.kernel_size + self.stride - 1)//self.stride+1
w = (W+self.padding*2-self.kernel_size + self.stride - 1)//self.stride+1
# TODO: backward
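        # reindex materializes every kernel_size x kernel_size pooling window as
        # the two trailing dims of an [N,C,h,w,k,k] view; reduce then takes the
        # max/mean over those dims.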
xx = x.reindex([N,C,h,w,self.kernel_size,self.kernel_size], [
"i0", # Nid
"i1", # Cid
f"i2*{self.stride}-{self.padding}+i4", # Hid
f"i3*{self.stride}-{self.padding}+i5", # Wid
])
return xx.reduce(self.op, [4,5])
def check(jt_model, torch_model, shape, near_data):
if (near_data):
assert shape[0] * shape[1] * shape[2] * shape[3] % 8 == 0
data = list(range(8)) * int((shape[0] * shape[1] * shape[2] * shape[3]) / 8)
random.shuffle(data)
x = jt.array(data).float32().reshape(shape)
else:
x = jt.random(shape)
y = jt_model(x)
g = jt.grad(y.sum(), x)
x_ = torch.Tensor(x.data)
x_.requires_grad = True
y_ = torch_model(x_)
y_.sum().backward()
y__ = y_.detach().numpy()
g__ = x_.grad.detach().numpy()
assert np.allclose(y.data, y__)
assert np.allclose(g.data, g__)
@unittest.skipIf(skip_this_test, "No Torch found")
class TestArgPoolOp(unittest.TestCase):
@unittest.skipIf(not jt.compiler.has_cuda, "No cuda found")
@jt.flag_scope(use_cuda=1)
def test_cuda(self):
jt_model = jt.nn.Sequential(Pool(2, 2, 0), Pool(2, 2, 0), Pool(2, 2, 0, ceil_mode=True), Pool(2, 2, 0), Pool(2, 2, 0), Pool(3, 1, 1))
torch_model = Sequential(MaxPool2d(2, 2, 0), MaxPool2d(2, 2, 0), MaxPool2d(2, 2, 0, ceil_mode=True), MaxPool2d(2, 2, 0), MaxPool2d(2, 2, 0), MaxPool2d(3, 1, 1))
shape = [2, 3, 300, 300]
check(jt_model, torch_model, shape, False)
shape = [2, 3, 157, 300]
check(jt_model, torch_model, shape, False)
for i in range(10):
check(jt_model, torch_model, [1,1,300,300], True)
@unittest.skipIf(not jt.compiler.has_cuda, "No cuda found")
@jt.flag_scope(use_cuda=1)
def test_cuda_tuple(self):
jt_model = jt.nn.Sequential(Pool((2,3), (2,3), (1,1)), Pool((2,3), (2,3), (1,1)), Pool((2,3), (2,3), (1,1), ceil_mode=True), Pool((2,3), (2,3), (1,1)), Pool((2,3), (2,3), (1,1)), Pool(3, 1, 1))
torch_model = Sequential(MaxPool2d((2,3), (2,3), (1,1)), MaxPool2d((2,3), (2,3), (1,1)), MaxPool2d((2,3), (2,3), (1,1), ceil_mode=True), MaxPool2d((2,3), (2,3), (1,1)), MaxPool2d((2,3), (2,3), (1,1)), MaxPool2d(3, 1, 1))
shape = [2, 3, 300, 300]
check(jt_model, torch_model, shape, False)
shape = [2, 3, 157, 300]
check(jt_model, torch_model, shape, False)
for i in range(10):
check(jt_model, torch_model, [1,1,300,300], True)
@unittest.skipIf(True, "TODO: cannot pass this test, fix me")
@unittest.skipIf(not jt.compiler.has_cuda, "No cuda found")
@jt.flag_scope(use_cuda=1)
def test_cuda_old_pool(self):
from torch.nn import AvgPool2d
jt_model = OldPool(3, 1, 1, op="mean")
torch_model = AvgPool2d(3, 1, 1)
shape = [64, 64, 300, 300]
check(jt_model, torch_model, shape, False)
shape = [32, 128, 157, 300]
check(jt_model, torch_model, shape, False)
for i in range(10):
check(jt_model, torch_model, [1,1,300,300], True)
def test_cpu_(self):
# x = jt.random([32, 128, 157, 300])
x = jt.random([4, 128, 157, 300])
x = jt.nn.pool(x, 2, "maximum", 0, 2)
def test_cpu(self):
jt_model = jt.nn.Sequential(Pool(2, 2, 0), Pool(2, 2, 0), Pool(2, 2, 0, ceil_mode=True), Pool(2, 2, 0), Pool(2, 2, 0), Pool(3, 1, 1))
torch_model = Sequential(MaxPool2d(2, 2, 0), MaxPool2d(2, 2, 0), MaxPool2d(2, 2, 0, ceil_mode=True), MaxPool2d(2, 2, 0), MaxPool2d(2, 2, 0), MaxPool2d(3, 1, 1))
# shape = [64, 64, 300, 300]
shape = [4, 64, 300, 300]
check(jt_model, torch_model, shape, False)
# shape = [32, 128, 157, 300]
shape = [4, 128, 157, 300]
check(jt_model, torch_model, shape, False)
for i in range(10):
check(jt_model, torch_model, [1,1,300,300], True)
def test_cpu_tuple(self):
jt_model = jt.nn.Sequential(Pool((2,3), (2,3), (1,1)), Pool((2,3), (2,3), (1,1)), Pool((2,3), (2,3), (1,1), ceil_mode=True), Pool((2,3), (2,3), (1,1)), Pool((2,3), (2,3), (1,1)), Pool(3, 1, 1))
torch_model = Sequential(MaxPool2d((2,3), (2,3), (1,1)), MaxPool2d((2,3), (2,3), (1,1)), MaxPool2d((2,3), (2,3), (1,1), ceil_mode=True), MaxPool2d((2,3), (2,3), (1,1)), MaxPool2d((2,3), (2,3), (1,1)), MaxPool2d(3, 1, 1))
shape = [2, 3, 300, 300]
check(jt_model, torch_model, shape, False)
shape = [2, 3, 157, 300]
check(jt_model, torch_model, shape, False)
for i in range(10):
check(jt_model, torch_model, [1,1,300,300], True)
def test_index_pool(self):
pool = jt.nn.Pool(2, return_indices=True)
a = jt.randn([10,3,100,100])
b, idx = pool(a)
idx.sync()
def test_index_pool2(self):
pool = jt.nn.Pool(2, return_indices=True)
a = jt.array([1,0,0,1,
0,0,0,0,
0,0,0,0,
1,0,0,1]).reshape((1,1,4,4))
b, idx = pool(a)
assert (idx.data.reshape((4,)) == [0,3,12,15]).all()
def test_unpool(self):
from jittor import nn
pool = nn.MaxPool2d(2, stride=2, return_indices=True)
unpool = nn.MaxUnpool2d(2, stride=2)
input = jt.array([[[[ 1., 2, 3, 4,0],
[ 5, 6, 7, 8,0],
[ 9, 10, 11, 12,0],
[13, 14, 15, 16,0],
[0, 0, 0, 0, 0]]]])
output, indices = pool(input)
assert (indices == jt.array([[6,8],[16,18]])).all()
out = unpool(output, indices, output_size=input.shape)
assert (out == jt.array([[[[ 0., 0., 0., 0., 0.],
[ 0., 6., 0., 8., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 14., 0., 16., 0.],
[ 0., 0., 0., 0., 0.]]]])).all()
def test_unpool_diff_kernel_stride(self):
from jittor import nn
pool = nn.MaxPool2d(3, stride=2, return_indices=True)
unpool = nn.MaxUnpool2d(3, stride=2)
input = jt.array([[[[ 1., 2, 3, 4, 0],
[ 5, 6, 7, 8, 0],
[ 9, 10, 11, 12,0],
[13, 14, 16, 15,0],
[0, 0, 0, 0, 0]]]])
output, indices = pool(input)
out = unpool(output, indices, output_size=input.shape)
assert (out == jt.array([[[
[ 0., 0., 0., 0., 0.,],
[ 0., 0., 0., 0., 0.,],
[ 0., 0., 11., 12., 0.,],
[ 0., 0., 32., 0., 0.,],
[ 0., 0., 0., 0., 0.,]]]])).all()
@unittest.skipIf(not jt.compiler.has_cuda, "No cuda found")
@jt.flag_scope(use_cuda=1)
def test_cuda_avg_pool(self):
self.test_cpu_avg_pool()
def test_cpu_avg_pool(self):
from torch.nn import AvgPool2d
jt_model = Pool(2, 2, 0, op="mean", ceil_mode=True)
torch_model = AvgPool2d(2, 2, 0, ceil_mode=True)
shape = (2, 16, 33, 33)
check(jt_model, torch_model, shape, False)
def test_cpu_avg_pool2(self):
from torch.nn import AvgPool2d
jt_model = Pool(3, 1, 1, op="mean", ceil_mode=True)
torch_model = AvgPool2d(3, 1, 1, ceil_mode=True)
shape = (2, 16, 33, 33)
check(jt_model, torch_model, shape, False)
def test_AvgPool2d(self):
from torch.nn import AvgPool2d as t_AvgPool2d
jt_model = AvgPool2d(3, 1, 1, ceil_mode=True)
torch_model = t_AvgPool2d(3, 1, 1, ceil_mode=True)
shape = (2, 16, 33, 33)
check(jt_model, torch_model, shape, False)
jt_model = AvgPool2d(3, 1, 1, ceil_mode=True, count_include_pad=False)
torch_model = t_AvgPool2d(3, 1, 1, ceil_mode=True, count_include_pad=False)
shape = (2, 16, 100, 100)
check(jt_model, torch_model, shape, False)
print('finish')
def test_avg_pool2d(self):
from torch.nn.functional import avg_pool2d as t_avg_pool2d
arr = np.random.random((2, 16, 33, 33))
jt_model = avg_pool2d(jt.array(arr), 3, 1, 1, ceil_mode=True)
torch_model = t_avg_pool2d(torch.Tensor(arr), 3, 1, 1, ceil_mode=True)
assert np.allclose(jt_model.numpy(), torch_model.numpy())
jt_model = avg_pool2d(jt.array(arr), 3, 1, 1, ceil_mode=True, count_include_pad=False)
torch_model = t_avg_pool2d(torch.Tensor(arr), 3, 1, 1, ceil_mode=True, count_include_pad=False)
assert np.allclose(jt_model.numpy(), torch_model.numpy())
print('finish')
def test_MaxPool2d(self):
from torch.nn import MaxPool2d
jt_model = j_MaxPool2d(3, 1, 1, ceil_mode=True)
torch_model = MaxPool2d(3, 1, 1, ceil_mode=True)
shape = (2, 16, 33, 33)
check(jt_model, torch_model, shape, False)
print('finish')
def test_max_pool2d(self):
from torch.nn.functional import max_pool2d
arr = np.random.random((2, 16, 33, 33))
jt_model = j_max_pool2d(jt.array(arr), 3, 1, 1, ceil_mode=True)
torch_model = max_pool2d(torch.Tensor(arr), 3, 1, 1, ceil_mode=True)
assert np.allclose(jt_model.numpy(), torch_model.numpy())
jt_model = j_max_pool2d(jt.array(arr), 3, 1, 1)
torch_model = max_pool2d(torch.Tensor(arr), 3, 1, 1)
assert np.allclose(jt_model.numpy(), torch_model.numpy())
def test_pool_3d(self):
from torch.nn.functional import max_pool2d
arr = np.random.random((2, 16, 20, 20, 20)).astype("float32")
# arr = np.random.random((1, 1, 1, 5, 5)).astype("float32")
jin = jt.array(arr)
tin = torch.Tensor(arr)
tin.requires_grad = True
jt_model = jt.nn.Pool3d(3,1,1)(jin)
torch_model = torch.nn.MaxPool3d(3,1,1)(tin)
assert np.allclose(jt_model.numpy(), torch_model.detach().numpy())
nout = np.random.random(tuple(jt_model.shape)).astype("float32")
jout = jt_model * nout
tout = torch_model * torch.Tensor(nout)
dj = jt.grad(jout, jin)
tout.sum().backward()
dt = tin.grad
assert np.allclose(dj.numpy(), dt.numpy())
@unittest.skipIf(not jt.compiler.has_cuda, "No cuda found")
@jt.flag_scope(use_cuda=1)
def test_cuda_pool_3d(self):
self.test_pool_3d()
if __name__ == "__main__":
unittest.main()
|
Python
|
CL
|
e1704013bb2b9a715dc595c1792f1dfa1d3d674de687f00dcbc083c0e0b9c800
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005,2006 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
def setup():
autotools.autoconf()
shelltools.export("CFLAGS","%s -fsigned-char" % get.CFLAGS())
autotools.configure("--disable-static \
--enable-ogg-vorbis \
--enable-mad \
--enable-lame \
--enable-gsm \
--enable-oss-dsp \
--enable-alsa-dsp \
--enable-fast-ulaw \
--enable-fast-alaw \
--with-samplerate \
--with-flac \
--with-sndfile \
--enable-largefile")
def build():
autotools.make()
def install():
autotools.install()
pisitools.dodoc("Changelog", "README", "TODO", "*.txt")
|
Python
|
CL
|
1e80e1797bfe01c0f57fefa43bfa163f537058290581e848488dfc3b9a3d2f15
|
# -*- coding:utf-8 -*-
import os
import sys
import signal
import argparse
import json
import time
from phystats.logger import logger
from phystats.daemonize import daemonizef
from phystats.collector.power_info import power_info
from phystats.repeat_timer import RepeatTimer
from phystats.kafkah.kafka_helper import KafkaHelper
parser = argparse.ArgumentParser()
parser.add_argument('--kafka_host', default="localhost", type=str, help="kafka host")
parser.add_argument('--kafka_port', default="9092", type=str, help="kafka port")
parser.add_argument('--kafka_topic', default="phystats", type=str, help="kafka topic")
parser.add_argument('--collect_interval', default=5.0, type=float, help="metric collect interval")
parser.add_argument('--cmd_args', default="ipmitool", type=str, nargs='+', help="command used to collect power data, e.g. 'sudo ipmitool sdr elist'")
parser.add_argument('--filters', default="Power", type=str, help="key word to filter raw data")
parser.add_argument('--daemon', action='store_true', help="daemon mode")
parser.add_argument('--daemon_action', default='start', type=str, choices=['start', 'stop'],
help="start/stop daemon process")
args = parser.parse_args()
def get_phy_power_info():
msgs = []
try:
filters = args.filters.strip().split(",")
msgs = power_info(args.cmd_args, filters)
except Exception as e:
logger.warn("Get pysical power info failed! Exception: {}".format(e))
logger.info("Number of physical power information messages: {}".format(len(msgs)))
for msg in msgs:
logger.debug("phy_power_info: {}".format(msg))
kafka_helper = KafkaHelper(topic=args.kafka_topic, host=args.kafka_host, port=args.kafka_port)
kafka_helper.send_msg_list(msgs, topic=None)
if __name__ == '__main__':
logger.info("Main thread start!")
# get_k8s_cluster_info()
# get_metrics()
for k in list(vars(args).keys()):
logger.info('{}: {}'.format(k, vars(args)[k]))
# import sys
# sys.exit(0)
PIDFILE = '/tmp/phy_power_info_daemon.pid'
if args.daemon:
if args.daemon_action == 'start':
try:
daemonizef(PIDFILE,
stdout='/tmp/phy_power_info_daemon.log',
stderr='/tmp/phy_power_info_daemon.log')
except RuntimeError as e:
print(e, file=sys.stderr)
raise SystemExit(1)
            # Main routine that runs inside the daemon process
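            # RepeatTimer re-invokes get_phy_power_info every collect_interval seconds.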
power_info_timer = RepeatTimer(args.collect_interval, get_phy_power_info)
power_info_timer.start()
elif args.daemon_action == 'stop':
if os.path.exists(PIDFILE):
with open(PIDFILE) as f:
os.kill(int(f.read()), signal.SIGTERM)
else:
print('Not running', file=sys.stderr)
raise SystemExit(1)
else:
print('Unknown command {!r}'.format(sys.argv[1]), file=sys.stderr)
raise SystemExit(1)
else:
power_info_timer = RepeatTimer(args.collect_interval, get_phy_power_info)
power_info_timer.start()
|
Python
|
CL
|
556dbb5225d94eb1dbe6edf749027c2e4aec287b5acc073f77d2c070e809ef5f
|
# Chris Hicks 2020
#
# Selection of the ith order statistic (e.g. smallest or median) of an array is a
# fundamentally easier problem than sorting and can be done in linear time!
#
# Input: A file of integers, one per line, first line specifies size and i.
# Output: The ith order statistic of the input array of integers.
import sys
import random
import numpy as np
nComparisons = 0
# Partition the array about a pivot element p such that the output comprises
# a reshuffled array in which all elements left of p are less than it,
# and all elements right of p are greater than it.
# Returns the final position of p in the array
def partition(array):
global nComparisons
n = len(array)
# Choose a pivot index p_idx from array uniformly "at random"
p_idx = random.randint(0, n-1)
# Swap the pivot into the first position of the array
array[0], array[p_idx] = array[p_idx], array[0]
pivot_value = array[0]
i = 1
for j in range(1, len(array)):
nComparisons += 1
if array[j] < pivot_value:
# Swap array[j] with array[i]
array[j], array[i] = array[i], array[j]
i += 1
    # Swap the pivot_value into its final place
array[0], array[i-1] = array[i-1], array[0]
return (i-1)
# Linear time method for finding the ith order statistic of an input array
def rselect(array, i):
n = len(array)
if n == 1:
return array[0]
# Partition array around a random pivot and return the index of the new pivot
p = partition(array)
if p == i: # Once p==i then we have found the ith order statistic
return array[p]
elif p > i: # Recurse onto left side of array only
return rselect(array[0:p], i)
else: # Recurse onto right side only, applying offset to i
return rselect(array[p+1:], i - p - 1)
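# Example (illustrative): rselect(np.array([5, 2, 9, 1, 7]), 2) returns 5,
# the third-smallest element, since i is zero-indexed.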
def main():
try:
arraysize, i = [int(val) for val in input().split(' ')]
except:
print("First line of input must specify array size and the ith order to be found.")
        sys.exit(1)
array = np.array([0]*arraysize)
try:
for idx in range(arraysize):
array[idx] = int(input())
except:
print("Only integers allowed in input, please check input file.")
        sys.exit(1)
ith_statistic_value = rselect(array, i)
print("The {}th order statistic is {}.".format(i, ith_statistic_value))
if __name__ == '__main__':
main()
|
Python
|
CL
|
1a9d43ec4ac8626dd35dc59d1324d5b127d6acf9de80ce1269a785ded8891ea3
|
import math
import numpy
import numpy.random as nrand
"""
Note - for some of the metrics the absolute value is returned. This is because if the risk (loss) is higher we want to
discount the expected excess return from the portfolio by a higher amount. Therefore risk should be positive.
"""
def vol(returns):
# Return the standard deviation of returns
return numpy.std(returns)
def beta(returns, market):
# Create a matrix of [returns, market]
m = numpy.matrix([returns, market])
    # Return the covariance of returns with the market divided by the variance of the market returns
    return numpy.cov(m)[0][1] / numpy.var(market)
def lpm(returns, threshold, order):
# This method returns a lower partial moment of the returns
    # Create an array the same length as returns containing the minimum return threshold
threshold_array = numpy.empty(len(returns))
threshold_array.fill(threshold)
# Calculate the difference between the threshold and the returns
diff = threshold_array - returns
# Set the minimum of each to 0
diff = diff.clip(min=0)
    # Return the average of the differences raised to the power of order
return numpy.sum(diff ** order) / len(returns)
def hpm(returns, threshold, order):
# This method returns a higher partial moment of the returns
    # Create an array the same length as returns containing the minimum return threshold
threshold_array = numpy.empty(len(returns))
threshold_array.fill(threshold)
# Calculate the difference between the returns and the threshold
diff = returns - threshold_array
# Set the minimum of each to 0
diff = diff.clip(min=0)
    # Return the average of the differences raised to the power of order
return numpy.sum(diff ** order) / len(returns)
def var(returns, alpha):
# This method calculates the historical simulation var of the returns
sorted_returns = numpy.sort(returns)
# Calculate the index associated with alpha
index = int(alpha * len(sorted_returns))
# VaR should be positive
return abs(sorted_returns[index])
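# Example: with alpha=0.05 and 100 returns, index = 5, so VaR is roughly the
# 5th-percentile return, reported as a positive loss.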
def cvar(returns, alpha):
    # This method calculates the conditional VaR of the returns
sorted_returns = numpy.sort(returns)
# Calculate the index associated with alpha
index = int(alpha * len(sorted_returns))
# Calculate the total VaR beyond alpha
sum_var = sorted_returns[0]
for i in range(1, index):
sum_var += sorted_returns[i]
# Return the average VaR
# CVaR should be positive
return abs(sum_var / index)
def prices(returns, base):
# Converts returns into prices
s = [base]
for i in range(len(returns)):
s.append(base * (1 + returns[i]))
return numpy.array(s)
def dd(returns, tau):
# Returns the draw-down given time period tau
values = prices(returns, 100)
pos = len(values) - 1
pre = pos - tau
drawdown = float('+inf')
# Find the maximum drawdown given tau
while pre >= 0:
dd_i = (values[pos] / values[pre]) - 1
if dd_i < drawdown:
drawdown = dd_i
pos, pre = pos - 1, pre - 1
# Drawdown should be positive
return abs(drawdown)
def max_dd(returns):
# Returns the maximum draw-down for any tau in (0, T) where T is the length of the return series
max_drawdown = float('-inf')
for i in range(0, len(returns)):
drawdown_i = dd(returns, i)
if drawdown_i > max_drawdown:
max_drawdown = drawdown_i
# Max draw-down should be positive
return abs(max_drawdown)
def average_dd(returns, periods):
# Returns the average maximum drawdown over n periods
drawdowns = []
for i in range(0, len(returns)):
drawdown_i = dd(returns, i)
drawdowns.append(drawdown_i)
drawdowns = sorted(drawdowns)
total_dd = abs(drawdowns[0])
for i in range(1, periods):
total_dd += abs(drawdowns[i])
return total_dd / periods
def average_dd_squared(returns, periods):
# Returns the average maximum drawdown squared over n periods
drawdowns = []
for i in range(0, len(returns)):
drawdown_i = math.pow(dd(returns, i), 2.0)
drawdowns.append(drawdown_i)
drawdowns = sorted(drawdowns)
total_dd = abs(drawdowns[0])
for i in range(1, periods):
total_dd += abs(drawdowns[i])
return total_dd / periods
def treynor_ratio(er, returns, market, rf):
return (er - rf) / beta(returns, market)
def sharpe_ratio(er, returns, rf):
return (er - rf) / vol(returns)
def information_ratio(returns, benchmark):
diff = returns - benchmark
return numpy.mean(diff) / vol(diff)
def modigliani_ratio(er, returns, benchmark, rf):
np_rf = numpy.empty(len(returns))
np_rf.fill(rf)
rdiff = returns - np_rf
bdiff = benchmark - np_rf
return (er - rf) * (vol(rdiff) / vol(bdiff)) + rf
def excess_var(er, returns, rf, alpha):
return (er - rf) / var(returns, alpha)
def conditional_sharpe_ratio(er, returns, rf, alpha):
return (er - rf) / cvar(returns, alpha)
def omega_ratio(er, returns, rf, target=0):
return (er - rf) / lpm(returns, target, 1)
def sortino_ratio(er, returns, rf, target=0):
return (er - rf) / math.sqrt(lpm(returns, target, 2))
def kappa_three_ratio(er, returns, rf, target=0):
    return (er - rf) / math.pow(lpm(returns, target, 3), 1.0 / 3.0)
def gain_loss_ratio(returns, target=0):
return hpm(returns, target, 1) / lpm(returns, target, 1)
def upside_potential_ratio(returns, target=0):
return hpm(returns, target, 1) / math.sqrt(lpm(returns, target, 2))
def calmar_ratio(er, returns, rf):
return (er - rf) / max_dd(returns)
def sterling_ratio(er, returns, rf, periods):
return (er - rf) / average_dd(returns, periods)
def burke_ratio(er, returns, rf, periods):
return (er - rf) / math.sqrt(average_dd_squared(returns, periods))
def test_risk_metrics():
# This is just a testing method
r = nrand.uniform(-1, 1, 50)
m = nrand.uniform(-1, 1, 50)
print("vol =", vol(r))
print("beta =", beta(r, m))
print("hpm(0.0)_1 =", hpm(r, 0.0, 1))
print("lpm(0.0)_1 =", lpm(r, 0.0, 1))
print("VaR(0.05) =", var(r, 0.05))
print("CVaR(0.05) =", cvar(r, 0.05))
print("Drawdown(5) =", dd(r, 5))
print("Max Drawdown =", max_dd(r))
def test_risk_adjusted_metrics():
# Returns from the portfolio (r) and market (m)
r = nrand.uniform(-1, 1, 50)
m = nrand.uniform(-1, 1, 50)
# Expected return
e = numpy.mean(r)
# Risk free rate
f = 0.06
# Risk-adjusted return based on Volatility
print("Treynor Ratio =", treynor_ratio(e, r, m, f))
print("Sharpe Ratio =", sharpe_ratio(e, r, f))
print("Information Ratio =", information_ratio(r, m))
# Risk-adjusted return based on Value at Risk
print("Excess VaR =", excess_var(e, r, f, 0.05))
print("Conditional Sharpe Ratio =", conditional_sharpe_ratio(e, r, f, 0.05))
# Risk-adjusted return based on Lower Partial Moments
print("Omega Ratio =", omega_ratio(e, r, f))
print("Sortino Ratio =", sortino_ratio(e, r, f))
print("Kappa 3 Ratio =", kappa_three_ratio(e, r, f))
print("Gain Loss Ratio =", gain_loss_ratio(r))
print("Upside Potential Ratio =", upside_potential_ratio(r))
# Risk-adjusted return based on Drawdown risk
print("Calmar Ratio =", calmar_ratio(e, r, f))
print("Sterling Ratio =", sterling_ration(e, r, f, 5))
print("Burke Ratio =", burke_ratio(e, r, f, 5))
if __name__ == "__main__":
test_risk_metrics()
test_risk_adjusted_metrics()
|
Python
|
CL
|
5cae5743e66147a8febae09faf34f1d3d0c0df427c47b22de9b857dd9da6c338
|
import base64
import os
from argparse import Namespace
from unittest import TestCase
from unittest.mock import MagicMock, patch
from onelogin_aws_cli import OneloginAWS
TEST_ROOT = os.path.join(os.path.dirname(__file__), "fixtures")
class TestOneloginAWS(TestCase):
ROLE_PREFIX = "arn:aws:iam::123456789012:role/OneLogin-MyRole"
PRVD_PREFIX = "arn:aws:iam::123456789012:saml-provider/OneLogin-MyProvider"
def setUp(self):
"""
        Set up mock SAML and a base OneloginAWS object
"""
with open(os.path.join(TEST_ROOT, 'saml_single_role.xml'), 'rb') as fp:
self.SAML_SINGLE_ROLE = base64.b64encode(fp.read())
with open(os.path.join(TEST_ROOT, 'saml_multi_role.xml'), 'rb') as fp:
self.SAML_MULTI_ROLE = base64.b64encode(fp.read())
self.ol = OneloginAWS(dict(
base_uri="https://api.us.onelogin.com/",
client_id='mock-id',
client_secret='mock-secret',
username='mock-username',
duration_seconds=2600,
ip_address='1.2.3.4',
auto_determine_ip_address=False
))
self.ol_with_role = OneloginAWS(dict(
base_uri="https://api.us.onelogin.com/",
client_id='mock-id',
client_secret='mock-secret',
username='mock-username',
duration_seconds=2600,
role_arn='arn:aws:iam::123456789012:role/OneLogin-MyRole1',
))
def test_init(self):
mock_config = dict(
base_uri="https://api.us.onelogin.com/",
client_id='mock-id',
client_secret='mock-secret',
username='mock-username',
duration_seconds=2600
)
ol = OneloginAWS(mock_config)
self.assertEqual(mock_config, ol.config)
self.assertEqual('mock-username', ol.user_credentials.username)
def test_get_ip_address(self):
self.ol.saml = Namespace(saml_response=self.SAML_SINGLE_ROLE)
ip_address = self.ol.get_ip_address()
self.assertEqual('1.2.3.4', ip_address)
def test_get_arns(self):
self.ol.saml = Namespace(saml_response=self.SAML_SINGLE_ROLE)
self.ol.get_arns()
self.assertEqual(
[(self.ROLE_PREFIX, self.PRVD_PREFIX)],
self.ol.all_roles
)
def test_get_arns_multi(self):
self.ol.saml = Namespace(saml_response=self.SAML_MULTI_ROLE)
self.ol.get_arns()
self.assertEqual(
(self.ROLE_PREFIX + '0', self.PRVD_PREFIX + '0'),
self.ol.all_roles[0]
)
self.assertEqual(
(self.ROLE_PREFIX + '1', self.PRVD_PREFIX + '1'),
self.ol.all_roles[1]
)
self.assertEqual(
(self.ROLE_PREFIX + '2', self.PRVD_PREFIX + '1'),
self.ol.all_roles[2]
)
def test_get_role(self):
self.ol.saml = Namespace(saml_response=self.SAML_SINGLE_ROLE)
self.ol.get_role()
self.assertEqual(self.ROLE_PREFIX, self.ol.role_arn)
self.assertEqual(self.PRVD_PREFIX, self.ol.principal_arn)
def test_get_role_multi(self):
self.ol.saml = Namespace(saml_response=self.SAML_MULTI_ROLE)
with patch('builtins.input', side_effect=['3']):
self.ol.get_role()
self.assertEqual(self.ROLE_PREFIX + "2", self.ol.role_arn)
self.assertEqual(self.PRVD_PREFIX + "1", self.ol.principal_arn)
    def test_get_role_preselected(self):
self.ol_with_role.saml = Namespace(
saml_response=self.SAML_SINGLE_ROLE,
)
self.ol_with_role.get_role()
# Should ignore a bad preselection.
self.assertEqual(self.ROLE_PREFIX, self.ol_with_role.role_arn)
self.assertEqual(self.PRVD_PREFIX, self.ol_with_role.principal_arn)
def test_get_role_multi_preselected(self):
self.ol_with_role.saml = Namespace(
saml_response=self.SAML_MULTI_ROLE,
)
self.ol_with_role.get_role()
self.assertEqual(self.ROLE_PREFIX + "1", self.ol_with_role.role_arn)
self.assertEqual(
self.PRVD_PREFIX + "1",
self.ol_with_role.principal_arn,
)
def test_get_role_fail(self):
self.ol.all_roles = []
self.ol.get_arns = MagicMock()
with self.assertRaisesRegex(Exception, r'^No roles found$'):
self.ol.get_role()
def test__initialize_credentials(self):
with patch('os.path.expanduser', side_effect=[
'/home/.aws/credentials', '/home/.aws/']):
with patch('os.path.exists', side_effect=[True]):
cred_file = self.ol._initialize_credentials()
self.assertEqual('/home/.aws/credentials', cred_file)
def test__initialize_credentials_env_var(self):
os.environ['AWS_SHARED_CREDENTIALS_FILE'] = 'mock-file'
cred_file = self.ol._initialize_credentials()
self.assertEqual('mock-file', cred_file)
del os.environ['AWS_SHARED_CREDENTIALS_FILE']
|
Python
|
CL
|
18ba467bdd504d155ce13e3ac1dc6234c0420ae5cd7689dbffa7db156b29d072
|
'''
Created on Feb 21, 2018
@author: dgrewal
'''
import pypeliner
import pypeliner.managed as mgd
from wgs.utils import helpers
def create_vcf2maf_workflow(
vcf_file,
maf_file,
reference,
vep_fasta_suffix,
vep_ncbi_build,
vep_cache_version,
vep_species,
tumour_id=None,
normal_id=None
):
workflow = pypeliner.workflow.Workflow()
workflow.transform(
name='split_vcf',
func='wgs.workflows.vcf2maf.tasks.split_vcf',
ctx=helpers.get_default_ctx(
memory=15,
walltime='8:00', ),
args=(
mgd.InputFile(vcf_file),
mgd.TempOutputFile('split.vcf', 'split')
),
kwargs={'lines_per_file': 20000}
)
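    # Splitting the input VCF into 20000-line chunks lets the vcf2maf transform
    # below fan out over the 'split' axis and process the chunks in parallel.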
workflow.transform(
name='vcf2maf',
func='wgs.workflows.vcf2maf.tasks.run_vcf2maf',
ctx=helpers.get_default_ctx(
memory=15,
walltime='8:00', ),
axes=('split',),
args=(
mgd.TempInputFile('split.vcf', 'split'),
mgd.TempOutputFile('maf_file.maf', 'split'),
mgd.TempSpace('vcf2maf_temp', 'split'),
reference,
vep_fasta_suffix,
vep_ncbi_build,
vep_cache_version,
vep_species
)
)
workflow.transform(
name='merge_maf',
ctx=helpers.get_default_ctx(
memory=15,
walltime='8:00', ),
func='wgs.workflows.vcf2maf.tasks.merge_mafs',
args=(
mgd.TempInputFile('maf_file.maf', 'split'),
mgd.TempOutputFile('maf_file_merged.maf')
)
)
workflow.transform(
name='update_ids',
func='wgs.workflows.vcf2maf.tasks.update_ids',
ctx=helpers.get_default_ctx(
memory=15,
walltime='8:00', ),
args=(
mgd.TempInputFile('maf_file_merged.maf'),
tumour_id,
normal_id,
mgd.OutputFile(maf_file),
)
)
return workflow
|
Python
|
CL
|
0c2d600c41e98ad57154aea85ee8cbd8e58b7c954d0b7fdf00bef904eba65d0d
|
import os
import requests
import re
from tqdm import tqdm
import zipfile
import numpy as np
# consts
DATA_DIR = 'data'
RAW_DATA_DIR = os.path.join(DATA_DIR, 'raw')
METRICS_DATA_DIR = os.path.join(DATA_DIR, 'with_metrics')
RESULTS_DIR = 'results'
EXPERIMENTS_DIR = os.path.join(DATA_DIR, 'experiments')
LABEL_VAL_FIELD = 'label_value'
LABEL_NAME_FIELD = 'label_name'
LABEL_PREFIX = 'label_'
METRIC_FIELD_PREFIX = 'metric_'
def dict_print(d, indent=0):
# code from https://stackoverflow.com/questions/3229419/how-to-pretty-print-nested-dictionaries
for key, value in d.items():
print('\t' * indent + str(key))
if isinstance(value, dict):
dict_print(value, indent+1)
else:
print('\t' * (indent+1) + str(value))
def parse_path_list(path_str, default_path, file_extension='.csv'):
csv_list = []
input_split = [default_path] if path_str == '' else path_str.split(',')
for path in input_split:
if os.path.isfile(path) and path.endswith(file_extension):
csv_list.append(path)
elif os.path.isdir(path):
for subdir, dirs, files in os.walk(path):
for file in files:
sub_path = os.path.join(subdir, file)
if os.path.isfile(sub_path) and sub_path.endswith(file_extension):
csv_list.append(sub_path)
else:
            raise FileNotFoundError('[{}] does not exist.'.format(path))
return csv_list
def CamelCase2snake_case(string):
# code from https://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case
return re.sub(r'(?<!^)(?=[A-Z])', '_', string).lower()
def represents_int(s):
# code from https://stackoverflow.com/questions/1265665/
# how-can-i-check-if-a-string-represents-an-int-without-using-try-except
try:
int(s)
return True
except ValueError:
return False
def lines_to_ngrams(lines, n=3):
ngrams = []
for s in lines:
words = [e for e in s.replace('.','').replace('\n','').split(' ') if e != '']
ngrams.append([tuple(words[i:i + n]) for i in range(len(words) - n + 1)])
return ngrams
def stringify_keys(d):
"""Convert a dict's keys to strings if they are not."""
# code from https://stackoverflow.com/questions/12734517/json-dumping-a-dict-throws-typeerror-keys-must-be-a-string
for key in d.keys():
# check inner dict
if isinstance(d[key], dict):
value = stringify_keys(d[key])
else:
value = d[key]
# convert nonstring to string if needed
if not isinstance(key, str):
# delete old key
del d[key]
try:
d[str(key)] = value
except Exception:
try:
d[repr(key)] = value
except Exception:
raise
return d
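# Example: stringify_keys({1: {2: 'x'}}) -> {'1': {'2': 'x'}}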
def download_and_place_data():
if not os.path.exists(DATA_DIR):
url = 'http://diversity-eval.s3-us-west-2.amazonaws.com/data.zip'
target_zip = 'data.zip'
response = requests.get(url, stream=True)
# download
print('Downloading data from [{}]...'.format(url))
with open(target_zip, "wb") as handle:
for data in tqdm(response.iter_content(), unit='B', unit_scale=True, unit_divisor=1024):
handle.write(data)
# place
with zipfile.ZipFile(target_zip, 'r') as zip_ref:
zip_ref.extractall('.')
os.remove(target_zip)
def optimal_classification_accuracy(group_1, group_2):
"""
find optimal classification accuracy in 1d feature space by exhaustively checking all separators.
:param group_1: list of 1d data points
:param group_2: list of 1d data points
    :return: optimal classification accuracy (oca) and classification threshold (th)
"""
accuracy_list = []
th_list = []
all_samples = group_1 + group_2
for separator in all_samples:
group_1_left = sum([v <= separator + 1e-5 for v in group_1])
group_2_right = sum([v > separator + 1e-5 for v in group_2])
acc = (group_1_left + group_2_right) / len(all_samples)
th_list.append(separator)
accuracy_list.append(acc if acc > 0.5 else 1 - acc)
best_separator_idx = np.argmax(accuracy_list)
oca = accuracy_list[best_separator_idx]
th = th_list[best_separator_idx]
return oca, th
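# Example (hypothetical toy data): two overlapping 1-d groups.
#
#   >>> optimal_classification_accuracy([0.1, 0.2, 0.3], [0.25, 0.4, 0.5])
#   (0.8333333333333334, 0.2)
#
# i.e. a single threshold at 0.2 separates the two groups with ~83% accuracy.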
if __name__ == '__main__':
pass
|
Python
|
CL
|
78bfecbf006fefb322df707d48dd664b280501c21fd08dd952358329c305b796
|
"""Logging utility
"""
from ..helper.models import MetricUnit
from .logger import (
log_metric,
logger_inject_lambda_context,
logger_inject_process_booking_sfn,
logger_setup,
)
__all__ = [
"logger_setup",
"logger_inject_lambda_context",
"logger_inject_process_booking_sfn",
"log_metric",
"MetricUnit",
]
|
Python
|
CL
|
cc8a90343cb167e91c038ef6b247eedce9fa135505c2182b61dc7eeecfe472a5
|
#!/usr/bin/python
import logging
import logging.config
import sys
import importlib
from ansible.module_utils.basic import AnsibleModule
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import datetime
from ibmsecurity.appliance.isdsappliance import ISDSAppliance
from ibmsecurity.appliance.isdsappliance_adminproxy import ISDSApplianceAdminProxy
from ibmsecurity.appliance.ibmappliance import IBMError
from ibmsecurity.user.applianceuser import ApplianceUser
logger = logging.getLogger(sys.argv[0])
def main():
module = AnsibleModule(
argument_spec=dict(
log=dict(required=False, default='INFO', choices=['DEBUG', 'INFO', 'ERROR', 'CRITICAL']),
appliance=dict(required=True),
lmi_port=dict(required=False, default=443, type='int'),
action=dict(required=True),
force=dict(required=False, default=False, type='bool'),
username=dict(required=False),
            password=dict(required=True, no_log=True),
isdsapi=dict(required=False, type='dict'),
adminProxyProtocol=dict(required=False, default='https', choices=['http','https']),
adminProxyHostname=dict(required=False),
adminProxyPort=dict(required=False, default=443, type='int'),
adminProxyApplianceShortName=dict(required=False, default=False, type='bool'),
omitAdminProxy=dict(required=False, default=False, type='bool')
),
supports_check_mode=True
)
module.debug('Started isds module')
# Process all Arguments
logLevel = module.params['log']
force = module.params['force']
action = module.params['action']
appliance = module.params['appliance']
lmi_port = module.params['lmi_port']
username = module.params['username']
password = module.params['password']
adminProxyProtocol = module.params['adminProxyProtocol']
adminProxyHostname = module.params['adminProxyHostname']
adminProxyPort = module.params['adminProxyPort']
adminProxyApplianceShortName = module.params['adminProxyApplianceShortName']
omitAdminProxy = module.params['omitAdminProxy']
# Setup logging for format, set log level and redirect to string
strlog = StringIO()
DEFAULT_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '[%(asctime)s] [PID:%(process)d TID:%(thread)d] [%(levelname)s] [%(name)s] [%(funcName)s():%(lineno)s] %(message)s'
},
},
'handlers': {
'default': {
'level': logLevel,
'formatter': 'standard',
'class': 'logging.StreamHandler',
'stream': strlog
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': logLevel,
'propagate': True
},
'requests.packages.urllib3.connectionpool': {
'handlers': ['default'],
'level': 'ERROR',
'propagate': True
}
}
}
logging.config.dictConfig(DEFAULT_LOGGING)
    # Create user credentials object to be used for all calls
if username == '' or username is None:
u = ApplianceUser(password=password)
else:
u = ApplianceUser(username=username, password=password)
# Create appliance object to be used for all calls
# if adminProxy hostname is set, use the ISDSApplianceAdminProxy
if adminProxyHostname == '' or adminProxyHostname is None or omitAdminProxy:
isds_server = ISDSAppliance(hostname=appliance, user=u, lmi_port=lmi_port)
else:
isds_server = ISDSApplianceAdminProxy(adminProxyHostname=adminProxyHostname, user=u, hostname=appliance, adminProxyProtocol=adminProxyProtocol, adminProxyPort=adminProxyPort, adminProxyApplianceShortName=adminProxyApplianceShortName)
# Create options string to pass to action method
options = 'isdsAppliance=isds_server, force=' + str(force)
if module.check_mode is True:
options = options + ', check_mode=True'
if isinstance(module.params['isdsapi'], dict):
try:
basestring
except NameError:
basestring = (str, bytes)
try:
for key, value in module.params['isdsapi'].iteritems():
if isinstance(value, basestring):
options = options + ', ' + key + '="' + value + '"'
else:
options = options + ', ' + key + '=' + str(value)
except AttributeError:
for key, value in module.params['isdsapi'].items():
if isinstance(value, basestring):
options = options + ', ' + key + '="' + value + '"'
else:
options = options + ', ' + key + '=' + str(value)
module.debug('Option to be passed to action: ' + options)
# Dynamically process the action to be invoked
# Simple check to restrict calls to just "isds" ones for safety
if action.startswith('ibmsecurity.isds.'):
try:
module_name, method_name = action.rsplit('.', 1)
module.debug('Action method to be imported from module: ' + module_name)
module.debug('Action method name is: ' + method_name)
mod = importlib.import_module(module_name)
func_ptr = getattr(mod, method_name) # Convert action to actual function pointer
func_call = 'func_ptr(' + options + ')'
startd = datetime.datetime.now()
# Execute requested 'action'
ret_obj = eval(func_call)
endd = datetime.datetime.now()
delta = endd - startd
ret_obj['stdout'] = strlog.getvalue()
            ret_obj['stdout_lines'] = strlog.getvalue().splitlines()
ret_obj['start'] = str(startd)
ret_obj['end'] = str(endd)
ret_obj['delta'] = str(delta)
ret_obj['cmd'] = action + "(" + options + ")"
ret_obj['ansible_facts'] = isds_server.facts
module.exit_json(**ret_obj)
except ImportError:
module.fail_json(name=action, msg='Error> action belongs to a module that is not found!',
log=strlog.getvalue())
except AttributeError:
module.fail_json(name=action, msg='Error> invalid action was specified, method not found in module!',
log=strlog.getvalue())
except TypeError:
module.fail_json(name=action,
msg='Error> action does not have the right set of arguments or there is a code bug! Options: ' + options,
log=strlog.getvalue())
except IBMError as e:
module.fail_json(name=action, msg=str(e), log=strlog.getvalue())
else:
        module.fail_json(name=action, msg='Error> invalid action specified, it must start with "ibmsecurity.isds."!',
log=strlog.getvalue())
if __name__ == '__main__':
main()
|
Python
|
CL
|
d3b8bb8b136ad1568ef78dfb188153c163307439ac19947435bce036dfabbcb7
|
import torch
import numpy as np
class PartialDataset(torch.utils.data.Dataset):
def __init__(self, parent_ds, offset, length):
self.parent_ds = parent_ds
self.offset = offset
self.length = length
        assert len(parent_ds) >= offset + length, "Parent Dataset not long enough"
super(PartialDataset, self).__init__()
def __len__(self):
return self.length
def __getitem__(self, i):
return self.parent_ds[i + self.offset]
def validation_split(dataset, val_share=0.1):
"""
    Split a (training and validation combined) dataset into training and validation.
    Note that to be statistically sound, the items in the dataset should be statistically
    independent (e.g. not sorted by class, and without several copies of the same sample that
    could end up in either set).
    inputs:
       dataset: ("training") dataset to split into training and validation
       val_share: fraction of validation data (should be 0 < val_share < 1, default: 0.1)
    returns: input dataset split into train_ds, val_ds
"""
val_offset = int(len(dataset) * (1 - val_share))
return PartialDataset(dataset, 0, val_offset), PartialDataset(dataset, val_offset, len(dataset) - val_offset)
class PartialFolder(torch.utils.data.Dataset):
def __init__(self, parent_ds, perm, length):
self.parent_ds = parent_ds
self.perm = perm
self.length = length
super(PartialFolder, self).__init__()
def __len__(self):
return self.length
def __getitem__(self, i):
return self.parent_ds[self.perm[i]]
def validation_split_folder(dataset, val_share=0.1):
"""
    Split a (training and validation combined) dataset into training and validation.
    Note that to be statistically sound, the items in the dataset should be statistically
    independent (e.g. not sorted by class, and without several copies of the same sample that
    could end up in either set).
    inputs:
       dataset: ("training") dataset to split into training and validation
       val_share: fraction of validation data (should be 0 < val_share < 1, default: 0.1)
    returns: input dataset split into train_ds, val_ds
"""
num_train = int(len(dataset) * (1 - val_share))
num_val = len(dataset) - num_train
perm = np.asarray(range(len(dataset)))
np.random.seed(0)
np.random.shuffle(perm)
train_perm, val_perm = perm[:num_train], perm[num_train:]
return PartialFolder(dataset, train_perm, num_train), PartialFolder(dataset, val_perm, num_val)
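# A minimal usage sketch with a hypothetical in-memory dataset (not part of the original module).
if __name__ == '__main__':
    from torch.utils.data import TensorDataset
    toy_ds = TensorDataset(torch.arange(100).float().unsqueeze(1), torch.arange(100))
    train_ds, val_ds = validation_split(toy_ds, val_share=0.2)
    print(len(train_ds), len(val_ds))  # 80 20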
|
Python
|
CL
|
16875da198ea78345a88a083be1b2e7e1f9820bc4267879fb7180f766188ccdd
|
# --------------------------------------------------------
# Author: James Griffiths
# Date Created: Monday, 1st July 2019
# Version: 1.0
# --------------------------------------------------------
# Challenge six ------------------------------------------
# Description: http://www.pythonchallenge.com/pc/def/peak.html
# I have absolutely no idea what this means... a quick Google on 'peak hill' returns posts on
# 'peak hell', pronounced like 'pickle'. I quickly left before seeing too much!
# So, investigate what Pickle is in Python...
# I found this post pretty good:
# https://www.geeksforgeeks.org/understanding-python-pickling-example/
# There is a source file embedded within the page source:
# http://www.pythonchallenge.com/pc/def/banner.p
# So I'll use this to lay some pickle magic down...
from urllib.request import urlopen
import pickle
h = urlopen("http://www.pythonchallenge.com/pc/def/banner.p")
data = pickle.load(h)
for line in data:
print("".join([k * v for k, v in line]))
# I had to look up how to turn the unpickled data into a visual representation:
# each row is a list of (character, count) tuples, so repeating each character
# 'count' times reconstructs one line of the banner.
|
Python
|
CL
|
159148330f91192bc7a76c8d49754a86dba4c2fc7539b343d83404bc37155a90
|
class RunningModeEnum:
RANDOMIZATION = "data_randomization"
REACTION_BASED_SLICING = "reaction_based_slicing"
DUPLICATE_REMOVAL = "duplicate_removal"
STATS_EXTRACTION = "stats_extraction"
FILE_SHUFFLING = "file_shuffling"
REACTION_VALIDATION = "reaction_validation"
REAGENT_VALIDATION = "reagent_validation"
VALIDATION_SCAFFOLD_SELECTION = "validation_scaffold_selection"
TENSORBOARD_LOG_EXTRACTION = "tensorboard_log_extraction"
SCAFFOLD_MEMORY_ANALYSIS = "scaffold_memory_analysis"
DECORATION_SIMILARITY = "decoration_similarity"
VALIDATION_SET_SIMILARITIES = "scaffold_similarity"
VALIDATION_SET_FILTERING = "filtering"
VALIDATION_SET_SLICED = "sliced_split"
    # try to find the requested name among the defined mode values and return it;
    # __getattr__ is only consulted after normal attribute lookup fails
    def __getattr__(self, name):
        if name in vars(type(self)).values():
            return name
        raise AttributeError(name)
# prohibit any attempt to set any values
def __setattr__(self, key, value):
raise ValueError("No changes allowed.")
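# Usage sketch: modes can be read either by constant name or by value string.
#
#   >>> modes = RunningModeEnum()
#   >>> modes.RANDOMIZATION
#   'data_randomization'
#   >>> modes.data_randomization   # resolved through __getattr__
#   'data_randomization'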
|
Python
|
CL
|
d9ef4ab6138fe9218012680c1cc647b09c7fbc514ab92c021d7a21ed5faf6729
|
import tensorflow as tf
import numpy as np
import pandas as pd
import csv
import re
# Load data and prepare the input layers.
with open('test.csv', 'r', encoding='utf-8', newline='') as f:
reader = csv.reader(f)
test_sentence = []
test_label = []
for row in reader:
test_sentence.append(row[1])
if row[0] == 'postive':
test_label.append([1, 0])
else:
test_label.append([0, 1])
test_label = np.array(test_label, dtype=int)
with open('train.csv', 'r', encoding='utf-8', newline='') as f:
reader = csv.reader(f)
train_sentence = []
train_label = []
for row in reader:
train_sentence.append(row[1])
if row[0] == 'postive':
train_label.append([1, 0])
else:
train_label.append([0, 1])
train_label = np.array(train_label, dtype=int)
# Although the sentences in the train and test data already look clean, a cleaning step is applied to remove
# any punctuation, parentheses, question marks, etc., leaving only alphanumeric characters.
# All words are also converted to lower case.
remove_special_chars = re.compile("[^A-Za-z0-9 ]+")
def clean(sentence):
return re.sub(remove_special_chars, "", sentence.lower())
# Load the word vectors from word-vectors.txt into a data frame, used for both train and test sets.
# Embedding matrix: the word vectors of every word in each sentence, over all sentences.
wordVectors = pd.read_csv('word-vectors.txt', index_col=0, header=None)
max_words = 100
word_dim = 50
train_embedding_matrix = np.zeros((len(train_sentence), max_words, word_dim), dtype=np.float32)
for i in range(len(train_sentence)):
indexCounter = 0
clean_sentence = clean(train_sentence[i])
split_sentence = clean_sentence.split()
for word in split_sentence:
try:
train_embedding_matrix[i, indexCounter, :] = wordVectors.loc[word]
except KeyError:
pass
indexCounter = indexCounter + 1
if indexCounter >= max_words:
break
test_embedding_matrix = np.zeros((len(test_sentence), max_words, word_dim), dtype=np.float32)
for i in range(len(test_sentence)):
indexCounter = 0
clean_sentence = clean(test_sentence[i])
split_sentence = clean_sentence.split()
for word in split_sentence:
try:
test_embedding_matrix[i, indexCounter, :] = wordVectors.loc[word]
except KeyError:
pass
indexCounter = indexCounter + 1
if indexCounter >= max_words:
break
# Creating the vanilla RNN, LSTM, GRU models
# Specifying hyper-parameters
batchSize = 100
Units = 64 # number of units in RNNs
numClasses = 2
epoch = 1
# Input variables (place holders), wordVectors of a sentence (input x) and y_ (labels)
x = tf.placeholder(dtype=tf.float32, shape=[None, max_words, word_dim])
y_ = tf.placeholder(dtype=tf.float32, shape=[None, numClasses])
# Choose one recurrent cell type; only the last assignment takes effect, so the
# unused alternatives are kept commented out here.
# RNNCell = tf.contrib.rnn.BasicLSTMCell(Units)
# RNNCell = tf.nn.rnn_cell.BasicRNNCell(Units)
RNNCell = tf.contrib.rnn.GRUCell(Units)
RNNCell = tf.contrib.rnn.DropoutWrapper(cell=RNNCell, output_keep_prob=0.75)
value, _ = tf.nn.dynamic_rnn(RNNCell, x, dtype=tf.float32)
# output layer for classification task
weight = tf.Variable(tf.truncated_normal([Units, numClasses]))
# weight = tf.get_variable('w_output', shape=[lstmUnits, numClasses])
bias = tf.Variable(tf.constant(0.1, shape=[numClasses]))
# bias = tf.get_variable('bias', shape=[numClasses], initializer=tf.constant_initializer(0.1))
value = tf.transpose(value, [1, 0, 2])
last = tf.gather(value, int(value.get_shape()[0]) - 1)
prediction = (tf.matmul(last, weight) + bias)
# Training model
# Use Adam as the optimizer
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y_))
train_step = tf.train.AdamOptimizer().minimize(cross_entropy)
# Evaluate model
# Accuracy
correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
'''
import datetime
tf.summary.scalar('Loss', cross_entropy)
tf.summary.scalar('Accuracy', accuracy)
merged = tf.summary.merge_all()
logdir = "tensorboard/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + "/"
writer = tf.summary.FileWriter(logdir, sess.graph)
'''
# Train the model over number epochs and batch size
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for k in range(epoch):
counter = 0
for i in range(int(len(train_sentence)/batchSize)):
nextBatch = train_embedding_matrix[counter:counter+batchSize, :]
nextBatch_label = train_label[counter: counter+batchSize, :]
train_step.run(feed_dict={x: nextBatch, y_: nextBatch_label})
counter += batchSize
            if counter % 5000 == 0:
train_accuracy = accuracy.eval(feed_dict={x: nextBatch, y_: nextBatch_label})
print('After training %d examples, training accuracy %g' % (counter, train_accuracy))
print('Test accuracy %g' % accuracy.eval(feed_dict={
x: test_embedding_matrix, y_: test_label}))
|
Python
|
CL
|
1882c676ed8ee20c5e47a09cf96e3917427058a4b066b574cc56546da7a557dc
|
import csv
from contextlib import contextmanager
from pathlib import Path
from time import time
from typing import Iterator
from neat.reporting import BaseReporter
from neat_improved.neat.evaluator import GymEvaluator
_SPECIES = 'species'
_POPULATION = 'population'
_FIELDS = {
_SPECIES: (
'iteration',
'num_frames',
'time_in_s',
'species_id',
'size',
'age',
'stagnation',
'fitness',
'adjusted_fitness',
),
_POPULATION: (
'iteration',
'num_frames',
'time_in_s',
'individual_id',
'species_id',
'fitness',
'num_nodes',
'num_connections',
),
}
class FileReporter(BaseReporter):
def __init__(
self,
save_dir_path: Path,
evaluator: GymEvaluator,
):
self.evaluator = evaluator
self.generation = 0
self.start_time = None
self.save_dir_path = save_dir_path
self.save_dir_path.mkdir(exist_ok=True, parents=True)
for key, fieldnames in _FIELDS.items():
with self._get_writer(key, fieldnames, 'w') as file:
file.writeheader()
@contextmanager
def _get_writer(self, filename, fieldnames, mode) -> Iterator[csv.DictWriter]:
file = (self.save_dir_path / (filename + '.csv')).open(mode)
try:
yield csv.DictWriter(file, fieldnames)
finally:
file.close()
def end_generation(self, config, population, species_set):
self.generation += 1
def post_evaluate(self, config, population, species, best_genome):
if self.start_time is None:
self.start_time = time()
with self._get_writer(_POPULATION, _FIELDS[_POPULATION], 'a') as writer:
for key, individual in population.items():
species_id = species.get_species_id(key)
writer.writerow(
{
'iteration': self.generation,
'individual_id': key,
'species_id': species_id,
'fitness': individual.fitness,
'num_nodes': len(individual.nodes),
'num_connections': len(individual.connections),
'num_frames': self.evaluator.num_frames,
'time_in_s': time() - self.start_time,
}
)
with self._get_writer(_SPECIES, _FIELDS[_SPECIES], 'a') as writer:
for key, specie in species.species.items():
writer.writerow(
{
'iteration': self.generation,
'species_id': key,
'size': len(specie.members),
'age': self.generation - specie.created,
'stagnation': self.generation - specie.last_improved,
'fitness': specie.fitness,
'adjusted_fitness': specie.adjusted_fitness,
'num_frames': self.evaluator.num_frames,
'time_in_s': time() - self.start_time,
}
)
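# Usage sketch (assumption: an existing neat.Population `pop` and a GymEvaluator instance):
#
#   from pathlib import Path
#   reporter = FileReporter(Path('results/run_0'), evaluator)
#   pop.add_reporter(reporter)
#
# population.csv and species.csv in the save directory are then appended to after
# each generation is evaluated.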
|
Python
|
CL
|
ae5f914a1e600b276449cefcc99937dcbc9b3337740d6a244bd6ad5bed2ce476
|
import pandas as pd
import numpy as np
import time
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.mixture import GaussianMixture
from sklearn import metrics
from sklearn import preprocessing
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.tree import DecisionTreeClassifier
from sklearn.decomposition import FastICA
from sklearn.decomposition import FactorAnalysis
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
import scipy
from sklearn import random_projection
from cluster_func import em
from cluster_func import kmeans
from sklearn.neural_network import MLPClassifier
data = pd.read_csv('winequality-data.csv')
X = data.iloc[:,:-2]
y = data.iloc[:,-2]
y = y > 6
#Splitting data into training and testing and keeping testing data aside
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.2)
n_classes = 7
##########################################################################################################
#PCA
print('PCA....')
time_pca = []
n_components_pca = range(1,12)
cv_score_pca = []
for comp_pca in n_components_pca:
#Reducing the dimensions with optimal number of components
pca_new = PCA(n_components = comp_pca)
pca_new.fit(X_train)
X_transformed_pca = pca_new.transform(X)
nodes_hidden_layer = int((comp_pca + n_classes)/2)
#neural network learner
t1 = time.time()
mlp = MLPClassifier(hidden_layer_sizes=(nodes_hidden_layer,),max_iter=500)
cv_score_pca.append(np.mean(cross_val_score(mlp, X_transformed_pca, y, cv = 3)))
t2 = time.time()
time_pca.append((t2 - t1))
print('Adding cluster label and checking accuracy')
#Adding a cluster label as a feature
cv_score_em_pca = []
cv_score_km_pca = []
clf_em = GaussianMixture(n_components=n_classes,covariance_type='spherical', max_iter= 500, init_params= 'kmeans')
clf_km = KMeans(n_clusters= n_classes, init='k-means++')
for comp_pca in n_components_pca:
nodes_hidden_layer = int((comp_pca + n_classes)/2)
#neural network learner
mlp = MLPClassifier(hidden_layer_sizes=(nodes_hidden_layer,),max_iter=500)
#Reducing the dimensions with optimal number of components
pca_new = PCA(n_components = comp_pca)
pca_new.fit(X_train)
X_transformed_pca = pca_new.transform(X)
clf_em.fit(X_transformed_pca)
cluster_em = clf_em.predict(X_transformed_pca)
cluster_em = np.array(cluster_em).reshape(-1,1)
X_transformed_em_pca = np.concatenate((X_transformed_pca, cluster_em), axis=1)
cv_score_em_pca.append(np.mean(cross_val_score(mlp, X_transformed_em_pca, y, cv = 3)))
clf_km.fit(X_transformed_pca)
cluster_km = clf_km.predict(X_transformed_pca)
cluster_km = np.array(cluster_km).reshape(-1,1)
X_transformed_km_pca = np.concatenate((X_transformed_pca, cluster_km), axis=1)
cv_score_km_pca.append(np.mean(cross_val_score(mlp, X_transformed_km_pca, y, cv = 3)))
#Plotting
fig1, ax1 = plt.subplots()
ax1.plot(n_components_pca, cv_score_pca, linewidth =2)
ax1.plot(n_components_pca, cv_score_em_pca, linewidth = 2)
ax1.plot(n_components_pca, cv_score_km_pca, linewidth = 2)
plt.legend(['without cluster label', 'with EM label', 'with KMeans label'])
plt.xlabel("Number of components")
plt.ylabel("Three fold Cross Validation score")
plt.title("Neural network accuracy with dimensionally reduced dataset using PCA")
plt.savefig("nn_plotwine/phish_nn_1")
#plt.show()
##########################################################################################################
#ICA
print('ICA...')
n_components_ica = range(1,12)
cv_score_ica = []
time_ica = []
for comp_ica in n_components_ica:
#Reducing the dimensions with optimal number of components
ica_new = FastICA(n_components = comp_ica)
ica_new.fit(X_train)
X_transformed_ica = ica_new.transform(X)
nodes_hidden_layer = int((comp_ica + n_classes)/2)
#neural network learner
t1 = time.time()
mlp = MLPClassifier(hidden_layer_sizes=(nodes_hidden_layer,),max_iter=500)
cv_score_ica.append(np.mean(cross_val_score(mlp, X_transformed_ica, y, cv = 3)))
t2 = time.time()
time_ica.append((t2 - t1))
#Adding a cluster label as a feature
print('Adding Cluster label and checking accuracy')
cv_score_em_ica = []
cv_score_km_ica = []
clf_em = GaussianMixture(n_components=n_classes, covariance_type='spherical', max_iter= 500, init_params= 'kmeans')
clf_km = KMeans(n_clusters= n_classes, init='k-means++')
for comp_ica in n_components_ica:
nodes_hidden_layer = int((comp_ica + n_classes)/2)
#neural network learner
mlp = MLPClassifier(hidden_layer_sizes=(nodes_hidden_layer,),max_iter=500)
#Reducing the dimensions with optimal number of components
ica_new = FastICA(n_components = comp_ica)
ica_new.fit(X_train)
X_transformed_ica = ica_new.transform(X)
clf_em.fit(X_transformed_ica)
cluster_em = clf_em.predict(X_transformed_ica)
cluster_em = np.array(cluster_em).reshape(-1,1)
X_transformed_em_ica = np.concatenate((X_transformed_ica, cluster_em), axis=1)
cv_score_em_ica.append(np.mean(cross_val_score(mlp, X_transformed_em_ica, y, cv = 3)))
clf_km.fit(X_transformed_ica)
cluster_km = clf_km.predict(X_transformed_ica)
cluster_km = np.array(cluster_km).reshape(-1,1)
X_transformed_km_ica = np.concatenate((X_transformed_ica, cluster_km), axis=1)
cv_score_km_ica.append(np.mean(cross_val_score(mlp, X_transformed_km_ica, y, cv = 3)))
#Reducing the dimensions with optimal number of components
fig2, ax2 = plt.subplots()
ax2.plot(n_components_ica, cv_score_ica, linewidth = 2)
ax2.plot(n_components_ica, cv_score_em_ica, linewidth = 2)
ax2.plot(n_components_ica, cv_score_km_ica, linewidth = 2)
plt.legend(['without cluster label', 'with EM label', 'with Kmeans label'])
plt.xlabel("Number of components")
plt.ylabel("Three fold Cross Validation score")
plt.title("Neural network accuracy with dimensionally reduced dataset using ICA")
plt.savefig("nn_plotwine/phish_nn_2")
#plt.show()
# ##########################################################################################################
#RP
print('RP...')
n_components_rp = range(1,12)
cv_score_rp = []
time_rp = []
for comp_rp in n_components_rp:
#Reducing the dimensions with optimal number of components
rp_new = random_projection.GaussianRandomProjection(n_components = comp_rp)
rp_new.fit(X_train)
X_transformed_rp = rp_new.transform(X)
nodes_hidden_layer = int((comp_rp + n_classes)/2)
#neural network learner
t1 = time.time()
mlp = MLPClassifier(hidden_layer_sizes=(nodes_hidden_layer,),max_iter=500)
cv_score_rp.append(np.mean(cross_val_score(mlp, X_transformed_rp, y, cv = 3)))
t2 = time.time()
time_rp.append((t2 - t1))
#Adding a cluster label as a feature
print('Adding cluster label and checking accuracy')
cv_score_em_rp = []
cv_score_km_rp = []
clf_em = GaussianMixture(n_components=n_classes, covariance_type='spherical', max_iter= 500, init_params= 'kmeans')
clf_km = KMeans(n_clusters= n_classes, init='k-means++')
for comp_rp in n_components_rp:
nodes_hidden_layer = int((comp_rp + n_classes)/2)
#neural network learner
mlp = MLPClassifier(hidden_layer_sizes=(nodes_hidden_layer,),max_iter=500)
#Reducing the dimensions with optimal number of components
rp_new = random_projection.GaussianRandomProjection(n_components = comp_rp)
rp_new.fit(X_train)
X_transformed_rp = rp_new.transform(X)
clf_em.fit(X_transformed_rp)
cluster_em = clf_em.predict(X_transformed_rp)
cluster_em = np.array(cluster_em).reshape(-1,1)
X_transformed_em_rp = np.concatenate((X_transformed_rp, cluster_em), axis=1)
cv_score_em_rp.append(np.mean(cross_val_score(mlp, X_transformed_em_rp, y, cv = 3)))
clf_km.fit(X_transformed_rp)
cluster_km = clf_km.predict(X_transformed_rp)
cluster_km = np.array(cluster_km).reshape(-1,1)
X_transformed_km_rp = np.concatenate((X_transformed_rp, cluster_km), axis=1)
cv_score_km_rp.append(np.mean(cross_val_score(mlp, X_transformed_km_rp, y, cv = 3)))
fig3, ax3 = plt.subplots()
ax3.plot(n_components_rp, cv_score_rp, linewidth= 2)
ax3.plot(n_components_rp, cv_score_em_rp, linewidth =2)
ax3.plot(n_components_rp, cv_score_km_rp, linewidth = 2)
plt.legend(['without cluster label', 'with EM label', 'with Kmeans label'])
plt.xlabel("Number of components")
plt.ylabel("Three fold Cross Validation score")
plt.title("Neural network accuracy with dimensionally reduced dataset using RP")
plt.savefig("nn_plotwine/phish_nn_3")
#plt.show()
# ##########################################################################################################
#fa
print('FA...')
n_components_fa = range(1,12)
cv_score_fa = []
time_fa = []
for comp_fa in n_components_fa:
#Reducing the dimensions with optimal number of components
fa_new = FactorAnalysis(n_components = comp_fa, max_iter = 100)
fa_new.fit(X_train)
X_transformed_fa = fa_new.transform(X)
nodes_hidden_layer = int((comp_fa + n_classes)/2)
#neural network learner
t1 = time.time()
mlp = MLPClassifier(hidden_layer_sizes=(nodes_hidden_layer,),max_iter=500)
cv_score_fa.append(np.mean(cross_val_score(mlp, X_transformed_fa, y, cv = 3)))
t2 = time.time()
time_fa.append((t2 - t1))
#Adding a cluster label as a feature
print('Adding cluster label and checking accuracy.')
cv_score_em_fa = []
cv_score_km_fa = []
clf_em = GaussianMixture(n_components=n_classes, covariance_type='spherical', max_iter= 500, init_params= 'kmeans')
clf_km = KMeans(n_clusters= n_classes, init='k-means++')
for comp_fa in n_components_fa:
nodes_hidden_layer = int((comp_fa + n_classes)/2)
#neural network learner
mlp = MLPClassifier(hidden_layer_sizes=(nodes_hidden_layer,),max_iter=500)
#Reducing the dimensions with optimal number of components
fa_new = FactorAnalysis(n_components = comp_fa, max_iter = 100)
fa_new.fit(X_train)
X_transformed_fa = fa_new.transform(X)
clf_em.fit(X_transformed_fa)
cluster_em = clf_em.predict(X_transformed_fa)
cluster_em = np.array(cluster_em).reshape(-1,1)
X_transformed_em_fa = np.concatenate((X_transformed_fa, cluster_em), axis=1)
cv_score_em_fa.append(np.mean(cross_val_score(mlp, X_transformed_em_fa, y, cv = 3)))
clf_km.fit(X_transformed_fa)
cluster_km = clf_km.predict(X_transformed_fa)
cluster_km = np.array(cluster_km).reshape(-1,1)
X_transformed_km_fa = np.concatenate((X_transformed_fa, cluster_km), axis=1)
cv_score_km_fa.append(np.mean(cross_val_score(mlp, X_transformed_km_fa, y, cv = 3)))
fig4, ax4 = plt.subplots()
ax4.plot(n_components_fa, cv_score_fa, linewidth= 2)
ax4.plot(n_components_fa, cv_score_em_fa, linewidth =2)
ax4.plot(n_components_fa, cv_score_km_fa, linewidth =2)
plt.legend(['without cluster label', 'with EM label', 'with Kmeans label'])
plt.xlabel("Number of components")
plt.ylabel("Three fold Cross Validation score")
plt.title("Neural network accuracy with dimensionally reduced dataset using FA")
plt.savefig("nn_plotwine/phish_nn_4")
#plt.show()
#############################################################################################################
#Plotting neural network time
#pca
print('plotting time graph')
fig5, ax5 = plt.subplots()
plt.plot(n_components_pca, time_pca, linewidth =2)
plt.plot(n_components_ica, time_ica, linewidth=2)
plt.plot(n_components_rp, time_rp, linewidth=2)
plt.plot(n_components_fa, time_fa, linewidth=2)
plt.legend(['PCA', 'ICA', 'RP', 'FA'])
plt.xlabel("Number of components")
plt.ylabel("Total training time for 3 fold CV")
plt.title("Neural network computation time after dimensionality reduction")
plt.savefig("nn_plotwine/phish_nn_5")
#plt.show()
|
Python
|
CL
|
0a71b37ec7d0a65b21cebb61aceecebb2b5f16e493be04647bc06e145539235a
|
# Copyright Contributors to the OpenCue Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Widget for entering a frame spec."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from cuesubmit.ui import Widgets
class FrameSpecWidget(Widgets.CueHelpWidget):
"""Widget for entering a frame spec."""
helpText = 'Enter a FrameSpec value.\n' \
'A frame spec consists of a start time, an optional end time, a step, ' \
'and an interleave.\n' \
'Multiple ranges can be added together by separating with commas.\n' \
' Ex:\n' \
' 1-10x3\n' \
' 1-10y3 // inverted step\n' \
' 10-1x-1\n' \
' 1 // same as "1-1x1"\n' \
' 1-10:5 // interleave of 5\n' \
' 1-5x2, 6-10 // 1 through 5 with a step of 2 and 6 through 10\n'
def __init__(self, parent=None):
super(FrameSpecWidget, self).__init__(parent)
self.frameSpecInput = Widgets.CueLabelLineEdit('Frame Spec:')
self.contentLayout.addWidget(self.frameSpecInput)
|
Python
|
CL
|
4b415d16c7f9da4ec90da3ae16d0afe5b6cc4dae60e03f1c1743e577db3fe32d
|
from core.views import BaseMedFileViewSet
from .models import Medicine
from .serializers import MedicineSerializer
class MedicineViewSet(BaseMedFileViewSet):
"""
CRUD options for medicine only for authenticated user.
"""
serializer_class = MedicineSerializer
queryset = Medicine.objects.all()
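# Usage sketch (assumption: a project-level urls.py wiring this viewset into a DRF router):
#
#   from rest_framework.routers import DefaultRouter
#   router = DefaultRouter()
#   router.register('medicines', MedicineViewSet)
#   urlpatterns = router.urls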
|
Python
|
CL
|
ab4465cf46799ed2cbfef4538258b2189fce3b448814e515bb59ce4554a01681
|
import collections
# Import Python wrapper for or-tools CP-SAT solver.
from ortools.sat.python import cp_model
def JobScheduling(plats_data):
"""Minimal platoon problem."""
# Create the model.
model = cp_model.CpModel()
nodes_count = 1 + max(task[0] for plat in plats_data for task in plat)
all_nodes = range(nodes_count)
# Computes horizon dynamically as the sum of all durations.
horizon = sum(task[1] for plat in plats_data for task in plat)
# Named tuple to store information about created variables.
task_type = collections.namedtuple('task_type', 'start end interval')
# Named tuple to manipulate solution information.
assigned_task_type = collections.namedtuple('assigned_task_type',
'start plat index duration')
# Creates plat intervals and add to the corresponding node lists.
all_tasks = {}
node_to_intervals = collections.defaultdict(list)
for plat_id, plat in enumerate(plats_data):
for task_id, task in enumerate(plat):
node = task[0]
duration = task[1]
suffix = '_%i_%i' % (plat_id, task_id)
start_var = model.NewIntVar(0, horizon, 'start' + suffix)
end_var = model.NewIntVar(0, horizon, 'end' + suffix)
interval_var = model.NewIntervalVar(start_var, duration, end_var,
'interval' + suffix)
all_tasks[plat_id, task_id] = task_type(start=start_var,
end=end_var,
interval=interval_var)
node_to_intervals[node].append(interval_var)
# Create and add disjunctive constraints.
for node in all_nodes:
model.AddNoOverlap(node_to_intervals[node])
# Precedences inside a plat.
for plat_id, plat in enumerate(plats_data):
for task_id in range(len(plat) - 1):
model.Add(all_tasks[plat_id, task_id +
1].start >= all_tasks[plat_id, task_id].end)
# Makespan objective.
obj_var = model.NewIntVar(0, horizon, 'makespan')
model.AddMaxEquality(obj_var, [
all_tasks[plat_id, len(plat) - 1].end
for plat_id, plat in enumerate(plats_data)
])
model.Minimize(obj_var)
# Solve model.
solver = cp_model.CpSolver()
status = solver.Solve(model)
if status == cp_model.OPTIMAL:
# Create one list of assigned tasks per node.
assigned_plats = collections.defaultdict(list)
for plat_id, plat in enumerate(plats_data):
for task_id, task in enumerate(plat):
node = task[0]
assigned_plats[node].append(
assigned_task_type(start=solver.Value(
all_tasks[plat_id, task_id].start),
plat=plat_id,
index=task_id,
duration=task[1]))
# Create per node output lines.
output = ''
for node in all_nodes:
# Sort by starting time.
assigned_plats[node].sort()
sol_line_tasks = 'node ' + str(node) + ': '
sol_line = ' '
for assigned_task in assigned_plats[node]:
name = 'plat_%i_%i' % (assigned_task.plat, assigned_task.index)
# Add spaces to output to align columns.
sol_line_tasks += '%-10s' % name
start = assigned_task.start
duration = assigned_task.duration
sol_tmp = '[%i,%i]' % (start, start + duration)
# Add spaces to output to align columns.
sol_line += '%-10s' % sol_tmp
sol_line += '\n'
sol_line_tasks += '\n'
output += sol_line_tasks
output += sol_line
# Finally print the solution found.
print('Optimal Schedule Length: %i' % solver.ObjectiveValue())
print(output)
def reservationSimulation(platoon, step):
rc = []
rta = []
rtd = []
s = platoon.getMaxSpeed()
dint = s * step
t = 0
while platoon.isInIntersection():
rc.append([cell not in rc for cell in platoon.getCells()])
rta.append(t - 1)
rtd.append(t + 1)
t += 1
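# A minimal usage sketch with hypothetical data (not part of the original module):
# each platoon is a list of (node_id, duration) tasks, and nodes are exclusive resources.
if __name__ == '__main__':
    example_plats = [
        [(0, 3), (1, 2), (2, 2)],   # platoon 0 crosses nodes 0 -> 1 -> 2
        [(0, 2), (2, 1), (1, 4)],   # platoon 1 crosses nodes 0 -> 2 -> 1
        [(1, 4), (2, 3)],           # platoon 2 crosses nodes 1 -> 2
    ]
    JobScheduling(example_plats)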
|
Python
|
CL
|
c3a207a21a9a13ad4863c99ff9ead2ef7afa301bfede5388e9cd28c312bd44c7
|
from __future__ import print_function
import logging
from argparse import ArgumentParser
import backoff
import requests
import sys
POLL_URL = 'https://coveralls.io/builds/{}.json'
DONE_URL = 'https://coveralls.io/webhook'
def setup_logging():
logger = logging.getLogger('backoff')
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
def message(args, covered, template):
print(template.format(
args.commit, covered, args.fail_under
))
def get_coverage(commit):
response = requests.get(POLL_URL.format(commit))
data = response.json()
return data['covered_percent']
def decorate(func, args):
interval = 10
return backoff.on_predicate(
backoff.constant,
interval=interval, max_tries=args.max_wait*60/interval,
jitter=lambda value: value,
)(func)
def ensure_parallel_done(args):
if args.parallel_build_number:
response = requests.post(
DONE_URL,
params={'repo_token': args.repo_token},
json={
"payload": {
"build_num": args.parallel_build_number,
"status": "done"
}
}
)
if response.status_code == 200:
print('Confirmed end of parallel build')
else:
print(
                'Attempt to confirm end of parallel build got {}:\n{}'.format(
response.status_code, response.content
)
)
sys.exit(1)
def parse_args():
parser = ArgumentParser()
parser.add_argument('commit', help='the commit hash to check')
parser.add_argument('--fail-under', type=float, default=100,
help='Exit with a status of 2 if the total coverage is '
'less than MIN.')
parser.add_argument('--max-wait', type=int, default=5,
help='Maximum time, in minutes, to wait for Coveralls '
'data. Defaults to 5.')
parser.add_argument('--parallel-build-number', type=int,
help='The build number, eg $TRAVIS_BUILD_NUMBER.')
parser.add_argument('--repo-token',
help='Required if --parallel-build-number is used and '
'should be the token use when POSTing back to '
'coveralls to mark the parallel build as done. '
'Should come from a secret.')
return parser.parse_args()
def main():
args = parse_args()
setup_logging()
ensure_parallel_done(args)
get_coverage_ = decorate(get_coverage, args)
covered = get_coverage_(args.commit)
if covered is None:
print('No coverage information available for {}'.format(args.commit))
sys.exit(1)
elif covered < args.fail_under:
message(args, covered, 'Failed coverage check for {} as {} < {}')
sys.exit(2)
else:
message(args, covered, 'Coverage OK for {} as {} >= {}')
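# Assumed entry point when the file is run directly as a script.
if __name__ == '__main__':
    main()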
|
Python
|
CL
|
44e43787d9868f6d90a38bcb7beff35e1911f4c78b47dc7c1f4eae8b217e63a0
|
"""
This is a Python 3 port of FORTRAN code from the EXPOKIT package.
gexpmv is a port of the _GEXPV routines.
See R.B. Sidje, ACM Trans. Math. Softw., 24(1):130-156, 1998
and http://www.maths.uq.edu.au/expokit
@author: Ashley Milsted
"""
from __future__ import absolute_import, division, print_function
import scipy as sp
import scipy.linalg as la
from math import sqrt, log10, copysign, trunc
def gexpmv(A, v, t, anorm, m=None, tol=0.0, w=None, verbose=False, itrace=0, mxstep=500, break_tol=None):
mxreject = 0
delta = 1.2
gamma = 0.9
if break_tol is None:
#break_tol = tol
break_tol = anorm*tol
n = A.shape[0]
if hasattr(A, "matvec"):
matvec = A.matvec
else:
matvec = lambda v: sp.dot(A,v)
if m is None:
m = min(20, n-1)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError("A is not a square matrix")
if m >= n or m <= 0:
raise ValueError("m is invalid")
k1 = 2
mh = m + 2
ibrkflag = 0
mbrkdwn = m
nmult = 0
nreject = 0
nexph = 0
nscale = 0
t_out = abs( t )
tbrkdwn = 0.0
step_min = t_out
step_max = 0.0
nstep = 0
s_error = 0.0
x_error = 0.0
t_now = 0.0
t_new = 0.0
avnorm = 0.0 #I think the EXPOKIT source relied on this being initialized to zero by the compiler
#pretty sure this just computes machine epsilon
eps = 0.0
p1 = 4.0/3.0
while eps == 0.0:
p2 = p1 - 1.0
p3 = 3*p2
eps = abs( p3-1.0 )
if tol <= eps:
tol = sqrt( eps )
rndoff = eps*anorm
sgn = copysign( 1.0, t )
if w is None: #allow supplying a starting vector
w = v.copy()
else:
w[:] = v
beta = la.norm(w)
vnorm = beta
hump = beta
#obtain the very first stepsize ...
SQR1 = sqrt( 0.1 )
xm = 1.0/m
p2 = tol*(((m+1)/sp.e)**(m+1)) * sqrt(2.0*sp.pi*(m+1))
t_new = (1.0/anorm) * (p2 / (4.0*beta*anorm))**xm
p1 = 10.0**(round( log10( t_new )-SQR1 )-1)
t_new = trunc( t_new/p1 + 0.55 ) * p1
#step-by-step integration ...
while t_now < t_out:
nstep = nstep + 1
t_step = min( t_out-t_now, t_new )
#initialize Krylov subspace
vs = sp.zeros((m+2,n), A.dtype)
vs[0,:] = w / beta
H = sp.zeros((mh,mh), A.dtype)
#Arnoldi loop ...
for j in range(1,m+1):
nmult = nmult + 1
vs[j,:] = matvec(vs[j-1,:])
for i in range(1,j+1):
#Compute overlaps of new vector Av with all other Kyrlov vectors
#(these are elements of an upper Hessenberg matrix)
hij = sp.vdot(vs[i-1,:], vs[j,:])
vs[j,:] -= hij * vs[i-1,:] #orthogonalize new vector. maybe switch to axpy
H[i-1,j-1] = hij #store matrix element
hj1j = la.norm( vs[j,:] )
#if the orthogonalized Krylov vector is zero, stop!
if hj1j <= break_tol:
print('breakdown: mbrkdwn =',j,' h =',hj1j)
k1 = 0
ibrkflag = 1
mbrkdwn = j
tbrkdwn = t_now
t_step = t_out-t_now
break
H[j,j-1] = hj1j
vs[j,:] *= 1.0/hj1j
if ibrkflag == 0: #if we didn't break down
nmult = nmult + 1
vs[m+1,:] = matvec(vs[m,:])
avnorm = la.norm( vs[m+1,:] )
#Orig: set 1 for the 2-corrected scheme
H[m+1, m] = 1.0
#loop while ireject<mxreject until the tolerance is reached
ireject = 0
#compute w = beta*V*exp(t_step*H)*e1 ...
#First compute expH for a good step size
while True:
nexph = nexph + 1
mx = mbrkdwn + k1 #max(mx) = m+2
#irreducible rational Pade approximation. scipy's implementation automatically chooses an order
expH = la.expm(sgn * t_step * H[:mx,:mx])
#nscale = nscale + ns #don't have this info
#local error estimation
if k1 == 0:
err_loc = tol
else:
p1 = abs( expH[m,0] ) * beta #wsp(iexph+m)
p2 = abs( expH[m+1,0] ) * beta * avnorm #FIXME: avnorm is not always defined....
if p1 > 10.0*p2:
err_loc = p2
xm = 1.0/m
elif p1 > p2:
err_loc = (p1*p2)/(p1-p2)
xm = 1.0/m
else:
err_loc = p1
xm = 1.0/(m-1)
#reject the step-size if the error is not acceptable ...
if ( (k1 != 0) and (err_loc > delta*t_step*tol) and
(mxreject == 0 or ireject < mxreject) ):
t_old = t_step
t_step = gamma * t_step * (t_step*tol/err_loc)**xm
p1 = 10.0**(round( log10( t_step )-SQR1 )-1)
t_step = trunc( t_step/p1 + 0.55 ) * p1
if verbose:
print('t_step = ',t_old)
print('err_loc = ',err_loc)
print('err_required = ',delta*t_old*tol)
print('stepsize rejected, stepping down to: ',t_step)
ireject = ireject + 1
nreject = nreject + 1
if mxreject != 0 and ireject > mxreject:
print("Failure in ZGEXPV: ---")
print("The requested tolerance is too high.")
print("Rerun with a smaller value.")
iflag = 2
return
else:
break #step size OK
#now update w = beta*V*exp(t_step*H)*e1 and the hump ...
mx = mbrkdwn + max( 0, k1-1 ) #max(mx) = m+1
w = beta * vs[:mx,:].T.dot(expH[:mx,0])
beta = la.norm(w)
hump = max( hump, beta )
#suggested value for the next stepsize ...
t_new = gamma * t_step * (t_step*tol/err_loc)**xm
p1 = 10.0**(round( log10( t_new )-SQR1 )-1)
t_new = trunc( t_new/p1 + 0.55 ) * p1
err_loc = max( err_loc, rndoff )
#update the time covered ...
t_now = t_now + t_step
#display and keep some information ...
if itrace != 0:
print('integration ', nstep, ' ---------------------------------')
#print('scale-square = ', nscale)
print('step_size = ', t_step)
print('err_loc = ',err_loc)
print('next_step = ',t_new)
step_min = min( step_min, t_step )
step_max = max( step_max, t_step )
s_error = s_error + err_loc
x_error = max( x_error, err_loc )
if mxstep == 0 or nstep < mxstep:
continue
iflag = 1
break
return w, nstep < mxstep, nstep, ibrkflag==1, mbrkdwn
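# A minimal usage sketch with a small random matrix (hypothetical data, not part of the
# original port). It assumes a SciPy version that still exposes the numpy aliases used
# above (sp.zeros, sp.vdot, sp.e, sp.pi), and compares against a dense matrix exponential.
if __name__ == '__main__':
    import numpy as np
    np.random.seed(0)
    A = np.diag([-1.0, -2.0, -3.0, -4.0, -5.0]) + 0.1 * np.random.rand(5, 5)
    v = np.ones(5)
    t = 0.5
    w, converged, nstep, broke_down, mbrk = gexpmv(A, v, t, anorm=np.linalg.norm(A, np.inf))
    print('converged:', converged, 'steps:', nstep)
    print('max abs error vs la.expm:', np.max(np.abs(w - la.expm(t * A).dot(v))))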
|
Python
|
CL
|
984bba16d85b4d0b6b7d0de5959fd6829765095e378a779ec411c1e60059d56a
|
#!/usr/bin/env python3
import mfutil
import os
import paho.mqtt.client as mqtt
import xattrfile
import signal
from acquisition.listener import AcquisitionListener
class ExtraDaemonMqttListener(AcquisitionListener):
client = None
plugin_name = "mqtt_listener"
daemon_name = "extra_daemon_mqtt_listener"
connected = False
def __init__(self):
super(ExtraDaemonMqttListener, self).__init__()
self.client = mqtt.Client()
self.client.on_message = self._on_message
self.client.on_connect = self._on_connect
self.client.on_disconnect = self._on_disconnect
signal.signal(signal.SIGTERM, self.__sigterm_handler)
def _on_connect(self, client, userdata, flags, rc):
self.info("the client is connected to the broker")
client.subscribe(self.args.subscription_topic)
def _on_disconnect(self, client, userdata, rc):
self.info("the client disconnected from the broker")
def _on_message(self, client, userdata, message):
self.info("message received on %s (size: %i)", message.topic,
len(message.payload))
self.debug("message qos: %s", message.qos)
self.debug("message retain flag: %s", message.retain)
self.debug("message info: %s", message.info)
basename = mfutil.get_unique_hexa_identifier()
filepath = os.path.join(self.args.dest_dir, basename)
tmp_filepath = ".".join((filepath, self.args.tmp_suffix))
with open(tmp_filepath, "wb") as f:
f.write(message.payload)
xaf = xattrfile.XattrFile(tmp_filepath)
self.set_tag(
xaf, "mqtt_listener_subscription_topic",
self.args.subscription_topic
)
self.set_tag(xaf, "mqtt_listener_received_topic", message.topic)
self.set_tag(
xaf, "mqtt_listener_broker_hostname", self.args.broker_hostname
)
self.set_tag(
xaf, "mqtt_listener_broker_port", str(self.args.broker_port)
)
self._set_before_tags(xaf)
xaf.rename(filepath)
def __sigterm_handler(self, *args):
self.info("SIGTERM signal handled => schedulling shutdown")
try:
self.client.disconnect()
except Exception:
pass
def add_extra_arguments(self, parser):
parser.add_argument(
"--broker-hostname",
action="store",
default="127.0.0.1",
help="the hostname or IP address of the remote broker. "
"Defaults to localhost",
)
parser.add_argument(
"--broker-port",
action="store",
default=1883,
type=int,
help="the network port of the server host to "
"connect to. Defaults to 1883.",
)
parser.add_argument(
"--keep-alive",
action="store",
default=60,
type=int,
help="maximum period in seconds allowed between"
" communications with the broker",
)
parser.add_argument(
"--dest-dir",
action="store",
help="destination directory of the file "
"made from the MQTT message",
)
parser.add_argument(
"--subscription-topic",
action="store",
default="#",
help="string specifying the subscription topic "
"to subscribe to. Default everybody",
)
parser.add_argument(
"--tmp-suffix",
action="store",
default="t",
help="temporary file suffix. Default t",
)
def listen(self):
self.info("Start daemon %s" % self.daemon_name)
self.debug("broker_hostname: %s" % self.args.broker_hostname)
self.debug("broker_port: %s" % self.args.broker_port)
self.debug("keep_alive: %s" % self.args.keep_alive)
self.debug("dest_dir: %s" % self.args.dest_dir)
self.debug("subscription-topic: %s" % self.args.subscription_topic)
try:
self.client.connect(self.args.broker_hostname,
port=self.args.broker_port)
except Exception:
self.warning(
"Can not connect to the broker %s on port %d"
% (self.args.broker_hostname, self.args.broker_port)
)
return
self.client.loop_forever()
self.info("Stopping daemon")
if __name__ == "__main__":
x = ExtraDaemonMqttListener()
x.run()
|
Python
|
CL
|
4a943d898b130c7cda382ff89c9858c7e034f73f685c9f4d5d4ce2baffbd03ec
|
'''
Hartree-Fock for periodic systems with k-point sampling
See Also:
hf.py : Hartree-Fock for periodic systems at a single k-point
'''
import time
import numpy as np
import scipy.special
import pyscf.dft
import pyscf.pbc.dft
import pyscf.pbc.scf.hf as pbchf
from pyscf.lib import logger
from pyscf.pbc import tools
from pyscf.pbc.scf import scfint
import pyscf.scf.hf  # molecular HF helpers (make_rdm1, get_init_guess, eig) used below
def get_ovlp(mf, cell, kpts):
'''Get the overlap AO matrices at sampled k-points.
Args:
kpts : (nkpts, 3) ndarray
Returns:
ovlp_kpts : (nkpts, nao, nao) ndarray
'''
nkpts = len(kpts)
nao = cell.nao_nr()
ovlp_kpts = np.zeros((nkpts,nao,nao), np.complex128)
for k in range(nkpts):
kpt = kpts[k,:]
if mf.analytic_int:
ovlp_kpts[k,:,:] = scfint.get_ovlp(cell, kpt)
else:
ovlp_kpts[k,:,:] = pbchf.get_ovlp(cell, kpt)
return ovlp_kpts
def get_hcore(mf, cell, kpts):
'''Get the core Hamiltonian AO matrices at sampled k-points.
Args:
kpts : (nkpts, 3) ndarray
Returns:
hcore : (nkpts, nao, nao) ndarray
'''
nao = cell.nao_nr()
nkpts = len(kpts)
hcore = np.zeros((nkpts,nao,nao), np.complex128)
for k in range(nkpts):
kpt = kpts[k,:]
if mf.analytic_int:
hcore[k,:,:] = scfint.get_hcore(cell, kpt)
else:
hcore[k,:,:] = pbchf.get_hcore(cell, kpt)
return hcore
def get_j(mf, cell, dm_kpts, kpts, kpt_band=None):
'''Get the Coulomb (J) AO matrix at sampled k-points.
Args:
dm_kpts : (nkpts, nao, nao) ndarray
Density matrix at each k-point
kpts : (nkpts, 3) ndarray
Kwargs:
kpt_band : (3,) ndarray
An arbitrary "band" k-point at which to evalute the matrix.
Returns:
vj : (nkpts, nao, nao) ndarray
vk : (nkpts, nao, nao) ndarray
'''
coords = pyscf.pbc.dft.gen_grid.gen_uniform_grids(cell)
nkpts = len(kpts)
ngs = len(coords)
nao = cell.nao_nr()
aoR_kpts = np.zeros((nkpts,ngs,nao), np.complex128)
for k in range(nkpts):
kpt = kpts[k,:]
aoR_kpts[k,:,:] = pyscf.pbc.dft.numint.eval_ao(cell, coords, kpt)
vjR = get_vjR_(cell, dm_kpts, aoR_kpts)
if kpt_band is not None:
aoR_kband = pyscf.pbc.dft.numint.eval_ao(cell, coords, kpt_band)
vj_kpts = cell.vol/ngs * np.dot(aoR_kband.T.conj(),
vjR.reshape(-1,1)*aoR_kband)
else:
vj_kpts = np.zeros((nkpts,nao,nao), np.complex128)
for k in range(nkpts):
vj_kpts[k,:,:] = cell.vol/ngs * np.dot(aoR_kpts[k,:,:].T.conj(),
vjR.reshape(-1,1)*aoR_kpts[k,:,:])
return vj_kpts
def get_jk(mf, cell, dm_kpts, kpts, kpt_band=None):
'''Get the Coulomb (J) and exchange (K) AO matrices at sampled k-points.
Args:
dm_kpts : (nkpts, nao, nao) ndarray
Density matrix at each k-point
kpts : (nkpts, 3) ndarray
Kwargs:
kpt_band : (3,) ndarray
An arbitrary "band" k-point at which to evalute the matrix.
Returns:
vj : (nkpts, nao, nao) ndarray
vk : (nkpts, nao, nao) ndarray
'''
coords = pyscf.pbc.dft.gen_grid.gen_uniform_grids(cell)
nkpts = len(kpts)
ngs = len(coords)
nao = cell.nao_nr()
aoR_kpts = np.zeros((nkpts,ngs,nao), np.complex128)
for k in range(nkpts):
kpt = kpts[k,:]
aoR_kpts[k,:,:] = pyscf.pbc.dft.numint.eval_ao(cell, coords, kpt)
vjR = get_vjR_(cell, dm_kpts, aoR_kpts)
if kpt_band is not None:
aoR_kband = pyscf.pbc.dft.numint.eval_ao(cell, coords, kpt_band)
vj_kpts = cell.vol/ngs * np.dot(aoR_kband.T.conj(),
vjR.reshape(-1,1)*aoR_kband)
vk_kpts = np.zeros((nao,nao), np.complex128)
for k2 in range(nkpts):
kpt2 = kpts[k2,:]
vkR_k1k2 = pbchf.get_vkR_(mf, cell, aoR_kband, aoR_kpts[k2,:,:],
kpt_band, kpt2)
aoR_dm_k2 = np.dot(aoR_kpts[k2,:,:], dm_kpts[k2,:,:])
tmp_Rq = np.einsum('Rqs,Rs->Rq', vkR_k1k2, aoR_dm_k2)
vk_kpts += 1./nkpts * (cell.vol/ngs) \
* np.dot(aoR_kband.T.conj(), tmp_Rq)
#vk_kpts += 1./nkpts * (cell.vol/ngs) * np.einsum('rs,Rp,Rqs,Rr->pq',
# dm_kpts[k2,:,:], aoR_kband.conj(),
# vkR_k1k2, aoR_kpts[k2,:,:])
else:
vj_kpts = np.zeros((nkpts,nao,nao), np.complex128)
for k in range(nkpts):
vj_kpts[k,:,:] = cell.vol/ngs * np.dot(aoR_kpts[k,:,:].T.conj(),
vjR.reshape(-1,1)*aoR_kpts[k,:,:])
aoR_dm_kpts = np.zeros((nkpts,ngs,nao), np.complex128)
for k in range(nkpts):
aoR_dm_kpts[k,:,:] = np.dot(aoR_kpts[k,:,:], dm_kpts[k,:,:])
vk_kpts = np.zeros((nkpts,nao,nao), np.complex128)
for k1 in range(nkpts):
kpt1 = kpts[k1,:]
for k2 in range(nkpts):
kpt2 = kpts[k2,:]
vkR_k1k2 = pbchf.get_vkR_(mf, cell, aoR_kpts[k1,:,:], aoR_kpts[k2,:,:],
kpt1, kpt2)
tmp_Rq = np.einsum('Rqs,Rs->Rq', vkR_k1k2, aoR_dm_kpts[k2,:,:])
vk_kpts[k1,:,:] += 1./nkpts * (cell.vol/ngs) \
* np.dot(aoR_kpts[k1,:,:].T.conj(), tmp_Rq)
#vk_kpts[k1,:,:] += 1./nkpts * (cell.vol/ngs) * np.einsum('rs,Rp,Rqs,Rr->pq',
# dm_kpts[k2,:,:], aoR_kpts[k1,:,:].conj(),
# vkR_k1k2, aoR_kpts[k2,:,:])
return vj_kpts, vk_kpts
def get_vjR_(cell, dm_kpts, aoR_kpts):
'''Get the real-space Hartree potential of the k-point sampled density matrix.
Returns:
vR : (ngs,) ndarray
The real-space Hartree potential at every grid point.
'''
nkpts, ngs, nao = aoR_kpts.shape
coulG = tools.get_coulG(cell)
rhoR = np.zeros(ngs)
for k in range(nkpts):
rhoR += 1./nkpts*pyscf.pbc.dft.numint.eval_rho(cell, aoR_kpts[k,:,:], dm_kpts[k,:,:])
rhoG = tools.fft(rhoR, cell.gs)
vG = coulG*rhoG
vR = tools.ifft(vG, cell.gs)
return vR
def get_fock_(mf, h1e_kpts, s1e_kpts, vhf_kpts, dm_kpts, cycle=-1, adiis=None,
diis_start_cycle=0, level_shift_factor=0, damp_factor=0):
'''Get the Fock matrices at sampled k-points.
This is a k-point version of pyscf.scf.hf.get_fock_
Returns:
fock : (nkpts, nao, nao) ndarray
'''
fock = np.zeros_like(h1e_kpts)
# By inheritance, this is just pyscf.scf.hf.get_fock_
fock = pbchf.RHF.get_fock_(mf, h1e_kpts, s1e_kpts,
vhf_kpts, dm_kpts,
cycle, adiis, diis_start_cycle,
level_shift_factor, damp_factor)
return fock
def make_rdm1(mo_coeff_kpts, mo_occ_kpts):
nkpts = len(mo_occ_kpts)
dm_kpts = np.zeros_like(mo_coeff_kpts)
for k in range(nkpts):
dm_kpts[k,:,:] = pyscf.scf.hf.make_rdm1(mo_coeff_kpts[k,:,:],
mo_occ_kpts[k,:]).T.conj()
return dm_kpts
#FIXME: project initial guess for k-point
def init_guess_by_chkfile(cell, chkfile_name, project=True):
'''Read the KHF results from checkpoint file, then project it to the
basis defined by ``cell``
Returns:
Density matrix, 3D ndarray
'''
#from pyscf.pbc.scf import addons
mo = pyscf.pbc.scf.chkfile.load(chkfile_name, 'scf/mo_coeff')
mo_occ = pyscf.pbc.scf.chkfile.load(chkfile_name, 'scf/mo_occ')
#def fproj(mo):
# if project:
# return addons.project_mo_nr2nr(chk_cell, mo, cell)
# else:
# return mo
dm = make_rdm1(mo, mo_occ)
return dm
class KRHF(pbchf.RHF):
'''RHF class with k-point sampling.
Compared to molecular SCF, some members such as mo_coeff, mo_occ
now have an additional first dimension for the k-points,
e.g. mo_coeff is (nkpts, nao, nao) ndarray
Attributes:
kpts : (nks,3) ndarray
The sampling k-points in Cartesian coordinates, in units of 1/Bohr.
'''
def __init__(self, cell, kpts, exxdiv='ewald'):
pbchf.RHF.__init__(self, cell, kpts, exxdiv=exxdiv)
self.kpts = kpts
self.mo_occ = []
self.mo_coeff_kpts = []
if cell.ke_cutoff is not None:
raise RuntimeError("ke_cutoff not supported with K pts yet")
self.exx_built = False
if self.exxdiv == 'vcut_ws':
self.precompute_exx()
def dump_flags(self):
pbchf.RHF.dump_flags(self)
if self.exxdiv == 'vcut_ws':
if self.exx_built is False:
self.precompute_exx()
logger.info(self, 'WS alpha = %s', self.exx_alpha)
def precompute_exx(self):
print "# Precomputing Wigner-Seitz EXX kernel"
from pyscf.pbc import gto as pbcgto
Nk = tools.get_monkhorst_pack_size(self.cell, self.kpts)
print "# Nk =", Nk
kcell = pbcgto.Cell()
kcell.atom = 'H 0. 0. 0.'
kcell.spin = 1
kcell.unit = 'B'
kcell.h = self.cell._h * Nk
Lc = 1.0/np.linalg.norm(np.linalg.inv(kcell.h.T), axis=0)
print "# Lc =", Lc
Rin = Lc.min() / 2.0
print "# Rin =", Rin
# ASE:
alpha = 5./Rin # sqrt(-ln eps) / Rc, eps ~ 10^{-11}
kcell.gs = np.array([2*int(L*alpha*3.0) for L in Lc])
# QE:
#alpha = 3./Rin * np.sqrt(0.5)
#kcell.gs = (4*alpha*np.linalg.norm(kcell.h,axis=0)).astype(int)
print "# kcell.gs FFT =", kcell.gs
kcell.build(False,False)
vR = tools.ifft( tools.get_coulG(kcell), kcell.gs )
kngs = len(vR)
print "# kcell kngs =", kngs
rs = pyscf.pbc.dft.gen_grid.gen_uniform_grids(kcell)
corners = np.dot(np.indices((2,2,2)).reshape((3,8)).T, kcell._h.T)
for i, rv in enumerate(rs):
# Minimum image convention to corners of kcell parallelepiped
r = np.linalg.norm(rv-corners, axis=1).min()
if np.isclose(r, 0.):
vR[i] = 2*alpha / np.sqrt(np.pi)
else:
vR[i] = scipy.special.erf(alpha*r) / r
vG = (kcell.vol/kngs) * tools.fft(vR, kcell.gs)
self.exx_alpha = alpha
self.exx_kcell = kcell
self.exx_q = kcell.Gv
self.exx_vq = vG
self.exx_built = True
print "# Finished precomputing"
def get_init_guess(self, cell=None, key='minao'):
if cell is None: cell = self.cell
if key.lower() == '1e':
return self.init_guess_by_1e(cell)
elif key.lower() == 'chkfile':
return self.init_guess_by_chkfile()
else:
dm = pyscf.scf.hf.get_init_guess(cell, key)
nao = cell.nao_nr()
nkpts = len(self.kpts)
dm_kpts = np.zeros((nkpts,nao,nao), np.complex128)
# Use the molecular "unit cell" dm for each k-point
for k in range(nkpts):
dm_kpts[k,:,:] = dm
return dm_kpts
def get_hcore(self, cell=None, kpts=None):
if cell is None: cell = self.cell
if kpts is None: kpts = self.kpts
return get_hcore(self, cell, kpts)
def get_ovlp(self, cell=None, kpts=None):
if cell is None: cell = self.cell
if kpts is None: kpts = self.kpts
return get_ovlp(self, cell, kpts)
def get_j(self, cell=None, dm_kpts=None, hermi=1, kpt=None, kpt_band=None):
# Must use 'kpt' kwarg
if cell is None: cell = self.cell
if kpt is None: kpt = self.kpts
kpts = kpt
if dm_kpts is None: dm_kpts = self.make_rdm1()
cpu0 = (time.clock(), time.time())
vj = get_j(self, cell, dm_kpts, kpts, kpt_band)
logger.timer(self, 'vj', *cpu0)
return vj
def get_jk(self, cell=None, dm_kpts=None, hermi=1, kpt=None, kpt_band=None):
# Must use 'kpt' kwarg
if cell is None: cell = self.cell
if kpt is None: kpt = self.kpts
kpts = kpt
if dm_kpts is None: dm_kpts = self.make_rdm1()
cpu0 = (time.clock(), time.time())
vj, vk = get_jk(self, cell, dm_kpts, kpts, kpt_band)
logger.timer(self, 'vj and vk', *cpu0)
return vj, vk
def get_fock_(self, h1e_kpts, s1e, vhf, dm_kpts, cycle=-1, adiis=None,
diis_start_cycle=None, level_shift_factor=None, damp_factor=None):
if diis_start_cycle is None:
diis_start_cycle = self.diis_start_cycle
if level_shift_factor is None:
level_shift_factor = self.level_shift
if damp_factor is None:
damp_factor = self.damp
return get_fock_(self, h1e_kpts, s1e, vhf, dm_kpts, cycle, adiis,
diis_start_cycle, level_shift_factor, damp_factor)
def get_veff(self, cell=None, dm=None, dm_last=0, vhf_last=0, hermi=1,
kpts=None, kpt_band=None):
'''Hartree-Fock potential matrix for the given density matrix.
See :func:`scf.hf.get_veff` and :func:`scf.hf.RHF.get_veff`
'''
if cell is None: cell = self.cell
if dm is None: dm = self.make_rdm1()
if kpts is None: kpts = self.kpts
# TODO: Check incore, direct_scf, _eri's, etc
vj, vk = self.get_jk(cell, dm, hermi, kpts, kpt_band)
return vj - vk * .5
def get_grad(self, mo_coeff_kpts, mo_occ_kpts, fock=None):
'''
returns 1D array of gradients, like non K-pt version
note that occ and virt indices of different k pts now occur
in sequential patches of the 1D array
'''
if fock is None:
dm1 = self.make_rdm1(mo_coeff_kpts, mo_occ_kpts)
fock = self.get_hcore(self.cell, self.kpts) + self.get_veff(self.cell, dm1)
nkpts = len(self.kpts)
# make this closer to the non-kpt one
grad_kpts = np.empty(0,)
for k in range(nkpts):
grad = pyscf.scf.hf.RHF.get_grad(self,
mo_coeff_kpts[k,:,:], mo_occ_kpts[k,:], fock[k,:,:])
grad_kpts = np.hstack((grad_kpts, grad))
return grad_kpts
def eig(self, h_kpts, s_kpts):
nkpts = len(h_kpts)
nao = h_kpts.shape[1]
eig_kpts = np.zeros((nkpts,nao))
mo_coeff_kpts = np.zeros_like(h_kpts)
# TODO: should use superclass eig fn here?
for k in range(nkpts):
eig_kpts[k,:], mo_coeff_kpts[k,:,:] = pyscf.scf.hf.eig(h_kpts[k,:,:], s_kpts[k,:,:])
return eig_kpts, mo_coeff_kpts
def get_occ(self, mo_energy_kpts, mo_coeff_kpts):
'''Label the occupancies for each orbital for sampled k-points.
This is a k-point version of scf.hf.SCF.get_occ
'''
if mo_energy_kpts is None: mo_energy_kpts = self.mo_energy
mo_occ_kpts = np.zeros_like(mo_energy_kpts)
nkpts, nao = mo_coeff_kpts.shape[:2]
nocc = (self.cell.nelectron * nkpts) // 2
# Sort eigs in each kpt
mo_energy = np.reshape(mo_energy_kpts, [nkpts*nao])
# TODO: store mo_coeff correctly (for later analysis)
#self.mo_coeff = np.reshape(mo_coeff_kpts, [nao, nao*nkpts])
mo_idx = np.argsort(mo_energy)
mo_energy = mo_energy[mo_idx]
for ix in mo_idx[:nocc]:
k, ikx = divmod(ix, nao)
# TODO: implement Fermi smearing
mo_occ_kpts[k, ikx] = 2
if nocc < mo_energy.size:
logger.info(self, 'HOMO = %.12g LUMO = %.12g',
mo_energy[nocc-1], mo_energy[nocc])
if mo_energy[nocc-1]+1e-3 > mo_energy[nocc]:
logger.warn(self, '!! HOMO %.12g == LUMO %.12g',
mo_energy[nocc-1], mo_energy[nocc])
else:
logger.info(self, 'HOMO = %.12g', mo_energy[nocc-1])
if self.verbose >= logger.DEBUG:
np.set_printoptions(threshold=len(mo_energy))
logger.debug(self, ' mo_energy = %s', mo_energy)
np.set_printoptions()
self.mo_energy = mo_energy_kpts
self.mo_occ = mo_occ_kpts
return mo_occ_kpts
def make_rdm1(self, mo_coeff_kpts=None, mo_occ_kpts=None):
'''One particle density matrix at each k-point.
Returns:
dm_kpts : (nkpts, nao, nao) ndarray
'''
if mo_coeff_kpts is None:
# Note: this is actually "self.mo_coeff_kpts"
# which is stored in self.mo_coeff of the scf.hf.RHF superclass
mo_coeff_kpts = self.mo_coeff
if mo_occ_kpts is None:
# Note: this is actually "self.mo_occ_kpts"
# which is stored in self.mo_occ of the scf.hf.RHF superclass
mo_occ_kpts = self.mo_occ
return make_rdm1(mo_coeff_kpts, mo_occ_kpts)
def energy_elec(self, dm_kpts=None, h1e_kpts=None, vhf_kpts=None):
'''Following pyscf.scf.hf.energy_elec()
'''
if dm_kpts is None: dm_kpts = self.make_rdm1()
if h1e_kpts is None: h1e_kpts = self.get_hcore()
if vhf_kpts is None: vhf_kpts = self.get_veff(self.cell, dm_kpts)
nkpts = len(dm_kpts)
e1 = e_coul = 0.
for k in range(nkpts):
e1 += 1./nkpts * np.einsum('ij,ji', dm_kpts[k,:,:], h1e_kpts[k,:,:])
e_coul += 1./nkpts * 0.5 * np.einsum('ij,ji', dm_kpts[k,:,:], vhf_kpts[k,:,:])
if abs(e_coul.imag) > 1.e-12:
raise RuntimeError("Coulomb energy has imaginary part, "
"something is wrong!", e_coul.imag)
e1 = e1.real
e_coul = e_coul.real
logger.debug(self, 'E_coul = %.15g', e_coul)
return e1+e_coul, e_coul
def get_bands(self, kpt_band, cell=None, dm_kpts=None, kpts=None):
'''Get energy bands at a given (arbitrary) 'band' k-point.
Returns:
mo_energy : (nao,) ndarray
Bands energies E_n(k)
mo_coeff : (nao, nao) ndarray
Band orbitals psi_n(k)
'''
if cell is None: cell = self.cell
if dm_kpts is None: dm_kpts = self.make_rdm1()
if kpts is None: kpts = self.kpts
fock = pbchf.get_hcore(cell, kpt_band) \
+ self.get_veff(kpts=kpts, kpt_band=kpt_band)
s1e = pbchf.get_ovlp(cell, kpt_band)
mo_energy, mo_coeff = pyscf.scf.hf.eig(fock, s1e)
return mo_energy, mo_coeff
def init_guess_by_chkfile(self, chk=None, project=True):
if chk is None: chk = self.chkfile
return init_guess_by_chkfile(self.cell, chk, project)
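# Usage note (not part of the original module): the class above is instantiated with a
# periodic Cell object and an (nkpts, 3) array of sampled k-points, matching the
# __init__ signature, and the SCF cycle itself is driven through the scf()/kernel()
# machinery inherited from the molecular RHF superclass.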
|
Python
|
CL
|
b6694c456692d24c557c16159839b2e84649d0c6e2cd98f291c128eb3e74d5f3
|
class ResXWriter:
def __init__(self, filename):
self.fileName = filename
self.file = open(filename, 'w', encoding='utf-8')
self.data = {}
def add_resource(self, name, value):
self.data[name] = value
def flush(self):
self.file.seek(0)
self.file.truncate(0)
self.file.write(self.__startRoot)
for name, value in self.data.items():
self.file.write(f' <data name="{name}" xml:space="preserve">\n')
self.file.write(f' <value>{value}</value>\n')
self.file.write(' </data>\n')
self.file.write(self.__endRoot)
def close(self):
self.flush()
self.file.close()
__startRoot = '''<?xml version="1.0" encoding="utf-8"?>
<root>
<!--
Microsoft ResX Schema
Version 2.0
The primary goals of this format is to allow a simple XML format
that is mostly human readable. The generation and parsing of the
various data types are done through the TypeConverter classes
associated with the data types.
Example:
... ado.net/XML headers & schema ...
<resheader name="resmimetype">text/microsoft-resx</resheader>
<resheader name="version">2.0</resheader>
<resheader name="reader">System.Resources.ResXResourceReader, System.Windows.Forms, ...</resheader>
<resheader name="writer">System.Resources.ResXResourceWriter, System.Windows.Forms, ...</resheader>
<data name="Name1"><value>this is my long string</value><comment>this is a comment</comment></data>
<data name="Color1" type="System.Drawing.Color, System.Drawing">Blue</data>
<data name="Bitmap1" mimetype="application/x-microsoft.net.object.binary.base64">
<value>[base64 mime encoded serialized .NET Framework object]</value>
</data>
<data name="Icon1" type="System.Drawing.Icon, System.Drawing" mimetype="application/x-microsoft.net.object.bytearray.base64">
<value>[base64 mime encoded string representing a byte array form of the .NET Framework object]</value>
<comment>This is a comment</comment>
</data>
There are any number of "resheader" rows that contain simple
name/value pairs.
Each data row contains a name, and value. The row also contains a
type or mimetype. Type corresponds to a .NET class that support
text/value conversion through the TypeConverter architecture.
Classes that don't support this are serialized and stored with the
mimetype set.
The mimetype is used for serialized objects, and tells the
ResXResourceReader how to depersist the object. This is currently not
extensible. For a given mimetype the value must be set accordingly:
Note - application/x-microsoft.net.object.binary.base64 is the format
that the ResXResourceWriter will generate, however the reader can
read any of the formats listed below.
mimetype: application/x-microsoft.net.object.binary.base64
value : The object must be serialized with
: System.Runtime.Serialization.Formatters.Binary.BinaryFormatter
: and then encoded with base64 encoding.
mimetype: application/x-microsoft.net.object.soap.base64
value : The object must be serialized with
: System.Runtime.Serialization.Formatters.Soap.SoapFormatter
: and then encoded with base64 encoding.
mimetype: application/x-microsoft.net.object.bytearray.base64
value : The object must be serialized into a byte array
: using a System.ComponentModel.TypeConverter
: and then encoded with base64 encoding.
-->
<xsd:schema id="root" xmlns="" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:msdata="urn:schemas-microsoft-com:xml-msdata">
<xsd:import namespace="http://www.w3.org/XML/1998/namespace" />
<xsd:element name="root" msdata:IsDataSet="true">
<xsd:complexType>
<xsd:choice maxOccurs="unbounded">
<xsd:element name="metadata">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" />
</xsd:sequence>
<xsd:attribute name="name" use="required" type="xsd:string" />
<xsd:attribute name="type" type="xsd:string" />
<xsd:attribute name="mimetype" type="xsd:string" />
<xsd:attribute ref="xml:space" />
</xsd:complexType>
</xsd:element>
<xsd:element name="assembly">
<xsd:complexType>
<xsd:attribute name="alias" type="xsd:string" />
<xsd:attribute name="name" type="xsd:string" />
</xsd:complexType>
</xsd:element>
<xsd:element name="data">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
<xsd:element name="comment" type="xsd:string" minOccurs="0" msdata:Ordinal="2" />
</xsd:sequence>
<xsd:attribute name="name" type="xsd:string" use="required" msdata:Ordinal="1" />
<xsd:attribute name="type" type="xsd:string" msdata:Ordinal="3" />
<xsd:attribute name="mimetype" type="xsd:string" msdata:Ordinal="4" />
<xsd:attribute ref="xml:space" />
</xsd:complexType>
</xsd:element>
<xsd:element name="resheader">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
</xsd:sequence>
<xsd:attribute name="name" type="xsd:string" use="required" />
</xsd:complexType>
</xsd:element>
</xsd:choice>
</xsd:complexType>
</xsd:element>
</xsd:schema>
<resheader name="resmimetype">
<value>text/microsoft-resx</value>
</resheader>
<resheader name="version">
<value>2.0</value>
</resheader>
<resheader name="reader">
<value>System.Resources.ResXResourceReader, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
</resheader>
<resheader name="writer">
<value>System.Resources.ResXResourceWriter, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
</resheader>
'''
__endRoot = '</root>'
|
Python
|
CL
|
ebc768ae37a488f07b55a30536e72ad585f7bcedcad2542f3133f8d833a2adc9
|
import mge_fit_1d as mge
import numpy as np
import jam_axi_rms as Jrms
import matplotlib.pyplot as plt
import os, sys, time
import matplotlib.gridspec as gridspec
from matplotlib.colors import LogNorm
from scipy.misc import imread
import matplotlib.cbook as cbook
from galaxyParametersDictionary_v9 import *
from Sabine_Define import *
from JAM_Analysis_def import *
def gNFW(radius, rho_s, R_s, gamma): # From Cappellari+13
r_tmp = radius/R_s
return rho_s*(r_tmp**gamma)*(0.5+0.5*r_tmp)**(-gamma-3)
def gNFW_RelocatedDensity(x, radius, rho_x, R_s, gamma): # From Cappellari+13
return rho_x * (radius/x)**gamma * ((R_s+radius)/(R_s+x))**(-gamma-3)
def powerLaw(radius, rho_s, R_s, gamma, outer): # here we use a broken power law.
'''
I have realised that the initial power law parametrisation wasn't doing what I thought it was,
therefore I will change the parametrisation to be the same as the gNFW, in the same manner as
was done by Poci+17
'''
x = R_s/20.
return rho_s * (radius/x)**gamma * ((R_s+radius)/(R_s+x))**(-gamma+outer)
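# For reference: gNFW above evaluates rho(r) = rho_s * (r/R_s)**gamma * ((1 + r/R_s)/2)**(-(gamma+3)),
# gNFW_RelocatedDensity rewrites the same profile in terms of the density rho_x at a pivot radius x,
# and powerLaw reuses that functional form with the pivot fixed at R_s/20 and an adjustable outer
# exponent (outer - gamma), so that outer = -3 reproduces the gNFW-like outer behaviour.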
def CheckBoundaryConditions(Parameters, ParameterBoundaries):
ParameterNumber = len(Parameters)
# separate the boundaries array into a lower and an upper array
# first selecting the lower bounds, which are indices 0, 2, 4...
Indices_lower = np.arange(0, 2*ParameterNumber, 2)
# then selecting the upper bounds, which are indices 1, 3, 5...
Indices_upper = np.arange(1, 2*ParameterNumber, 2)
ParameterBoundaries_lower = ParameterBoundaries[Indices_lower]
ParameterBoundaries_upper = ParameterBoundaries[Indices_upper]
Check = True
for ii in range(ParameterNumber):
if not ((Parameters[ii] <= ParameterBoundaries_upper[ii]) & (Parameters[ii] >= ParameterBoundaries_lower[ii])):
Check = False
return Check
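# Note: ParameterBoundaries is expected as a flat, interleaved array
# [lower_0, upper_0, lower_1, upper_1, ...], which is exactly the layout produced by
# InitialWalkerPosition() further down.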
def lnprior(theta, *args):
tmpInputArgs = args[0]
if CheckBoundaryConditions(np.array(theta), np.array(tmpInputArgs)):
return 0.
else:
return -np.inf
def lnprob(theta, *args):
boundaries, ParameterNames, Data, model, mbh, distance, FixedStellarMass, logStellarMass, reff, filename, surf_lum, sigma_lum, qobs_lum = args
lp = lnprior(theta, boundaries)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, ParameterNames, Data, model, mbh, distance, FixedStellarMass, logStellarMass, reff, filename, surf_lum, sigma_lum, qobs_lum) #In logarithmic space, the multiplication becomes sum.
def lnlike(theta, ParameterNames, Data, model, mbh, distance, FixedStellarMass, logStellarMass, reff, filename, surf_lum, sigma_lum, qobs_lum):
ParameterNames = np.array(ParameterNames)
if 'Inclination' in ParameterNames:
inc = np.array(theta)[np.where(ParameterNames == 'Inclination')]
else:
raise ValueError('Inclination not set as a free parameter. ')
if 'Beta' in ParameterNames:
beta = np.array(theta)[np.where(ParameterNames == 'Beta')]
else:
raise ValueError('Beta not set as a free parameter. ')
if 'ScaleDensity' in ParameterNames:
log_rho_s = np.array(theta)[np.where(ParameterNames == 'ScaleDensity')]
else:
raise ValueError('Scale density not set as a free parameter. ')
if 'Gamma' in ParameterNames:
gamma = np.array(theta)[np.where(ParameterNames == 'Gamma')]
else:
raise ValueError('Gamma not set as a free parameter. ')
if 'ML' in ParameterNames:
ml = np.array(theta)[np.where(ParameterNames == 'ML')]
elif FixedStellarMass:
# calculate here what the M/L should be in order for the total luminous MGE to account for
# all measured stellar mass.
sigma_lum_pc = sigma_lum * (distance * 1e6) / 206265 # since the input sigma_lum is in arcseconds.
StellarMassMGE = MGE_mass_total(surf_lum, sigma_lum_pc, qobs_lum, 1000*reff, radian(inc), 1)
ml = 10**logStellarMass / StellarMassMGE
else:
ml = 1
if 'ScaleRadius' in ParameterNames:
R_S = np.array(theta)[np.where(ParameterNames == 'ScaleRadius')]
else:
R_S = 20000
if len(Data) == 4:
if 'AtlasWeight' in ParameterNames:
raise ValueError("ATLAS^3D hyperparameter set, but ATLAS^3D data not provided.")
else:
xbin1, ybin1, rms1, erms1 = Data
DatasetNumber = 1
elif len(Data) == 8:
if 'AtlasWeight' not in ParameterNames:
raise ValueError("ATLAS^3D hyperparameter not provided.")
if 'SluggsWeight' not in ParameterNames:
raise ValueError("SLUGGS hyperparameter not provided.")
sluggsWeight = np.array(theta)[np.where(ParameterNames == 'SluggsWeight')]
atlasWeight = np.array(theta)[np.where(ParameterNames == 'AtlasWeight')]
xbin1, ybin1, rms1, erms1, xbin2, ybin2, rms2, erms2 = Data
DatasetNumber = 2
elif len(Data) == 12:
if 'AtlasWeight' not in ParameterNames:
raise ValueError("ATLAS^3D hyperparameter not provided.")
if 'SluggsWeight' not in ParameterNames:
raise ValueError("SLUGGS hyperparameter not provided.")
if 'GCWeight' not in ParameterNames:
raise ValueError("GC hyperparameter not provided.")
sluggsWeight = np.array(theta)[np.where(ParameterNames == 'SluggsWeight')]
atlasWeight = np.array(theta)[np.where(ParameterNames == 'AtlasWeight')]
gcWeight = np.array(theta)[np.where(ParameterNames == 'GCWeight')]
xbin1, ybin1, rms1, erms1, xbin2, ybin2, rms2, erms2, xbin3, ybin3, rms3, erms3 = Data
DatasetNumber = 3
beta_array = np.ones(len(surf_lum))*beta
n_MGE = 300 # logarithmically spaced radii in parsec
x_MGE = np.logspace(np.log10(1), np.log10(30000), n_MGE) # the units of the density are now mass/pc^2
if model == 'gNFW':
y_MGE = gNFW_RelocatedDensity(1000, x_MGE, 10.**log_rho_s, R_S, gamma) # measured at 1 kpc (1000 pc)
p = mge.mge_fit_1d(x_MGE, y_MGE, ngauss=10, quiet=True)
surf_dm = p.sol[0] # here implementing a potential
sigma_dm = p.sol[1] * 206265 / (distance * 1e6) # configuration which includes the
qobs_dm = np.ones(len(surf_dm)) # stellar mass and also a dark matter
# halo in the form of a gNFW distribution.
surf_pot = np.append(ml * surf_lum, surf_dm) # I'm going to need to be careful here to account for the stellar M/L,
sigma_pot = np.append(sigma_lum, sigma_dm) # since the scaling between the two mass distributions needs to be right.
qobs_pot = np.append(qobs_lum, qobs_dm) #
if DatasetNumber == 1:
try:
rmsModel, ml, chi2, flux = Jrms.jam_axi_rms(
surf_lum, sigma_lum, qobs_lum, surf_pot,
sigma_pot, qobs_pot,
inc, mbh, distance,
xbin1, ybin1, filename,
rms=rms1, erms=erms1,
plot=False, beta=beta_array,
tensor='zz', quiet=True)
realChi2 = np.sum(np.log(2*np.pi*erms1**2) + ((rms1 - rmsModel)/erms1)**2.)
except:
realChi2 = np.inf
elif DatasetNumber == 2:
try:
rmsModel, ml, chi2, flux = Jrms.jam_axi_rms(
surf_lum, sigma_lum, qobs_lum, surf_pot,
sigma_pot, qobs_pot,
inc, mbh, distance,
xbin1, ybin1, filename,
rms=rms1, erms=erms1,
plot=False, beta=beta_array,
tensor='zz', quiet=True)
realChi2_sluggs = np.sum(np.log(2*np.pi*erms1**2/sluggsWeight) + (sluggsWeight*(rms1 - rmsModel)/erms1)**2.)
rmsModel, ml, chi2, flux = Jrms.jam_axi_rms(
surf_lum, sigma_lum, qobs_lum, surf_pot,
sigma_pot, qobs_pot,
inc, mbh, distance,
xbin2, ybin2, filename,
rms=rms2, erms=erms2,
plot=False, beta=beta_array,
tensor='zz', quiet=True)
realChi2_atlas = np.sum(np.log(2*np.pi*erms2**2/atlasWeight) + (atlasWeight*(rms2 - rmsModel)/erms2)**2.)
realChi2 = realChi2_sluggs + realChi2_atlas
except:
realChi2 = np.inf
elif DatasetNumber == 3:
try:
rmsModel, ml, chi2, flux = Jrms.jam_axi_rms(
surf_lum, sigma_lum, qobs_lum, surf_pot,
sigma_pot, qobs_pot,
inc, mbh, distance,
xbin1, ybin1, filename,
rms=rms1, erms=erms1,
plot=False, beta=beta_array,
tensor='zz', quiet=True)
realChi2_sluggs = np.sum(np.log(2*np.pi*erms1**2/sluggsWeight) + (sluggsWeight*(rms1 - rmsModel)/erms1)**2.)
rmsModel, ml, chi2, flux = Jrms.jam_axi_rms(
surf_lum, sigma_lum, qobs_lum, surf_pot,
sigma_pot, qobs_pot,
inc, mbh, distance,
xbin2, ybin2, filename,
rms=rms2, erms=erms2,
plot=False, beta=beta_array,
tensor='zz', quiet=True)
realChi2_atlas = np.sum(np.log(2*np.pi*erms2**2/atlasWeight) + (atlasWeight*(rms2 - rmsModel)/erms2)**2.)
rmsModel, ml, chi2, flux = Jrms.jam_axi_rms(
surf_lum, sigma_lum, qobs_lum, surf_pot,
sigma_pot, qobs_pot,
inc, mbh, distance,
xbin3, ybin3, filename,
rms=rms3, erms=erms3,
plot=False, beta=beta_array,
tensor='zz', quiet=True)
realChi2_gc = np.sum(np.log(2*np.pi*erms3**2/gcWeight) + (gcWeight*(rms3 - rmsModel)/erms3)**2.)
realChi2 = realChi2_sluggs + realChi2_atlas + realChi2_gc
except:
realChi2 = np.inf
elif model == 'powerLaw':
y_MGE = powerLaw(x_MGE, 10.**log_rho_s, R_S, gamma, -3)
p = mge.mge_fit_1d(x_MGE, y_MGE, ngauss=10, quiet=True)
surf_pot = p.sol[0]
sigma_pot = p.sol[1] * 206265 / (distance * 1e6) # converting the dispersions into arcseconds because it wants only
qobs_pot = np.ones(len(surf_pot)) # the radial dimension in these units.
if DatasetNumber == 1:
try:
rmsModel, ml, chi2, flux = Jrms.jam_axi_rms(
surf_lum, sigma_lum, qobs_lum, surf_pot,
sigma_pot, qobs_pot,
inc, mbh, distance,
xbin1, ybin1, filename, ml,
rms=rms1, erms=erms1,
plot=False, beta=beta_array,
tensor='zz', quiet=True)
realChi2 = np.sum(np.log(2*np.pi*erms1**2) + ((rms1 - rmsModel)/erms1)**2.)
except:
realChi2 = np.inf
elif DatasetNumber == 2:
try:
rmsModel, ml, chi2, flux = Jrms.jam_axi_rms(
surf_lum, sigma_lum, qobs_lum, surf_pot,
sigma_pot, qobs_pot,
inc, mbh, distance,
xbin1, ybin1, filename, ml,
rms=rms1, erms=erms1,
plot=False, beta=beta_array,
tensor='zz', quiet=True)
realChi2_sluggs = np.sum(np.log(2*np.pi*erms1**2/sluggsWeight) + (sluggsWeight*(rms1 - rmsModel)/erms1)**2.)
rmsModel, ml, chi2, flux = Jrms.jam_axi_rms(
surf_lum, sigma_lum, qobs_lum, surf_pot,
sigma_pot, qobs_pot,
inc, mbh, distance,
xbin2, ybin2, filename, ml,
rms=rms2, erms=erms2,
plot=False, beta=beta_array,
tensor='zz', quiet=True)
realChi2_atlas = np.sum(np.log(2*np.pi*erms2**2/atlasWeight) + (atlasWeight*(rms2 - rmsModel)/erms2)**2.)
realChi2 = realChi2_sluggs + realChi2_atlas
except:
realChi2 = np.inf
elif DatasetNumber == 3:
try:
rmsModel, ml, chi2, flux = Jrms.jam_axi_rms(
surf_lum, sigma_lum, qobs_lum, surf_pot,
sigma_pot, qobs_pot,
inc, mbh, distance,
xbin1, ybin1, filename, ml,
rms=rms1, erms=erms1,
plot=False, beta=beta_array,
tensor='zz', quiet=True)
realChi2_sluggs = np.sum(np.log(2*np.pi*erms1**2/sluggsWeight) + (sluggsWeight*(rms1 - rmsModel)/erms1)**2.)
rmsModel, ml, chi2, flux = Jrms.jam_axi_rms(
surf_lum, sigma_lum, qobs_lum, surf_pot,
sigma_pot, qobs_pot,
inc, mbh, distance,
xbin2, ybin2, filename, ml,
rms=rms2, erms=erms2,
plot=False, beta=beta_array,
tensor='zz', quiet=True)
realChi2_atlas = np.sum(np.log(2*np.pi*erms2**2/atlasWeight) + (atlasWeight*(rms2 - rmsModel)/erms2)**2.)
rmsModel, ml, chi2, flux = Jrms.jam_axi_rms(
surf_lum, sigma_lum, qobs_lum, surf_pot,
sigma_pot, qobs_pot,
inc, mbh, distance,
xbin3, ybin3, filename, ml,
rms=rms3, erms=erms3,
plot=False, beta=beta_array,
tensor='zz', quiet=True)
realChi2_gc = np.sum(np.log(2*np.pi*erms3**2/gcWeight) + (gcWeight*(rms3 - rmsModel)/erms3)**2.)
realChi2 = realChi2_sluggs + realChi2_atlas + realChi2_gc
except:
realChi2 = np.inf
else:
raise ValueError('Model not recognised. Should be either "gNFW" or "powerLaw"')
return -realChi2/2
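# Note on the value returned above: with hyperparameter weights, each dataset contributes
# sum( log(2*pi*erms**2 / w) + (w*(rms - rmsModel)/erms)**2 ) to realChi2, and lnlike
# returns -realChi2/2, i.e. a Gaussian log-likelihood in which the per-dataset weights
# w (sluggsWeight, atlasWeight, gcWeight) rescale both the variances and the residuals.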
def InitialWalkerPosition(WalkerNumber, LowerBounds, UpperBounds, PriorType):
# writing up code that, for an arbitrary number of parameters and prior bounds, will set up the initial walker positions.
InitialPosition = []
for ii in np.arange(WalkerNumber):
WalkerPosition = []
for jj in range(len(LowerBounds)):
if PriorType[jj] == 'uniform':
WalkerPosition.append(np.random.uniform(low=LowerBounds[jj], high=UpperBounds[jj]) )
elif PriorType[jj] == 'exponential':
ExpInitPosition = np.random.uniform(low=np.exp(-UpperBounds[jj]), high=np.exp(LowerBounds[jj]))
WalkerPosition.append(-np.log(ExpInitPosition))
InitialPosition.append(WalkerPosition)
Boundaries = []
for ii in range(len(LowerBounds)):
Boundaries.append(LowerBounds[ii])
Boundaries.append(UpperBounds[ii])
return InitialPosition, Boundaries
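# InitialWalkerPosition draws one starting point per walker: 'uniform' priors are sampled
# directly between the bounds, 'exponential' priors by inverse-transform sampling of a
# uniform draw between exp(-upper) and exp(lower), and the returned Boundaries list
# interleaves lower/upper bounds in the format expected by CheckBoundaryConditions().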
def mainCall_modular(GalName, Input_path, JAM_path, Model = 'gNFW', SLUGGS = True, ATLAS = True, GC = False,\
Inclination = True, Beta = True, Gamma = True, ScaleDensity = True, ML = False, ScaleRadius = False, \
SluggsWeight = False, AtlasWeight = False, GCWeight = False, FixedStellarMass = False, \
nwalkers = 2000, burnSteps = 1000, stepNumber = 4000):
MGE = MGE_Source[GalName]
'''
Loading the MGE parameters for each galaxy.
MGE source is determined in the JAM_Dictionary.py file, under the MGE_Source array
'''
surf_lum, sigma_lum, qobs_lum = MGEfinder(MGE, GalName, units = 'original') # specify whether the units of the MGE are in arcsec,
# kpc, or in the original units.
sigmapsf = 0.6
pixsize = 0.8
filename=JAM_path+'JAM/Output/'+str(GalName)+'/JAM_'+str(GalName)
try:
mbh = BlackHoleMass[GalName]
except:
mbh = 0
print 'No black hole mass specified for', GalName
distance = distMpc[GalName]
reff = Reff_Spitzer[GalName] * (distance * 1e6) / 206265 # defining the effective radius in pc.
logStellarMass = M_star_Spitzer[GalName]
# R_S = 20000 # leaving the input scale radius in parsec.
# print 'Break radius (arcseconds):', R_S
t0 = time.time()
'''
Calling in the input file as determined by JAM_InputGenerator.py in the ../InputFiles folder.
This method of calling in the files doesn't affect the cleaning of the data at all.
All the cleaning is done in the previously mentioned python code.
'''
Data = []
if SLUGGS:
input_filename_sluggs = JAM_path+'/'+Input_path+'/'+str(GalName)+'/JAM_Input_'+GalName+'_SLUGGS.txt'
xbin_sluggs, ybin_sluggs, rms_sluggs, erms_sluggs = np.loadtxt(input_filename_sluggs, unpack = True, usecols = [0, 1, 2, 3], comments = '#')
Data.append(xbin_sluggs)
Data.append(ybin_sluggs)
Data.append(rms_sluggs)
Data.append(erms_sluggs)
if ATLAS:
input_filename_atlas = JAM_path+'/'+Input_path+'/'+str(GalName)+'/JAM_Input_'+GalName+'_ATLAS3D.txt'
xbin_atlas, ybin_atlas, rms_atlas, erms_atlas = np.loadtxt(input_filename_atlas, unpack = True, usecols = [0, 1, 2, 3], comments = '#')
Data.append(xbin_atlas)
Data.append(ybin_atlas)
Data.append(rms_atlas)
Data.append(erms_atlas)
if GC:
input_filename_gc = JAM_path+'/'+Input_path+'/'+str(GalName)+'/JAM_Input_'+GalName+'_GC.txt'
xbin_gc, ybin_gc, rms_gc, erms_gc = np.loadtxt(input_filename_gc, unpack = True, usecols = [0, 1, 2, 3], comments = '#')
Data.append(xbin_gc)
Data.append(ybin_gc)
Data.append(rms_gc)
Data.append(erms_gc)
min_inclination = degree(np.arccos(np.min(qobs_lum))) # using the MGE to calculate the minimum possible inclination
if FixedStellarMass:
if ML:
ML = False
print 'Free M/L has been set to false to fix stellar mass to observed stellar mass. '
ndim = 0
LowerBounds, UpperBounds, PriorType, ParameterNames, ParamSymbol = [], [], [], [], []
# selecting all possible free parameters, with their prior bounds:
if Inclination: # inclination
ndim += 1
LowerBounds.append(min_inclination)
UpperBounds.append(90)
PriorType.append('uniform')
ParameterNames.append('Inclination')
ParamSymbol.append(r"$i$")
if Beta: # velocity anisotropy
ndim += 1
LowerBounds.append(-0.2)
UpperBounds.append(0.5)
PriorType.append('uniform')
ParameterNames.append('Beta')
ParamSymbol.append(r"$\beta$")
if Gamma: # inner density slope
ndim += 1
LowerBounds.append(-2.4)
UpperBounds.append(-1.5)
PriorType.append('uniform')
ParameterNames.append('Gamma')
ParamSymbol.append(r"$\gamma$")
if ScaleDensity: # scale density (log10)
ndim += 1
LowerBounds.append(np.log10(0.1))
UpperBounds.append(np.log10(100000))
PriorType.append('uniform')
ParameterNames.append('ScaleDensity')
ParamSymbol.append(r"$\rho_S$")
if ML: # stellar mass-to-light ratio
ndim += 1
LowerBounds.append(1)
UpperBounds.append(5)
PriorType.append('uniform')
ParameterNames.append('ML')
ParamSymbol.append(r"$M/L$")
if ScaleRadius: # scale radius
ndim += 1
LowerBounds.append(10000)
UpperBounds.append(40000)
PriorType.append('uniform')
ParameterNames.append('ScaleRadius')
ParamSymbol.append(r"$R_S$")
if SluggsWeight:
ndim += 1
LowerBounds.append(0)
UpperBounds.append(10)
PriorType.append('exponential')
ParameterNames.append('SluggsWeight')
ParamSymbol.append(r"$\omega_{\rm SLUGGS}$")
if AtlasWeight:
ndim += 1
LowerBounds.append(0)
UpperBounds.append(10)
PriorType.append('exponential')
ParameterNames.append('AtlasWeight')
ParamSymbol.append(r"$\omega_{\rm ATLAS}$")
if GCWeight:
ndim += 1
LowerBounds.append(0)
UpperBounds.append(10)
PriorType.append('exponential')
ParameterNames.append('GCWeight')
ParamSymbol.append(r"$\omega_{\rm GC}$")
pos, boundaries = InitialWalkerPosition(nwalkers, LowerBounds, UpperBounds, PriorType)
# Setup MCMC sampler
import emcee
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob,
args=(boundaries, ParameterNames, Data, Model, mbh, distance, FixedStellarMass, logStellarMass,
reff, filename, surf_lum, sigma_lum, qobs_lum),
threads=16) #Threads gives the number of processors to use
############ implementing a burn-in period ###########
pos_afterBurn, prob, state = sampler.run_mcmc(pos, burnSteps)
sampler.reset()
suffix = ''
if SLUGGS:
suffix = suffix + 'SLUGGS_'
if ATLAS:
suffix = suffix + 'ATLAS_'
if GC:
suffix = suffix + 'GC_'
if FixedStellarMass:
suffix = suffix + 'FixedM*_'
suffix = suffix + 'FreeParam-'+str(ndim)
if not os.path.exists(JAM_path+'/'+str(GalName)):
os.mkdir(JAM_path+'/'+str(GalName))
if not os.path.exists(JAM_path+'/'+str(GalName)+'/'+suffix):
os.mkdir(JAM_path+'/'+str(GalName)+'/'+suffix)
OutputFilename = JAM_path+'/'+str(GalName)+'/'+suffix+'/'+str(GalName)+'_MCMCOutput_'+suffix+'.dat'
stepsBetweenIterations = stepNumber / 100
iterationNumber = stepNumber / stepsBetweenIterations
import pickle
for iteration in range(iterationNumber):
pos_afterBurn, prob, state = sampler.run_mcmc(pos_afterBurn, stepsBetweenIterations) # uses the final position of the burn-in period as the starting point.
fileOut = open(OutputFilename, 'wb')
pickle.dump([sampler.chain, sampler.flatchain, sampler.lnprobability, sampler.flatlnprobability], fileOut)
fileOut.close()
print 'Number of steps completed:', (iteration+1)*stepsBetweenIterations, len(sampler.chain[0])
######################################################
t = time.time() - t0
Date=time.strftime('%Y-%m-%d')
Time = time.asctime().split()[3]
print '########################################'
print 'time elapsed:', float(t)/3600, 'hours'
print '########################################'
print 'Mean acceptance fraction: ', (np.mean(sampler.acceptance_fraction))
print 'output filename: ', OutputFilename
fileOut = open(OutputFilename, 'wb')
pickle.dump([sampler.chain, sampler.flatchain, sampler.lnprobability, sampler.flatlnprobability], fileOut)
fileOut.close()
return OutputFilename, ParamSymbol, ParameterNames, Data
|
Python
|
CL
|
c56b4153b43441db9d2ee6d8952b4e46bf92b891358ed15d1b0fe59571ad2625
|
import numpy as np
import matplotlib.pyplot as plt
def initialize_centroids(X, K):
"""
Randomly initialize K cluster centroids from the data
arguments:
X -- our data that we want to cluster
K -- number of clusters
return:
centroids -- initialized centroids
"""
idx = np.random.choice(X.shape[0], K, replace = False)
centroids = X[idx,:]
return centroids
def compute_distance(X, K_clusters):
"""
compute the distance between the examples of our data and the centroids of the clusters
arguments:
X -- our data
K_clusters -- centroids of the K clusters
return:
dis -- the squared Euclidean distance to each centroid
"""
dis = np.linalg.norm((X-K_clusters),2,axis=1)**2
return dis
def k_means (X, K):
"""
derive the K clusters from the data
arguments:
X -- our data
K -- number of clusters
return:
groups -- the labels (clusters) assigned to the examples in the data
K_clusters -- centers of clusters
"""
K_clusters = initialize_centroids(X, K)
m = X.shape[0]
dif = 1
while (dif > 10**(-7)): # we stop when the centroids almost don't move
groups = np.empty(m)
K_clusters_old = K_clusters.copy() # copy, not a reference, so the convergence check compares old and new centroids
#cluster assignment step
for i in range(m):
groups[i] = np.argmin(compute_distance(X[i,:],K_clusters))
#centroids update step
for k in range(K):
K_clusters[k,:] = np.mean(X[groups==k,:],axis=0)
dif = np.linalg.norm(K_clusters-K_clusters_old, 2) / (np.linalg.norm(K_clusters, 2) + np.linalg.norm(K_clusters_old, 2))
return groups.astype(int), K_clusters
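# Note: the loop above declares convergence when the relative change of the centroid
# matrix, ||C_new - C_old||_2 / (||C_new||_2 + ||C_old||_2) (matrix 2-norm), drops
# below 1e-7; this requires K_clusters_old to be a copy rather than a reference.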
def compute_cost(X, groups, K_clusters):
"""
compute the cost function (also called distortion of the training examples) that we want to minimize.
It represents the average of the distances of every training example to its corresponding cluster centroid
arguments:
X -- the data
groups -- labels of clusters assigned to each example of the data
K_clusters -- centroids of the clusters
return:
cost -- the cost function to minimize
"""
m = X.shape[0]
dis = np.empty(m)
for i in range(m):
dis[i] = compute_distance(X[i,:].reshape(1,X.shape[1]), K_clusters[groups[i],:].reshape(1,X.shape[1]))
cost = (1/m)*np.sum(dis)
return cost
def k_means_iter(X, K, n_iter):
"""
run the k_means algorithm with many different random initializations and keep the clustering that gave the lowest cost.
arguments:
X -- our data
K -- number of clusters
n_iter -- number of iterations
return:
cluster_groups -- the labels (clusters) assigned to the examples in the data
cluster_centroids -- centers of clusters
"""
cost=[]
centroids_dict={}
for i in range (n_iter):
groups, K_clusters=k_means(X, K)
cost.append(compute_cost(X, groups, K_clusters))
centroids_dict['groups'+str(i)]=groups
centroids_dict['K_clusters'+str(i)]=K_clusters
opt_cost_index=cost.index(min(cost))
cluster_groups=centroids_dict['groups'+str(opt_cost_index)]
cluster_centroids=centroids_dict['K_clusters'+str(opt_cost_index)]
return cluster_groups,cluster_centroids
#def main():
#if __name__=="__main__":
# main()
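# --- Hypothetical usage sketch (not part of the original script) ---
# Shows how the functions above could be driven end to end; the synthetic blobs,
# the random seed and K = 3 are illustrative assumptions only, and a degenerate
# random initialisation can in principle still leave a cluster empty.
if __name__ == "__main__":
    np.random.seed(0)
    # three well-separated 2-D Gaussian blobs of 50 points each
    blobs = np.vstack([np.random.randn(50, 2) + offset
                       for offset in ([0.0, 0.0], [5.0, 5.0], [0.0, 5.0])])
    labels, centers = k_means_iter(blobs, K=3, n_iter=5)
    print("Final cost:", compute_cost(blobs, labels, centers))
    print("Cluster centers:\n", centers)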
|
Python
|
CL
|
ee7a61a4c0937c7d53a4fbe194f866b8661ced655f030abefd679e54072532dc
|
import tensorflow as tf
from tensorflow.python.keras.applications.vgg16 import preprocess_input as pre_process_VGG
from tensorflow.python.keras.applications.resnet50 import preprocess_input as pre_process_ResNet
from tensorflow.python.keras.applications.inception_v3 import preprocess_input as pre_process_Inception
from keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
import argparse
import numpy as np
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--server", required=True, help="Running the code on the server or not (y/n)")
ap.add_argument("-m", "--model", required=True, help="Model used: 0 VGG, 1 ResNet, 2 Inception")
args = vars(ap.parse_args())
run_on_server = args["server"]
model_name = int(args["model"])
if run_on_server == "y":
test_folder = ["/mnt/Data/ltanzi/flippedCrossVal/Testing/TestA",
"/mnt/Data/ltanzi/flippedCrossVal/Testing/TestB",
"/mnt/Data/ltanzi/flippedCrossVal/Testing/TestUnbroken"]
score_folder = "/mnt/Data/ltanzi/flippedCrossVal/Test"
model_path = "/mnt/Data/ltanzi/FlippedModels/Inception/"
elif run_on_server == "n":
test_folder = ["/Users/leonardotanzi/Desktop/testEdgedA1A2A3/Testing/TestA1",
"/Users/leonardotanzi/Desktop/testEdgedA1A2A3/Testing/TestA2",
"/Users/leonardotanzi/Desktop/testEdgedA1A2A3/Testing/TestA3"]
model_path = "/Users/leonardotanzi/Desktop/testEdgedA1A2A3/"
score_folder = "/Users/leonardotanzi/Desktop/testEdgedA1A2A3/Test"
else:
raise ValueError("Incorrect 1st arg.")
classmode = "sparse"
image_size = 299 if model_name == 2 else 224
batch_size = 1
if model_name == 0:
preprocess_input = pre_process_VGG
elif model_name == 1:
preprocess_input = pre_process_ResNet
elif model_name == 2:
preprocess_input = pre_process_Inception
else:
raise ValueError("Incorrect 2nd arg: model must be 0 (VGG), 1 (ResNet) or 2 (Inception).")
data_generator = ImageDataGenerator(preprocessing_function=preprocess_input)
classes = ["A", "B", "Unbroken"]
dict_classes = {classes[0]: 0, classes[1]: 1, classes[2]: 2}
model = load_model(model_path + "Fold5_Flipped_lr00001-retrainAll-balanced-categorical-Inception-1569005609.model")
# Evaluate scores of the full test set
test_generator = data_generator.flow_from_directory(score_folder,
target_size=(image_size, image_size),
batch_size=batch_size,
class_mode=classmode,
classes=classes
)
STEP_SIZE_TEST = test_generator.n // test_generator.batch_size
score = model.evaluate_generator(test_generator, steps=STEP_SIZE_TEST)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
for i, folder in enumerate(test_folder):
test_generator = data_generator.flow_from_directory(folder,
target_size=(image_size, image_size),
batch_size=batch_size,
class_mode=classmode)
STEP_SIZE_TEST = test_generator.n//test_generator.batch_size
test_generator.reset()
pred = model.predict_generator(test_generator,
steps=STEP_SIZE_TEST,
verbose=1)
predicted_class_indices = np.argmax(pred, axis=1)
labels = dict_classes
labels = dict((v, k) for k, v in labels.items())
predictions = [labels[k] for k in predicted_class_indices]
print(predictions)
x = 0
tot = 0
for j in predictions:
tot += 1
if j == classes[i]:
x += 1
print("{} classified correctly: {}%".format(classes[i], x*100/tot))
|
Python
|
CL
|
554fc8707cda1e9fec1a5541d0af8beab1f3fed2a6ebd01a51b0cd31c5370fce
|
from __future__ import print_function
import os
import sys
import numpy as np
from keras.preprocessing.text import text_to_word_sequence
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.layers import Dense, Input, Flatten, concatenate
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model
import pandas as pd
from keras.layers.recurrent import LSTM
from keras.models import model_from_json
from sklearn.model_selection import train_test_split
def retainAlpha(word):
for s in word:
if not s.isalpha():
return False
return True
# Download the Stanford Glove word vectors,
# unzip and save it under <root>/data/glove.6B/
BASE_DIR = '../data'
GLOVE_DIR = BASE_DIR + '/glove.6B/'
MAX_SEQUENCE_LENGTH = 60
MAX_NB_WORDS = 50000
EMBEDDING_DIM = 100
VALIDATION_SPLIT = 0.2
NBR_OF_HDLNS = 5
# first, build index mapping words in the embeddings set
# to their embedding vector
print('Indexing word vectors.')
embeddings_index = {}
f = open(os.path.join(GLOVE_DIR, 'glove.6B.100d.txt'))
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
train = pd.read_csv('Data/train.csv')
test = pd.read_csv('Data/test.csv')
train_labels = pd.to_numeric(train["is_duplicate"]).tolist()
train_question1 = [' '.join(text_to_word_sequence(question if isinstance(question, str) else str(question)))
for question in train["question1"].tolist()]
train_question2 = [' '.join(text_to_word_sequence(question if isinstance(question, str) else str(question)))
for question in train["question2"].tolist()]
test_question1 = [' '.join(text_to_word_sequence(question if isinstance(question, str) else str(question)))
for question in test["question1"].tolist()]
test_question2 = [' '.join(text_to_word_sequence(question if isinstance(question, str) else str(question)))
for question in test["question2"].tolist()]
tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(train_question1 + train_question2)
train_q1_seq = tokenizer.texts_to_sequences(train_question1)
train_q2_seq = tokenizer.texts_to_sequences(train_question2)
test_q1_seq = tokenizer.texts_to_sequences(test_question1)
test_q2_seq = tokenizer.texts_to_sequences(test_question2)
# Leftover scaffolding from an earlier multi-input version (these initial lists are overwritten or unused below)
merged_lstm_output = []
merged_input = []
merged_padded_data_train = []
merged_padded_data_test = []
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
print('Max index value in word_index = %s' % max(word_index.values()))
print('Min index value in word_index = %s' % min(word_index.values()))
num_words = min(MAX_NB_WORDS, len(word_index) + 1)
embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
print("Embedding Matrix dimensions = " + str(embedding_matrix.shape[0]) + "," + str(embedding_matrix.shape[1]))
for word, i in word_index.items():
if i >= num_words:
continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
# load pre-trained word embeddings into an Embedding layer
# note that we set trainable = False so as to keep the embeddings fixed
embedding_layer = Embedding(num_words,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)
padded_q1_train = pad_sequences(train_q1_seq, maxlen=MAX_SEQUENCE_LENGTH)
padded_q2_train = pad_sequences(train_q2_seq, maxlen=MAX_SEQUENCE_LENGTH)
padded_q1_test = pad_sequences(test_q1_seq, maxlen=MAX_SEQUENCE_LENGTH)
padded_q2_test = pad_sequences(test_q2_seq, maxlen=MAX_SEQUENCE_LENGTH)
# Combining the padded dataset for being used as inputs to separate input layers.
padded_data_train = [padded_q1_train, padded_q2_train]
padded_data_test = [padded_q1_test, padded_q2_test]
# Separate input layers for question1 and question2
sequence_input1 = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
sequence_input2 = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
# Combining the input layers for model definition
seq_input = [sequence_input1, sequence_input2]
# Embedding layers
embedded_sequences1 = embedding_layer(sequence_input1)
embedded_sequences2 = embedding_layer(sequence_input2)
# Shared LSTM layer for each question
shared_lstm_layer = LSTM(64)
lstm_output1 = shared_lstm_layer(embedded_sequences1)
lstm_output2 = shared_lstm_layer(embedded_sequences2)
# Merging the LSTM outputs and concatenating them
# to be fed into the final logistic layer
merged_lstm_output = [lstm_output1, lstm_output2]
merged_vector = concatenate(merged_lstm_output, axis=-1)
# The final logistic output layer
predictions = Dense(1, activation='sigmoid')(merged_vector)
# Model definition
model = Model(inputs=seq_input, outputs=predictions)
# Compiling and training model
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
model.fit(padded_data_train, train_labels, epochs=20)
# Save the model to disk
model_json = model.to_json()
with open("quora-shared.lstm.model.v2.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("quora-shared.lstm.v2.model.h5")
print("Saved model to disk")
# Saving the predictions on test data
pred_labels_test = model.predict(padded_data_test, verbose=1)
pred_series_test = pd.Series(pred_labels_test.flat)
test["is_duplicate"] = pred_series_test.values
test.to_csv("quora-test-results.csv", sep = '\t', index = False)
test_final = test[["test_id", "is_duplicate"]]
test_final.to_csv("final-results-v1.csv", index = False)
|
Python
|
CL
|
66866b6ac9ca85916daf530091d5cc13d1b4db8a0057ce71e0294240bc398509
|
"""
Sparse Blocks Network
Copyright (c) 2017, Uber Technologies, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#
# Benchmark submanifold top left mask performance.
#
# Usage:
# python benchmark_topleft.py --test [conv | res] --arch [resnet-50 | resnet-v2]
#
# Flags:
# --test: Which benchmark, a convolutional layer or a residual block.
# --arch: Which architecture, original ResNet-50 (high channel) or modified ResNet-v2 (low channel).
#
from __future__ import division, print_function
import torch
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
from argparse import ArgumentParser
from collections import namedtuple
from perf import run_dense, run_sparse, generate_top_left_mask
# Import from our benchmark folder.
from benchmark_configs import INPUT_SIZE_DICT, SPARSITY_LIST
from benchmark_utils import append_result, create_result, get_out_filename
N_RUN_CONV = 15
N_RUN_RES = 15
perf_result_fields = ['H', 'W', 'C', 'K', 'sparsity', 'dense_time', 'sparse_time', 'speedup']
PerfResult = namedtuple('PerfResult', perf_result_fields)
def main():
out_file = get_out_filename(prefix='submanifold_topleft_{}'.format(args.test))
print('Writing output to {}'.format(out_file))
create_result(out_file, perf_result_fields)
for sz in INPUT_SIZE_LIST:
for sparsity in SPARSITY_LIST:
if args.test == 'conv':
x = generate_top_left_mask([1, sz[3], sz[0], sz[1]], sparsity)
elif args.test == 'res':
x = generate_top_left_mask([1, sz[2], sz[0], sz[1]], sparsity)
img_tensor = torch.FloatTensor(x)
stream = torch.cuda.current_stream()
nchw = img_tensor.size()
if args.test == "conv":
n_run = N_RUN_CONV
res_block = False
else:
n_run = N_RUN_RES
res_block = True
dense_ms = run_dense(
img_tensor, sz[3], res_block=res_block, n_warmup=n_run, n_run=n_run)
sparse_ms = run_sparse(
img_tensor, sz[3], res_block=res_block, n_warmup=n_run, n_run=n_run)
result = PerfResult(
H=sz[0],
W=sz[1],
C=sz[2],
K=sz[3],
sparsity=sparsity,
dense_time=dense_ms,
sparse_time=sparse_ms,
speedup=dense_ms / sparse_ms)
append_result(out_file, result)
if __name__ == '__main__':
parser = ArgumentParser(
description='Submanifold convolution and resnet blocks benchmarking script')
parser.add_argument('--test', type=str, default='conv', choices=set(('conv', 'res')))
parser.add_argument(
'--arch', type=str, default='resnet-v2', choices=set(('resnet-50', 'resnet-v2')))
args = parser.parse_args()
print('Benchmarking with --test=%s --arch=%s' % (args.test, args.arch))
INPUT_SIZE_LIST = INPUT_SIZE_DICT[args.arch]
main()
|
Python
|
CL
|
1813b081419466e18d852c48cbd750e80671e17c2c81cbbeff33ea83a053ebbc
|
#---------------------------------------------
# Import necessary packages
#---------------------------------------------
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import time
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import LabelEncoder
#----------------------------------------------------
# Make a function that computes success rate with KNN
#----------------------------------------------------
def knn_rate(xtrain,ytrain,xtest,ytest,k):
"""This function takes in a training dataset of features,
a training dataset of labels, a testing dataset of features,
a testing dataset of labels, and a specified number of
neighbors. It will run KNN algorithms on the training set,
predict the testing set, compare the results with the true
labels, and return success rate."""
prediction = []
count = 0
knn = KNeighborsClassifier(n_neighbors=k)
knn.fit(xtrain,ytrain)
accu_rate = knn.score(xtest,ytest)
return accu_rate
#---------------------------------------------
# Spambase Data
#---------------------------------------------
# Import spambase.data
df = pd.read_csv("spambase.data", header = None)
# Split the features and the labels
X = df.iloc[:,0:57]
y = df.iloc[:,57]
# Split dataset into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=5)
# Reset the indices
X_train = X_train.reset_index(drop=True)
X_test = X_test.reset_index(drop=True)
y_train = y_train.reset_index(drop=True)
y_test = y_test.reset_index(drop=True)
# Run the knn_rate function on data using 1 to 25 neighbors
accu_list = [knn_rate(X_train,y_train,X_test,y_test,i) for i in range(1,26)]
# Make a plot
plt.close()
plt.rcParams["figure.figsize"] = [10, 8]
plt.plot(list(range(1,26)),accu_list,'-o')
plt.xticks(np.arange(0, 26, 1.0))
plt.title('Number of Neighbors vs. Success Rate for Spambase Dataset')
plt.xlabel('Number of Neighbors')
plt.ylabel('Success Rate')
plt.grid()
plt.savefig('knn_spam.png')
# Time the KNN algorithm
start = time.time()
knn_rate(X_train,y_train,X_test,y_test,1)
end = time.time()
print("It takes " + str(end - start) + " seconds to run KNN algorithm on spambase dataset with 1 neighbor.")
#---------------------------------------------
# Breast Cancer Data
#---------------------------------------------
# Import wdbc.data
df2 = pd.read_csv("wdbc.data", header = None)
# Split the features and the labels
X2 = df2.iloc[:,2:32]
y2 = df2.iloc[:,1]
# Split dataset into training and testing sets
X_train2, X_test2, y_train2, y_test2 = train_test_split(X2, y2, test_size=0.2, random_state=5)
# Reset the indices
X_train2 = X_train2.reset_index(drop=True)
X_test2 = X_test2.reset_index(drop=True)
y_train2 = y_train2.reset_index(drop=True)
y_test2 = y_test2.reset_index(drop=True)
# Run the knn_rate function on data using 1 to 25 neighbors
accu_list2 = [knn_rate(X_train2,y_train2,X_test2,y_test2,i) for i in range(1,26)]
# Make a plot
plt.close()
plt.rcParams["figure.figsize"] = [10, 8]
plt.plot(list(range(1,26)),accu_list2,'-o')
plt.xticks(np.arange(0, 26, 1.0))
plt.title('Number of Neighbors vs. Success Rate for Breast Cancer Dataset')
plt.xlabel('Number of Neighbors')
plt.ylabel('Success Rate')
plt.grid()
plt.savefig('knn_breast.png')
# Time the KNN algorithm
start = time.time()
knn_rate(X_train2,y_train2,X_test2,y_test2,11)
end = time.time()
print("It takes " + str(end - start) + " seconds to run KNN algorithm on breast cancer dataset with 11 neighbors.")
#-----------------------------------------------------
# Adult Dataset
#-----------------------------------------------------
# Import adult.data
df3 = pd.read_csv("adult.data", header = None)
# Convert the categorical variables to numerical
categorical_features = [1,3,5,6,7,8,9,13]
le = LabelEncoder()
for i in range(0,len(categorical_features )):
new = le.fit_transform(df3[categorical_features[i]])
df3[categorical_features[i]] = new
# Split the features and the labels
X3 = df3.iloc[:,0:14]
y3 = df3.iloc[:,14]
# Split dataset into training and testing sets
X_train3, X_test3, y_train3, y_test3 = train_test_split(X3, y3, test_size=0.2, random_state=5)
# Reset indices
X_train3 = X_train3.reset_index(drop=True)
X_test3 = X_test3.reset_index(drop=True)
y_train3 = y_train3.reset_index(drop=True)
y_test3 = y_test3.reset_index(drop=True)
# Run the knn_rate function on data using 1 to 25 neighbors
accu_list3 = [knn_rate(X_train3,y_train3,X_test3,y_test3,i) for i in range(1,26)]
# Make a plot
plt.close()
plt.rcParams["figure.figsize"] = [10, 8]
plt.plot(list(range(1,26)),accu_list3,'-o')
plt.xticks(np.arange(0, 26, 1.0))
plt.title('Number of Neighbors vs. Success Rate for Adult Dataset')
plt.xlabel('Number of Neighbors')
plt.ylabel('Success Rate')
plt.grid()
plt.savefig('knn_adult.png')
# Time the KNN algorithm
start = time.time()
knn_rate(X_train3,y_train3,X_test3,y_test3,22)
end = time.time()
print("It takes " + str(end - start) + " seconds to run KNN algorithm on adult dataset with 22 neighbors.")
#------------------------------------------------------------
# Madelon Dataset
#------------------------------------------------------------
# Read in features/labels training sets and features/labels testing set
x_train_m = pd.read_csv("madelon_train.data",header = None,sep = '\s+')
y_train_m = pd.read_csv("madelon_train.labels",header = None)
x_test_m = pd.read_csv("madelon_valid.data",header = None,sep = '\s+')
y_test_m = pd.read_csv("madelon_valid.labels",header = None)
# Run the knn_rate function on data using 1 to 25 neighbors
accu_list4 = [knn_rate(x_train_m,y_train_m,x_test_m,y_test_m,i) for i in range(1,26)]
# Make a plot
plt.close()
plt.rcParams["figure.figsize"] = [10, 8]
plt.plot(list(range(1,26)),accu_list4,'-o')
plt.xticks(np.arange(0, 26, 1.0))
plt.title('Number of Neighbors vs. Success Rate for Madelon Dataset')
plt.xlabel('Number of Neighbors')
plt.ylabel('Success Rate')
plt.grid()
plt.savefig('knn_madelon.png')
# Time the KNN algorithm
start = time.time()
knn_rate(x_train_m,y_train_m,x_test_m,y_test_m,21)
end = time.time()
print("It takes " + str(end - start) + " seconds to run KNN algorithm on madelon dataset with 21 neighbors.")
#---------------------------------------------
# Parkinson Data
#---------------------------------------------
# Import pd_speech_features.csv
df_parkinson = pd.read_csv("pd_speech_features.csv")
# Split the features and the labels
X_p = df_parkinson.iloc[:,1:754]
y_p = df_parkinson.iloc[:,754]
# Split dataset into training and testing sets
X_train_p, X_test_p, y_train_p, y_test_p = train_test_split(X_p, y_p, test_size=0.2, random_state=5)
# Reset the indices
X_train_p = X_train_p.reset_index(drop=True)
X_test_p = X_test_p.reset_index(drop=True)
y_train_p = y_train_p.reset_index(drop=True)
y_test_p = y_test_p.reset_index(drop=True)
# Run the knn_rate function on data using 1 to 25 neighbors
accu_list5 = [knn_rate(X_train_p,y_train_p,X_test_p,y_test_p,i) for i in range(1,26)]
# Make a plot
plt.close()
plt.rcParams["figure.figsize"] = [10, 8]
plt.plot(list(range(1,26)),accu_list5,'-o')
plt.xticks(np.arange(0, 26, 1.0))
plt.title('Number of Neighbors vs. Success Rate for Parkinson Dataset')
plt.xlabel('Number of Neighbors')
plt.ylabel('Success Rate')
plt.grid()
plt.savefig('knn_parkinson.png')
# Time the KNN algorithm
start = time.time()
knn_rate(X_train_p,y_train_p,X_test_p,y_test_p,23)
end = time.time()
print("It takes " + str(end - start) + " seconds to run KNN algorithm on parkinson dataset with 23 neighbors.")
|
Python
|
CL
|
2c9d270e71fbad82f5036bc0c14cc9d40c5879d7bba5d59c1dfe6414241bf6a6
|
import FWCore.ParameterSet.Config as cms
from RecoMuon.TrackingTools.MuonServiceProxy_cff import *
from RecoMuon.TrackingTools.MuonTrackLoader_cff import *
from TrackingTools.GeomPropagators.StraightLinePropagator_cfi import *
MuonServiceProxy.ServiceParameters.Propagators.append('StraightLinePropagator')
cosmicMuons = cms.EDProducer("CosmicMuonProducer",
MuonTrackLoaderForCosmic,
MuonServiceProxy,
TrajectoryBuilderParameters = cms.PSet(
DTRecSegmentLabel = cms.InputTag("dt4DSegments"),
BackwardMuonTrajectoryUpdatorParameters = cms.PSet(
MaxChi2 = cms.double(100.0),
RescaleError = cms.bool(False),
RescaleErrorFactor = cms.double(1.0),
Granularity = cms.int32(2),
UseInvalidHits = cms.bool(False),
ExcludeRPCFromFit = cms.bool(False)
),
RPCRecSegmentLabel = cms.InputTag("rpcRecHits"),
MuonTrajectoryUpdatorParameters = cms.PSet(
MaxChi2 = cms.double(3000.0),
RescaleError = cms.bool(False),
RescaleErrorFactor = cms.double(1.0),
Granularity = cms.int32(0),
UseInvalidHits = cms.bool(False),
ExcludeRPCFromFit = cms.bool(False)
),
EnableRPCMeasurement = cms.bool(True),
CSCRecSegmentLabel = cms.InputTag("cscSegments"),
BuildTraversingMuon = cms.bool(False),
Strict1Leg = cms.bool(False),
EnableDTMeasurement = cms.bool(True),
MuonSmootherParameters = cms.PSet(
PropagatorAlong = cms.string('SteppingHelixPropagatorAny'),
PropagatorOpposite = cms.string('SteppingHelixPropagatorAny'),
RescalingFactor = cms.double(5.0)
),
Propagator = cms.string('SteppingHelixPropagatorAny'),
EnableCSCMeasurement = cms.bool(True),
MuonNavigationParameters = cms.PSet(
Barrel = cms.bool(True),
Endcap = cms.bool(True)
)
),
MuonSeedCollectionLabel = cms.string('CosmicMuonSeed')
)
|
Python
|
CL
|
01bc5db1c1d71ac7a9b03fca873eed03db7ee7bce58d8e7ae5fa9872e2f00acc
|
import mxnet as mx
import numpy as np
from collections import namedtuple
GRUState=namedtuple('GRUState', ['h'])
GRUParam=namedtuple('GRUParam', ['gates_i2h_weight','gates_i2h_bias', 'gates_h2h_weight', 'gates_h2h_bias','trans_i2h_weight','trans_i2h_bias', 'trans_h2h_weight', 'trans_h2h_bias'])
def myGRU(num_hidden, indata, prev_state, param, seqidx, layeridx, dropout=0.):
if dropout>0.:
indata=mx.sym.Dropout(data=indata, p=dropout)
i2h=mx.sym.FullyConnected(data=indata, weight=param.gates_i2h_weight, bias=param.gates_i2h_bias, num_hidden=num_hidden*2, name='t%d_l%d_gates_i2h'%(seqidx, layeridx))
h2h=mx.sym.FullyConnected(data=prev_state.h, weight=param.gates_h2h_weight, bias=param.gates_h2h_bias, num_hidden=num_hidden*2, name='t%d_l%d_gates_h2h'%(seqidx, layeridx))
gates=i2h+h2h
slice_gates=mx.sym.SliceChannel(gates, num_outputs=2, name='t%d_l%d_slice'%(seqidx, layeridx))
update_gate=mx.sym.Activation(data=slice_gates[0], act_type='sigmoid')
reset_gate=mx.sym.Activation(data=slice_gates[1], act_type='sigmoid')
htrans_i2h=mx.sym.FullyConnected(data=indata, weight=param.trans_i2h_weight, bias=param.trans_i2h_bias, num_hidden=num_hidden, name='t%d_l%d_trans_i2h'%(seqidx, layeridx))
h_after_reset=prev_state.h*reset_gate
htrans_h2h=mx.sym.FullyConnected(data=h_after_reset, weight=param.trans_h2h_weight, bias=param.trans_h2h_bias, num_hidden=num_hidden, name='t%d_l%d_trans_h2h'%(seqidx, layeridx))
htrans=htrans_i2h+htrans_h2h
htrans_act=mx.sym.Activation(data=htrans, act_type='tanh')
next_h=prev_state.h+update_gate*(htrans_act-prev_state.h)
return GRUState(h=next_h)
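# For reference, the update implemented above is the standard GRU written in
# "interpolation" form (per time step t and layer l):
#   z = sigmoid(W_z x + U_z h_prev)          # update gate (slice 0)
#   r = sigmoid(W_r x + U_r h_prev)          # reset gate  (slice 1)
#   h_cand = tanh(W_h x + U_h (r * h_prev))
#   h_next = h_prev + z * (h_cand - h_prev)  # equivalently (1 - z) * h_prev + z * h_cand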
def my_GRU_unroll(num_gru_layer, seq_len, input_size, num_hidden, num_embed, num_label, dropout=0.):
seqidx=0
embed_weight=mx.sym.Variable('embed_weight')
cls_weight=mx.sym.Variable('cls_weight')
cls_bias=mx.sym.Variable('cls_bias')
param_cells=[]
last_states=[]
for i in xrange(num_gru_layer):
param_cells.append(GRUParam(gates_i2h_weight=mx.sym.Variable('l%d_i2h_gates_weight'%i),
gates_i2h_bias=mx.sym.Variable('l%d_i2h_gates_bias'%i),
gates_h2h_weight=mx.sym.Variable('l%d_h2h_gates_weight'%i),
gates_h2h_bias=mx.sym.Variable('l%d_h2h_gates_bias'%i),
trans_i2h_weight=mx.sym.Variable('l%d_i2h_trans_weight'%i),
trans_i2h_bias=mx.sym.Variable('l%d_i2h_bias'%i),
trans_h2h_weight=mx.sym.Variable('l%d_h2h_trans_weight'%i),
trans_h2h_bias=mx.sym.Variable('l%d_h2h_bias'%i)))
state=GRUState(h=mx.sym.Variable('l%d_init_h'%i))
last_states.append(state)
data=mx.sym.Variable('data')
data=mx.sym.BlockGrad(data)
label=mx.sym.Variable('label')
embed=mx.sym.Embedding(data=data, input_dim=input_size, output_dim=num_embed, weight=embed_weight)
wordvec=mx.sym.SliceChannel(data=embed, num_outputs=seq_len, squeeze_axis=1)
hidden_all=[]
for seqidx in xrange(seq_len):
hidden=wordvec[seqidx]
for i in xrange(num_gru_layer):
if i==0:
drop_r=0.
else:
drop_r=dropout
next_state=myGRU(num_hidden, indata=hidden, prev_state=last_states[i],param=param_cells[i],seqidx=seqidx, layeridx=i, dropout=drop_r)
hidden=next_state.h
last_states[i]=next_state
if drop_r:
hidden=mx.sym.Dropout(data=hidden, p=drop_r)
hidden=mx.sym.BatchNorm(data=hidden, name='bn', fix_gamma=True)
hidden_all.append(hidden)
#If seq2seq learning
'''
hidden_concat=mx.sym.Concat(*hidden_all, dim=0)
pred=mx.sym.FullyConnected(data=hidden_concat, num_hidden=num_label)
label=mx.sym.Transpose(data=label)
label=mx.sym.Reshape(data=label, target_shape=(0,))
'''
#If one final output
fc=mx.sym.FullyConnected(data=hidden, weight=cls_weight, bias=cls_bias, num_hidden=num_label)
loss=mx.sym.LinearRegressionOutput(data=fc, label=mx.sym.Variable('label'))
return loss
if __name__=='__main__':
print 'To be tested'
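    # Minimal smoke test (our sketch, not the original author's test code):
    # build the unrolled symbol with made-up dimensions and list its
    # arguments. Requires a working MXNet installation.
    sym = my_GRU_unroll(num_gru_layer=2, seq_len=10, input_size=1000,
                        num_hidden=128, num_embed=64, num_label=1, dropout=0.2)
    print sym.list_arguments()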
|
Python
|
CL
|
5777f23e0fa115436d7954696e3568231389f86ba5ae5b267c1634f46053bd98
|
# -*- coding: utf-8 -*-
'''
Weather functions to be used with the NWS radar and weather information
download script.
Jesse Hamner, 2019-2020
'''
from __future__ import print_function
import os
import re
import datetime
import json
import logging
from time import sleep
from outage import Outage
import requests
import yaml
import pytz
from bs4 import BeautifulSoup
# requests.packages.urllib3.disable_warnings()
def load_settings_and_defaults(settings_dir, settings_file, defaults_file):
"""
Load in all of the settings, default data, and organize the giant data bag
into a single dict that can be passed around. This is less elegant than it
should be.
"""
logging.info('Loading %s from %s', settings_file, settings_dir)
data = load_yaml(settings_dir, settings_file)
logging.info('Loading %s from %s', defaults_file, settings_dir)
defaults = load_yaml(settings_dir, defaults_file)
if not (data and defaults):
logging.error('Unable to load settings files. These are required.')
return False
data['defaults'] = defaults
data['today_vars'] = get_today_vars(data['timezone'])
data['bands'] = data['defaults']['goes_bands']
data['alert_counties'] = populate_alert_counties(data['counties_for_alerts'],
data['defaults']['alerts_root'])
if not data['alert_counties']:
logging.error('Unable to determine county list. Exiting now.')
return False
logging.info('alert counties: %s', str(data['alert_counties']))
data['defaults']['afd_divisions'][4] = re.sub('XXX',
data['nws_abbr'],
defaults['afd_divisions'][4])
logging.info('Defaults and settings loaded.')
return data
def prettify_timestamp(timestamp):
"""
Make a more user-readable time stamp for current conditions.
"""
posix_timestamp = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S+00:00')
logging.debug('Input timestamp: %s', format(timestamp))
logging.debug('Posix timestamp: %s', posix_timestamp)
timetext = datetime.datetime.strftime(posix_timestamp, '%Y-%m-%d, %H:%M:%S UTC')
logging.debug('Nicely formatted text: %s', timetext)
return timetext
def sanity_check(value, numtype='float'):
"""
Check for an actual value in the argument. If it has one, return a
formatted text string.
If it has no value, return a missing value.
"""
logging.debug('sanity_check() function input value: %s', value)
if numtype != 'float':
try:
return str('{0:.0f}'.format(float(value)))
except TypeError:
return -9999.9
try:
return str('{0:6.2f}'.format(float(value)))
except TypeError:
return -9999.9
def quick_doctext(doctext, indicator, value, unit=''):
"""
Convenience function to standardize the output format of a string.
"""
unitspace = ' '
if unit == '%':
unitspace = ''
return str('{0}\n{1} {2}{3}{4}'.format(doctext, indicator, value, unitspace, unit))
def get_metar(base_url, station):
"""
Hit up https://w1.weather.gov/data/METAR/XXXX.1.txt
and pull down the latest current conditions METAR data.
"""
metar = requests.get(os.path.join(base_url, station),
verify=False, timeout=10)
if metar.status_code != 200:
logging.error('Response from server was not OK: %s', metar.status_code)
return None
return metar.text
def outage_check(data, filename='outage.txt'):
"""
Quality assurance check on the weather service :-)
"""
outage_checker = Outage(data)
outage_checker.check_outage()
outage_result = outage_checker.parse_outage()
outfilepath = os.path.join(data['output_dir'], filename)
if outage_result is None:
logging.info('No radar outage(s) detected. Proceeding.')
try:
logging.debug('Removing file at %s', outfilepath)
os.unlink(outfilepath)
except OSError:
logging.error('File does not exist: %s', outfilepath)
else:
logging.warn('There is radar outage text: %s', outage_result)
try:
cur = open(outfilepath, 'w')
cur.write(outage_result)
cur.close()
except OSError as exc:
logging.error('OSError-- %s: %s', outfilepath, exc)
return outage_result
def write_json(some_dict, outputdir='/tmp', filename='unknown.json'):
"""
Write an individual dictionary to a JSON output file.
"""
filepath = os.path.join(outputdir, filename)
with open(filepath, 'w') as out_obj:
logging.info('writing json to %s', filepath)
try:
out_obj.write(json.dumps(some_dict))
logging.debug('raw dict: %s', some_dict)
return True
except Exception as exc:
logging.error('Ugh: %s', exc)
return False
def write_dict(filepath, some_dict):
"""
Write out a dict to a text file.
"""
with open(filepath, 'w') as current_alerts:
for key, value in some_dict.iteritems():
logging.debug('Key for this alert entry: %s', key)
current_alerts.write('{0}: {1}\n'.format(key, value))
return True
def write_text(filepath, some_text):
"""
Write a text string out to a file.
"""
    with open(filepath, 'w') as text_file:
        logging.debug('writing text to %s', filepath)
        text_file.write(some_text)
return True
def pull_beaufort_scale():
"""
Pull in the Beaufort scale information, if needed.
"""
b_url = 'https://www.weather.gov/mfl/beaufort'
pagerequest = requests.get(b_url)
if pagerequest.status_code != 200:
logging.error('Response from server was not OK: %s', pagerequest.status_code)
return None
    beaufort_page = BeautifulSoup(pagerequest.text, 'lxml')  # reuse the response fetched above
btable = beaufort_page.find('table')
tablerows = btable.find_all('tr')
dataset = []
for i in tablerows:
row = []
cells = i.find_all('td')
for j in cells:
if re.search(r'\d{1,}-\d{1,}', j.text):
vals = j.text.split('-')
row.extend(vals)
else:
row.append(re.sub(r'\s{2,}', ' ', j.text))
dataset.append(row)
return dataset
def conditions_summary(conditions):
"""
Return a dict of consumer-level observations, say, for display on a
smart mirror or tablet.
"""
keys = ['timestamp', 'dewpoint', 'barometricPressure', 'windDirection',
'windSpeed', 'windGust', 'precipitationLastHour', 'temperature',
'relativeHumidity', 'heatIndex']
summary = dict()
for key in keys:
try:
summary[key] = conditions['properties'][key]
except Exception as exc:
summary[key] = 'none'
            logging.error('Error trying to read summary for key %s: %s', key, exc)
return summary
def wind_direction(azimuth, data):
"""
Convert "wind coming from an azimuth" to cardinal directions
"""
try:
azimuth = float(azimuth)
except Exception as exc:
logging.error('Unable to convert azimuth to a numerical value: %s.\nReturning None.', exc)
return None
plusminus = data['defaults']['plusminus'] # 11.25 degrees
for az_deg, val in data['defaults']['azdir'].iteritems():
az_deg = float(az_deg)
if (az_deg - plusminus < azimuth) and (az_deg + plusminus >= azimuth):
return val
return 'None'
def get_hydrograph(abbr,
hydro_url='https://water.weather.gov/resources/hydrographs/',
outputdir='/tmp'):
"""
Retrieve hydrograph image (png) of the current time and specified location
Can find these abbreviations at
https://water.weather.gov/ahps2/hydrograph.php
Raw data output in XML for a location (here, "cart2"):
https://water.weather.gov/ahps2/hydrograph_to_xml.php?gage=cart2&output=xml
"""
filename = '{0}_hg.png'.format(abbr.lower())
retval = requests.get(os.path.join(hydro_url, filename), verify=False)
logging.debug('retrieving: %s', retval.url)
logging.debug('return value: %s', retval)
if retval.status_code == 200:
cur1 = open(os.path.join(outputdir, 'current_hydrograph.png'), 'wb')
cur1.write(retval.content)
cur1.close()
return retval
def get_today_vars(timezone='America/Chicago'):
"""
Get various strings from today's date for use in GOES image retrieval.
"""
today = datetime.datetime.now()
utcnow = datetime.datetime.utcnow()
local_tz = pytz.timezone(timezone)
return_dict = dict(doy=datetime.datetime.strftime(today, '%j'),
year=datetime.datetime.strftime(today, '%Y'),
day=datetime.datetime.strftime(today, '%d'),
mon=datetime.datetime.strftime(today, '%b'),
hour=datetime.datetime.strftime(today, '%H'),
minute=datetime.datetime.strftime(today, '%M'),
timezone=timezone,
offset=local_tz.utcoffset(today).total_seconds()/3600,
now=today,
utcnow=utcnow,
utcdoy=datetime.datetime.strftime(utcnow, '%j'),
utcyear=datetime.datetime.strftime(utcnow, '%Y')
)
return return_dict
def htable_current_conditions(con_dict,
tablefile='current_conditions.html',
outputdir='/tmp/'):
"""
Write out a simple HTML table of the current conditions.
"""
try:
with open(os.path.join(outputdir, tablefile), 'w') as htmlout:
htmlout.write('<table>\n')
for key, value in con_dict.iteritems():
logging.debug('%s: %s', key, value)
htmlout.write('<tr><td>{0}</td><td>{1} {2}</td></tr>\n'.format(value[2],
value[0],
value[1])
)
htmlout.write('</table>\n')
return True
except KeyError as exc:
logging.error('Exception: %s', exc)
return False
def load_yaml(directory, filename):
"""
Load a YAML file in and return the dictionary that is created.
"""
logging.debug('Entering load_yaml() function.')
try:
with open(os.path.join(directory, filename), 'r') as iyaml:
logging.info('Loading YAML file: %s', os.path.join(directory, filename))
return yaml.load(iyaml.read(), Loader=yaml.Loader)
except Exception as exc:
print('EXCEPTION -- unable to open yaml settings file: {0}'.format(exc))
logging.error('Unable to open yaml settings file: %s', exc)
return None
def convert_units(value, from_unit, to_unit, missing=-9999.9):
"""
As elsewhere, this function depends on use of specific unit conventions,
as labeled in the settings.yml document (and comments).
"""
convertme = {'m_s-1':
{'kph': lambda x: float(x) * 3.6,
'mph': lambda x: float(x) * 2.23694,
'kt': lambda x: float(x) * 1.94384
},
'kph':
{'m_s-1': lambda x: float(x) * 0.2778,
'mph': lambda x: float(x) * 0.62137,
'kt': lambda x: float(x) * 0.54
},
'km_h-1':
{'m_s-1': lambda x: float(x) * 0.2778,
'mph': lambda x: float(x) * 0.62137,
'kt': lambda x: float(x) * 0.54
},
'mph':
{'m_s-1': lambda x: float(x) * 0.4470389,
'kph': lambda x: float(x) * 1.60934,
'kt': lambda x: float(x) * 0.869
},
'kt':
{'m_s-1': lambda x: float(x) * 0.514443,
'mph': lambda x: float(x) * 1.1508,
'kph': lambda x: float(x) * 1.852
},
'mb':
{'Pa': lambda x: float(x) * 100.0,
'kPa': lambda x: float(x) * 0.10,
'bar': lambda x: float(x) * 1000.0,
'inHg': lambda x: float(x) * 0.02953
},
'Pa':
{'mb': lambda x: float(x) * 1E-2,
'kPa': lambda x: float(x) * 1E-3,
'bar': lambda x: float(x) * 1E-5,
'inHg': lambda x: float(x) * 0.0002953
},
'kPa':
                 {'mb': lambda x: float(x) * 10.0,
'Pa': lambda x: float(x) * 1E3,
'bar': lambda x: float(x) * 0.01,
'inHg': lambda x: float(x) * 0.2953
},
'inHg':
{'mb': lambda x: float(x) * 33.86390607,
'Pa': lambda x: float(x) * 3386.390607,
'bar': lambda x: float(x) * 0.03386390607,
'kPa': lambda x: float(x) * 3.386390607
},
'C':
{'F': lambda x: (float(x) * 9.0/5.0) + 32.0,
'R': lambda x: (float(x) * 9.0/5.0) + 491.67,
'K': lambda x: float(x) + 273.15
},
'F':
{'C': lambda x: (float(x) - 32.0) * 5.0 / 9.0,
                  'R': lambda x: float(x) + 459.67,
'K': lambda x: ((float(x) - 32.0) * 5.0 / 9.0) + 273.15
},
'percent':
{'percent': lambda x: x
}
}
percents = ['percent', 'pct', '%', 'Percent']
if value == '' or value == 'None' or value is None:
return missing
if from_unit in percents or to_unit in percents:
return value
if value == missing:
return missing
    try:
        return convertme[from_unit][to_unit](value)
    except (KeyError, ValueError):
        # unknown unit pair or unconvertible value
        return None
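# Illustrative conversions (our examples, not part of the original module):
#   convert_units(100, 'C', 'F')          -> 212.0
#   convert_units(1013.25, 'mb', 'inHg')  -> ~29.92
#   convert_units('', 'mb', 'inHg')       -> -9999.9 (missing-value sentinel)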
def beaufort_scale(data, speed, units='mph'):
"""
Determine the Beaufort scale ranking of a given wind speed.
Gusts are NOT used to determine scale rank.
"""
blist = data['defaults']['beaufort_scale']
if speed is None or speed == 'None':
logging.error('Input speed %s cannot be converted to Beaufort. Returning None.', speed)
return None
logging.debug('input speed value: %s %s', speed, units)
if units != 'mph':
speed = convert_units(speed, from_unit=units, to_unit='mph')
logging.debug('output speed value: %s mph', speed)
speed = int(speed)
logging.debug('integer speed value: %s mph', speed)
for i in blist.keys():
logging.debug('Key: %s\tmin speed: %s\tmax speed: %s', i, blist[i][0], blist[i][1])
if int(blist[i][0]) <= speed and speed <= int(blist[i][1]):
logging.debug('Speed (%s mph) between %s & %s. Returning %s', speed,
blist[i][0],
blist[i][1],
i)
return int(i)
return None
def make_request(url, retries=1, payload=False, use_json=True):
"""
Uniform function for requests.get().
"""
while retries:
if payload:
try:
response = requests.get(url, params=payload, verify=False, timeout=10)
            except requests.exceptions.ReadTimeout as exc:
                logging.warn('Request timed out: %s', exc)
                sleep(2)
                retries = retries - 1  # count this attempt so a persistent timeout cannot loop forever
                continue
else:
try:
response = requests.get(url, verify=False, timeout=10)
except requests.exceptions.ReadTimeout as exc:
logging.warn('Request timed out: %s', exc)
sleep(2)
retries = retries - 1
continue
if response:
resp = judge_payload(response, use_json)
if resp:
return resp
retries = retries - 1
logging.error('Unsuccessful response (%s). Returning -None-', response.status_code)
return None
def judge_payload(response, use_json):
"""
Pull out the request payload, provided it's either text or json.
"""
try:
if response.status_code:
pass
except Exception as exc:
logging.error('No response to HTTP query. Returning -None-.')
return None
if response.status_code == 200:
        if use_json is True:
            try:
                return response.json()
            except Exception as exc:
                logging.warn('Unable to decode JSON: %s', exc)
                return None  # the request itself succeeded; do not fall through to the "not OK" log
else:
try:
return response.text
except Exception as exc:
logging.error('Unable to decode response text: %s', exc)
return None
logging.error('Response from server was not OK: %s', response.status_code)
return None
def populate_alert_counties(somedict, alerts_url):
"""
Takes in a dict, formatted with state name(s) as the key, with a list
of county names as the value.
Returns a populated dictionary with records in the format:
'countyname': [1, 'CountyAbbr', 'ZoneAbbr', 'StateAbbr']
"""
returndict = {}
for key, values in somedict.iteritems():
statezonelist = get_zonelist(key, 'zone', alerts_url)
if not statezonelist:
return None
statecountylist = get_zonelist(key, 'county', alerts_url)
if not statecountylist:
return None
for county in values:
logging.info('Opening zone and county tables for county: %s', county)
cabbr = parse_zone_table(county, statecountylist)
zabbr = parse_zone_table(county, statezonelist)
returndict[county] = [1, cabbr, zabbr, key]
return returndict
def get_zonelist(stateabbr, zoneorcounty, alerts_url):
"""
go to alerts.weather.gov/cap/ and retrieve the forecast zone / county for
the given name of the county. There are other zone names than only county
names, like "Central Brewster County", "Chisos Basin", "Coastal Galveston",
or even "Guadalupe Mountains Above 7000 Feet", so the user can also list
these as "counties".
"""
x_value = 0
if zoneorcounty == 'zone':
x_value = 2
if zoneorcounty == 'county':
x_value = 3
if x_value == 0:
logging.error('unable to determine "zone" or "county". Returning None.')
return None
localfile = 'local_{1}_table_{0}.html'.format(stateabbr, zoneorcounty)
logging.info('Checking for existence of %s locally.', localfile)
if os.path.exists(localfile) is not True:
locally_cache_zone_table(alerts_url, stateabbr, zoneorcounty)
if os.path.exists(localfile) is True:
return retrieve_local_zone_table(stateabbr, zoneorcounty)
logging.error('Unable to retrieve zone table. Returning None.')
return None
def retrieve_local_zone_table(stateabbr, zoneorcounty):
"""
Check for, and retrieve, a locally cached copy of the zone/county table.
"""
table = False
filename = 'local_{1}_table_{0}.html'.format(stateabbr, zoneorcounty)
with open(filename, 'r') as localcopy:
table = BeautifulSoup(localcopy.read(), 'lxml')
parsed_table1 = table.find_all('table')[3]
rows = parsed_table1.find_all('tr')
return rows
def locally_cache_zone_table(alerts_url, stateabbr, zoneorcounty):
"""
The zones and counties change so infrequently that it makes no sense to
retrieve the data live, and locally caching the data will improve performance.
"""
write_status = False
page = '{0}.php'.format(stateabbr)
rooturl = os.path.join(alerts_url, page)
x_value = 0
if zoneorcounty == 'zone':
x_value = 2
if zoneorcounty == 'county':
x_value = 3
if x_value == 0:
return None
payload = {'x': x_value}
logging.debug('Retrieving: %s -- with payload %s', rooturl, payload)
returned_table = make_request(url=rooturl, payload=payload, use_json=False)
filename = 'local_{1}_table_{0}.html'.format(stateabbr, zoneorcounty)
with open(filename, 'w') as localcopy:
localcopy.write(returned_table)
write_status = True
return write_status
def parse_zone_table(county, rows):
"""
find the zone or county abbreviation within a returned table that includes
a county name or area name to match.
"""
for i in rows:
cells = i.find_all('td')
if len(cells) > 1:
if cells[2].text.lower() == county.lower():
# print('{0}: {1}'.format(cells[2].text.strip(), cells[1].text.strip()))
return cells[1].text.strip()
return None
def make_timestamp():
"""
Returns tuple of two strings: "YYYYMMDD" and "HHMMSS"
"""
    dutc = datetime.datetime.utcnow()  # 'datetime' here is the module, not the class
hhmmss = dutc.strftime('%H%M%S')
ymd = dutc.strftime('%Y%m%d')
return (ymd, hhmmss)
|
Python
|
CL
|
63118ccc1dc1a82d3ffd23dbbd6ff56c7f26942f90489ed6708d9c4d63df9fae
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("PROD")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(10)
)
#generation
process.source = cms.Source("EmptySource")
process.load("Configuration.Generator.QCDForPF_cfi")
process.generator.comEnergy = 14000.
#fastsim
process.load("FastSimulation.Configuration.RandomServiceInitialization_cff")
process.load("FastSimulation.Configuration.CommonInputs_cff")
process.RandomNumberGeneratorService.generator.initialSeed=1
process.fastSimProducer.SimulateCalorimetry = True
for layer in process.fastSimProducer.detectorDefinition.BarrelLayers:
layer.interactionModels = cms.untracked.vstring("pairProduction", "nuclearInteraction", "bremsstrahlung", "energyLoss", "multipleScattering", "trackerSimHits")
for layer in process.fastSimProducer.detectorDefinition.ForwardLayers:
layer.interactionModels = cms.untracked.vstring("pairProduction", "nuclearInteraction", "bremsstrahlung", "energyLoss", "multipleScattering", "trackerSimHits")
process.famosPileUp.PileUpSimulator.averageNumber = 0.0
# Get frontier conditions - not applied in the HCAL, see below
# Values for globaltag are "STARTUP_V5::All", "1PB::All", "10PB::All", "IDEAL_V5::All"
process.GlobalTag.globaltag = "MC_3XY_V14::All"
# Parametrized magnetic field (new mapping, 4.0 and 3.8T)
process.load("Configuration.StandardSequences.MagneticField_40T_cff")
#process.load("Configuration.StandardSequences.MagneticField_38T_cff")
process.VolumeBasedMagneticFieldESProducer.useParametrizedTrackerField = True
process.load("RecoParticleFlow.PFProducer.particleFlowSimParticle_cff")
process.load("RecoParticleFlow.PFTracking.particleFlowDisplacedVertexCandidate_cff")
process.load("RecoParticleFlow.PFTracking.particleFlowDisplacedVertex_cff")
process.p1 = cms.Path(
process.generator +
process.famosWithEverything +
process.caloJetMetGen +
process.particleFlowSimParticle +
process.particleFlowDisplacedVertexCandidate +
process.particleFlowDisplacedVertex
)
process.load("FastSimulation.Configuration.EventContent_cff")
process.aod = cms.OutputModule("PoolOutputModule",
process.AODSIMEventContent,
fileName = cms.untracked.string('aod.root')
)
process.reco = cms.OutputModule("PoolOutputModule",
process.RECOSIMEventContent,
fileName = cms.untracked.string('reco.root')
)
process.load("RecoParticleFlow.Configuration.Display_EventContent_cff")
process.display = cms.OutputModule("PoolOutputModule",
process.DisplayEventContent,
fileName = cms.untracked.string('display.root')
)
#process.outpath = cms.EndPath(process.aod + process.reco + process.display)
process.outpath = cms.EndPath(process.aod+process.display)
#
|
Python
|
CL
|
5ee42374e180513f9e25518bac782d6cc15ea72c113c5ade1f3a1be41fe8cc56
|
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
# xierpa server
# Copyright (c) 2014+ buro@petr.com, www.petr.com, www.xierpa.com
#
# X I E R P A 3
# Distribution by the MIT License.
#
# -----------------------------------------------------------------------------
#
# etreenode.py
#
from lxml import etree
from xierpa3.toolbox.transformer import TX
from xierpa3.constants.constants import Constants
class EtreeNode:
u"""
The <code>EtreeNode</code> class provides a wrapper around any etree node, to make a more convenient and robust
Python based API.<br/>
    Note that if <attr>xml</attr> is supplied to the constructor, it needs to be valid XML, since no other checking
    or transformation is performed at this stage.<br/>
    The same is true the other way around. This especially applies to instances read from and written to <code>
    XmlTreeField</code> fields. For documentation on Etree see <a href="http://lxml.de/tutorial.html">the official
    tutorial</a>.
"""
C = Constants
def __init__(self, tree=None, xml=None):
if xml is not None:
tree = etree.fromstring(xml)
assert not isinstance(tree, EtreeNode)
assert tree.__class__.__name__ in ('_Comment', '_Element') # Just to be sure it is an etree
self._etree = tree
def __repr__(self):
if self._etree is None:
return self.__class__.__name__
return u'EtreeNode: ' + self._toString()
    def __nonzero__(self):
        return self._etree is not None
    def __ne__(self, other):
        return self is not other
def __eq__(self, other):
return self is other
def __str__(self):
return self.__repr__()
def __iter__(self):
nodes = self._getTree()
if len(nodes):
for i in nodes:
yield self.__class__(i)
@classmethod
def getXPath(cls, tree, xpath):
u"""
The <code>getXPath</code> class method queries the <attr>tree</attr> by <attr>xpath</attr> and answers a
list of <code>EtreeNode</code> instances for every matching entry. There are several answering conditions: if
<attr>tree</attr> is <code>None</code> then answer <code>None</code>. If there is no result, then answer
<code>None</code>. If the result is a list of <code>basestring</code> instances (tested on the first of the
list) then answer the list untouched. Otherwise make a new list with the result <code>etree</code> nodes wrapped
as <code>EtreeNode</code> instances.
"""
if tree is None:
return None
if isinstance(tree, cls):
tree = tree._getTree()
result = tree.xpath(xpath)
if not result:
return None
if isinstance(result[0], basestring):
return result
enodes = []
for n in result:
enodes.append(cls(n))
return enodes
@classmethod
def getXPathNode(cls, tree, xpath, index=0):
u"""
The <code>getXPathNode</code> class method does the same as <code>cls.getXPath</code> except that it
answers the element of the list indicated by <attr>index</attr>. If there is no result, or if the <attr>index
</attr> exceeds the length of the result list, then <code>None</code> is answered. Default value for the
optional <attr>index</attr> attribute is <code>0</code>, resulting in the first element if it exists.
"""
if tree is None:
return None
if isinstance(tree, cls):
tree = tree._getTree()
result = tree.xpath(xpath)
        if not result or len(result) <= index:
return None
if isinstance(result[index], basestring):
return result[index]
return cls(result[index])
def __getattr__(self, key):
if key.startswith('_'):
return self.__dict__.get(key)
return self._get(key)
def __setattr__(self, key, value):
if key.startswith('_'):
self.__dict__[key] = value
else:
self._set(key, value)
def _xpath(self, xpath):
u"""
The <code>_xpath</code> function is the instance equivalent of <code>cls.getXPathNode</code>.
"""
return self.__class__.getXPath(self._getTree(), xpath)
def _xpathNode(self, xpath):
u"""
The <code>_xpathNode</code> function is the instance equivalent of <code>cls.getXPathNode</code>.
"""
return self.__class__.getXPathNode(self._getTree(), xpath)
def _xpathString(self, xpath):
u"""
The <code>_xpathString</code> method answers the concatenated string of all results (strings or elements)
of <attr>xpath</attr>.
"""
result = self._xpath(xpath)
if result is not None:
return ''.join(result)
return ''
    def _set(self, key, value):
        tree = self._getTree()
        if tree is not None:
            # Convert the Python attribute name to its XML form, mirroring _get.
            key = TX.pyAttrName2XmlAttrName(key)
            tree.set(key, repr(value))
def _get(self, key):
key = TX.pyAttrName2XmlAttrName(key)
value = self._xpath('@' + key)
if value:
return TX.xmlValue2PyValue(value[0], self.C.XSL_XMLCONVERSIONS)
return None
def _getTag(self):
tree = self._getTree()
if tree is not None and hasattr(tree, 'tag'):
tag = tree.tag
if isinstance(tag, basestring):
return tag
return ''
def _getTree(self):
return self._etree
def _getText(self):
return self._getTree().text
def _getTail(self):
return self._getTree().tail
def _getAttributes(self):
attributes = {}
for index, value in enumerate(self._xpath('@*') or []):
attrname = TX.xmlAttrName2PyAttrName(self._xpath('name(@*[%d])' % (index + 1)))
attributes[attrname] = TX.xmlValue2PyValue(value, self.C.XSL_XMLCONVERSIONS)
return attributes
def _getNodes(self, name=None):
u"""
Gets all nodes that match name.
"""
if name is None:
name = '*'
return self._xpath('./' + name)
def _getFirstNode(self, name=None):
u"""
Gets first node that matches name.
"""
nodes = self._getNodes(name)
if nodes:
return nodes[0]
return None
def _toString(self):
tree = self._getTree()
if tree is not None:
return etree.tostring(tree, encoding='utf-8').decode('utf-8')
return None
def _childrenToString(self, method=None):
result = []
for child in self._getTree():
# Use html output by default (no self-closing tags).
result.append(etree.tostring(child, encoding='utf-8', method=method or 'html').decode('utf-8'))
return ''.join(result)
def _childrenNodes(self):
u"""
Returns etree child nodes as EtreeNode objects.
"""
result = []
for child in self._getTree():
result.append(EtreeNode(child))
return result
if __name__ == '__main__':
tree = etree.fromstring(u'<aaa><bbb test="123">zzz</bbb><ccc test="345">yyy</ccc><ddd/><eee/></aaa>')
node = EtreeNode(tree)
print node
print node._getNodes()
print node._getNodes('bbb')
print node._getFirstNode('bbb')
bbb = node._getFirstNode('bbb')
print bbb.testx
print bbb.test
print bbb.test * 10
bbb.textx = 234
print bbb.textx
for t in node:
print t._getTag()
|
Python
|
CL
|
50984ad7ffd8aae0cf7203c21222f1854a7d34bf8058aa56cb7fc472a29e5197
|
import csv
from preprocessing.preprocess import PreprocessBase
"""
Marco Link
"""
class SimpleSynonyms(PreprocessBase):
"""
Class for replacing words with its specified synonyms.
The synonyms has to be defined in a file.
An entry in the file specifies the word which should be replaced followed by the tab character (\t) and its
replacement.
In the file one entry is separated with a newline character (\n).
"""
def __init__(self, path):
""":param path: the path to the synonyms file"""
# read the synonyms file and hold it as a dictionary
self._synonyms = {}
# https://docs.python.org/3/library/csv.html
with open(path, newline='') as synonyms_csv:
synonyms_reader = csv.reader(synonyms_csv, delimiter='\t')
            for row in synonyms_reader:
                # if the row contains a word and its synonym to replace
                # (later entries for the same word overwrite earlier ones)
                if len(row) > 1:
                    self._synonyms[row[0]] = row[1]
def transform_string(self, text: str):
return " ".join(self.transform_tokens(text.split()))
def transform_tokens(self, tokens):
new_tokens = []
# for every token: search if it exists in the syonynm dictionary and replace it with its substitution
for token in tokens:
if token in self._synonyms:
new_tokens.append(self._synonyms[token])
else:
new_tokens.append(token)
return new_tokens
class ContextSynonyms(PreprocessBase):
"""
Class for replacing a word with its specified synonym, if one of the specified context words is found
in the surroundings.
If found, both the main word to replace and the context word will be replaced with the specified substitution.
It first searches before the main word for finding a context word and if not found after the main word.
"""
def __init__(self, main_words, context_words, before, after, substitution):
"""
:param main_words: the main words to search in their surroundings for context words
:param context_words: the context words which should be searched in the surroundings from the main words
:param before: how far before the main words should be searched
:param after: how far after the main words should be searched
:param substitution: the string to replace the found main word and context word
"""
self._main_words = main_words
self._context_words = context_words
self._before = before
self._after = after
self._substitution = substitution
def transform_string(self, text: str):
return " ".join(self.transform_tokens(text.split()))
def transform_tokens(self, tokens):
new_tokens = tokens
indizes_to_remove = []
        for index in range(len(new_tokens)):
            # was the token already removed with a previous context synonym?
            if index not in indizes_to_remove:
                token = new_tokens[index]
                # reset the search flags for each main word candidate
                found_before = False
                found_after = False
# is the token one of the specified main words
if token in self._main_words:
# search before the token for context words
# not enough words before existing as specified
if (index - self._before) <= 0:
words_before = new_tokens[0:index]
index_words_before = len(words_before) - 1
while index_words_before >= 0:
# was the token already removed with a previous context synonym?
if index_words_before not in indizes_to_remove:
if words_before[index_words_before] in self._context_words:
found_before = True
# for descendants context synonyms this word shouldn't be used and has to be
# removed at the end
indizes_to_remove.append(index_words_before)
break
index_words_before -= 1
else:
words_before = new_tokens[index - self._before: index]
index_words_before = len(words_before) - 1
                    while index_words_before >= 0:
                        # was the token already removed with a previous context synonym?
                        # (compare against the absolute index, matching the append below)
                        if (index - self._before + index_words_before) not in indizes_to_remove:
if words_before[index_words_before] in self._context_words:
found_before = True
# for descendants context synonyms this word shouldn't be used and has to be
# removed at the end
indizes_to_remove.append(index - self._before + index_words_before)
break
index_words_before -= 1
if not found_before:
                    if (index + self._after) >= len(new_tokens):
                        if index < (len(new_tokens) - 1):
                            words_after = new_tokens[(index + 1): len(new_tokens)]
                            index_words_after = 0
                            while index_words_after < len(words_after):
                                # was the token already removed with a previous context synonym?
                                # (compare against the absolute index, matching the append below)
                                if (index + 1 + index_words_after) not in indizes_to_remove:
if words_after[index_words_after] in self._context_words:
found_after = True
# for descendants context synonyms this word shouldn't be used and has
# to be removed at the end
indizes_to_remove.append(index + 1 + index_words_after)
break
index_words_after += 1
else:
                        words_after = new_tokens[index + 1: index + 1 + self._after]
                        index_words_after = 0
                        while index_words_after < len(words_after):
                            # was the token already removed with a previous context synonym?
                            # (compare against the absolute index, matching the append below)
                            if (index + 1 + index_words_after) not in indizes_to_remove:
if words_after[index_words_after] in self._context_words:
found_after = True
# for descendants context synonyms this word shouldn't be used and has to be
# removed at the end
indizes_to_remove.append(index + 1 + index_words_after)
break
index_words_after += 1
# replace the main word with the specified substitution
if found_before or found_after:
new_tokens[index] = self._substitution
# return new tokens list, but without the found context words
return [new_tokens[i] for i in range(len(new_tokens)) if i not in indizes_to_remove]
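# A minimal usage sketch (ours, not part of the original module); the file
# path and token values below are made up for illustration:
#
#   simple = SimpleSynonyms('synonyms.tsv')
#   print(simple.transform_string('colour of the sky'))
#
#   ctx = ContextSynonyms(main_words=['bank'], context_words=['river'],
#                         before=3, after=3, substitution='riverbank')
#   print(ctx.transform_tokens(['the', 'river', 'bank', 'was', 'muddy']))
#   # -> ['the', 'riverbank', 'was', 'muddy']  (the context word is removed)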
|
Python
|
CL
|
8961035c7929ee325cdf17242029cfdbdc919862a7d64d10c6086a8b91409eb3
|
#!/usr/bin/env python
# ClusterShell.CLI.Display test suite
# Written by S. Thiell
"""Unit test for CLI.Display"""
import os
import sys
import tempfile
import unittest
from StringIO import StringIO
sys.path.insert(0, '../lib')
from ClusterShell.CLI.Display import Display, WHENCOLOR_CHOICES, VERB_STD
from ClusterShell.CLI.OptionParser import OptionParser
from ClusterShell.MsgTree import MsgTree
from ClusterShell.NodeSet import NodeSet, set_std_group_resolver
from ClusterShell.NodeUtils import GroupResolverConfig
def makeTestFile(text):
"""Create a temporary file with the provided text."""
f = tempfile.NamedTemporaryFile()
f.write(text)
f.flush()
return f
class CLIDisplayTest(unittest.TestCase):
"""This test case performs a complete CLI.Display verification. Also
CLI.OptionParser is used and some parts are verified btw.
"""
def testDisplay(self):
"""test CLI.Display"""
parser = OptionParser("dummy")
parser.install_display_options(verbose_options=True)
options, _ = parser.parse_args([])
ns = NodeSet("hostfoo")
mtree = MsgTree()
mtree.add("hostfoo", "message0")
mtree.add("hostfoo", "message1")
for whencolor in WHENCOLOR_CHOICES: # test whencolor switch
for label in [True, False]: # test no-label switch
options.label = label
options.whencolor = whencolor
disp = Display(options)
# inhibit output
disp.out = StringIO()
disp.err = StringIO()
# test print_* methods...
disp.print_line(ns, "foo bar")
disp.print_line_error(ns, "foo bar")
disp.print_gather(ns, list(mtree.walk())[0][0])
# test also string nodeset as parameter
disp.print_gather("hostfoo", list(mtree.walk())[0][0])
# test line_mode property
self.assertEqual(disp.line_mode, False)
disp.line_mode = True
self.assertEqual(disp.line_mode, True)
disp.print_gather("hostfoo", list(mtree.walk())[0][0])
disp.line_mode = False
self.assertEqual(disp.line_mode, False)
def testDisplayRegroup(self):
"""test CLI.Display (regroup)"""
f = makeTestFile("""
# A comment
[Main]
default: local
[local]
map: echo hostfoo
#all:
list: echo all
#reverse:
""")
res = GroupResolverConfig(f.name)
set_std_group_resolver(res)
try:
parser = OptionParser("dummy")
parser.install_display_options(verbose_options=True)
options, _ = parser.parse_args(["-r"])
disp = Display(options, color=False)
self.assertEqual(disp.regroup, True)
disp.out = StringIO()
disp.err = StringIO()
self.assertEqual(disp.line_mode, False)
ns = NodeSet("hostfoo")
# nodeset.regroup() is performed by print_gather()
disp.print_gather(ns, "message0\nmessage1\n")
self.assertEqual(disp.out.getvalue(),
"---------------\n@all\n---------------\nmessage0\nmessage1\n\n")
finally:
set_std_group_resolver(None)
def testDisplayClubak(self):
"""test CLI.Display for clubak"""
parser = OptionParser("dummy")
parser.install_display_options(separator_option=True, dshbak_compat=True)
options, _ = parser.parse_args([])
disp = Display(options)
self.assertEqual(bool(disp.gather), False)
self.assertEqual(disp.line_mode, False)
self.assertEqual(disp.label, True)
self.assertEqual(disp.regroup, False)
self.assertEqual(bool(disp.groupsource), False)
self.assertEqual(disp.noprefix, False)
self.assertEqual(disp.maxrc, False)
self.assertEqual(disp.node_count, True)
self.assertEqual(disp.verbosity, VERB_STD)
|
Python
|
CL
|
59861f7230f0f86056de4cc36c83affa2645ee0bda93b31807475beb3413ac51
|
import boto3
import botocore.exceptions
import botocore.exceptions
def lambda_handler(event, context):
"""Lambda function to identify unencrypted s3 buckets associated with the AWS
account in use and send an email notification via an SNS resource.
Returns:
        response(dict): SNS topic ARN as key and publish response as value
Or string literal if there was a ClientError while processing request
"""
checkS3 = check_s3()
try:
response = checkS3.send_unencrypted_alerts("arn:aws:sns:us-east-1:996921890895:gene-crumpler-s3-encryption")
    except botocore.exceptions.ClientError as e:
        response = "There was a ClientError attempting to process this request \n\n" + str(e)
return response
class check_s3:
""" Check s3 resources class
Object of class intantiates several boto3 client connections used to
interact with AWS service endpoints for the purpose of checking S3
resources and sending a notification via SNS in the event of an alert.
Attr:
__s3Client(object): boto3 AWS s3 client
__snsClient(object): boto3 AWS SNS client
__stsClient(object): boto3 AWS STS client
__unencryptedBuckets(list): List of unencrypted buckets on which to alert
"""
__s3Client = boto3.client('s3')
__snsClient = boto3.client('sns')
__stsClient = boto3.client('sts')
    def __init__(self):
        """ Class object initialization method
        Queries s3 via boto3 s3client connection to instantiate the buckets
        object property. Also calls methods to set unencryptedBuckets,
        alertMessage, and alertSubject properties.
        """
        # Initialize per instance; a class-level list would be shared
        # (and would keep growing) across all instances.
        self.__unencryptedBuckets = []
        self.__buckets = self.__s3Client.list_buckets()
        self.__set_unencrypted_buckets()
        self.set_unencrypted_alert_message(self.__unencryptedBuckets)
def __set_unencrypted_buckets(self):
""" Private method to set the unencrypted_buckets property. """
for bucket in self.__buckets['Buckets']:
            if not self.__is_bucket_encrypted(bucket['Name']):
self.__unencryptedBuckets.append(bucket)
    def __is_bucket_encrypted(self, bucketName):
""" Private method to check if a bucket has a server side
encryption configuration
Params:
bucketName(string): S3 bucket name to query
Returns:
bool literal of True if there is no exception caught with the
error code 'ServerSideEncryptionConfigurationNotFoundError', and False
otherwise.
Raises:
botocore ClientError exception if one is caught and it does not match
the error code specified above.
"""
# Since there is no s3 property/metadata for encryption you must check
# and catch a specific exception to verify if it's setup
try:
encryption = self.__s3Client.get_bucket_encryption(Bucket=bucketName)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'ServerSideEncryptionConfigurationNotFoundError':
return False
else:
raise e
else:
return True
def set_unencrypted_alert_message(self, buckets, **kwargs):
""" Public method to set the class object's alertSubject and
alertMessage properties.
If no kwargs params are passed in, then we use the default subject and
message strings in this method. Otherwise we set the object's properties
accordingly.
Params:
buckets(list): Names of unencrypted buckets found on S3
"""
accountId = self.__stsClient.get_caller_identity().get('Account')
if 'subject' in kwargs:
self.alertSubject = kwargs['subject']
else:
self.alertSubject = "ALERT: Unencrypted s3 buckets found"
        if 'message' in kwargs:
            self.alertMessage = kwargs['message']
else:
self.alertMessage = "The s3 buckets specified below do not have a server side encryption configuration. \
Please review and correct the configuration if required."
# Append Account ID to object's alertMessage property
        self.alertMessage = self.alertMessage + "\n\nAccount ID: " + accountId
# Append unencrypted bucket names passed in via buckets parameter to
# object's alertMessage property
for bucket in buckets:
self.alertMessage = self.alertMessage + "\n\tBucket Name: " + bucket['Name']
def send_unencrypted_alerts(self, *topicList):
""" Sends alert subject and message defined by class method set_unencrypted_alert_message
to any topics provided by *topicList param.
Params:
*topicList(*args): Topic ARN strings to publish to
Returns:
responses(dict[TopicArn:response]): Response json data from each sns publish call
Or a string literal describing why it did not attempt to publish
to an SNS topic
"""
responses = {}
if len(self.__unencryptedBuckets) > 0:
if len(topicList) > 0:
for topicARN in topicList:
responses[topicARN] = self.__snsClient.publish(
TopicArn=topicARN,
Subject=self.alertSubject,
Message=self.alertMessage)
else:
return "No SNS topics provided"
else:
return "No unencrypted buckets found"
return responses
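# Local usage sketch (ours, not part of the original handler). The topic ARN
# below is a placeholder; real AWS credentials with s3, sts and sns
# permissions are required for this to run:
#
#   checker = check_s3()
#   responses = checker.send_unencrypted_alerts(
#       'arn:aws:sns:us-east-1:123456789012:example-topic')
#   print(responses)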
|
Python
|
CL
|
2c9292c3832a659e2be7058c7e6019571dabed96bfb149e403ce746edbee1791
|
class Building():
"""A simple Building Energy Model.
Consisting of one thermal capacity and one resistance, this model is derived from the
hourly dynamic model of the ISO 13790. It models heating and cooling energy demand only.
Parameters:
* heat_mass_capacity: capacity of the building's heat mass [J/K]
* heat_transmission: heat transmission to the outside [W/K]
* maximum_cooling_power: [W] (<= 0)
* maximum_heating_power: [W] (>= 0)
* initial_building_temperature: building temperature at start time [℃]
* time_step_size: [s]
* conditioned_floor_area: [m**2]
"""
def __init__(self, heat_mass_capacity, heat_transmission,
maximum_cooling_power, maximum_heating_power,
initial_building_temperature, time_step_size,
conditioned_floor_area):
if maximum_heating_power < 0:
raise ValueError("Maximum heating power [W] must not be negative.")
if maximum_cooling_power > 0:
raise ValueError("Maximum cooling power [W] must not be positive.")
self.__heat_mass_capacity = heat_mass_capacity
self.__heat_transmission = heat_transmission
self.__maximum_cooling_power = maximum_cooling_power
self.__maximum_heating_power = maximum_heating_power
self.current_temperature = initial_building_temperature
self.__time_step_size = time_step_size
self.__conditioned_floor_area = conditioned_floor_area
def step(self, outside_temperature, heating_setpoint, cooling_setpoint):
"""Performs building simulation for the next time step.
Parameters:
* outside_temperature: [℃]
* heating_setpoint: heating setpoint of the HVAC system [℃]
* cooling_setpoint: cooling setpoint of the HVAC system [℃]
"""
def next_temperature(heating_cooling_power):
return self._next_temperature(
outside_temperature=outside_temperature,
heating_setpoint=heating_setpoint,
cooling_setpoint=cooling_setpoint,
heating_cooling_power=heating_cooling_power
)
next_temperature_no_power = next_temperature(0)
if (next_temperature_no_power >= heating_setpoint and next_temperature_no_power <= cooling_setpoint):
self.current_temperature = next_temperature_no_power
else:
if next_temperature_no_power < heating_setpoint:
setpoint = heating_setpoint
max_power = self.__maximum_heating_power
else:
setpoint = cooling_setpoint
max_power = self.__maximum_cooling_power
ten_watt_per_square_meter_power = 10 * self.__conditioned_floor_area
next_temperature_power_10 = next_temperature(ten_watt_per_square_meter_power)
unrestricted_power = (ten_watt_per_square_meter_power * (setpoint - next_temperature_no_power) / (next_temperature_power_10 - next_temperature_no_power))
if abs(unrestricted_power) <= abs(max_power):
power = unrestricted_power
else:
power = max_power
next_temperature_heating_cooling = next_temperature(power)
self.current_temperature = next_temperature_heating_cooling
def _next_temperature(self, outside_temperature, heating_setpoint, cooling_setpoint, heating_cooling_power):
dt_by_cm = self.__time_step_size.total_seconds() / self.__heat_mass_capacity
return (self.current_temperature * (1 - dt_by_cm * self.__heat_transmission) + dt_by_cm * (heating_cooling_power + self.__heat_transmission * outside_temperature))
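if __name__ == "__main__":
    # A minimal simulation sketch (ours, not part of the original model).
    # The update above is the explicit Euler step
    #   T_next = T * (1 - dt*H/C) + (dt/C) * (P + H * T_out)
    # with H = heat transmission and C = heat mass capacity. All parameter
    # values below are made up for illustration.
    from datetime import timedelta
    building = Building(
        heat_mass_capacity=165000 * 100,   # [J/K], roughly a 100 m2 dwelling
        heat_transmission=200,             # [W/K]
        maximum_cooling_power=-10000,      # [W]
        maximum_heating_power=10000,       # [W]
        initial_building_temperature=20,   # [degC]
        time_step_size=timedelta(hours=1),
        conditioned_floor_area=100,        # [m2]
    )
    for hour in range(24):
        building.step(outside_temperature=-5, heating_setpoint=20,
                      cooling_setpoint=26)
        print("hour {:2d}: {:.2f} degC".format(hour, building.current_temperature))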
|
Python
|
CL
|
e2bc0d201562b3dd3ab9e50218ccef5e9a9ac6337dc462536bb00669b36417e6
|
# -*- coding: utf-8 -*-
from setuptools import find_namespace_packages, setup
setup(
name="emmet-core",
use_scm_version={"root": "..", "relative_to": __file__},
setup_requires=["setuptools_scm>=6,<8"],
description="Core Emmet Library",
author="The Materials Project",
author_email="feedback@materialsproject.org",
url="https://github.com/materialsproject/emmet",
packages=find_namespace_packages(include=["emmet.*"]),
package_data={
"emmet.core.vasp.calc_types": ["*.yaml"],
"emmet.core.subtrates": ["*.json"],
},
include_package_data=True,
install_requires=[
"pymatgen>=2023.7.20",
"monty>=2021.3",
"pydantic>=1.10.2,<2.0",
"pybtex~=0.24",
"typing-extensions>=3.7,<5.0",
"spglib>=2.0.1",
],
extras_require={
"all": [
"seekpath>=2.0.1",
"robocrys>=0.2.8",
"pymatgen-analysis-diffusion>=2023.8.15",
"pymatgen-analysis-alloys>=0.0.3",
],
"test": [
"pre-commit",
"pytest",
"pytest-cov",
"pycodestyle",
"pydocstyle",
"flake8",
"mypy",
"mypy-extensions",
"types-setuptools",
"types-requests",
"maggma",
"wincertstore",
"custodian>=2022.5.26",
],
"docs": [
"mkdocs",
"mkdocs-material<8.3",
"mkdocs-material-extensions",
"mkdocs-minify-plugin",
"mkdocstrings",
"mkdocs-awesome-pages-plugin",
"mkdocs-markdownextradata-plugin",
"mkdocstrings[python]",
"livereload",
"jinja2",
],
},
python_requires=">=3.8",
license="modified BSD",
zip_safe=False,
)
|
Python
|
CL
|
5191e814a417284652cf1d5b98090722f48d5aab12875861389f69f2e402c8a9
|
# -*- coding: utf-8 -*-
"""
SR510
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Implements the drivers to control a lock-in amplifier
:copyright: 2013 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
Source: SR510 Manual
"""
from lantz import Feat
from lantz.visa import GPIBVisaDriver
class SR510(GPIBVisaDriver):
"""SR510 Lock-In Amplifier
"""
ENCODING = 'ascii'
RECV_TERMINATION = '\r\n'
SEND_TERMINATION = '\r\n'
@Feat(units='Hz')
def frequency(self):
"""Reference frequency
"""
return float(self.query('F'))
@Feat(limits=(-180, 180))
def phase(self):
"""Phase
"""
return float(self.query('P'))
@phase.setter
def phase(self, value):
print(self.query('P {:.1f}'.format(value)))
@Feat(units='V')
def output(self):
"""Output voltage reading
"""
return float(self.query('Q'))
# From SR510 manual page 19
# Codes for time constants go like 1: 1ms
# 2: 3ms
# 3:10ms
# 4:30ms
# and so on. This is the list of valid time constants, except the index
# starts at 0
pre_time_constants = [1, 3, 10, 30, 100, 300, 1000, 3000,
10000, 30000, 100000]
# Generated using [[1,3][n%2]*10**int(n/2) for n in range(11)]
@Feat(units='ms', limits=(min(pre_time_constants),
max(pre_time_constants)))
def pre_time_constant(self):
"""Pre time constant.
Possible values are {}
If another value is requested, set to the next higher value
""".format(','.join(str(t) + 'ms' for t in self.pre_time_constants))
return self.pre_time_constants[int(self.query('T 1')) - 1]
@pre_time_constant.setter
def pre_time_constant(self, value):
try:
code = self.pre_time_constants.index(value)
except ValueError:
# Value not available, set to the next possible value
            code = next(index for index, v in enumerate(self.pre_time_constants)
                        if v > value)
self.send('T 1,{:d}\r\n'.format(code + 1))
# Sensitivity codes go from 1 to 24 and correspond to 10nV,20nV,50nV,100nV
# and so on
# Code n corresponds to sensitivities[n-1]
sensitivities = [10, 20, 50, 100, 200, 500, 1000, 2000, 5000,
10000, 20000, 50000, 100000, 200000, 500000,
1000000, 2000000, 5000000, 10000000, 20000000, 50000000,
100000000, 200000000, 500000000]
# Generated using [[10,20,50][n%3]*10**int(n/3) for n in range(24)]
@Feat(units='nV', limits=(min(sensitivities), max(sensitivities)))
def sensitivity(self):
"""Sensitivity (gain).
Possible values are {}
If another value is requested, set to the next lower value
""".format(','.join(str(s) + 'nV' for s in self.sensitivities))
return self.sensitivities[int(self.query('G')) - 1]
@sensitivity.setter
def sensitivity(self, value):
try:
code = self.sensitivities.index(value)
except ValueError:
# Value not available, set to the next lower value
            code = len(self.sensitivities) - next(
                index for index, v in enumerate(reversed(self.sensitivities))
                if v < value) - 1
self.send('G {:d}'.format(code + 1))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--debug', help='Send debug output to screen',
action='store_true')
args = parser.parse_args()
from lantz.log import LOGGER, DEBUG
LOGGER.setLevel(DEBUG)
import logging
fh = logging.FileHandler('sr510.log', mode='w')
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
fh.setLevel(DEBUG)
LOGGER.addHandler(fh)
if args.debug:
from lantz.log import log_to_screen
log_to_screen(DEBUG)
with SR510('GPIB0::28::INSTR') as lockin:
print('Sensitivity: {}'.format(lockin.sensitivity))
print('Phase: {}'.format(lockin.phase))
print('Frequency: {}'.format(lockin.frequency))
print('Pre Time Constant: {}'.format(lockin.pre_time_constant))
|
Python
|
CL
|
853c950039d4bb933766f6ce0a20e697e744ce9884857adb47706120c6a4dfd8
|
from unittest import TestCase
from model import connect_to_db, db, User, Answer, Question
from server import app
from flask import session
class FlaskTestsBasic(TestCase):
"""Tests all routes render, except login/logout."""
def setUp(self):
"""Stuff to do before every test."""
# Get the Flask test client
self.client = app.test_client()
# Show Flask errors that happen during tests
app.config['TESTING'] = True
app.config['SECRET_KEY'] = 'key'
self.client = app.test_client()
# Connect only to the demo db
connect_to_db(app, "postgresql:///testdb")
# Create tables and add sample data
db.create_all()
example_data()
with self.client as user:
with user.session_transaction() as sess:
sess['user_id'] = 1
def tearDown(self):
"""Do at end of every test."""
db.session.close()
db.drop_all()
def test_index(self):
"""Test root page."""
# import pdb
# pdb.set_trace()
result = self.client.get("/")
self.assertIn('<h1><center>Learn, Share, Connect</center></h1>',
result.data)
print "DONE WITH INDEX CHECK"
def test_register_page(self):
"""Test register route rendering."""
result = self.client.get('/register')
self.assertIn('<h1>Register</h1>', result.data)
print "DONE WITH REGISTER CHECK"
def test_questions_page(self):
"""Test questions route rendering."""
# import pdb
# pdb.set_trace()
result = self.client.get('/questions')
self.assertIn('<h2>Submit A Question</h2>', result.data)
print "DONE WITH QUESTIONS PAGE CHECK"
class FlaskTestsLogInLogOut(TestCase):
"""Test log in and log out."""
def setUp(self):
"""Before every test"""
app.config['TESTING'] = True
self.client = app.test_client()
#Connect to test database
connect_to_db(app, "postgresql:///testdb")
# Create tables and add sample data
db.create_all()
example_data()
def tearDown(self):
"""Do at end of every test."""
db.session.close()
db.drop_all()
def test_login(self):
"""Test log in form."""
with self.client as c:
result = c.post('/login',
data={'email': 'cat@gmail.com', 'password': 'abc'},
follow_redirects=True
)
self.assertEqual(session['user_id'], 1)
self.assertIn("You are logged in", result.data)
print "DONE WITH LOGIN CHECK"
def test_logout(self):
"""Test logout route."""
with self.client as c:
with c.session_transaction() as sess:
sess['user_id'] = '42'
result = self.client.get('/logout', follow_redirects=True)
self.assertNotIn('user_id', session)
self.assertIn('Logged Out.', result.data)
print "DONE WITH LOGOUT CHECK"
def example_data():
"""Create some sample data."""
# In case this is run more than once, empty out existing data
User.query.delete()
Answer.query.delete()
Question.query.delete()
# Add sample users, answers and questions
cat = User(user_name="Cat", email="cat@gmail.com", password="abc")
dog = User(user_name="Dog", email="dog@gmail.com", password="abc")
horse = User(user_name="Horse", email="horse@gmail.com", password="abc")
db.session.add_all([cat, dog, horse])
db.session.commit()
question_1 = Question(question_id="q1", title="Should we save the planet?", description=" ", user_id=3)
question_2 = Question(question_id="q2", title="Is recycling pointless?", description=" ", user_id=3)
question_3 = Question(question_id="q3", title="Mustard or Ketchup?", description=" ", user_id=1)
db.session.add_all([question_1, question_2, question_3])
db.session.commit()
answer_1 = Answer(question_id="q1", user_id=1, body="Yes, I agree.")
answer_2 = Answer(question_id="q2", user_id=2, body="No, I disagree.")
answer_3 = Answer(question_id="q3", user_id=3, body="Hrm, I'm indifferent.")
db.session.add_all([answer_1, answer_2, answer_3])
db.session.commit()
def connect_to_db(app, db_uri="postgresql:///testdb"):
app.config['SQLALCHEMY_DATABASE_URI'] = db_uri
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.app = app
db.init_app(app)
if __name__ == "__main__":
import unittest
unittest.main()
|
Python
|
CL
|
e39e313e51e12a74cdf3600a3790ac2153b9262a7df27e1837f0aa1b26fbb011
|
# -*- coding: utf-8 -*-
import sys
import redis
import BaseThreadedModule
import Utils
import Decorators
@Decorators.ModuleDocstringParser
class RedisList(BaseThreadedModule.BaseThreadedModule):
"""
Subscribes to a redis channels/lists and passes incoming events to receivers.
lists: Name of redis lists to subscribe to.
server: Redis server to connect to.
port: Port redis server is listening on.
batch_size: Number of events to return from redis list.
db: Redis db.
password: Redis password.
timeout: Timeout in seconds.
Configuration template:
- RedisList:
lists: # <type: string||list; is: required>
server: # <default: 'localhost'; type: string; is: optional>
port: # <default: 6379; type: integer; is: optional>
batch_size: # <default: 1; type: integer; is: optional>
db: # <default: 0; type: integer; is: optional>
password: # <default: None; type: None||string; is: optional>
timeout: # <default: 0; type: integer; is: optional>
receivers:
- NextModule
"""
module_type = "input"
"""Set module type"""
can_run_forked = True
def configure(self, configuration):
# Call parent configure method
BaseThreadedModule.BaseThreadedModule.configure(self, configuration)
self.redis_bulk_script = None
self.lists = self.getConfigurationValue('lists')
if not isinstance(self.lists, list):
self.lists = [self.lists]
self.timeout = self.getConfigurationValue('timeout')
self.batch_size = self.getConfigurationValue('batch_size')
self.client = redis.StrictRedis(host=self.getConfigurationValue('server'),
port=self.getConfigurationValue('port'),
password=self.getConfigurationValue('password'),
db=self.getConfigurationValue('db'))
try:
self.client.ping()
except:
etype, evalue, etb = sys.exc_info()
self.logger.error("Could not connect to redis store at %s. Exception: %s, Error: %s." % (self.getConfigurationValue('server'), etype, evalue))
self.gp.shutDown()
# Monkeypatch run method to use the correct handle event method.
if self.batch_size == 1:
self.run = self.handleSingleEvent
else:
self.run = self.handleBatchEvents
def run(self):
self.logger.error("Monkeypatching the run method of RedisList seems to have failed.")
self.gp.shutDown()
def handleSingleEvent(self):
while self.alive:
event = None
try:
event = self.client.blpop(self.lists, timeout=self.timeout)
except:
exc_type, exc_value, exc_tb = sys.exc_info()
self.logger.error("Could not read data from redis list(s) %s. Exception: %s, Error: %s." % (self.lists, exc_type, exc_value))
if event:
event = Utils.getDefaultEventDict(dict={"received_from": '%s' % event[0], "data": event[1]}, caller_class_name=self.__class__.__name__)
self.sendEvent(event)
def handleBatchEvents(self):
pipeline = self.client.pipeline()
while self.alive:
for _ in range(0, self.batch_size):
pipeline.blpop(self.lists, timeout=self.timeout)
try:
events = pipeline.execute()
except:
exc_type, exc_value, exc_tb = sys.exc_info()
self.logger.error("Could not read data from redis list(s) %s. Exception: %s, Error: %s." % (self.lists, exc_type, exc_value))
continue
for event in events:
# If batch_size is bigger than events waiting in redis queue, the remaining entries will be filled with None values.
# So break out if a None value is found.
if not event:
break
event = Utils.getDefaultEventDict(dict={"received_from": '%s' % event[0], "data": event[1]}, caller_class_name=self.__class__.__name__)
self.sendEvent(event)
|
Python
|
CL
|
cad06f27db6d0f79d0e0d5dfe5f10582892783a4bfd550b2b6e3494b536a64d2
|
import os
import re
import subprocess
from libqtile import bar, hook, layout, widget
from libqtile.config import Click, Drag, Group, Key, Match, Screen
from libqtile.lazy import lazy
from widgets.volume import Volume, VolumeCommands
mod = "mod4"
keys = [
# The basics
Key([mod],
"Return",
lazy.spawn("alacritty"),
desc="Launch terminal"),
Key([mod],
"v",
lazy.spawn("neovide --notabs"),
desc="Launch Neovide"),
Key([mod, "shift"],
"Return",
lazy.spawn("rofi -show drun -show-icons -auto-select"),
desc="Launch launcher"),
Key([mod],
"b",
lazy.spawn(["bash", "-c", "${BROWSER-firefox}"]),
desc="Launch browser"),
Key([mod, "shift"],
"b",
lazy.spawn("rofi-bluetooth"),
desc="Launch bluetooth manager."),
Key([mod],
"s",
lazy.spawn("locker"),
desc="Lock the screen"),
Key([mod],
"Tab",
lazy.next_layout(),
desc="Toggle through layouts"),
Key([mod],
"d",
lazy.window.kill(),
desc="Kill active window"),
Key([mod],
"m",
lazy.spawn("displays.sh"),
desc="Select displays preset"),
Key([mod, "shift"],
"m",
lazy.spawn("monitors.sh"),
desc="Select monitors layout"),
    Key([mod],
        "o",
        lazy.spawn("rofi-pass"),
        desc="Launch password manager"),
Key([mod, "shift"],
"r",
lazy.restart(),
desc="Restart Qtile"),
Key([mod], "q",
lazy.spawn("rofi -show power-menu -modi power-menu:rofi-power-menu"),
desc="Show power menu"),
# Utils
Key([mod],
"p",
lazy.spawn("flameshot gui --accept-on-select"),
desc="Take a screenshot and save it instantly after selecting"),
Key([mod, "shift"],
"p",
lazy.spawn("flameshot gui"),
desc="Take a screenshot"),
Key([mod, "shift"],
"c",
lazy.spawn(
"rofi -show calc -modi calc -no-show-match -no-sort "
+ "-calc-command \"echo -n '{result}' | xclip -sel c\""),
desc="Open calculator"),
Key([mod],
"c",
lazy.spawn("rofi -modi \"clipboard:greenclip print\" "
+ "-show clipboard -run-command '{cmd}'"),
desc="Browse clipboard history."),
# Switch between windows
Key([mod],
"h",
lazy.layout.left(),
desc="Move focus to left"),
Key([mod],
"l",
lazy.layout.right(),
desc="Move focus to right"),
Key([mod],
"j",
lazy.layout.down(),
desc="Move focus down"),
Key([mod],
"k",
lazy.layout.up(),
desc="Move focus up"),
Key([mod],
"space",
lazy.layout.next(),
desc="Move window focus to other window"),
# Move windows between left/right columns or move up/down in current stack.
# Moving out of range in Columns layout will create new column.
Key([mod, "shift"],
"h",
lazy.layout.shuffle_left(),
desc="Move window to the left"),
Key([mod, "shift"],
"l",
lazy.layout.shuffle_right(),
desc="Move window to the right"),
Key([mod, "shift"],
"j",
lazy.layout.shuffle_down(),
desc="Move window down"),
Key([mod, "shift"],
"k",
lazy.layout.shuffle_up(),
desc="Move window up"),
# Grow windows. If current window is on the edge of screen and direction
# will be to screen edge - window would shrink.
Key([mod, "control"],
"i",
lazy.layout.grow(),
desc="Grow window"),
Key([mod, "control"],
"o",
lazy.layout.shrink(),
desc="Shrink window"),
Key([mod, "control"],
"h",
lazy.layout.grow_left(),
desc="Grow window to the left"),
Key([mod, "control"],
"l",
lazy.layout.grow_right(),
desc="Grow window to the right"),
Key([mod, "control"],
"j",
lazy.layout.grow_down(),
desc="Grow window down"),
Key([mod, "control"],
"k",
lazy.layout.grow_up(),
desc="Grow window up"),
Key([mod],
"n",
lazy.layout.maximize(),
desc="Toggle window between minimum and maximum sizes"),
Key([mod],
"f",
lazy.window.toggle_fullscreen(), desc="Toggle fullscreen"),
Key([mod, "shift"],
"f",
lazy.window.toggle_floating(), desc="Toggle floating"),
Key([mod, "shift"],
"n",
lazy.layout.normalize(),
desc="Reset all window sizes"),
# Switch focus of monitors
Key([mod],
"period",
lazy.next_screen(),
desc="Move focus to next monitor"),
Key([mod],
"comma",
lazy.prev_screen(),
desc="Move focus to prev monitor"),
# Media keys
Key([mod],
"F1",
VolumeCommands.TOGGLE_MUTE.lazy,
desc="Mute the audio"),
Key([], "XF86AudioMute",
VolumeCommands.TOGGLE_MUTE.lazy,
desc="Mute the audio"),
Key([mod],
"Page_Up",
VolumeCommands.INCREASE_VOLUME.lazy,
desc="Raise volume level"),
Key(
[mod],
"Page_Down",
VolumeCommands.DECREASE_VOLUME.lazy,
desc="Lower volume level"),
Key([],
"XF86AudioRaiseVolume",
VolumeCommands.INCREASE_VOLUME.lazy,
desc="Raise volume level"),
Key([],
"XF86AudioLowerVolume",
VolumeCommands.DECREASE_VOLUME.lazy,
desc="Lower volume level"),
Key([mod],
"F7",
lazy.spawn("brightness up 5"),
desc="Increase brightness level"),
Key([mod],
"F6",
lazy.spawn("brightness down 5"),
desc="Lower brightness level"),
Key([],
"XF86MonBrightnessUp",
lazy.spawn("brightness up 5"),
desc="Increase brightness level"),
Key([],
"XF86MonBrightnessDown",
lazy.spawn("brightness down 5"),
desc="Lower brightness level"),
]
groups = [Group(i) for i in "123456789"]
for i in groups:
keys.extend(
[
# mod1 + letter of group = switch to group
Key(
[mod],
i.name,
lazy.group[i.name].toscreen(),
desc="Switch to group {}".format(i.name),
),
# mod1 + shift + letter of group = switch to & move focused window to group
Key(
[mod, "shift"],
i.name,
lazy.window.togroup(i.name, switch_group=True),
desc="Switch to & move focused window to group {}".format(
i.name),
),
]
)
# See https://www.nordtheme.com/docs/colors-and-palettes
colors = {
"polar-0": "#2E3440",
"polar-1": "#3B4252",
"polar-2": "#434C5E",
"polar-3": "#4C566A",
"snow-0": "#D8DEE9",
"snow-1": "#E5E9F0",
"snow-2": "#ECEFF4",
"frost-0": "#8FBCBB",
"frost-1": "#88C0D0",
"frost-2": "#81A1C1",
"frost-3": "#5E81AC",
"aurora-0": "#BF616A",
"aurora-1": "#D08770",
"aurora-2": "#EBCB8B",
"aurora-3": "#A3BE8C",
"aurora-4": "#B48EAD",
}
layout_theme = {
"border_width": 3,
"margin": 10,
"border_focus": colors["polar-3"],
"border_normal": colors["polar-0"],
}
layouts = [
layout.MonadTall(**layout_theme),
layout.MonadWide(
**layout_theme,
ratio=0.75,
),
layout.Max(**layout_theme),
layout.RatioTile(**layout_theme),
]
widget_defaults = dict(
font="mononoki Nerd Font Mono",
fontsize=12,
padding=3,
)
bar_defaults = dict(
size=23,
background=colors["polar-0"],
margin=[6, 10, 0, 10],
opacity=0.8,
)
music_widget = widget.Mpris2(
**widget_defaults,
display_metadata=["xesam:album", "xesam:artist"],
scroll=True,
width=150,
objname="org.mpris.MediaPlayer2.spotify",
)
widgets = [
widget.Image(
**widget_defaults,
filename="~/.config/qtile/icons/python.png",
margin=2,
),
widget.GroupBox(
**widget_defaults,
active=colors["frost-2"],
inactive=colors["snow-1"],
highlight_color=colors["polar-1"],
borderwidth=2,
disable_drag=True,
highlight_method="line",
margin_x=0,
margin_y=3,
padding_y=8,
rounded=False,
this_current_screen_border=colors["aurora-2"], # ebcb8b
urgent_alert_method="line",
),
widget.WindowName(**widget_defaults),
music_widget,
widget.Systray(**widget_defaults),
widget.Sep(**widget_defaults),
widget.CPU(**widget_defaults, format="{freq_current}GHz {load_percent}%"),
widget.Memory(**widget_defaults,
format="{MemUsed:.0f}{mm}/{MemTotal:.0f}{mm}"),
widget.Sep(**widget_defaults),
widget.Battery(
**widget_defaults,
charge_char="",
discharge_char="",
empty_char="",
full_char="",
unknown_char="",
format="{char} {percent:2.0%}",
show_short_text=False,
),
Volume(
**widget_defaults,
# FIXME: This doesn't work right now.
# mouse_callbacks={"Button3": lambda: qtile.cmd_spawn("easyeffects")},
),
widget.Sep(**widget_defaults),
widget.CurrentLayout(**widget_defaults),
widget.Clock(**widget_defaults, format="%H:%M %a %Y-%m-%d"),
]
keys.extend([
Key([],
"XF86AudioPlay",
lazy.function(lambda _: music_widget.cmd_play_pause()),
desc="Play/Pause playback"),
Key([],
"XF86AudioNext",
lazy.function(lambda _: music_widget.cmd_next()),
desc="Next track"),
Key([],
"XF86AudioPrev",
lazy.function(lambda _: music_widget.cmd_previous()),
desc="Previous track"),
Key([],
"XF86AudioStop",
lazy.function(lambda _: music_widget.cmd_stop()),
desc="Stop playback"),
])
screens = [
Screen(
top=bar.Bar(
widgets,
**bar_defaults,
),
),
]
# Drag floating layouts.
mouse = [
Drag(
[mod],
"Button1",
lazy.window.set_position_floating(),
start=lazy.window.get_position(),
),
Drag([mod], "Button3", lazy.window.set_size_floating(),
start=lazy.window.get_size()),
Click([mod], "Button2", lazy.window.bring_to_front()),
]
dgroups_key_binder = None
dgroups_app_rules = []
follow_mouse_focus = True
bring_front_click = False
cursor_warp = False
floating_layout = layout.Floating(
float_rules=[
# Run the utility of `xprop` to see the wm class and name of an X client.
*layout.Floating.default_float_rules,
Match(wm_class="ssh-askpass"),
Match(wm_class="pinentry"),
]
)
auto_fullscreen = True
focus_on_window_activation = "smart"
reconfigure_screens = True
# If things like steam games want to auto-minimize themselves when losing
# focus, should we respect this or not?
auto_minimize = True
@hook.subscribe.startup
def start():
home = os.path.expanduser("~")
subprocess.call([home + "/.config/qtile/autostart.sh"])
def get_keys_description() -> str:
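# Builds a plain-text table of all key bindings, one binding per line, e.g.
# "Super + Return                 Launch terminal".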
key_help = ""
for k in keys:
mods = ""
for m in k.modifiers:
if m == "mod4":
mods += "Super + "
else:
mods += m.capitalize() + " + "
if len(k.key) > 1:
mods += k.key.capitalize()
else:
mods += k.key
key_help += "{:<30} {}".format(mods, k.desc + "\n")
return key_help
keys.extend(
[
Key(
[mod, "shift"],
"slash",
lazy.spawn(
"sh -c 'echo \""
+ get_keys_description()
+ '" | rofi -dmenu -i -mesg "Keyboard shortcuts"\''
),
desc="Print keyboard bindings",
),
]
)
# XXX: Gasp! We're lying here. In fact, nobody really uses or cares about this
# string besides java UI toolkits; you can see several discussions on the
# mailing lists, GitHub issues, and other WM documentation that suggest setting
# this string if your java app doesn't work correctly. We may as well just lie
# and say that we're a working one by default.
#
# We choose LG3D to maximize irony: it is a 3D non-reparenting WM written in
# java that happens to be on java's whitelist.
wmname = "LG3D"
|
Python
|
CL
|
7d6de91a3816917c88f26796cff3a39fb623ffefb3cdd2147d150af62afcd220
|
from persimmon import primitive, single, multi, utils
from persimmon.factory import ParserFactory
class StandardParserFactory(ParserFactory):
def make_rewind_iterator(self, data):
return utils.RewindIterator.make_rewind_iterator(data)
def make_success_parser(self, value):
return primitive.SuccessParser(self, value)
def make_satisfy_parser(self, steps=None):
steps = steps if steps is not None else []
return primitive.SatisfyParser(self, steps)
def make_choice_parser(self, parsers):
return multi.ChoiceParser(self, parsers)
def make_chain_parser(self, parsers):
return multi.ChainParser(self, parsers)
def make_sequence_parser(self, seq):
return single.AttemptParser(
self,
primitive.RawSequenceParser(self, seq)
)
def make_eof_parser(self):
return primitive.EndOfFileParser(self)
def make_attempt_parser(self, parser):
return single.AttemptParser(self, parser)
def make_map_parser(self, parser, func):
return single.MapParser(self, parser, func)
def make_filter_parser(self, parser, pred):
return single.FilterParser(self, parser, pred)
def make_transform_parser(self, parser, transform):
return single.TransformParser(self, parser, transform)
def combine_choice(self, left, right):
# TODO: how to combine?
pass
def combine_chain(self, left, right):
# TODO: how to combine?
pass
def make_repeat_parser(self, parser, min_results=0, max_results=None):
return single.RepeatParser(self, parser, min_results, max_results)
def make_labeled_parser(self, parser, label):
return single.LabeledParser(self, parser, label)
def make_noisy_parser(self, parser, noise):
return single.NoisyParser(self, noise, parser)
def make_delayed_parser(self, parser_func):
return single.DelayedParser(self, False, parser_func)
|
Python
|
CL
|
bf0035fa63eb568377d982fc8fbf34497789fccdfcc469db2acbd65ed359ae3d
|
#!/usr/bin/env python
#
# All modification made by Intel Corporation: Copyright (c) 2018 Intel Corporation
#
# All contributions by the University of California:
# Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
# All rights reserved.
#
# All other contributions:
# Copyright (c) 2014, 2015, the respective contributors
# All rights reserved.
# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import sys
import copy
import argparse
pycaffe = os.path.split(os.path.realpath(__file__))[0] + '/../python'
sys.path.insert(0, pycaffe)
import caffe
from caffe.proto import caffe_pb2
import google.protobuf.text_format as txtf
import sampling
import numpy as np
import first_conv_force_u8 as fu
quantize_layers = ["Convolution"]
memory_layers = ["ReLU", "Split", "Concat", "Pooling", "Eltwise"]
int8_layers = quantize_layers + memory_layers
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
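# For example, isclose(1.0, 1.0 + 1e-10) is True with the default tolerances,
# while isclose(1.0, 1.01) is False.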
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def check_existence(path):
try:
return os.path.exists(path)
except Exception as e:
raise ("Failed to check {} existence due to {}".format(path, str(e)))
def setup_env():
os.chdir(os.path.dirname(os.path.abspath(params.root)))
caffe.set_mode_cpu()
def read_prototxt(prototxt):
try:
if not check_existence(prototxt):
return None
net = caffe_pb2.NetParameter()
with open(prototxt) as f:
txtf.Merge(f.read(), net)
return net
except Exception as e:
print ("Failed to read {} due to {}".format(prototxt, e))
def get_input_layers(l, net, end):
top_layers = []
for layerIndex in range(0, end):
reverse_layer_index = end - layerIndex - 1
for blobIndex in range(0, len(net.layer[reverse_layer_index].top)):
if net.layer[reverse_layer_index].top[blobIndex] in l.bottom:
top_layers.append((reverse_layer_index, net.layer[reverse_layer_index].name,
net.layer[reverse_layer_index].type))
return top_layers
def get_input_convolutions(l, net, end, interesting_layers, uninteresting_layers=[]):
all_input_layers = []
input_layers = get_input_layers(l, net, end)
while True:
if len(input_layers) == 0:
break
processed_layers = input_layers # sync inputLayers change
for lp in processed_layers:
if lp[2] not in int8_layers:
input_layers.remove(lp)
continue
if lp[2] in interesting_layers:
input_layers.remove(lp)
if lp not in all_input_layers:
all_input_layers.append(lp)
continue
if lp[2] not in uninteresting_layers:
new_input_layers = get_input_layers(net.layer[lp[0]], net, lp[0])
input_layers.remove(lp)
input_layers.extend(new_input_layers)
else:
input_layers.remove(lp)
return all_input_layers
def analyze_conv_output_with_relu(compiled_net):
convs_output_with_relu = []
for _, layer in enumerate(compiled_net.layer):
if layer.type == 'Convolution':
if layer.convolution_param.relu and isclose(layer.convolution_param.negative_slope, 0.0):
convs_output_with_relu.append(layer.name)
return convs_output_with_relu
def analyze_conv_input_u8(conv_inputs, convs_output_with_relu):
for conv_input in conv_inputs:
if conv_input[1] not in convs_output_with_relu:
return False
return True
def find_index_by_name(name, layer_infos):
for (l, index) in layer_infos:
if name == l.name:
return index
return -1
def is_convolution_input_u8(l, net, end, interesting_layers, convs_output_with_relu):
all_input_layers = []
input_layers = get_input_layers(l, net, end)
while True:
if len(input_layers) == 0:
break
for input_layer in input_layers:
if input_layer[2] in quantize_layers and (input_layer[1] not in convs_output_with_relu):
return False
processed_layers = input_layers # sync inputLayers change
for lp in processed_layers:
if lp[2] not in int8_layers:
input_layers.remove(lp)
continue
if lp[2] in interesting_layers:
input_layers.remove(lp)
if lp not in all_input_layers:
all_input_layers.append(lp)
continue
new_input_layers = get_input_layers(net.layer[lp[0]], net, lp[0])
input_layers.remove(lp)
input_layers.extend(new_input_layers)
return True
def analyze_conv_output_with_relu_from_net(convs_output_with_relu, compiled_net, net):
new_convs_output_with_relu = []
new_convs_output_with_relu.extend(convs_output_with_relu)
compiled_relu_layers = [(value, index) for index, value in enumerate(compiled_net.layer) if value.type == 'ReLU' and value.relu_param.negative_slope != 0]
if len(compiled_relu_layers) != 0:
relu_layers = [(value, index) for index, value in enumerate(net.layer) if value.type == 'ReLU']
for (l, index) in relu_layers:
conv_inputs = get_input_convolutions(l, net, index, ["Convolution"]) # FIXME
new_convs_output_with_relu.append(conv_inputs[0][1])
return new_convs_output_with_relu
def transform_convolutions(model_path, compiled_model_path, top_blobs_map, bottom_blobs_map, use_unsigned_range, concat_use_fp32, unify_concat_scales, conv_algo, enable_1st_conv = False):
net = caffe_pb2.NetParameter()
with open(model_path) as f:
s = f.read()
txtf.Merge(s, net)
compiled_net = caffe_pb2.NetParameter()
with open(compiled_model_path) as f:
s = f.read()
txtf.Merge(s, compiled_net)
convs_output_with_relu = analyze_conv_output_with_relu(compiled_net)
# extended convs output with relu is used for convs that cannot fuse with relu due to negative slope
# extended_convs_output_with_relu = analyze_conv_output_with_relu_from_net(convs_output_with_relu, compiled_net, net)
new_net = copy.deepcopy(net)
convolution_layers = [(value, index) for index, value in enumerate(net.layer) if value.type in quantize_layers]
compiled_convolution_layers = [(value, index) for index, value in enumerate(compiled_net.layer) if value.type in quantize_layers]
u8_max = 255
s8_max = 127
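# Illustrative note (not from the original script): the loops below replace each
# calibrated scale s with range_max / s, e.g. a u8 output scale of 2.0 is stored
# as 255 / 2.0 = 127.5.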
first_conv = True if enable_1st_conv else False
for (l, index) in convolution_layers:
for si in range(0, len(new_net.layer[index].quantization_param.scale_out)):
if l.name in convs_output_with_relu: # u8
new_net.layer[index].quantization_param.scale_out[si] = u8_max / new_net.layer[index].quantization_param.scale_out[si]
else: # s8
if use_unsigned_range:
new_net.layer[index].quantization_param.scale_out[si] = u8_max / new_net.layer[index].quantization_param.scale_out[si]
else:
new_net.layer[index].quantization_param.scale_out[si] = s8_max / new_net.layer[index].quantization_param.scale_out[si]
index_in_compiled_net = find_index_by_name(l.name, compiled_convolution_layers)
assert(index_in_compiled_net >= 0)
#conv_inputs = get_input_convolutions(l, compiled_net, index_in_compiled_net, ["Convolution"])
#conv_input_u8 = analyze_conv_input_u8(conv_inputs, convs_output_with_relu)
conv_input_u8 = is_convolution_input_u8(l, compiled_net, index_in_compiled_net, ["Convolution"], convs_output_with_relu) # FIXME: extended_convs_output_with_relu
for si in range(0, len(new_net.layer[index].quantization_param.scale_in)):
if conv_input_u8: # u8
if first_conv:
new_net.layer[index].quantization_param.scale_in[si] = s8_max / new_net.layer[index].quantization_param.scale_in[si]
new_net.layer[index].quantization_param.is_negative_input = True
first_conv = False
else:
new_net.layer[index].quantization_param.scale_in[si] = u8_max / new_net.layer[index].quantization_param.scale_in[si]
else:
new_net.layer[index].quantization_param.scale_in[si] = s8_max / new_net.layer[index].quantization_param.scale_in[si]
new_net.layer[index].quantization_param.is_negative_input = True
for si in range(0, len(new_net.layer[index].quantization_param.scale_params)):
if not isclose(new_net.layer[index].quantization_param.scale_params[si], 0.0):
new_scale_param = s8_max / new_net.layer[index].quantization_param.scale_params[si]
if np.isinf(new_scale_param):
new_scale_param = 0.0
new_net.layer[index].quantization_param.scale_params[si] = new_scale_param
else:
new_net.layer[index].quantization_param.scale_params[si] = 0.0
if conv_algo:
# Recompute conv_inputs here, since the earlier computation above is commented out.
conv_inputs = get_input_convolutions(l, compiled_net, index_in_compiled_net, ["Convolution"])
for conv_input in conv_inputs:
index_bottom_layer = find_index_by_name(conv_input[1], convolution_layers)
for si in range(0, len(new_net.layer[index_bottom_layer].quantization_param.scale_out)):
new_net.layer[index_bottom_layer].quantization_param.scale_out[si] = new_net.layer[index].quantization_param.scale_in[si]
concat_layers = [(value, index) for index, value in enumerate(net.layer) if value.type == 'Concat']
if len(concat_layers) > 0:
compiled_concat_layers = [(value, index) for index, value in enumerate(compiled_net.layer) if value.type == 'Concat']
concat_layers.reverse()
if unify_concat_scales:
for (l, index) in concat_layers:
index_in_compiled_net = find_index_by_name(l.name, compiled_concat_layers)
assert(index_in_compiled_net >= 0)
conv_inputs = get_input_convolutions(l, compiled_net, index_in_compiled_net, ["Convolution"], ["Concat"])
# TODO: support reasonable cross-level concat scale unification
min_concat_scale = sys.float_info.max
concat_input_indexes = []
for conv_input in conv_inputs:
index_in_net = find_index_by_name(conv_input[1], convolution_layers)
assert(index_in_net >= 0)
concat_input_indexes.append(index_in_net)
if new_net.layer[index_in_net].quantization_param.scale_out[0] < min_concat_scale:
min_concat_scale = new_net.layer[index_in_net].quantization_param.scale_out[0]
for concat_input_index in concat_input_indexes:
new_net.layer[concat_input_index].quantization_param.scale_out[0] = min_concat_scale
else:
if concat_use_fp32:
for (l, index) in concat_layers:
index_in_compiled_net = find_index_by_name(l.name, compiled_concat_layers)
assert(index_in_compiled_net >= 0)
conv_inputs = get_input_convolutions(l, compiled_net, index_in_compiled_net, ["Convolution"])
for conv_input in conv_inputs:
index_in_net = find_index_by_name(conv_input[1], convolution_layers)
assert(index_in_net >= 0)
new_net.layer[index_in_net].quantization_param.bw_layer_out = 32
new_net.layer[index_in_net].quantization_param.scale_out[:] = [1.0]
with open(model_path, 'w') as f:
f.write(str(new_net))
def generate_sample_bak(sample_path, input_model, weights,
quantized_model, detection, scaling_mode, iterations=1, error_margin=1):
cmd = '{0} quantize -model {1} -weights {2} -model_quantized {3} -iterations {4} -error_margin {5} ' \
' -scaling {6} -trimming_mode dynamic_fixed_point'.format(sample_path, input_model, weights, quantized_model,
iterations, error_margin, scaling_mode)
if detection:
cmd += ' --detection=1'
os.system(cmd)
def generate_sample(input_model, weights, quantized_model, scaling_mode, calibration_algo, conv_algo, iterations=10, enable_1st_conv=False, sampling_single=False):
(blobs, params, top_blobs_map, bottom_blobs_map, conv_top_blob_layer_map, conv_bottom_blob_layer_map, winograd_bottoms, winograd_convolutions) = sampling.sample(input_model, weights, conv_algo, iterations, enable_1st_conv, sampling_single)
(inputs_max, outputs_max, inputs_min) = sampling.calibrate_activations(blobs, conv_top_blob_layer_map, conv_bottom_blob_layer_map, winograd_bottoms, calibration_algo, "SINGLE", conv_algo)
params_max = sampling.calibrate_parameters(params, winograd_convolutions, "DIRECT", scaling_mode.upper(), conv_algo)
generate_sample_impl(input_model, quantized_model, inputs_max, outputs_max, inputs_min, params_max, enable_1st_conv)
return (top_blobs_map, bottom_blobs_map)
def generate_sample_impl(input_model, quantized_model, inputs_max, outputs_max, inputs_min, params_max, enable_1st_conv=False):
net = caffe_pb2.NetParameter()
with open(input_model) as f:
s = f.read()
txtf.Merge(s, net)
new_net = copy.deepcopy(net)
convolution_layers = [(value, index) for index, value in enumerate(net.layer) if value.type in quantize_layers]
first_conv = False if enable_1st_conv else True
for (l, index) in convolution_layers:
if first_conv:
first_conv = False
continue
new_net.layer[index].quantization_param.bw_layer_in = 8
new_net.layer[index].quantization_param.bw_layer_out = 8
new_net.layer[index].quantization_param.bw_params = 8
new_net.layer[index].quantization_param.scale_in[:] = inputs_max[l.name]
new_net.layer[index].quantization_param.scale_out[:] = outputs_max[l.name]
new_net.layer[index].quantization_param.scale_params[:] = params_max[l.name]
with open(quantized_model, 'w') as f:
f.write(str(new_net))
def get_compiled_net(caffe_bin, model_def, model_weights, detection):
output_log_name = '.compiled_net.txt'
cmd = '{} test -model {} -weights {} -iterations 1 -sampling'.format(caffe_bin, model_def, model_weights)
if detection:
cmd += ' -detection'
cmd += ' 2>&1 > {}'.format(output_log_name)
os.environ['GLOG_minloglevel'] = '2'
os.system(cmd)
os.environ.pop('GLOG_minloglevel')
return os.path.abspath(output_log_name)
def get_the_accuracy(caffe_bin, model_def, model_weights, iterations, detection, blob_name):
output_log_name = 'calibrator_log.txt'
cmd = '{} test -model {} -weights {} -iterations {}'.format(caffe_bin, model_def, model_weights, iterations)
if detection:
cmd += ' -detection'
cmd += ' 2>&1|tee {}'.format(output_log_name)
os.system(cmd)
with open(output_log_name) as f:
data = f.readlines()
for i in data[::-1]:
if i.find('{} = '.format(blob_name)) != -1:
try:
splits = i.split('=')
last_split = splits[-1].strip().split(' ')[0] # accuracy or loss
return float(last_split)
except Exception as e:
print 'Failed to generate accuracy due to {}'.format(str(e))
sys.exit(-1)
print 'Failed to get accuracy, please check the parameters and rerun the scripts.'
sys.exit(-1)
def remove_top_quantized_parameter(current_quantized_file):
net = read_prototxt(current_quantized_file)
for i in net.layer:
if i.type == 'Convolution' and i.HasField('quantization_param'):
i.ClearField('quantization_param')
break
with open(current_quantized_file, 'w') as f:
f.write(str(net))
def tuning_quantized_topology(base_top1_accuracy, prototxt, caffe_bin, model_weights, top_blob_map, bottom_blobs_map, iterations,
accuracy_loss, detection, blob_name, quantize_only, use_unsigned_range,
concat_use_fp32, unify_concat_scales, conv_algo, enable_1st_conv):
print 'Updating quantization parameter...'
transform_convolutions(prototxt, get_compiled_net(caffe_bin, prototxt, model_weights, detection), top_blobs_map, bottom_blobs_map,
use_unsigned_range, concat_use_fp32, unify_concat_scales, conv_algo, enable_1st_conv)
if quantize_only:
return
current_top1_accuracy = get_the_accuracy(caffe_bin, prototxt, model_weights, iterations, detection, blob_name)
#while abs(current_top1_accuracy - base_top1_accuracy) >= accuracy_loss:
# print 'Tuning... '
# print abs(current_top1_accuracy - base_top1_accuracy)
# remove_top_quantized_parameter(prototxt)
# current_top1_accuracy = get_the_accuracy(caffe_bin, prototxt, model_weights, iterations, detection, blob_name)
def accuracy_blob_name_parser(prototxt):
net = read_prototxt(prototxt)
if not net:
print 'Please check the model prototxt integrity.'
sys.exit(-1)
res = {}
for i in net.layer:
if i.type == 'Accuracy':
if i.HasField('accuracy_param'):
res[i.accuracy_param.top_k] = i.top[0]
else:
res[1] = i.top[0]
return res[sorted(res.keys())[0]] if res else ''
def check_blob_name_existence(prototxt, blob_name):
net = read_prototxt(prototxt)
if not net.layer:
print 'Please check the model prototxt integrity.'
sys.exit(-1)
for i in net.layer[::-1]:
for _, value in enumerate(i.top):
if value == blob_name:
return True
return False
def generate_dummy_model(model_path, dummy):
net = caffe_pb2.NetParameter()
with open(model_path) as f:
s = f.read()
txtf.Merge(s, net)
first_conv = True
convolution_layers = [(value, index) for index, value in enumerate(net.layer) if value.type == 'Convolution']
for (l, index) in convolution_layers:
if first_conv:
first_conv = False
continue
net.layer[index].quantization_param.bw_layer_in = 8
net.layer[index].quantization_param.bw_layer_out = 8
net.layer[index].quantization_param.bw_params = 8
net.layer[index].quantization_param.scale_in[:] = [1.0]
net.layer[index].quantization_param.scale_out[:] = [1.0]
net.layer[index].quantization_param.scale_params[:] = [1.0]
with open(dummy, 'w') as f:
f.write(str(net))
def force_fp32_opt(quantized_prototxt):
net = caffe_pb2.NetParameter()
with open(quantized_prototxt) as f:
s = f.read()
txtf.Merge(s, net)
base_net = caffe_pb2.NetParameter()
compiled_net_str = caffe.compile_net(quantized_prototxt, caffe.TEST, "MKLDNN")
txtf.Merge(compiled_net_str, base_net)
new_net = copy.deepcopy(net)
quantize_layers_indexes = [index for index, value in enumerate(base_net.layer) if value.type in quantize_layers]
layer_infos = [(value, index) for index, value in enumerate(new_net.layer)]
layer_bottom_name_map={}
for index,layer in enumerate(base_net.layer):
for bottom in layer.bottom:
if bottom not in layer_bottom_name_map.keys():
layer_bottom_name_map[bottom]=[index]
else:
layer_bottom_name_map[bottom].append(index)
for index in quantize_layers_indexes:
if int(base_net.layer[index].quantization_param.bw_layer_out) != 32:
force_fp32 = True
if base_net.layer[index].top[0] in layer_bottom_name_map.keys():
bottom_layer_indexes=layer_bottom_name_map[base_net.layer[index].top[0]]
for bottom_layer_index in bottom_layer_indexes:
next_layer = base_net.layer[bottom_layer_index]
if next_layer.top == next_layer.bottom and next_layer.type not in int8_layers:
force_fp32 = True
break
if next_layer.type in int8_layers:
force_fp32 = False
if force_fp32 or index == np.max(quantize_layers_indexes):
new_net_index=find_index_by_name(base_net.layer[index].name, layer_infos)
new_net.layer[new_net_index].quantization_param.scale_out[:]=[1.0]
new_net.layer[new_net_index].quantization_param.bw_layer_out=32
print(new_net.layer[new_net_index].name)
with open(quantized_prototxt, 'w') as f:
f.write(str(new_net))
print('force_fp32 done')
def find_next_layers(net, index):
layer = net.layer[index]
next_layers = []
for i in range(index + 1, len(net.layer)):
for top in layer.top:
if top in net.layer[i].bottom:
next_layers.append(i)
return next_layers
def find_previous_layers(net, index):
layer = net.layer[index]
previous_layers = []
for i in range(index):
for bottom in layer.bottom:
if bottom in net.layer[i].top:
previous_layers.append(i)
return previous_layers
def find_down_quantize(net, index):
next_layers = find_next_layers(net, index)
if len(next_layers) == 0:
return None
elif len(next_layers) == 1:
next_layer = net.layer[next_layers[0]]
if next_layer.type in quantize_layers and int(next_layer.quantization_param.bw_layer_in) == 8:
return next_layers[0]
elif next_layer.type in memory_layers:
return find_down_quantize(net, next_layers[0])
else:
return None
else: # just consider one branch
return None
def find_up_quantize(net, index):
previous_layers = find_previous_layers(net, index)
has_concat = False
if len(previous_layers) == 0:
return None, has_concat
elif len(previous_layers) == 1:
previous_layer = net.layer[previous_layers[0]]
if previous_layer.type in quantize_layers and int(previous_layer.quantization_param.bw_layer_out) == 8:
return previous_layers[0], has_concat
elif previous_layer.type in memory_layers:
return find_up_quantize(net, previous_layers[0])
else:
return None, has_concat
else:
if net.layer[index].type == 'Concat':
has_concat = True
concat_output_layers = find_next_layers(net, index)
for concat_output in concat_output_layers:
if net.layer[concat_output].type in quantize_layers and int(net.layer[concat_output].quantization_param.bw_layer_in) == 8:
return concat_output, has_concat
return None, has_concat
else:
return None, has_concat
def cac_opt(quantized_prototxt):
net = caffe_pb2.NetParameter()
with open(quantized_prototxt) as f:
s = f.read()
txtf.Merge(s,net)
base_net = caffe_pb2.NetParameter()
compiled_net_str = caffe.compile_net(quantized_prototxt, caffe.TEST, "MKLDNN")
txtf.Merge(compiled_net_str, base_net)
new_net = copy.deepcopy(net)
avg_pool_layers = [index for index, value in enumerate(base_net.layer) if value.type == 'Pooling' and value.pooling_param.pool == 1] #max=0 ave=1
layer_infos = [(value, index) for index, value in enumerate(new_net.layer)]
for index in avg_pool_layers:
up_quantize, has_concat = find_up_quantize(base_net, index)
down_quantize = find_down_quantize(base_net, index)
if up_quantize and down_quantize:
new_net_up_index = find_index_by_name(base_net.layer[up_quantize].name, layer_infos)
new_net_down_index = find_index_by_name(base_net.layer[down_quantize].name, layer_infos)
if not has_concat:
new_net.layer[new_net_down_index].quantization_param.scale_in[:]=new_net.layer[new_net_up_index].quantization_param.scale_out[:]
else:
new_net.layer[new_net_down_index].quantization_param.scale_in[:]=new_net.layer[new_net_up_index].quantization_param.scale_in[:]
print([new_net.layer[new_net_up_index].name,new_net.layer[new_net_down_index].name])
with open(quantized_prototxt, 'w') as f:
f.write(str(new_net))
print('cac opt done')
def enable_fc_int8():
local_q = quantize_layers + ["InnerProduct"]
local_i = int8_layers + ["InnerProduct"]
global quantize_layers
global int8_layers
quantize_layers = local_q
int8_layers = local_i
if __name__ == '__main__':
usage_string = 'Usage: 1.Build the caffe\n ' \
'2.cd /path/to/caffe/scripts\n ' \
'3.python calibrator.py ' \
' -r /path/to/caffe/build ' \
' -w pre-trained-fp32 weights ' \
' -m topology ' \
' -i iterations ' \
' -l acceptable accuracy loss value, the default value is 0.01, which stands for one percent' \
' -d detection flag (0 means classification while 1 means detection), the default value is 0' \
' -n blob name which means accuracy' \
' -c scaling mode, the default value is single' \
' -s sampling iterations'
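# A hypothetical invocation based on the usage string above (paths and file
# names are placeholders, not taken from this repository):
#   python calibrator.py -r /path/to/caffe/build -w model_fp32.caffemodel \
#       -m model.prototxt -i 1000 -n accuracy -c single -s 10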
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-h', '--help', action='help', help=usage_string)
parser.add_argument('-i', '--iterations', action='store', dest='iterations', default=10,
help='number of iterations needed to complete one epoch.')
parser.add_argument('-w', '--weights', action='store', dest='weights', default='',
help='pre-trained-fp32-weights.')
parser.add_argument('-m', '--model', action='store', dest='model', default='',
help='topology definition prototxt.')
parser.add_argument('-l', '--accuracy_loss', action='store', dest='loss', default=0.01,
help='the acceptable accuracy loss caused by 8-bit quantization, '
'default value is 0.01 (one percent).')
parser.add_argument('-d', '--detection', action='store', dest='is_detection', default=0,
help='0 for classification while 1 for detection, default value is 0.')
parser.add_argument('-r', '--root', action='store', dest='root', default='',
help='caffe build path')
parser.add_argument('-n', '--blob_name', action='store', dest='blob_name', default='',
help='top blob name which stands for accuracy')
parser.add_argument('-c', '--weights_channel', action='store', dest='scaling_mode', default='single',
help='the scaling mode for weights')
parser.add_argument('-s', '--sampling_iterations', action='store', dest='sampling_iterations', default=10,
help='iteration number of sampling, the default value is 10.')
parser.add_argument('-ss', '--sampling_single', action='store_true', dest='sampling_single', default=False,
help='sampling single batch')
parser.add_argument('-p', '--performance_model', dest='performance_model', action="store_true", default=False,
help='to generate model to measure performance only')
parser.add_argument('-q', '--quantize_model', dest='quantize_model', action="store_true", default=False,
help='to quantize the model only')
parser.add_argument('-u', '--unsigned_range', dest='unsigned_range', action="store_true", default=False,
help='to quantize using unsigned range for activation')
parser.add_argument('-t', '--concat_use_fp32', dest='concat_use_fp32', action="store_true", default=False,
help='to use fp32 for concat')
parser.add_argument('-f', '--unify_concat_scales', dest='unify_concat_scales', action="store_true", default=False,
help='to unify concat scales')
parser.add_argument('-a', '--calibration_algos', dest='calibration_algos', action='store', default="DIRECT",
help='to choose the calibration algorithm')
parser.add_argument('-wi', '--conv_algo', dest='conv_algo', action="store_true", default=False,
help='to choose the convolution algorithm')
parser.add_argument('-1st', '--enable_1st_conv', dest='enable_1st_conv', action="store_true", default=False,
help='to enable 1st conv quantization')
parser.add_argument('-uff', '--disable_force_fp32', dest='disable_force_fp32', action="store_true", default=False,
help='to disable force fp32 output in conv/fc + fp32')
parser.add_argument('-ucac', '--disable_cac_unify', dest='disable_cac_unify', action="store_true", default=False,
help='to disable scale unify in conv/fc + avg pooling + conv/fc')
parser.add_argument('-fc', '--fc_int8', dest='fc_int8', action="store_true", default=False,
help='to enable int8 fc in quantized model')
parser.add_argument('-fu', '--first_conv_force_u8', dest='first_conv_force_u8', action="store_true", default=False,
help='to enable 1st conv force u8 input')
parser.add_argument('-clx', '--is_clx', dest='is_clx', action="store_true", default=False,
help='only works with -fu; indicates the test machine is a CLX')
params = parser.parse_args()
if not check_existence(params.root):
print 'Please check the {} existence.'.format(params.root)
sys.exit(-1)
pycaffe_path = os.path.abspath(os.path.dirname(os.path.abspath(params.root))) + os.path.sep + 'python'
if not check_existence(pycaffe_path):
print "Please check the pycaffe existence.Suggest to rebuild pycaffe via 'make pycaffe'"
sys.path.insert(0, pycaffe_path)
import caffe
from caffe.proto import caffe_pb2
model = os.path.abspath(params.model)
if not check_existence(model):
print 'Please check model: {} existence.'.format(model)
sys.exit(-1)
dummy_prototxt = model.rsplit('.')[0] + '_dummy.prototxt'
if params.performance_model:
generate_dummy_model(model, dummy_prototxt)
print 'Updated prototxt {} is generated.'.format(dummy_prototxt)
sys.exit(0)
try:
user_input_iterations = int(params.iterations)
except:
print 'Set the iterations to the default value 1000'
user_input_iterations = 1000
else:
if user_input_iterations < 1:
print 'Invalid iterations! The value should be larger than zero.'
sys.exit(-1)
try:
user_sampling_iteration = int(params.sampling_iterations)
except:
print 'Set the sampling iteration to the default value 10'
user_sampling_iteration = 10
else:
if user_sampling_iteration < 1:
print 'Invalid sampling iteration! The value should be larger than zero.'
sys.exit(-1)
if params.scaling_mode != 'multiple' and params.scaling_mode != 'single':
user_scaling_mode = 'single'
else:
user_scaling_mode = params.scaling_mode
if params.calibration_algos != 'DIRECT' and params.calibration_algos != "KL" and params.calibration_algos != "MAXP" and params.calibration_algos != "MA":
user_calibration_algos = 'DIRECT'
else:
user_calibration_algos = params.calibration_algos
if params.conv_algo != False and params.conv_algo != True:
user_conv_algo = False
else:
user_conv_algo = params.conv_algo
if params.fc_int8:
enable_fc_int8()
try:
toleration = float(params.loss)
if toleration >= 1 or toleration < 0:
toleration = 0.01
except:
print 'Set the toleration to 1%.'
toleration = 0.01
try:
detection_flag = 1 if int(params.is_detection) == 1 else 0
except:
print 'Set the test type to classification.'
detection_flag = 0
user_enable_1st_conv = params.enable_1st_conv
user_input_weights = os.path.abspath(params.weights)
if not check_existence(user_input_weights):
print 'Please check weights: {} existence.'.format(user_input_weights)
sys.exit(-1)
sample = os.path.abspath(params.root + os.path.sep + 'tools/sample')
if not check_existence(sample):
print 'Please check sample: {} existence.'.format(sample)
sys.exit(-1)
caffe_bin_path = os.path.abspath(params.root + os.path.sep + 'tools/caffe')
if not check_existence(caffe_bin_path):
print 'Please check caffe binary: {} existence.'.format(caffe_bin_path)
sys.exit(-1)
setup_env()
target_blob_name = params.blob_name
if not params.quantize_model:
if not target_blob_name and not detection_flag:
target_blob_name = accuracy_blob_name_parser(model)
if not target_blob_name or not check_blob_name_existence(model, target_blob_name):
print 'Please specify valid blob name and rerun the script.'
sys.exit(-1)
quantized_prototxt = model.rsplit('.')[0] + '_quantized.prototxt'
print 'Sampling...'
(top_blobs_map, bottom_blobs_map) = generate_sample(model, user_input_weights, quantized_prototxt, user_scaling_mode, user_calibration_algos, user_conv_algo, user_sampling_iteration, user_enable_1st_conv, params.sampling_single)
print 'Sampling done'
top_1 = None
if not params.quantize_model:
print 'Generating the FP32 accuracy...'
top_1 = get_the_accuracy(caffe_bin_path, model, user_input_weights, user_input_iterations, detection_flag,
target_blob_name)
print 'FP32 accuracy is: {}'.format(top_1)
tuning_quantized_topology(top_1, quantized_prototxt, caffe_bin_path, user_input_weights, top_blobs_map, bottom_blobs_map, user_input_iterations,
toleration, detection_flag, target_blob_name, params.quantize_model, params.unsigned_range,
params.concat_use_fp32, params.unify_concat_scales, params.conv_algo, user_enable_1st_conv)
if not params.disable_force_fp32:
force_fp32_opt(quantized_prototxt)
if not params.disable_cac_unify:
cac_opt(quantized_prototxt)
print 'Updated prototxt {} is generated.'.format(quantized_prototxt)
if params.first_conv_force_u8:
fu.first_conv_u8_input(quantized_prototxt, user_input_weights, params.is_clx)
|
Python
|
CL
|
d090e0719ac60a6f0f6660726be18482658548a2b7540e4963d77e867b805865
|
"""Contains most of the functions for the CLI. Put here for testing purposes."""
import click
from PyInquirer import prompt
import PyInquirer
import jsonschema
from ddl.assetpack import AssetpackFactory
from ddl.validator import Validator
from ddl.renderer import Renderer
import ddl.asset_exploration
from ddl.asset_exploration import explore_assets, show_pack_info, show_projection_info
import ddl.image_helper
from ddl.asset import ComponentAsset
from ddl.cli_utils import *
import os
import tkinter as tk
from PIL import Image, ImageTk
@click.group()
def main():
"""A CLI tool for validating and examining assetpacks, and in the future
designing components, tweaking assetpacks and generally everything."""
pass
@main.command()
@click.argument('path')
def validate_assetpack(path):
"""Validates an assetpack and errors if anything is wrong.
path: The path of the asset pack directory.
"""
pack = False
images = False
components = False
error_header = '\n\n########ERROR########'
abs_path = os.path.abspath(path)
try:
Validator.validate_file(abs_path + '/pack.json', 'pack')
pack = True
print("Pack validated")
except FileNotFoundError:
print(error_header)
print(path + '/pack.json was not found.')
except jsonschema.exceptions.ValidationError as val:
print(error_header)
print(val.message.split('/n')[0])
except Exception:
raise
try:
Validator.validate_file(abs_path + '/images.json', 'images')
images = True
print("Images validated")
except FileNotFoundError:
print(error_header)
print(path + '/images.json was not found.')
except jsonschema.exceptions.ValidationError as val:
print(error_header)
print(val.message.split('/n')[0])
except Exception:
raise
try:
filepath = abs_path + '/components.json'
Validator.validate_file(filepath, 'components')
components = True
print("Components validated")
except FileNotFoundError:
print(error_header)
print(path + '/components.json was not found.')
except jsonschema.exceptions.ValidationError as val:
print(error_header)
print(val.message.split('/n')[0])
except Exception:
raise
if pack and images and components:
print("Validation passed. "+path+" is a good assetpack.")
@main.command()
@click.argument('path')
def explore_assetpack(path):
"""
Lets a user interactively show things in an assetpack.
path: The path of the asset pack directory.
"""
path = os.path.abspath(path)
assetpack = AssetpackFactory.load(path)
exit_cli = False
while not exit_cli:
init = [{
'type': 'list',
'message': 'What would you like to do?',
'name': 'choices',
'choices': [
'See pack information',
'See projection information',
'Explore Assets',
'Quit'
]
}]
choice = prompt(init, style=STYLE)
print("")
option_chosen = choice['choices']
if option_chosen == 'Quit':
exit_cli = True
elif option_chosen == 'See pack information':
show_pack_info(path)
elif option_chosen == 'See projection information':
show_projection_info(assetpack)
elif option_chosen == 'Explore Assets':
explore_assets(assetpack)
print("")
def validate_component_id(new_id, assetpack):
"""Validates a component ID against the IDS in an assetpack"""
full_id = assetpack.pack_id + '.' + new_id
if len(new_id) < 3:
message = 'Try an ID with more than 2 characters.'
raise PyInquirer.ValidationError(message=message)
if full_id in assetpack.components.keys():
message = 'This component name already exists in the assetpack.'
raise PyInquirer.ValidationError(message=message)
return True
def add_component(initial_option, component, assetpack):
"""Lets a user choose what they want to see about an asset"""
asset_type, asset_key = initial_option.split(': ')
coordinates_questions = [{
'type': 'input',
'message': 'Where is this in the x dimension?',
'name': 'x',
'validate': check_number
},
{
'type': 'input',
'message': 'Where is this in the y dimension?',
'name': 'y',
'validate': check_number
}
]
coordinates = prompt(coordinates_questions, style=STYLE)
component_x = float(coordinates['x'])
component_y = float(coordinates['y'])
if asset_type == 'Image':
asset = assetpack.images[asset_key]
component.add_image(asset,
component_x,
component_y)
else:
asset = assetpack.components[asset_key]
component.add_component(asset,
component_x,
component_y)
def init_component(assetpack, info):
"""Initialise a blank component"""
component_name = info['component_name']
component_id = info['component_id']
component_tags = list(map(str.strip, info['component_tags'].split(',')))
component_parts = []
data = {
"name": component_name,
"id": component_id,
"parts": component_parts,
"tags": component_tags
}
return ComponentAsset(data, assetpack)
def choose_asset(component, asset_choices, assetpack):
"""The options for the inside of the Add component loop"""
explore = [{
'type': 'list',
'message': 'Which asset would you like to add?',
'name': 'explore',
'choices': asset_choices
}]
choice = prompt(explore, style=STYLE)
print("")
option_chosen = choice['explore']
if not option_chosen == 'Back':
add_component(option_chosen, component, assetpack)
def reset_component_window(component, assetpack, root, old_canvas):
"""clears and redraws the component window"""
component.instantiate_sub_parts()
image_location_list = component.get_image_location_list(0, 0)
renderer = Renderer(image_pixel_list=assetpack.projection
.get_image_pixel_list(0, 0, image_location_list))
orig_image = renderer.output('variable')
image = get_rgb_image(orig_image)
canvas = tk.Canvas(width=orig_image.width, height=orig_image.height, bg='white')
canvas.create_image(0, 0, image=image, anchor=tk.NW)
canvas.pack()
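# Keep a reference to the PhotoImage on the canvas so it is not garbage-collected.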
canvas.image = image
if old_canvas is not None:
old_canvas.destroy()
old_canvas = canvas
root.update_idletasks()
root.update()
return old_canvas
def get_initial_component_info(assetpack):
"""Gets new component info from user"""
component_info = [{
'type': 'input',
'message': 'What would you like to call this component?',
'name': 'component_name'
},
{
'type': 'input',
'message': 'What ID would you like to give this component?',
'name': 'component_id',
'validate': lambda new_id: validate_component_id(new_id, assetpack)
},
{
'type': 'input',
'message': 'What tags should the component have?',
'name': 'component_tags'
}
]
return prompt(component_info, style=STYLE)
@main.command()
@click.argument('path')
def create_new_component(path):
"""
Lets a user interactively build a new component from an assetpack.
path: The path of the asset pack directory.
"""
path = os.path.abspath(path)
assetpack = AssetpackFactory.load(path)
info = get_initial_component_info(assetpack)
root = tk.Tk()
root.title(info['component_id'])
asset_choices = get_asset_choices(assetpack)
component = init_component(assetpack, info)
old_canvas = None
choice = ''
while not choice == 'Done':
choices = [{
'type': 'list',
'message': 'What would you like to do?',
'name': 'choice',
'choices': ['Add an asset', 'Done', 'Undo']
}]
choice = prompt(choices, style=STYLE)['choice']
if choice == 'Add an asset':
choose_asset(component, asset_choices, assetpack)
elif choice == 'Undo':
component.remove_last_part()
old_canvas = reset_component_window(component, assetpack, root, old_canvas)
print("")
component.reset_sub_parts()
print(component.get_json())
@main.command()
@click.argument('path')
@click.option('--gridtype', type=click.Choice(['isometric', 'topdown']), prompt=True)
@click.option('--width', prompt=True)
@click.option('--height', prompt=True)
def create_new_images(path, gridtype, width, height):
"""Iterates through all .png images in a directory and lets you set the information for them."""
check_integer(width)
check_integer(height)
ddl.image_helper.show_directory(path, gridtype, int(height), int(width))
|
Python
|
CL
|
720cc7b0c7054981a43668ae7f0a9c0925862fbfe2eca45137f26777ec69222f
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import logging
import os
import shutil
from functools import cmp_to_key
import paddle.fluid as fluid
logging.basicConfig(
level=logging.INFO,
format='[%(levelname)s %(asctime)s line:%(lineno)d] %(message)s',
datefmt='%d %b %Y %H:%M:%S')
logger = logging.getLogger()
class ParameterConverter(object):
"""
Tool to convert pre-trained distributed fc parameters for inference or
fine-tuning. Note that the number of ranks or GPUs for inference or
fine-tuning can be different from that for pre-training.
"""
def __init__(self, model_dir, output_dir, num_trainers):
super(ParameterConverter, self).__init__()
self.model_dir = model_dir
self.output_dir = output_dir
self.pretrain_nranks = -1
self.emb_dim = -1
self.num_classes = -1
self.nranks = num_trainers
self.load_config()
def load_config(self):
"""
Load config file which contains the following information for
pre-training:
1. pretrain_nranks (int): number of ranks for pre-training;
2. emb_dim (int): embedding dim for pre-training;
3. num_classes (int): number of classes for classification.
"""
meta_file = os.path.join(self.model_dir, 'meta.json')
if not os.path.exists(meta_file):
logger.error("Meta file does not exist, make sure your pre-trained "
"models are legal.")
exit()
with open(meta_file, 'r') as handle:
config = json.load(handle)
self.pretrain_nranks = config['pretrain_nranks']
assert self.pretrain_nranks > 0
self.emb_dim = config['emb_dim']
assert self.emb_dim > 0
self.num_classes = config['num_classes']
assert self.num_classes > 0
logger.info("Parameters for pre-training: pretrain_nranks ({}), "
"emb_dim ({}), and num_classes ({}).".format(
self.pretrain_nranks,
self.emb_dim,
self.num_classes))
logger.debug("Parameters for inference or fine-tuning: "
"nranks ({}).".format(self.nranks))
def find_var_names(self):
"""
Find all names of pre-trained parameters for the distributed fc layer,
e.g., dist@softmax@rank@00000.w_0, dist@softmax@rank@00000.b_0 etc.
We assume that names of distributed fc related parameters start with the
prefix dist@ and have @rank@ in their names.
"""
var_names = []
model_dir = os.path.abspath(self.model_dir)
if not os.path.exists(model_dir):
logger.error("The directory for pre-trained model ({}) does not "
"exist, please check it.".format(model_dir))
exit()
logger.info("The directory for pre-trained model: {}".format(model_dir))
for file in os.listdir(model_dir):
if 'dist@' in file and '@rank@' in file:
var_names.append(file)
assert len(var_names) > 0, \
logger.error("No distributed fc parameters found.")
logger.info("Number of distributed fc parameters: {}.".format(
len(var_names)))
logger.info("Distributed fc parameters: {}.".format(var_names))
return var_names
def split_load_and_save(self,
name_index,
param_names,
save_rank_id,
remainder,
as_bias,
train_nshards,
train_nranks,
nshards,
dtype="float32"):
var2 = None
advance = False
emb_dim = self.emb_dim
main_program = fluid.Program()
startup_program = fluid.Program()
num_classes = self.num_classes
load_var_name = param_names[name_index]
save_var_name_list = load_var_name.split('.')
save_var_name_list[0] = save_var_name_list[0].split('@')
save_var_name_list[0][-1] = "%05d" % save_rank_id
save_var_name_list[0] = '@'.join(save_var_name_list[0])
save_var_name = '.'.join(save_var_name_list)
last_train_nshards = num_classes - (train_nranks - 1) * train_nshards
with fluid.program_guard(main_program, startup_program):
if name_index == train_nranks - 1:
var_dim = last_train_nshards
else:
var_dim = train_nshards
shape = [var_dim] if as_bias else [emb_dim, var_dim]
var = fluid.layers.create_parameter(shape,
dtype=dtype,
name=load_var_name)
if as_bias:
var = fluid.layers.slice(var,
axes=[0],
starts=[var.shape[0] - remainder],
ends=[var.shape[0]])
else:
var = fluid.layers.split(var,
[var.shape[1] - remainder,
remainder],
dim=1)[1]
save_var_dim = nshards
if remainder < nshards:
if name_index == train_nranks - 1:
save_var_dim = remainder
else:
name_index += 1
advance = True
load_var_name = param_names[name_index]
if name_index == train_nranks - 1:
var_dim = last_train_nshards
else:
var_dim = train_nshards
shape = [var_dim] if as_bias else [emb_dim, var_dim]
var2 = fluid.layers.create_parameter(shape,
dtype=dtype,
name=load_var_name)
if remainder + var_dim < nshards:
# The last train rank
save_var_dim = remainder + var_dim
else:
remainder = remainder + var_dim - nshards
elif remainder == nshards:
if name_index == train_nranks - 2:
remainder = last_train_nshards
advance = True
elif name_index < train_nranks - 2:
remainder = train_nshards
advance = True
else:
remainder = remainder - nshards
if var2 is not None:
var = fluid.layers.concat([var, var2], axis=0 if as_bias else 1)
shape = [save_var_dim] if as_bias else [emb_dim, save_var_dim]
to_save_var = fluid.layers.create_parameter(
shape,
dtype=dtype,
name=save_var_name + '_temp')
if save_var_dim != nshards: # get last dim
if as_bias:
temp_var = fluid.layers.slice(
var,
axes=[0],
starts=[var.shape[0] - save_var_dim],
ends=[var.shape[0]])
else:
temp_var = fluid.layers.split(
var,
[var.shape[1] - save_var_dim, save_var_dim],
dim=1)[1]
fluid.layers.assign(temp_var, to_save_var)
else:
if as_bias:
temp_var = fluid.layers.slice(var,
axes=[0],
starts=[0],
ends=[nshards])
else:
temp_var = fluid.layers.split(
var,
[nshards, var.shape[1] - nshards],
dim=1)[0]
fluid.layers.assign(temp_var, to_save_var)
def expected_var(var):
has_var = os.path.exists(os.path.join(self.model_dir, var.name))
if has_var:
return True
return False
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_program)
fluid.io.load_vars(exe,
dirname=self.model_dir,
predicate=expected_var,
main_program=main_program)
exe.run(main_program)
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
fluid.io.save_vars(exe,
self.output_dir,
vars=[to_save_var],
main_program=main_program)
srcfile = os.path.join(self.output_dir, to_save_var.name)
dstfile = os.path.join(self.output_dir, save_var_name)
shutil.move(srcfile, dstfile)
return remainder, advance
def split_parameters(self, param_names, as_bias):
"""
Split parameters whose names are in param_names.
Params:
param_names: list of names of parameters to split
as_bias: whether parameters to split are as bias or not
"""
num_classes = self.num_classes
train_nranks = self.pretrain_nranks
nranks = self.nranks
train_nshards = (num_classes + train_nranks - 1) // train_nranks
nshards = (num_classes + nranks - 1) // nranks
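# For example (hypothetical numbers): with num_classes=1000, pre-training on
# train_nranks=8 gives train_nshards=125, and nranks=4 for fine-tuning gives
# nshards=250, so each new rank is stitched together from two pre-trained shards.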
save_rank_id = 0
# remainder dim that is not split in a var
remainder_var_dim = train_nshards
name_index = 0 # index of name of pre-trained parameter to process
for save_rank_id in range(nranks):
assert name_index < train_nranks
remainder_var_dim, advance = self.split_load_and_save(
name_index,
param_names,
save_rank_id,
remainder_var_dim,
as_bias,
train_nshards,
train_nranks,
nshards)
name_index += 1 if advance else 0
processed_var_count = name_index + 1
assert processed_var_count == train_nranks, \
logger.error("Number of pre-trained parameters processed ({}) is "
"not equal to the number of ranks ({}) for "
"pre-training.".format(processed_var_count,
train_nranks))
assert save_rank_id == nranks - 1, \
logger.error("Number of saved parameters ({}) is not equal to the "
"number of ranks ({}) for inference or "
"fine-tuning.".format(save_rank_id + 1, nranks))
def split_distfc_parameters(self,
weight_param_names,
weight_velocity_param_names,
bias_param_names,
bias_velocity_param_names):
"""
Split each distributed fc-related parameter according to number of ranks
for inference or fine-tuning.
Params:
weight_param_names: list of names of weight parameters
bias_param_names: list of names of bias parameters
"""
self.split_parameters(weight_param_names, as_bias=False)
self.split_parameters(weight_velocity_param_names, as_bias=False)
if len(bias_param_names) != 0:
self.split_parameters(bias_param_names, as_bias=True)
self.split_parameters(bias_velocity_param_names, as_bias=True)
def concat_load_and_save(self,
name_index,
param_names,
save_rank_id,
remainder,
as_bias,
train_nshards,
train_nranks,
nshards,
dtype="float32"):
advance = 0
emb_dim = self.emb_dim
main_program = fluid.Program()
startup_program = fluid.Program()
num_classes = self.num_classes
load_var_name = param_names[name_index]
save_var_name_list = load_var_name.split('.')
save_var_name_list[0] = save_var_name_list[0].split('@')
save_var_name_list[0][-1] = "%05d" % save_rank_id
save_var_name_list[0] = '@'.join(save_var_name_list[0])
save_var_name = '.'.join(save_var_name_list)
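        # Name surgery, e.g. for a hypothetical parameter
        # 'dist@fc@rank@00003.w_0' saved for save_rank_id=0: the digits after
        # the last '@' of the first dot-separated component are replaced,
        # giving 'dist@fc@rank@00000.w_0'.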
last_train_nshards = num_classes - (train_nranks - 1) * train_nshards
with fluid.program_guard(main_program, startup_program):
if name_index == train_nranks - 1:
var_dim = last_train_nshards
else:
var_dim = train_nshards
shape = [var_dim] if as_bias else [emb_dim, var_dim]
var = fluid.layers.create_parameter(shape,
dtype=dtype,
name=load_var_name)
if as_bias:
var = fluid.layers.slice(var,
axes=[0],
starts=[var.shape[0] - remainder],
ends=[var.shape[0]])
else:
var = fluid.layers.split(var,
[var.shape[1] - remainder,
remainder],
dim=1)[1]
to_concat_var_list = [var]
while remainder < nshards and name_index < train_nranks - 1:
name_index += 1
advance += 1
load_var_name = param_names[name_index]
if name_index == train_nranks - 1:
var_dim = last_train_nshards
else:
var_dim = train_nshards
shape = [var_dim] if as_bias else [emb_dim, var_dim]
var = fluid.layers.create_parameter(shape,
dtype=dtype,
name=load_var_name)
to_concat_var_list.append(var)
remainder += var_dim
if len(to_concat_var_list) > 1:
var = fluid.layers.concat(to_concat_var_list,
axis=0 if as_bias else 1)
save_var_dim = nshards
if remainder > nshards:
if as_bias:
var = fluid.layers.slice(var,
axes=[0],
starts=[0],
ends=[nshards])
else:
var = fluid.layers.split(
var,
[nshards, var.shape[1] - nshards],
dim=1)[0]
remainder = remainder - nshards
            elif remainder == nshards:
                # The new shard is exactly filled; advance to the next
                # pre-trained parameter (if any remains) so the next rank
                # starts from a fresh shard.
                if name_index < train_nranks - 1:
                    advance += 1
                    name_index += 1
                    remainder = (last_train_nshards
                                 if name_index == train_nranks - 1
                                 else train_nshards)
else:
save_var_dim = remainder
shape = [save_var_dim] if as_bias else [emb_dim, save_var_dim]
to_save_var = fluid.layers.create_parameter(
shape,
dtype=dtype,
name=save_var_name + '_temp')
fluid.layers.assign(var, to_save_var)
        def expected_var(var):
            return os.path.exists(os.path.join(self.model_dir, var.name))
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_program)
fluid.io.load_vars(exe,
dirname=self.model_dir,
predicate=expected_var,
main_program=main_program)
exe.run(main_program)
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
fluid.io.save_vars(exe,
self.output_dir,
vars=[to_save_var],
main_program=main_program)
srcfile = os.path.join(self.output_dir, to_save_var.name)
dstfile = os.path.join(self.output_dir, save_var_name)
shutil.move(srcfile, dstfile)
return remainder, advance
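    # Worked example of the bookkeeping above (illustrative numbers): with
    # num_classes=10, train_nranks=5 (2-wide pre-trained shards) and nranks=2
    # (5-wide new shards), each call concatenates pre-trained shards until at
    # least 5 columns are available, saves exactly 5 for the current rank,
    # and carries the excess over in `remainder` for the next rank.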
def concat_parameters(self, param_names, as_bias):
"""
Concat parameters whose names are in param_names.
Params:
param_names: list of names of parameters to concat
            as_bias: whether the parameters to concat are biases (True) or
                weights (False)
"""
num_classes = self.num_classes
train_nranks = self.pretrain_nranks
nranks = self.nranks
train_nshards = (num_classes + train_nranks - 1) // train_nranks
nshards = (num_classes + nranks - 1) // nranks
save_rank_id = 0
        remainder_dim = train_nshards  # remainder dim not yet concatenated
name_index = 0 # index of name of pre-trained parameter to process
for save_rank_id in range(nranks):
assert name_index < train_nranks
remainder_dim, advance = self.concat_load_and_save(name_index,
param_names,
save_rank_id,
remainder_dim,
as_bias,
train_nshards,
train_nranks,
nshards)
name_index += advance
processed_var_count = name_index + 1
assert processed_var_count == train_nranks, \
logger.error("Number of pre-trained parameters processed ({}) is "
"not equal to the number of ranks ({}) for "
"pre-training.".format(processed_var_count,
train_nranks))
assert save_rank_id == nranks - 1, \
logger.error("Number of saved parameters ({}) is not equal to the "
"number of ranks ({}) for inference or "
"fine-tuning.".format(save_rank_id + 1, nranks))
def concat_distfc_parameters(self,
weight_param_names,
weight_velocity_param_names,
bias_param_names,
bias_velocity_param_names):
"""
        Concat distributed fc-related parameters according to the number of
        ranks used for inference or fine-tuning.
Params:
weight_param_names: list of names of weight parameters
weight_velocity_param_names: list of names of weight velocity
parameters
bias_param_names: list of names of bias parameters
bias_velocity_param_names: list of names of bias velocity parameters
"""
self.concat_parameters(weight_param_names, as_bias=False)
self.concat_parameters(weight_velocity_param_names, as_bias=False)
if len(bias_param_names) != 0:
self.concat_parameters(bias_param_names, as_bias=True)
self.concat_parameters(bias_velocity_param_names, as_bias=True)
def process(self):
self.load_config()
var_names = self.find_var_names()
weight_param_names = [name for name in var_names
if '.w' in name and 'velocity' not in name]
weight_velocity_param_names = [name for name in var_names
if '.w' in name and 'velocity' in name]
bias_param_names = [name for name in var_names
if '.b' in name and 'velocity' not in name]
bias_velocity_param_names = [name for name in var_names
if '.b' in name and 'velocity' in name]
def parameter_name_compare(x, y):
"""
            Compare two parameter names by their rank ids.
            A parameter name is like dist_softmax_rank@00000.w_0, where
            00000 (the digits after the last '@') is the rank id.
"""
rank_id_x = int(x.split('.')[0].split('@')[-1])
rank_id_y = int(y.split('.')[0].split('@')[-1])
if rank_id_x < rank_id_y:
return -1
elif rank_id_x == rank_id_y:
return 0
else:
return 1
weight_param_names.sort(key=cmp_to_key(parameter_name_compare))
weight_velocity_param_names.sort(
key=cmp_to_key(parameter_name_compare))
bias_param_names.sort(key=cmp_to_key(parameter_name_compare))
bias_velocity_param_names.sort(key=cmp_to_key(parameter_name_compare))
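        # Note: because the comparator only compares the integer rank id, an
        # equivalent plain key function would do, e.g.
        #   key=lambda name: int(name.split('.')[0].split('@')[-1])
        # which avoids functools.cmp_to_key entirely.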
assert len(weight_param_names) == self.pretrain_nranks, \
logger.error(
"Number of distributed fc-related weight parameters ({}) "
"should be equal to the number of ranks ({}) for "
"pre-training.".format(len(weight_param_names),
self.pretrain_nranks))
assert len(weight_velocity_param_names) == self.pretrain_nranks, \
logger.error(
"Number of distributed fc-related weight parameters ({}) "
"should be equal to the number of ranks ({}) for "
"pre-training.".format(len(weight_velocity_param_names),
self.pretrain_nranks))
assert (len(bias_param_names) == 0 or
len(bias_param_names) == self.pretrain_nranks), \
logger.error(
"Number of distributed fc-related bias parameters ({}) "
"should be 0 or equal to the number of ranks ({}) for "
"pre-training.".format(len(bias_param_names),
self.pretrain_nranks))
assert (len(bias_velocity_param_names) == 0 or
len(bias_velocity_param_names) == self.pretrain_nranks), \
logger.error(
"Number of distributed fc-related bias parameters ({}) "
"should be 0 or equal to the number of ranks ({}) for "
"pre-training.".format(len(bias_velocity_param_names),
self.pretrain_nranks))
pretrain_nranks = self.pretrain_nranks
nranks = self.nranks
if pretrain_nranks == nranks:
logger.info(
"Pre-training and inference (or fine-tuning) have the same "
"number of ranks, nothing to do.")
elif pretrain_nranks < nranks:
self.split_distfc_parameters(weight_param_names,
weight_velocity_param_names,
bias_param_names,
bias_velocity_param_names)
else:
self.concat_distfc_parameters(weight_param_names,
weight_velocity_param_names,
bias_param_names,
bias_velocity_param_names)
logger.info("Done.")
if __name__ == "__main__":
converter = ParameterConverter('./trained_model',
"./trained_model_temp",
8)
converter.process()
|
Python
|
CL
|
708d6563232197f3d1a3a751f6230518dfd7b1b3561038453b40e7165387ebe7
|
"""Testing the functions inside the initialisation file of the ``main``
package.
To run this particular test file use the following command line:
nose2 -v app.tests.main.tests_init
"""
from app import create_app
import unittest
from unittest import TestCase
from config import Config, basedir
import os
from app.main import create_db
db_path = os.path.join(basedir, 'test.db')
class TestConfig(Config):
"""Custom configuration for our tests.
Attributes
----------
TESTING : bool
Enable testing mode. Exceptions are propagated rather than handled by
the app’s error handlers.
Must be set to True to prevent the mail logger from sending email
warnings.
    SQLALCHEMY_DATABASE_URI : str
        Point SQLAlchemy at a throwaway file-based SQLite database
        (``test.db`` under ``basedir``) during the tests, so that no dummy
        test data is written to the production database.
    """
TESTING = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + db_path
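    # A fully in-memory alternative would be 'sqlite:///:memory:'; this suite
    # uses a file-backed database so that test_create_db below can check that
    # the database file actually appears on disk.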
class BeforeFirstRequest(TestCase):
"""Testing the functions inside the initialisation file of the ``main``
package.
We are testing those decorated by ``before_app_first_request``.
"""
def setUp(self):
self.app = create_app(TestConfig)
self.app_context = self.app.app_context()
self.app_context.push()
def tearDown(self):
self.app_context.pop()
os.unlink(db_path)
def test_create_db(self):
create_db()
        self.assertTrue(os.path.exists(db_path),
                        "The database file cannot be found.")
if __name__ == '__main__':
unittest.main(verbosity=2)
|
Python
|
CL
|
f3a95006d72ae9d822f0769250fa97e9b82c1b9fc41623adce702fc0e072aa37
|
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Rewrite pf-upper and pf-lower as their differences from pf
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import os
import glob
import xarray as xr
import argparse
def run(fn, out_path):
    """Rewrite pf-upper/pf-lower as differences from pf, save a *_diff.nc copy."""
    ds = xr.open_dataset(fn)
    ds['pf-upper'] = ds['pf-upper'] - ds['pf']
    ds['pf-lower'] = ds['pf-lower'] - ds['pf']
    out_fn = os.path.join(
        out_path,
        os.path.basename(fn).replace('_intervals.nc', '_diff.nc')
    )
    ds.to_netcdf(out_fn)
    ds.close()
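# Example of the renaming performed by run() (hypothetical file name):
#   <path>/pf_GFDL-CM3_intervals.nc  ->  <out_path>/pf_GFDL-CM3_diff.nc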
if __name__ == '__main__':
# parse some args
    parser = argparse.ArgumentParser(
        description='Compute deltas for historical vs. projected data.')
    parser.add_argument('-p', '--path', dest='path', type=str,
                        help='input directory storing the return interval data')
    parser.add_argument('-o', '--out_path', dest='out_path', type=str,
                        help='output directory to write the outputs to')
    parser.add_argument('-d', '--data_group', dest='data_group', type=str,
                        help="name of the model to use: either 'NCAR-CCSM4' or 'GFDL-CM3'")
# parse the args and unpack
args = parser.parse_args()
path = args.path
out_path = args.out_path
data_group = args.data_group
files = sorted( glob.glob(os.path.join(path, f'*{data_group}*.nc')) )
for fn in files:
print(f" {os.path.basename(fn)}", flush=True)
        run(fn, out_path)
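    # Typical invocation (hypothetical paths and script name):
    #   python rewrite_pf_diffs.py -p ./intervals -o ./diffs -d GFDL-CM3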
|
Python
|
CL
|
3e2f9c6238abe2be2f8b8efade0d406b01bbde2a7e4c5ed37b37296a675bcbab
|